BUILD: threads: Rename SPIN/RWLOCK macros using HA_ prefix

This removes any name conflicts, especially on Solaris.
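
For illustration only (not part of the patch), here is a minimal sketch of the
renamed API as it appears after this change. The lock variables below are
hypothetical; the labels passed as first argument reuse values that already
exist in hathreads.h (they are only meaningful for DEBUG_THREAD accounting):

    #include <common/hathreads.h>

    /* hypothetical locks, declared only to show the new macro names */
    static HA_SPINLOCK_T example_spin;
    static HA_RWLOCK_T   example_rwlock;

    static void example_locking(void)
    {
            HA_SPIN_INIT(&example_spin);
            HA_RWLOCK_INIT(&example_rwlock);

            /* exclusive section under a spinlock; BUF_WQ_LOCK is reused
             * here purely as an illustrative label
             */
            HA_SPIN_LOCK(BUF_WQ_LOCK, &example_spin);
            /* ... critical section ... */
            HA_SPIN_UNLOCK(BUF_WQ_LOCK, &example_spin);

            /* shared read section under an rwlock */
            HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &example_rwlock);
            /* ... read-only work ... */
            HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &example_rwlock);

            HA_SPIN_DESTROY(&example_spin);
            HA_RWLOCK_DESTROY(&example_rwlock);
    }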
diff --git a/include/common/buffer.h b/include/common/buffer.h
index c6fb2fe..acaa79a 100644
--- a/include/common/buffer.h
+++ b/include/common/buffer.h
@@ -751,13 +751,13 @@
 
 static inline void offer_buffers(void *from, unsigned int threshold)
 {
-	SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	if (LIST_ISEMPTY(&buffer_wq)) {
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		return;
 	}
 	__offer_buffer(from, threshold);
-	SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 }
 
 /*************************************************************************/
diff --git a/include/common/hathreads.h b/include/common/hathreads.h
index 0a9098d..460c2ea 100644
--- a/include/common/hathreads.h
+++ b/include/common/hathreads.h
@@ -70,20 +70,20 @@
 #define THREAD_NO_SYNC()     ({ 0; })
 #define THREAD_NEED_SYNC()   ({ 1; })
 
-#define SPIN_INIT(l)         do { /* do nothing */ } while(0)
-#define SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
-#define SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
-#define SPIN_TRYLOCK(lbl, l) ({ 0; })
-#define SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)
+#define HA_SPIN_INIT(l)         do { /* do nothing */ } while(0)
+#define HA_SPIN_DESTROY(l)      do { /* do nothing */ } while(0)
+#define HA_SPIN_LOCK(lbl, l)    do { /* do nothing */ } while(0)
+#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
+#define HA_SPIN_UNLOCK(lbl, l)  do { /* do nothing */ } while(0)
 
-#define RWLOCK_INIT(l)          do { /* do nothing */ } while(0)
-#define RWLOCK_DESTROY(l)       do { /* do nothing */ } while(0)
-#define RWLOCK_WRLOCK(lbl, l)   do { /* do nothing */ } while(0)
-#define RWLOCK_TRYWRLOCK(lbl, l)   ({ 0; })
-#define RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
-#define RWLOCK_RDLOCK(lbl, l)   do { /* do nothing */ } while(0)
-#define RWLOCK_TRYRDLOCK(lbl, l)   ({ 0; })
-#define RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_INIT(l)          do { /* do nothing */ } while(0)
+#define HA_RWLOCK_DESTROY(l)       do { /* do nothing */ } while(0)
+#define HA_RWLOCK_WRLOCK(lbl, l)   do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYWRLOCK(lbl, l)   ({ 0; })
+#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_RDLOCK(lbl, l)   do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYRDLOCK(lbl, l)   ({ 0; })
+#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
 
 #else /* USE_THREAD */
 
@@ -208,23 +208,23 @@
 
 #define HA_SPINLOCK_T       struct ha_spinlock
 
-#define SPIN_INIT(l)         __spin_init(l)
-#define SPIN_DESTROY(l)      __spin_destroy(l)
+#define HA_SPIN_INIT(l)        __spin_init(l)
+#define HA_SPIN_DESTROY(l)      __spin_destroy(l)
 
-#define SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
-#define SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
-#define SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
 
 #define HA_RWLOCK_T         struct ha_rwlock
 
-#define RWLOCK_INIT(l)          __ha_rwlock_init((l))
-#define RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
-#define RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
-#define RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
-#define RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
+#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
+#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
+#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
+#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
 
 struct ha_spinlock {
 	__HA_SPINLOCK_T lock;
@@ -550,22 +550,22 @@
 
 #define HA_SPINLOCK_T        unsigned long
 
-#define SPIN_INIT(l)         ({ (*l) = 0; })
-#define SPIN_DESTROY(l)      ({ (*l) = 0; })
-#define SPIN_LOCK(lbl, l)    pl_take_s(l)
-#define SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
-#define SPIN_UNLOCK(lbl, l)  pl_drop_s(l)
+#define HA_SPIN_INIT(l)         ({ (*l) = 0; })
+#define HA_SPIN_DESTROY(l)      ({ (*l) = 0; })
+#define HA_SPIN_LOCK(lbl, l)    pl_take_s(l)
+#define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
+#define HA_SPIN_UNLOCK(lbl, l)  pl_drop_s(l)
 
 #define HA_RWLOCK_T		unsigned long
 
-#define RWLOCK_INIT(l)          ({ (*l) = 0; })
-#define RWLOCK_DESTROY(l)       ({ (*l) = 0; })
-#define RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
-#define RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
-#define RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
-#define RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
-#define RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
-#define RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)
+#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
+#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
+#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
+#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
+#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
+#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)
 
 #endif  /* DEBUG_THREAD */
 
diff --git a/include/common/memory.h b/include/common/memory.h
index 999150d..ee394f8 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -135,9 +135,9 @@
 {
 	void *ret;
 
-	SPIN_LOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	ret = __pool_get_first(pool);
-	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	return ret;
 }
 /*
@@ -150,10 +150,10 @@
 {
 	void *p;
 
-	SPIN_LOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	if ((p = __pool_get_first(pool)) == NULL)
 		p = __pool_refill_alloc(pool, 0);
-	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	return p;
 }
 
@@ -169,10 +169,10 @@
 	p = pool_alloc_dirty(pool);
 #ifdef DEBUG_MEMORY_POOLS
 	if (p) {
-		SPIN_LOCK(POOL_LOCK, &pool->lock);
+		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 		/* keep track of where the element was allocated from */
 		*POOL_LINK(pool, p) = (void *)pool;
-		SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	}
 #endif
 	if (p && mem_poison_byte >= 0) {
@@ -194,7 +194,7 @@
 static inline void pool_free2(struct pool_head *pool, void *ptr)
 {
         if (likely(ptr != NULL)) {
-		SPIN_LOCK(POOL_LOCK, &pool->lock);
+		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 #ifdef DEBUG_MEMORY_POOLS
 		/* we'll get late corruption if we refill to the wrong pool or double-free */
 		if (*POOL_LINK(pool, ptr) != (void *)pool)
@@ -203,7 +203,7 @@
 		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
                 pool->free_list = (void *)ptr;
                 pool->used--;
-		SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	}
 }
 #endif /* _COMMON_MEMORY_H */
diff --git a/include/proto/applet.h b/include/proto/applet.h
index 51a7e26..1a621a1 100644
--- a/include/proto/applet.h
+++ b/include/proto/applet.h
@@ -88,10 +88,10 @@
 	}
 
 	if (!LIST_ISEMPTY(&appctx->buffer_wait.list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&appctx->buffer_wait.list);
 		LIST_INIT(&appctx->buffer_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 
 	pool_free2(pool2_connection, appctx);
@@ -99,14 +99,14 @@
 }
 static inline void appctx_free(struct appctx *appctx)
 {
-	SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 	if (appctx->state & APPLET_RUNNING) {
 		appctx->state |= APPLET_WANT_DIE;
-		SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+		HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 		return;
 	}
 	__appctx_free(appctx);
-	SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 }
 
 /* wakes up an applet when conditions have changed */
@@ -120,14 +120,14 @@
 
 static inline void appctx_wakeup(struct appctx *appctx)
 {
-	SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 	if (appctx->state & APPLET_RUNNING) {
 		appctx->state |= APPLET_WOKEN_UP;
-		SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+		HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 		return;
 	}
 	__appctx_wakeup(appctx);
-	SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 }
 
 /* Callback used to wake up an applet when a buffer is available. The applet
@@ -137,18 +137,18 @@
  * requested */
 static inline int appctx_res_wakeup(struct appctx *appctx)
 {
-	SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 	if (appctx->state & APPLET_RUNNING) {
 		if (appctx->state & APPLET_WOKEN_UP) {
-			SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+			HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 			return 0;
 		}
 		appctx->state |= APPLET_WOKEN_UP;
-		SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+		HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 		return 1;
 	}
 	__appctx_wakeup(appctx);
-	SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 	return 1;
 }
 
diff --git a/include/proto/channel.h b/include/proto/channel.h
index 83ad0aa..d6f355e 100644
--- a/include/proto/channel.h
+++ b/include/proto/channel.h
@@ -441,9 +441,9 @@
 		return 1;
 
 	if (LIST_ISEMPTY(&wait->list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_ADDQ(&buffer_wq, &wait->list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 
 	return 0;
diff --git a/include/proto/checks.h b/include/proto/checks.h
index b0b8c7d..2b285f3 100644
--- a/include/proto/checks.h
+++ b/include/proto/checks.h
@@ -37,15 +37,15 @@
  */
 static inline void health_adjust(struct server *s, short status)
 {
-	SPIN_LOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
 	/* return now if observing nor health check is not enabled */
 	if (!s->observe || !s->check.task) {
-		SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 		return;
 	}
 
 	__health_adjust(s, status);
-	SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 }
 
 const char *init_check(struct check *check, int type);
diff --git a/include/proto/fd.h b/include/proto/fd.h
index e240f5c..813e4f3 100644
--- a/include/proto/fd.h
+++ b/include/proto/fd.h
@@ -113,14 +113,14 @@
  */
 static inline void fd_alloc_cache_entry(const int fd)
 {
-	RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
 	if (fdtab[fd].cache)
 		goto end;
 	fd_cache_num++;
 	fdtab[fd].cache = fd_cache_num;
 	fd_cache[fd_cache_num-1] = fd;
   end:
-	RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 }
 
 /* Removes entry used by fd <fd> from the FD cache and replaces it with the
@@ -131,7 +131,7 @@
 {
 	unsigned int pos;
 
-	RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
 	pos = fdtab[fd].cache;
 	if (!pos)
 		goto end;
@@ -144,7 +144,7 @@
 		fdtab[fd].cache = pos;
 	}
   end:
-	RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 }
 
 /* Computes the new polled status based on the active and ready statuses, for
@@ -267,56 +267,56 @@
 /* Disable processing recv events on fd <fd> */
 static inline void fd_stop_recv(int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_recv_active(fd)) {
 		fdtab[fd].state &= ~FD_EV_ACTIVE_R;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing send events on fd <fd> */
 static inline void fd_stop_send(int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_send_active(fd)) {
 		fdtab[fd].state &= ~FD_EV_ACTIVE_W;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing of events on fd <fd> for both directions. */
 static inline void fd_stop_both(int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_active(fd)) {
 		fdtab[fd].state &= ~FD_EV_ACTIVE_RW;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
 static inline void fd_cant_recv(const int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_recv_ready(fd)) {
 		fdtab[fd].state &= ~FD_EV_READY_R;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> can receive anymore without polling. */
 static inline void fd_may_recv(const int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (!fd_recv_ready(fd)) {
 		fdtab[fd].state |= FD_EV_READY_R;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable readiness when polled. This is useful to interrupt reading when it
@@ -326,66 +326,66 @@
  */
 static inline void fd_done_recv(const int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_recv_polled(fd) && fd_recv_ready(fd)) {
 		fdtab[fd].state &= ~FD_EV_READY_R;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
 static inline void fd_cant_send(const int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fd_send_ready(fd)) {
 		fdtab[fd].state &= ~FD_EV_READY_W;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> can send anymore without polling (EAGAIN detected). */
 static inline void fd_may_send(const int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (!fd_send_ready(fd)) {
 		fdtab[fd].state |= FD_EV_READY_W;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD <fd> to try to receive */
 static inline void fd_want_recv(int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (!fd_recv_active(fd)) {
 		fdtab[fd].state |= FD_EV_ACTIVE_R;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD <fd> to try to send */
 static inline void fd_want_send(int fd)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (!fd_send_active(fd)) {
 		fdtab[fd].state |= FD_EV_ACTIVE_W;
 		fd_update_cache(fd); /* need an update entry to change the state */
 	}
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Update events seen for FD <fd> and its state if needed. This should be called
  * by the poller to set FD_POLL_* flags. */
 static inline void fd_update_events(int fd, int evts)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].ev &= FD_POLL_STICKY;
 	fdtab[fd].ev |= evts;
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 	if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
 		fd_may_recv(fd);
@@ -397,7 +397,7 @@
 /* Prepares <fd> for being polled */
 static inline void fd_insert(int fd, unsigned long thread_mask)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].ev = 0;
 	fdtab[fd].new = 1;
 	fdtab[fd].updated = 0;
@@ -405,12 +405,12 @@
 	fdtab[fd].cloned = 0;
 	fdtab[fd].cache = 0;
 	fdtab[fd].thread_mask = thread_mask;
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
-	SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
+	HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
 	if (fd + 1 > maxfd)
 		maxfd = fd + 1;
-	SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
+	HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
 }
 
 
diff --git a/include/proto/session.h b/include/proto/session.h
index 3dead44..7f95e2a 100644
--- a/include/proto/session.h
+++ b/include/proto/session.h
@@ -57,11 +57,11 @@
 
 		ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR);
 		if (ptr) {
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 			stktable_data_cast(ptr, conn_cur)--;
 
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 		}
 
 		stkctr_set_entry(stkctr, NULL);
diff --git a/include/proto/stick_table.h b/include/proto/stick_table.h
index 4e92538..59674f6 100644
--- a/include/proto/stick_table.h
+++ b/include/proto/stick_table.h
@@ -141,7 +141,7 @@
 
 static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts, int decrefcnt)
 {
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 
 	if (decrefcnt)
 		ts->ref_cnt--;
@@ -149,7 +149,7 @@
 	if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms))
 		__stksess_kill_if_expired(t, ts);
 
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 
 /* sets the stick counter's entry pointer */
diff --git a/include/proto/stream.h b/include/proto/stream.h
index f0edc2e..1a31930 100644
--- a/include/proto/stream.h
+++ b/include/proto/stream.h
@@ -102,11 +102,11 @@
 
 		ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
 		if (ptr) {
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 			stktable_data_cast(ptr, conn_cur)--;
 
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 		}
 		stkctr_set_entry(&s->stkctr[i], NULL);
 		stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@@ -137,11 +137,11 @@
 
 		ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
 		if (ptr) {
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 			stktable_data_cast(ptr, conn_cur)--;
 
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 		}
 		stkctr_set_entry(&s->stkctr[i], NULL);
 		stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@@ -156,7 +156,7 @@
 {
 	void *ptr;
 
-	RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 	ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
 	if (ptr)
@@ -173,7 +173,7 @@
 	if (tick_isset(t->expire))
 		ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
 
-	RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 }
 
 /* Enable tracking of stream counters as <stkctr> on stksess <ts>. The caller is
@@ -209,7 +209,7 @@
 				continue;
 		}
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 		ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
 		if (ptr)
@@ -220,7 +220,7 @@
 			update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
 
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 	}
 }
 
@@ -243,7 +243,7 @@
 		if (!(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_BACKEND))
 			continue;
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 		ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
 		if (ptr)
@@ -254,7 +254,7 @@
 			update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
 			                       stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
 
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 	}
 }
 
@@ -281,7 +281,7 @@
 				continue;
 		}
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 		ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_CNT);
 		if (ptr)
@@ -292,16 +292,16 @@
 			update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
 			                       stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
 
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 	}
 }
 
 static void inline stream_add_srv_conn(struct stream *sess, struct server *srv)
 {
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	sess->srv_conn = srv;
 	LIST_ADD(&srv->actconns, &sess->by_srv);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 }
 
 static void inline stream_del_srv_conn(struct stream *sess)
@@ -311,10 +311,10 @@
 	if (!srv)
 		return;
 
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	sess->srv_conn = NULL;
 	LIST_DEL(&sess->by_srv);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 }
 
 static void inline stream_init_srv_conn(struct stream *sess)
diff --git a/include/proto/task.h b/include/proto/task.h
index 24edaac..e6ba461 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -110,20 +110,20 @@
 struct task *__task_wakeup(struct task *t);
 static inline struct task *task_wakeup(struct task *t, unsigned int f)
 {
-	SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 
 	/* If task is running, we postpone the call
 	 * and backup the state.
 	 */
 	if (unlikely(t->state & TASK_RUNNING)) {
 		t->pending_state |= f;
-		SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 		return t;
 	}
 	if (likely(!task_in_rq(t)))
 		__task_wakeup(t);
 	t->state |= f;
-	SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
 	return t;
 }
@@ -148,10 +148,10 @@
 
 static inline struct task *task_unlink_wq(struct task *t)
 {
-	SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
 	if (likely(task_in_wq(t)))
 		__task_unlink_wq(t);
-	SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 	return t;
 }
 
@@ -176,10 +176,10 @@
  */
 static inline struct task *task_unlink_rq(struct task *t)
 {
-	SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 	if (likely(task_in_rq(t)))
 		__task_unlink_rq(t);
-	SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 	return t;
 }
 
@@ -256,10 +256,10 @@
 	if (!tick_isset(task->expire))
 		return;
 
-	SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
 	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
 		__task_queue(task);
-	SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 }
 
 /* Ensure <task> will be woken up at most at <when>. If the task is already in
@@ -272,14 +272,14 @@
 	if (task_in_rq(task))
 		return;
 
-	SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
 	if (task_in_wq(task))
 		when = tick_first(when, task->expire);
 
 	task->expire = when;
 	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
 		__task_queue(task);
-	SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 }
 
 /* This function register a new signal. "lua" is the current lua
@@ -296,7 +296,7 @@
 		return NULL;
 	LIST_ADDQ(purge, &com->purge_me);
 	LIST_ADDQ(event, &com->wake_me);
-	SPIN_INIT(&com->lock);
+	HA_SPIN_INIT(&com->lock);
 	com->task = wakeup;
 	return com;
 }
@@ -311,15 +311,15 @@
 
 	/* Delete all pending communication signals. */
 	list_for_each_entry_safe(com, back, purge, purge_me) {
-		SPIN_LOCK(NOTIF_LOCK, &com->lock);
+		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
 		LIST_DEL(&com->purge_me);
 		if (!com->task) {
-			SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
 			pool_free2(pool2_notification, com);
 			continue;
 		}
 		com->task = NULL;
-		SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
 	}
 }
 
@@ -333,16 +333,16 @@
 
 	/* Wake task and delete all pending communication signals. */
 	list_for_each_entry_safe(com, back, wake, wake_me) {
-		SPIN_LOCK(NOTIF_LOCK, &com->lock);
+		HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
 		LIST_DEL(&com->wake_me);
 		if (!com->task) {
-			SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+			HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
 			pool_free2(pool2_notification, com);
 			continue;
 		}
 		task_wakeup(com->task, TASK_WOKEN_MSG);
 		com->task = NULL;
-		SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+		HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
 	}
 }
 
diff --git a/src/applet.c b/src/applet.c
index 47f30c4..0e550c2 100644
--- a/src/applet.c
+++ b/src/applet.c
@@ -38,7 +38,7 @@
 	if (!applets_active_queue)
 		return;
 
-	SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 
 	curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq);
 	while (&curr->runq != &applet_active_queue) {
@@ -52,7 +52,7 @@
 		curr = next;
 	}
 
-	SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+	HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 
 	/* The list is only scanned from the head. This guarantees that if any
 	 * applet removes another one, there is no side effect while walking
@@ -84,7 +84,7 @@
 			/* curr was left in the list, move it back to the active list */
 			LIST_DEL(&curr->runq);
 			LIST_INIT(&curr->runq);
-			SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+			HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 			if (curr->state & APPLET_WANT_DIE) {
 				curr->state = APPLET_SLEEPING;
 				__appctx_free(curr);
@@ -98,7 +98,7 @@
 					curr->state = APPLET_SLEEPING;
 				}
 			}
-			SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+			HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 		}
 	}
 }
@@ -106,5 +106,5 @@
 __attribute__((constructor))
 static void __applet_init(void)
 {
-	SPIN_INIT(&applet_active_lock);
+	HA_SPIN_INIT(&applet_active_lock);
 }
diff --git a/src/buffer.c b/src/buffer.c
index db2e053..b365888 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -75,7 +75,7 @@
 	if (global.tune.buf_limit)
 		pool2_buffer->limit = global.tune.buf_limit;
 
-	SPIN_INIT(&buffer_wq_lock);
+	HA_SPIN_INIT(&buffer_wq_lock);
 
 	buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1);
 	if (!buffer)
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 4414f59..b42dd54 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -2108,7 +2108,7 @@
 		newpeer->proto = proto;
 		newpeer->xprt  = xprt_get(XPRT_RAW);
 		newpeer->sock_init_arg = NULL;
-		SPIN_INIT(&newpeer->lock);
+		HA_SPIN_INIT(&newpeer->lock);
 
 		if (strcmp(newpeer->id, localpeer) == 0) {
 			/* Current is local peer, it define a frontend */
@@ -2251,7 +2251,7 @@
 		LIST_INIT(&curr_resolvers->nameservers);
 		LIST_INIT(&curr_resolvers->resolutions.curr);
 		LIST_INIT(&curr_resolvers->resolutions.wait);
-		SPIN_INIT(&curr_resolvers->lock);
+		HA_SPIN_INIT(&curr_resolvers->lock);
 	}
 	else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */
 		struct sockaddr_storage *sk;
@@ -8505,7 +8505,7 @@
 			}
 			break;
 		}
-		SPIN_INIT(&curproxy->lbprm.lock);
+		HA_SPIN_INIT(&curproxy->lbprm.lock);
 
 		if (curproxy->options & PR_O_LOGASAP)
 			curproxy->to_log &= ~LW_BYTES;
diff --git a/src/checks.c b/src/checks.c
index f669f8a..8d5822a 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -715,7 +715,7 @@
 	struct server *s = check->server;
 	struct task *t = check->task;
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 	if (unlikely(check->result == CHK_RES_FAILED))
 		goto out_wakeup;
 
@@ -768,7 +768,7 @@
  out_nowake:
 	__cs_stop_send(cs);   /* nothing more to write */
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 }
 
 /*
@@ -798,7 +798,7 @@
 	int done;
 	unsigned short msglen;
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
 	if (unlikely(check->result == CHK_RES_FAILED))
 		goto out_wakeup;
@@ -1354,7 +1354,7 @@
 
 	task_wakeup(t, TASK_WOKEN_IO);
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 	return;
 
  wait_more_data:
@@ -1374,7 +1374,7 @@
 	struct check *check = cs->data;
 	int ret = 0;
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
 	/* we may have to make progress on the TCP checks */
 	if (check->type == PR_O2_TCPCHK_CHK) {
@@ -1411,7 +1411,7 @@
 		ret = -1;
 	}
 
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 
 	/* if a connection got replaced, we must absolutely prevent the connection
 	 * handler from touching its fd, and perform the FD polling updates ourselves
@@ -1647,9 +1647,9 @@
 	check->curpid = elem;
 	LIST_INIT(&elem->list);
 
-	SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
 	LIST_ADD(&pid_list, &elem->list);
-	SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 
 	return elem;
 }
@@ -1661,9 +1661,9 @@
 	if (!elem)
 		return;
 
-	SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
 	LIST_DEL(&elem->list);
-	SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 
 	if (!elem->exited)
 		kill(elem->pid, SIGTERM);
@@ -1678,7 +1678,7 @@
 {
 	struct pid_list *elem;
 
-	SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
 	list_for_each_entry(elem, &pid_list, list) {
 		if (elem->pid == pid) {
 			elem->t->expire = now_ms;
@@ -1688,7 +1688,7 @@
 			break;
 		}
 	}
-	SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+	HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 }
 
 static void sigchld_handler(struct sig_handler *sh)
@@ -1719,7 +1719,7 @@
 		return 1;
 	}
 
-	SPIN_INIT(&pid_list_lock);
+	HA_SPIN_INIT(&pid_list_lock);
 
 	return 0;
 }
@@ -1979,7 +1979,7 @@
 	int ret;
 	int expired = tick_is_expired(t->expire, now_ms);
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 	if (!(check->state & CHK_ST_INPROGRESS)) {
 		/* no check currently running */
 		if (!expired) /* woke up too early */
@@ -2092,7 +2092,7 @@
 		t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
 
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 	return t;
 }
 
@@ -2113,7 +2113,7 @@
 	int ret;
 	int expired = tick_is_expired(t->expire, now_ms);
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 	if (!(check->state & CHK_ST_INPROGRESS)) {
 		/* no check currently running */
 		if (!expired) /* woke up too early */
@@ -2268,7 +2268,7 @@
 	while (tick_is_expired(t->expire, now_ms))
 		t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 	return t;
 }
 
@@ -2597,7 +2597,7 @@
 	struct list *head = check->tcpcheck_rules;
 	int retcode = 0;
 
-	SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
 	/* here, we know that the check is complete or that it failed */
 	if (check->result != CHK_RES_UNKNOWN)
@@ -3077,7 +3077,7 @@
 	__cs_stop_both(cs);
 
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 	return retcode;
 }
 
@@ -3137,7 +3137,7 @@
 
 	q = container_of(check, typeof(*q), check);
 
-	SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+	HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
 	while (1) {
 		if (!(check->state & CHK_ST_ENABLED)) {
 			if (LIST_ISEMPTY(&q->email_alerts)) {
@@ -3167,7 +3167,7 @@
 		check->state         &= ~CHK_ST_ENABLED;
 	}
   end:
-	SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+	HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
 	return t;
 }
 
@@ -3194,7 +3194,7 @@
 		struct task         *t;
 
 		LIST_INIT(&q->email_alerts);
-		SPIN_INIT(&q->lock);
+		HA_SPIN_INIT(&q->lock);
 		check->inter = mls->timeout.mail;
 		check->rise = DEF_AGENT_RISETIME;
 		check->fall = DEF_AGENT_FALLTIME;
@@ -3398,10 +3398,10 @@
 	if (!add_tcpcheck_expect_str(&alert->tcpcheck_rules, "221 "))
 		goto error;
 
-	SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+	HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
 	task_wakeup(check->task, TASK_WOKEN_MSG);
 	LIST_ADDQ(&q->email_alerts, &alert->list);
-	SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+	HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
 	return 1;
 
 error:
diff --git a/src/compression.c b/src/compression.c
index ead9934..d476da5 100644
--- a/src/compression.c
+++ b/src/compression.c
@@ -160,10 +160,10 @@
 #endif
 
 	if (unlikely(pool_comp_ctx == NULL)) {
-		SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 		if (unlikely(pool_comp_ctx == NULL))
 			pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED);
-		SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 	}
 
 	*comp_ctx = pool_alloc2(pool_comp_ctx);
@@ -412,10 +412,10 @@
 	switch (round) {
 		case 0:
 			if (zlib_pool_deflate_state == NULL) {
-				SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 				if (zlib_pool_deflate_state == NULL)
 					zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
-				SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 			}
 			pool = zlib_pool_deflate_state;
 			ctx->zlib_deflate_state = buf = pool_alloc2(pool);
@@ -423,10 +423,10 @@
 
 		case 1:
 			if (zlib_pool_window == NULL) {
-				SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 				if (zlib_pool_window == NULL)
 					zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
-				SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 			}
 			pool = zlib_pool_window;
 			ctx->zlib_window = buf = pool_alloc2(pool);
@@ -434,10 +434,10 @@
 
 		case 2:
 			if (zlib_pool_prev == NULL) {
-				SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 				if (zlib_pool_prev == NULL)
 					zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
-				SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 			}
 			pool = zlib_pool_prev;
 			ctx->zlib_prev = buf = pool_alloc2(pool);
@@ -445,10 +445,10 @@
 
 		case 3:
 			if (zlib_pool_head == NULL) {
-				SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 				if (zlib_pool_head == NULL)
 					zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
-				SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 			}
 			pool = zlib_pool_head;
 			ctx->zlib_head = buf = pool_alloc2(pool);
@@ -456,10 +456,10 @@
 
 		case 4:
 			if (zlib_pool_pending_buf == NULL) {
-				SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
 				if (zlib_pool_pending_buf == NULL)
 					zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
-				SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
 			}
 			pool = zlib_pool_pending_buf;
 			ctx->zlib_pending_buf = buf = pool_alloc2(pool);
@@ -721,7 +721,7 @@
 	global.tune.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U,
 #endif
 #ifdef USE_ZLIB
-	SPIN_INIT(&comp_pool_lock);
+	HA_SPIN_INIT(&comp_pool_lock);
 	memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
 	memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
 #elif defined(USE_SLZ)
diff --git a/src/dns.c b/src/dns.c
index 0f93f3c..8f01f05 100644
--- a/src/dns.c
+++ b/src/dns.c
@@ -486,7 +486,7 @@
 
 				/* Remove any associated server */
 				for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-					SPIN_LOCK(SERVER_LOCK, &srv->lock);
+					HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 					if (srv->srvrq == srvrq && srv->svc_port == item->port &&
 					    item->data_len == srv->hostname_dn_len &&
 					    !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@@ -498,7 +498,7 @@
 						srv->hostname_dn_len = 0;
 						dns_unlink_resolution(srv->dns_requester);
 					}
-					SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+					HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 				}
 			}
 
@@ -518,7 +518,7 @@
 
 			/* Check if a server already uses that hostname */
 			for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-				SPIN_LOCK(SERVER_LOCK, &srv->lock);
+				HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 				if (srv->srvrq == srvrq && srv->svc_port == item->port &&
 				    item->data_len == srv->hostname_dn_len &&
 				    !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@@ -528,20 +528,20 @@
 						snprintf(weight, sizeof(weight), "%d", item->weight);
 						server_parse_weight_change_request(srv, weight);
 					}
-					SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+					HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 					break;
 				}
-				SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+				HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 			}
 			if (srv)
 				continue;
 
 			/* If not, try to find a server with undefined hostname */
 			for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-				SPIN_LOCK(SERVER_LOCK, &srv->lock);
+				HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 				if (srv->srvrq == srvrq && !srv->hostname_dn)
 					break;
-				SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+				HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 			}
 			/* And update this server, if found */
 			if (srv) {
@@ -551,7 +551,7 @@
 
 				if (dns_dn_label_to_str(item->target, item->data_len+1,
 							hostname, DNS_MAX_NAME_SIZE) == -1) {
-					SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+					HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 					continue;
 				}
 				msg = update_server_fqdn(srv, hostname, "SRV record", 1);
@@ -565,7 +565,7 @@
 					srv->check.port = item->port;
 				snprintf(weight, sizeof(weight), "%d", item->weight);
 				server_parse_weight_change_request(srv, weight);
-				SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+				HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 			}
 		}
 	}
@@ -1348,11 +1348,11 @@
 
 	if (srv) {
 		if (!requester_locked)
-			SPIN_LOCK(SERVER_LOCK, &srv->lock);
+			HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		if (srv->dns_requester == NULL) {
 			if ((req = calloc(1, sizeof(*req))) == NULL) {
 				if (!requester_locked)
-					SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+					HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 				goto err;
 			}
 			req->owner         = &srv->obj_type;
@@ -1361,7 +1361,7 @@
 		else
 			req = srv->dns_requester;
 		if (!requester_locked)
-			SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+			HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 	else if (srvrq) {
 		if (srvrq->dns_requester == NULL) {
@@ -1463,7 +1463,7 @@
 		return;
 
 	resolvers = ns->resolvers;
-	SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
 	/* process all pending input messages */
 	while (1) {
@@ -1617,10 +1617,10 @@
 			struct server *s = objt_server(req->owner);
 
 			if (s)
-				SPIN_LOCK(SERVER_LOCK, &s->lock);
+				HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
 			req->requester_cb(req, tmpns);
 			if (s)
-				SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+				HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 			tmpns = NULL;
 		}
 
@@ -1630,7 +1630,7 @@
 		continue;
 	}
 	dns_update_resolvers_timeout(resolvers);
-	SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
 }
 
 /* Called when a resolvers network socket is ready to send data */
@@ -1655,7 +1655,7 @@
 		return;
 
 	resolvers = ns->resolvers;
-	SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
 	list_for_each_entry(res, &resolvers->resolutions.curr, list) {
 		int ret;
@@ -1682,7 +1682,7 @@
 		ns->counters.snd_error++;
 		res->nb_queries++;
 	}
-	SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
 }
 
 /* Processes DNS resolution. First, it checks the active list to detect expired
@@ -1695,7 +1695,7 @@
 	struct dns_resolution *res, *resback;
 	int exp;
 
-	SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
 	/* Handle all expired resolutions from the active list */
 	list_for_each_entry_safe(res, resback, &resolvers->resolutions.curr, list) {
@@ -1765,7 +1765,7 @@
 	}
 
 	dns_update_resolvers_timeout(resolvers);
-	SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+	HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
 	return t;
 }
 
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index 65e5164..602a243 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -71,14 +71,14 @@
 		if (!fdtab[fd].owner)
 			continue;
 
-		SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 		fdtab[fd].updated = 0;
 		fdtab[fd].new = 0;
 
 		eo = fdtab[fd].state;
 		en = fd_compute_new_polled_status(eo);
 		fdtab[fd].state = en;
-		SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 		if ((eo ^ en) & FD_EV_POLLED_RW) {
 			/* poll status changed */
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
index 00dc965..b61fe65 100644
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -50,14 +50,14 @@
 		if (!fdtab[fd].owner)
 			continue;
 
-		SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 		fdtab[fd].updated = 0;
 		fdtab[fd].new = 0;
 
 		eo = fdtab[fd].state;
 		en = fd_compute_new_polled_status(eo);
 		fdtab[fd].state = en;
-		SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 		if ((eo ^ en) & FD_EV_POLLED_RW) {
 			/* poll status changed */
diff --git a/src/ev_poll.c b/src/ev_poll.c
index edeffa8..f9e4451 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -50,10 +50,10 @@
 
 REGPRM1 static void __fd_clo(int fd)
 {
-	SPIN_LOCK(POLL_LOCK, &poll_lock);
+	HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
 	hap_fd_clr(fd, fd_evts[DIR_RD]);
 	hap_fd_clr(fd, fd_evts[DIR_WR]);
-	SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+	HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 }
 
 /*
@@ -76,18 +76,18 @@
 		if (!fdtab[fd].owner)
 			continue;
 
-		SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 		fdtab[fd].updated = 0;
 		fdtab[fd].new = 0;
 
 		eo = fdtab[fd].state;
 		en = fd_compute_new_polled_status(eo);
 		fdtab[fd].state = en;
-		SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 		if ((eo ^ en) & FD_EV_POLLED_RW) {
 			/* poll status changed, update the lists */
-			SPIN_LOCK(POLL_LOCK, &poll_lock);
+			HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
 			if ((eo & ~en) & FD_EV_POLLED_R)
 				hap_fd_clr(fd, fd_evts[DIR_RD]);
 			else if ((en & ~eo) & FD_EV_POLLED_R)
@@ -97,7 +97,7 @@
 				hap_fd_clr(fd, fd_evts[DIR_WR]);
 			else if ((en & ~eo) & FD_EV_POLLED_W)
 				hap_fd_set(fd, fd_evts[DIR_WR]);
-			SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+			HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 		}
 	}
 	fd_nbupdt = 0;
diff --git a/src/ev_select.c b/src/ev_select.c
index 5dad408..b2b4e50 100644
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -31,10 +31,10 @@
 /* Immediately remove the entry upon close() */
 REGPRM1 static void __fd_clo(int fd)
 {
-	SPIN_LOCK(POLL_LOCK, &poll_lock);
+	HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
 	FD_CLR(fd, fd_evts[DIR_RD]);
 	FD_CLR(fd, fd_evts[DIR_WR]);
-	SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+	HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 }
 
 /*
@@ -58,18 +58,18 @@
 		if (!fdtab[fd].owner)
 			continue;
 
-		SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 		fdtab[fd].updated = 0;
 		fdtab[fd].new = 0;
 
 		eo = fdtab[fd].state;
 		en = fd_compute_new_polled_status(eo);
 		fdtab[fd].state = en;
-		SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 		if ((eo ^ en) & FD_EV_POLLED_RW) {
 			/* poll status changed, update the lists */
-			SPIN_LOCK(POLL_LOCK, &poll_lock);
+			HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
 			if ((eo & ~en) & FD_EV_POLLED_R)
 				FD_CLR(fd, fd_evts[DIR_RD]);
 			else if ((en & ~eo) & FD_EV_POLLED_R)
@@ -79,7 +79,7 @@
 				FD_CLR(fd, fd_evts[DIR_WR]);
 			else if ((en & ~eo) & FD_EV_POLLED_W)
 				FD_SET(fd, fd_evts[DIR_WR]);
-			SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+			HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 		}
 	}
 	fd_nbupdt = 0;
diff --git a/src/fd.c b/src/fd.c
index e8419aa..d2e9569 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -185,7 +185,7 @@
  */
 static void fd_dodelete(int fd, int do_close)
 {
-	SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fdtab[fd].linger_risk) {
 		/* this is generally set when connecting to servers */
 		setsockopt(fd, SOL_SOCKET, SO_LINGER,
@@ -205,12 +205,12 @@
 	fdtab[fd].thread_mask = 0;
 	if (do_close)
 		close(fd);
-	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
-	SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
+	HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
 	while ((maxfd-1 >= 0) && !fdtab[maxfd-1].owner)
 		maxfd--;
-	SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
+	HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
 }
 
 /* Deletes an FD from the fdsets, and recomputes the maxfd limit.
@@ -241,16 +241,16 @@
 	if (!fd_cache_num)
 		return;
 
-	RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
 	for (entry = 0; entry < fd_cache_num; ) {
 		fd = fd_cache[entry];
 
 		if (!(fdtab[fd].thread_mask & tid_bit))
 			goto next;
-		if (SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
+		if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
 			goto next;
 
-		RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+		HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 
 		e = fdtab[fd].state;
 		fdtab[fd].ev &= FD_POLL_STICKY;
@@ -262,15 +262,15 @@
 			fdtab[fd].ev |= FD_POLL_OUT;
 
 		if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
-			SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 			fdtab[fd].iocb(fd);
 		}
 		else {
 			fd_release_cache_entry(fd);
-			SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 		}
 
-		RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
+		HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
 		/* If the fd was removed from the cache, it has been
 		 * replaced by the next one that we don't want to skip !
 		 */
@@ -279,7 +279,7 @@
 	  next:
 		entry++;
 	}
-	RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+	HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 }
 
 /* disable the specified poller */
@@ -329,11 +329,11 @@
 	hap_register_per_thread_deinit(deinit_pollers_per_thread);
 
 	for (p = 0; p < global.maxsock; p++)
-		SPIN_INIT(&fdtab[p].lock);
+		HA_SPIN_INIT(&fdtab[p].lock);
 
-	SPIN_INIT(&fdtab_lock);
-	RWLOCK_INIT(&fdcache_lock);
-	SPIN_INIT(&poll_lock);
+	HA_SPIN_INIT(&fdtab_lock);
+	HA_RWLOCK_INIT(&fdcache_lock);
+	HA_SPIN_INIT(&poll_lock);
 	do {
 		bp = NULL;
 		for (p = 0; p < nbpollers; p++)
@@ -367,7 +367,7 @@
 	int p;
 
 	for (p = 0; p < global.maxsock; p++)
-		SPIN_DESTROY(&fdtab[p].lock);
+		HA_SPIN_DESTROY(&fdtab[p].lock);
 
 	for (p = 0; p < nbpollers; p++) {
 		bp = &pollers[p];
@@ -380,9 +380,9 @@
 	free(fdinfo);   fdinfo   = NULL;
 	free(fdtab);    fdtab    = NULL;
 
-	SPIN_DESTROY(&fdtab_lock);
-	RWLOCK_DESTROY(&fdcache_lock);
-	SPIN_DESTROY(&poll_lock);
+	HA_SPIN_DESTROY(&fdtab_lock);
+	HA_RWLOCK_DESTROY(&fdcache_lock);
+	HA_SPIN_DESTROY(&poll_lock);
 }
 
 /*
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index cdb6238..5a8c9e6 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -171,7 +171,7 @@
 		spoe_release_group(grp);
 	}
 	for (i = 0; i < global.nbthread; ++i)
-		SPIN_DESTROY(&agent->rt[i].lock);
+		HA_SPIN_DESTROY(&agent->rt[i].lock);
 	free(agent->rt);
 	free(agent);
 }
@@ -1426,10 +1426,10 @@
 			 * add the applet in the list of running applets. */
 			agent->rt[tid].applets_idle++;
 			appctx->st0 = SPOE_APPCTX_ST_IDLE;
-			SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+			HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 			LIST_DEL(&SPOE_APPCTX(appctx)->list);
 			LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
-			SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+			HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 
 			/* Update runtinme agent info */
 			HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, SPOE_APPCTX(appctx)->max_frame_size);
@@ -1710,10 +1710,10 @@
 		agent->rt[tid].applets_idle++;
 	}
 	if (fpa || (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_PERSIST)) {
-		SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+		HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 		LIST_DEL(&SPOE_APPCTX(appctx)->list);
 		LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
-		SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+		HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 		if (fpa)
 			SPOE_APPCTX(appctx)->task->expire =
 				tick_add_ifset(now_ms, agent->timeout.idle);
@@ -1985,9 +1985,9 @@
 	strm->do_log = NULL;
 	strm->res.flags |= CF_READ_DONTWAIT;
 
-	SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
+	HA_SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
 	LIST_ADDQ(&conf->agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
-	SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
+	HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
 	conf->agent->rt[tid].applets_act++;
 
 	task_wakeup(SPOE_APPCTX(appctx)->task, TASK_WOKEN_INIT);
@@ -2096,10 +2096,10 @@
 		appctx = spoe_appctx->owner;
 		if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
 			spoe_wakeup_appctx(appctx);
-			SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+			HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 			LIST_DEL(&spoe_appctx->list);
 			LIST_ADDQ(&agent->rt[tid].applets, &spoe_appctx->list);
-			SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+			HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
 			break;
 		}
 	}
@@ -2699,18 +2699,18 @@
 		return 1;
 
 	if (!LIST_ISEMPTY(&buffer_wait->list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&buffer_wait->list);
 		LIST_INIT(&buffer_wait->list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	LIST_ADDQ(&buffer_wq, &buffer_wait->list);
-	SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	return 0;
 }
 
@@ -2718,10 +2718,10 @@
 spoe_release_buffer(struct buffer **buf, struct buffer_wait *buffer_wait)
 {
 	if (!LIST_ISEMPTY(&buffer_wait->list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&buffer_wait->list);
 		LIST_INIT(&buffer_wait->list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 
 	/* Release the buffer if needed */
@@ -2813,10 +2813,10 @@
 			agent = conf->agent;
 
 			for (i = 0; i < global.nbthread; ++i) {
-				SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
+				HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
 				list_for_each_entry(spoe_appctx, &agent->rt[i].applets, list)
 					spoe_wakeup_appctx(spoe_appctx->owner);
-				SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
+				HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
 			}
 		}
 		p = p->next;
@@ -3221,7 +3221,7 @@
 			LIST_INIT(&curagent->rt[i].applets);
 			LIST_INIT(&curagent->rt[i].sending_queue);
 			LIST_INIT(&curagent->rt[i].waiting_queue);
-			SPIN_INIT(&curagent->rt[i].lock);
+			HA_SPIN_INIT(&curagent->rt[i].lock);
 		}
 	}
 	else if (!strcmp(args[0], "use-backend")) {
diff --git a/src/haproxy.c b/src/haproxy.c
index 4d4bd3b..8993b89 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -2075,7 +2075,7 @@
 				if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv)
 					xprt_get(XPRT_SSL)->destroy_srv(s);
 			}
-			SPIN_DESTROY(&s->lock);
+			HA_SPIN_DESTROY(&s->lock);
 			free(s);
 			s = s_next;
 		}/* end while(s) */
@@ -2124,8 +2124,8 @@
 
 		p0 = p;
 		p = p->next;
-		SPIN_DESTROY(&p0->lbprm.lock);
-		SPIN_DESTROY(&p0->lock);
+		HA_SPIN_DESTROY(&p0->lbprm.lock);
+		HA_SPIN_DESTROY(&p0->lock);
 		free(p0);
 	}/* end while(p) */
 
diff --git a/src/hathreads.c b/src/hathreads.c
index 36bdca2..2856a9f 100644
--- a/src/hathreads.c
+++ b/src/hathreads.c
@@ -120,7 +120,7 @@
 
 	thread_sync_barrier(&barrier);
 	if (threads_want_sync & tid_bit)
-		SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
+		HA_SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
 }
 
 /* Exit from the sync point and unlock it if it was previously locked. If the
@@ -135,7 +135,7 @@
 		return;
 
 	if (threads_want_sync & tid_bit)
-		SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);
+		HA_SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);
 
 	if (HA_ATOMIC_AND(&threads_want_sync, ~tid_bit) == 0) {
 		char c;
@@ -151,7 +151,7 @@
 __attribute__((constructor))
 static void __hathreads_init(void)
 {
-	SPIN_INIT(&sync_lock);
+	HA_SPIN_INIT(&sync_lock);
 #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
 	memset(lock_stats, 0, sizeof(lock_stats));
 #endif
diff --git a/src/hlua.c b/src/hlua.c
index 761fa7f..439bbf4 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -125,11 +125,11 @@
 #define SET_SAFE_LJMP(__L) \
 	({ \
 		int ret; \
-		SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \
+		HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \
 		if (setjmp(safe_ljmp_env) != 0) { \
 			lua_atpanic(__L, hlua_panic_safe); \
 			ret = 0; \
-			SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
+			HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
 		} else { \
 			lua_atpanic(__L, hlua_panic_ljmp); \
 			ret = 1; \
@@ -143,7 +143,7 @@
 #define RESET_SAFE_LJMP(__L) \
 	do { \
 		lua_atpanic(__L, hlua_panic_safe); \
-		SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
+		HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
 	} while(0)
 
 /* Applet status flags */
@@ -994,7 +994,7 @@
 	/* Lock the whole Lua execution. This lock must be before the
 	 * label "resume_execution".
 	 */
-	SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
+	HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
 
 resume_execution:
 
@@ -1154,7 +1154,7 @@
 	}
 
 	/* This is the main exit point, remove the Lua lock. */
-	SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
+	HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
 
 	return ret;
 }
@@ -7370,7 +7370,7 @@
 	};
 #endif
 
-	SPIN_INIT(&hlua_global_lock);
+	HA_SPIN_INIT(&hlua_global_lock);
 
 	/* Initialise struct hlua and com signals pool */
 	pool2_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);
diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index 566b0e5..54fbfa7 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -587,9 +587,9 @@
 	srv = hlua_check_server(L, 1);
 	weight = luaL_checkstring(L, 2);
 
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	err = server_parse_weight_change_request(srv, weight);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	if (!err)
 		lua_pushnil(L);
 	else
@@ -615,9 +615,9 @@
 	srv = hlua_check_server(L, 1);
 	addr = luaL_checkstring(L, 2);
 
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	err = server_parse_addr_change_request(srv, addr, "Lua script");
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	if (!err)
 		lua_pushnil(L);
 	else
@@ -630,9 +630,9 @@
 	struct server *srv;
 
 	srv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	srv_shutdown_streams(srv, SF_ERR_KILLED);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	return 0;
 }
 
@@ -641,9 +641,9 @@
 	struct server *srv;
 
 	srv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	srv_adm_set_drain(srv);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	return 0;
 }
 
@@ -652,9 +652,9 @@
 	struct server *srv;
 
 	srv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	srv_adm_set_maint(srv);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	return 0;
 }
 
@@ -663,9 +663,9 @@
 	struct server *srv;
 
 	srv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 	srv_adm_set_ready(srv);
-	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	return 0;
 }
 
@@ -674,11 +674,11 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->check.state & CHK_ST_CONFIGURED) {
 		sv->check.state |= CHK_ST_ENABLED;
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -687,11 +687,11 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->check.state & CHK_ST_CONFIGURED) {
 		sv->check.state &= ~CHK_ST_ENABLED;
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -700,12 +700,12 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (!(sv->track)) {
 		sv->check.health = sv->check.rise + sv->check.fall - 1;
 		srv_set_running(sv, "changed from Lua script", NULL);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -714,12 +714,12 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (!(sv->track)) {
 		sv->check.health = sv->check.rise + sv->check.fall - 1;
 		srv_set_stopping(sv, "changed from Lua script", NULL);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -728,12 +728,12 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (!(sv->track)) {
 		sv->check.health = 0;
 		srv_set_stopped(sv, "changed from Lua script", NULL);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -742,11 +742,11 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->agent.state & CHK_ST_CONFIGURED) {
 		sv->agent.state |= CHK_ST_ENABLED;
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -755,11 +755,11 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->agent.state & CHK_ST_CONFIGURED) {
 		sv->agent.state &= ~CHK_ST_ENABLED;
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -768,12 +768,12 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->agent.state & CHK_ST_ENABLED) {
 		sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
 		srv_set_running(sv, "changed from Lua script", NULL);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
@@ -782,12 +782,12 @@
 	struct server *sv;
 
 	sv = hlua_check_server(L, 1);
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	if (sv->agent.state & CHK_ST_ENABLED) {
 		sv->agent.health = 0;
 		srv_set_stopped(sv, "changed from Lua script", NULL);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 0;
 }
 
diff --git a/src/lb_chash.c b/src/lb_chash.c
index 70a455d..e3bf65d 100644
--- a/src/lb_chash.c
+++ b/src/lb_chash.c
@@ -364,7 +364,7 @@
 	srv = avoided = NULL;
 	avoided_node = NULL;
 
-	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
 	if (p->srv_act)
 		root = &p->lbprm.chash.act;
 	else if (p->lbprm.fbck) {
@@ -423,7 +423,7 @@
 	}
 
  out:
-	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
 	return srv;
 }
 
diff --git a/src/lb_fas.c b/src/lb_fas.c
index db292db..d301143 100644
--- a/src/lb_fas.c
+++ b/src/lb_fas.c
@@ -64,10 +64,10 @@
 	if (!s->lb_tree)
 		return;
 
-	SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 	fas_dequeue_srv(s);
 	fas_queue_srv(s);
-	SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 }
 
 /* This function updates the server trees according to server <srv>'s new
@@ -277,7 +277,7 @@
 
 	srv = avoided = NULL;
 
-	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
 	if (p->srv_act)
 		node = eb32_first(&p->lbprm.fas.act);
 	else if (p->lbprm.fbck) {
@@ -313,7 +313,7 @@
 	if (!srv)
 		srv = avoided;
   out:
-	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
 	return srv;
 }
 
diff --git a/src/lb_fwlc.c b/src/lb_fwlc.c
index 8bd3ac2..fd9b437 100644
--- a/src/lb_fwlc.c
+++ b/src/lb_fwlc.c
@@ -56,10 +56,10 @@
 	if (!s->lb_tree)
 		return;
 
-	SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 	fwlc_dequeue_srv(s);
 	fwlc_queue_srv(s);
-	SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 }
 
 /* This function updates the server trees according to server <srv>'s new
@@ -269,7 +269,7 @@
 
 	srv = avoided = NULL;
 
-	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
 	if (p->srv_act)
 		node = eb32_first(&p->lbprm.fwlc.act);
 	else if (p->lbprm.fbck) {
@@ -305,7 +305,7 @@
 	if (!srv)
 		srv = avoided;
  out:
-	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
 	return srv;
 }
 
diff --git a/src/lb_fwrr.c b/src/lb_fwrr.c
index fe2777d..cba7db5 100644
--- a/src/lb_fwrr.c
+++ b/src/lb_fwrr.c
@@ -470,7 +470,7 @@
 	struct fwrr_group *grp;
 	int switched;
 
-	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
 	if (p->srv_act)
 		grp = &p->lbprm.fwrr.act;
 	else if (p->lbprm.fbck) {
@@ -564,7 +564,7 @@
 		}
 	}
  out:
-	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
 	return srv;
 }
 
diff --git a/src/lb_map.c b/src/lb_map.c
index df0e185..ecab4de 100644
--- a/src/lb_map.c
+++ b/src/lb_map.c
@@ -208,7 +208,7 @@
 	int newidx, avoididx;
 	struct server *srv, *avoided;
 
-	SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight == 0) {
 		avoided = NULL;
 		goto out;
@@ -240,7 +240,7 @@
 		px->lbprm.map.rr_idx = avoididx;
 
   out:
-	SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	/* return NULL or srvtoavoid if found */
 	return avoided;
 }
diff --git a/src/listener.c b/src/listener.c
index c88d76d..2ac25fd 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -60,7 +60,7 @@
  */
 static void enable_listener(struct listener *listener)
 {
-	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
 	if (listener->state == LI_LISTEN) {
 		if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
 		    listener->bind_conf->bind_proc &&
@@ -83,7 +83,7 @@
 			listener->state = LI_FULL;
 		}
 	}
-	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
 }
 
 /* This function removes the specified listener's file descriptor from the
@@ -92,19 +92,19 @@
  */
 static void disable_listener(struct listener *listener)
 {
-	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
 	if (listener->state < LI_READY)
 		goto end;
 	if (listener->state == LI_READY)
 		fd_stop_recv(listener->fd);
 	if (listener->state == LI_LIMITED) {
-		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		LIST_DEL(&listener->wait_queue);
-		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	}
 	listener->state = LI_LISTEN;
   end:
-	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
 }
 
 /* This function tries to temporarily disable a listener, depending on the OS
@@ -118,7 +118,7 @@
 {
 	int ret = 1;
 
-	SPIN_LOCK(LISTENER_LOCK, &l->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
 
 	if (l->state <= LI_ZOMBIE)
 		goto end;
@@ -138,15 +138,15 @@
 	}
 
 	if (l->state == LI_LIMITED) {
-		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		LIST_DEL(&l->wait_queue);
-		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	}
 
 	fd_stop_recv(l->fd);
 	l->state = LI_PAUSED;
   end:
-	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
 	return ret;
 }
 
@@ -164,7 +164,7 @@
 {
 	int ret = 1;
 
-	SPIN_LOCK(LISTENER_LOCK, &l->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
 
 	if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
 	    l->bind_conf->bind_proc &&
@@ -213,7 +213,7 @@
 	fd_want_recv(l->fd);
 	l->state = LI_READY;
   end:
-	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
 	return ret;
 }
 
@@ -221,9 +221,9 @@
 {
 	int ret;
 
-	SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+	HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	ret = __resume_listener(l);
-	SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+	HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	return ret;
 }
 
@@ -237,9 +237,9 @@
 {
 	if (l->state >= LI_READY) {
 		if (l->state == LI_LIMITED) {
-			SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+			HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 			LIST_DEL(&l->wait_queue);
-			SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+			HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		}
 
 		fd_stop_recv(l->fd);
@@ -256,9 +256,9 @@
 static void limit_listener(struct listener *l, struct list *list)
 {
 	if (l->state == LI_READY) {
-		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		LIST_ADDQ(list, &l->wait_queue);
-		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		fd_stop_recv(l->fd);
 		l->state = LI_LIMITED;
 	}
@@ -298,7 +298,7 @@
 {
 	struct listener *listener, *l_back;
 
-	SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+	HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	list_for_each_entry_safe(listener, l_back, list, wait_queue) {
 		/* This cannot fail because the listeners are by definition in
 		 * the LI_LIMITED state. The function also removes the entry
@@ -306,7 +306,7 @@
 		 */
 		__resume_listener(listener);
 	}
-	SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+	HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 }
 
 /* must be called with the lock held */
@@ -316,9 +316,9 @@
 		fd_stop_recv(listener->fd);
 
 	if (listener->state == LI_LIMITED) {
-		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 		LIST_DEL(&listener->wait_queue);
-		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
+		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
 	}
 
 	if (listener->state >= LI_PAUSED) {
@@ -334,9 +334,9 @@
 
 static void do_unbind_listener(struct listener *listener, int do_close)
 {
-	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
 	__do_unbind_listener(listener, do_close);
-	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
 }
 
 /* This function closes the listening socket for the specified listener,
@@ -406,7 +406,7 @@
 
 		proto->add(l, port);
 
-		SPIN_INIT(&l->lock);
+		HA_SPIN_INIT(&l->lock);
 		HA_ATOMIC_ADD(&jobs, 1);
 		HA_ATOMIC_ADD(&listeners, 1);
 	}
@@ -424,13 +424,13 @@
 	if (listener->state != LI_ASSIGNED)
 		return;
 
-	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
 	listener->state = LI_INIT;
 	LIST_DEL(&listener->proto_list);
 	listener->proto->nb_listeners--;
 	HA_ATOMIC_SUB(&jobs, 1);
 	HA_ATOMIC_SUB(&listeners, 1);
-	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
 }
 
 /* This function is called on a read event from a listening socket, corresponding
@@ -449,7 +449,7 @@
 	static int accept4_broken;
 #endif
 
-	if (SPIN_TRYLOCK(LISTENER_LOCK, &l->lock))
+	if (HA_SPIN_TRYLOCK(LISTENER_LOCK, &l->lock))
 		return;
 
 	if (unlikely(l->nbconn >= l->maxconn)) {
@@ -657,7 +657,7 @@
 	limit_listener(l, &global_listener_queue);
 	task_schedule(global_listener_queue_task, tick_first(expire, global_listener_queue_task->expire));
  end:
-	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
+	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
 }
 
 /* Notify the listener that a connection initiated from it was released. This
@@ -1019,7 +1019,7 @@
 	sample_register_fetches(&smp_kws);
 	acl_register_keywords(&acl_kws);
 	bind_register_keywords(&bind_kws);
-	SPIN_INIT(&lq_lock);
+	HA_SPIN_INIT(&lq_lock);
 }
 
 /*
diff --git a/src/map.c b/src/map.c
index 99ea334..fb41173 100644
--- a/src/map.c
+++ b/src/map.c
@@ -325,16 +325,16 @@
 		 * this pointer. We know we have reached the end when this
 		 * pointer points back to the head of the streams list.
 		 */
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		LIST_INIT(&appctx->ctx.map.bref.users);
 		appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n;
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_LIST;
 		/* fall through */
 
 	case STAT_ST_LIST:
 
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 
 		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
 			LIST_DEL(&appctx->ctx.map.bref.users);
@@ -360,7 +360,7 @@
 				 * this stream's users so that it can remove us upon termination.
 				 */
 				LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users);
-				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				si_applet_cant_put(si);
 				return 0;
 			}
@@ -368,7 +368,7 @@
 			/* get next list entry and check the end of the list */
 			appctx->ctx.map.bref.ref = elt->list.n;
 		}
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_FIN;
 		/* fall through */
 
@@ -456,7 +456,7 @@
 		/* fall through */
 
 	case STAT_ST_LIST:
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		/* for each lookup type */
 		while (appctx->ctx.map.expr) {
 			/* initialise chunk to build new message */
@@ -542,7 +542,7 @@
 				/* let's try again later from this stream. We add ourselves into
 				 * this stream's users so that it can remove us upon termination.
 				 */
-				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				si_applet_cant_put(si);
 				return 0;
 			}
@@ -551,7 +551,7 @@
 			appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr,
 			                                         &appctx->ctx.map.ref->pat);
 		}
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_FIN;
 		/* fall through */
 
@@ -628,10 +628,10 @@
 static void cli_release_show_map(struct appctx *appctx)
 {
 	if (appctx->st2 == STAT_ST_LIST) {
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users))
 			LIST_DEL(&appctx->ctx.map.bref.users);
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 }
 
@@ -728,32 +728,32 @@
 
 			/* Try to delete the entry. */
 			err = NULL;
-			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) {
-				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				if (err)
 					memprintf(&err, "%s.\n", err);
 				appctx->ctx.cli.err = err;
 				appctx->st0 = CLI_ST_PRINT_FREE;
 				return 1;
 			}
-			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		}
 		else {
 			/* Else, use the entry identifier as pattern
 			 * string, and update the value.
 			 */
 			err = NULL;
-			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) {
-				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				if (err)
 					memprintf(&err, "%s.\n", err);
 				appctx->ctx.cli.err = err;
 				appctx->st0 = CLI_ST_PRINT_FREE;
 				return 1;
 			}
-			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		}
 
 		/* The set is done, send message. */
@@ -825,12 +825,12 @@
 
 		/* Add value. */
 		err = NULL;
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (appctx->ctx.map.display_flags == PAT_REF_MAP)
 			ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err);
 		else
 			ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err);
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!ret) {
 			if (err)
 				memprintf(&err, "%s.\n", err);
@@ -910,31 +910,31 @@
 		}
 
 		/* Try to delete the entry. */
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) {
-			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			/* The entry is not found, send message. */
 			appctx->ctx.cli.severity = LOG_ERR;
 			appctx->ctx.cli.msg = "Key not found.\n";
 			appctx->st0 = CLI_ST_PRINT;
 			return 1;
 		}
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 	else {
 		/* Else, use the entry identifier as pattern
 		 * string and try to delete the entry.
 		 */
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) {
-			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			/* The entry is not found, send message. */
 			appctx->ctx.cli.severity = LOG_ERR;
 			appctx->ctx.cli.msg = "Key not found.\n";
 			appctx->st0 = CLI_ST_PRINT;
 			return 1;
 		}
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 
 	/* The deletion is done, send message. */
@@ -983,9 +983,9 @@
 		}
 
 		/* Clear all. */
-		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		pat_ref_prune(appctx->ctx.map.ref);
-		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 
 		/* return response */
 		appctx->st0 = CLI_ST_PROMPT;
diff --git a/src/memory.c b/src/memory.c
index 9313aa9..b0b32e7 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -93,7 +93,7 @@
 		LIST_ADDQ(start, &pool->list);
 	}
 	pool->users++;
-	SPIN_INIT(&pool->lock);
+	HA_SPIN_INIT(&pool->lock);
 	return pool;
 }
 
@@ -143,9 +143,9 @@
 {
 	void *ptr;
 
-	SPIN_LOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	ptr = __pool_refill_alloc(pool, avail);
-	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	return ptr;
 }
 /*
@@ -157,7 +157,7 @@
 	if (!pool)
 		return;
 
-	SPIN_LOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	next = pool->free_list;
 	while (next) {
 		temp = next;
@@ -166,7 +166,7 @@
 		free(temp);
 	}
 	pool->free_list = next;
-	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	/* here, we should have pool->allocate == pool->used */
 }
 
@@ -192,7 +192,7 @@
 		void *temp, *next;
 		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
 		if (entry != pool_ctx)
-			SPIN_LOCK(POOL_LOCK, &entry->lock);
+			HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
 		next = entry->free_list;
 		while (next &&
 		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
@@ -203,7 +203,7 @@
 		}
 		entry->free_list = next;
 		if (entry != pool_ctx)
-			SPIN_UNLOCK(POOL_LOCK, &entry->lock);
+			HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
 	}
 
 	HA_ATOMIC_STORE(&recurse, 0);
@@ -225,7 +225,7 @@
 		pool->users--;
 		if (!pool->users) {
 			LIST_DEL(&pool->list);
-			SPIN_DESTROY(&pool->lock);
+			HA_SPIN_DESTROY(&pool->lock);
 			free(pool);
 		}
 	}
@@ -242,7 +242,7 @@
 	allocated = used = nbpools = 0;
 	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
 	list_for_each_entry(entry, &pools, list) {
-		SPIN_LOCK(POOL_LOCK, &entry->lock);
+		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
 		chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
 			 entry->name, entry->size, entry->allocated,
 		         entry->size * entry->allocated, entry->used, entry->failed,
@@ -251,7 +251,7 @@
 		allocated += entry->allocated * entry->size;
 		used += entry->used * entry->size;
 		nbpools++;
-		SPIN_UNLOCK(POOL_LOCK, &entry->lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
 	}
 	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
 		 nbpools, allocated, used);
diff --git a/src/mux_h2.c b/src/mux_h2.c
index fbd042b..07a94bc 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -233,9 +233,9 @@
 	    unlikely((buf = b_alloc_margin(&h2c->dbuf, 0)) == NULL)) {
 		h2c->dbuf_wait.target = h2c->conn;
 		h2c->dbuf_wait.wakeup_cb = h2_dbuf_available;
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_ADDQ(&buffer_wq, &h2c->dbuf_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		__conn_xprt_stop_recv(h2c->conn);
 	}
 	return buf;
@@ -289,9 +289,9 @@
 	    unlikely((buf = b_alloc_margin(&h2c->mbuf, 0)) == NULL)) {
 		h2c->mbuf_wait.target = h2c;
 		h2c->mbuf_wait.wakeup_cb = h2_mbuf_available;
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_ADDQ(&buffer_wq, &h2c->mbuf_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 
 		/* FIXME: we should in fact only block the direction being
 		 * currently used. For now it will be enough like this.
@@ -425,14 +425,14 @@
 	if (h2c) {
 		hpack_dht_free(h2c->ddht);
 		h2_release_dbuf(h2c);
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&h2c->dbuf_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 
 		h2_release_mbuf(h2c);
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&h2c->mbuf_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 
 		if (h2c->task) {
 			task_delete(h2c->task);
diff --git a/src/pattern.c b/src/pattern.c
index 61ef5f0..39ecd95 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -489,15 +489,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -519,7 +519,7 @@
 
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -536,15 +536,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -563,7 +563,7 @@
 
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -606,15 +606,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -630,7 +630,7 @@
 
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -675,15 +675,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -705,7 +705,7 @@
 
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -723,15 +723,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -753,7 +753,7 @@
 
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -775,15 +775,15 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
-		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
 		if (!lru) {
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		}
 		else if (lru->domain) {
 			ret = lru->data;
-			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 			return ret;
 		}
 	}
@@ -819,7 +819,7 @@
  leave:
 	if (lru) {
 		lru64_commit(lru, ret, expr, expr->revision, NULL);
-		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 	}
 
 	return ret;
@@ -1765,11 +1765,11 @@
 		if (!expr->pat_head->parse_smp)
 			continue;
 
-		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 		data = pattern_find_smp(expr, elt);
 		if (data && *data && !expr->pat_head->parse_smp(sample, *data))
 			*data = NULL;
-		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	}
 
 	/* free old sample only when all exprs are updated */
@@ -1872,7 +1872,7 @@
 
 	LIST_INIT(&ref->head);
 	LIST_INIT(&ref->pat);
-	SPIN_INIT(&ref->lock);
+	HA_SPIN_INIT(&ref->lock);
 	LIST_ADDQ(&pattern_reference, &ref->list);
 
 	return ref;
@@ -1991,14 +1991,14 @@
 		return 0;
 	}
 
-	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+	HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 	/* index pattern */
 	if (!expr->pat_head->index(expr, &pattern, err)) {
-		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 		free(data);
 		return 0;
 	}
-	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+	HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 
 	return 1;
 }
@@ -2073,9 +2073,9 @@
 	struct pattern pattern;
 
 
-	SPIN_LOCK(PATREF_LOCK, &ref->lock);
+	HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
 	list_for_each_entry(expr, &ref->pat, list) {
-		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 	}
 
 	/* all expr are locked, we can safely remove all pat_ref */
@@ -2145,9 +2145,9 @@
 				continue;
 			}
 		}
-		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	}
-	SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
+	HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 }
 
 /* This function prune all entries of <ref>. This function
@@ -2160,9 +2160,9 @@
 	struct bref *bref, *back;
 
 	list_for_each_entry(expr, &ref->pat, list) {
-		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 		expr->pat_head->prune(expr);
-		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	}
 
 	/* we trash pat_ref_elt in a second time to ensure that data is
@@ -2267,7 +2267,7 @@
 
 		expr->ref = ref;
 
-		RWLOCK_INIT(&expr->lock);
+		HA_RWLOCK_INIT(&expr->lock);
 
 		/* We must free this pattern if it is no more used. */
 		list->do_free = 1;
@@ -2579,7 +2579,7 @@
 		return NULL;
 
 	list_for_each_entry(list, &head->head, list) {
-		RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
+		HA_RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
 		pat = head->match(smp, list->expr, fill);
 		if (pat) {
 			/* We duplicate the pattern cause it could be modified
@@ -2610,10 +2610,10 @@
 				}
 				pat->data = &static_sample_data;
 			}
-			RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
+			HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
 			return pat;
 		}
-		RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
+		HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
 	}
 	return NULL;
 }
@@ -2627,9 +2627,9 @@
 		LIST_DEL(&list->list);
 		if (list->do_free) {
 			LIST_DEL(&list->expr->list);
-			RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
+			HA_RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
 			head->prune(list->expr);
-			RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
+			HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
 			free(list->expr);
 		}
 		free(list);
@@ -2676,9 +2676,9 @@
  */
 int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref)
 {
-	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+	HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 	expr->pat_head->delete(expr, ref);
-	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+	HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	return 1;
 }
 
@@ -2694,7 +2694,7 @@
 	pat_lru_seed = random();
 	if (global.tune.pattern_cache) {
 		pat_lru_tree = lru64_new(global.tune.pattern_cache);
-		SPIN_INIT(&pat_lru_tree_lock);
+		HA_SPIN_INIT(&pat_lru_tree_lock);
 	}
 
 	list_for_each_entry(ref, &pattern_reference, list) {
diff --git a/src/peers.c b/src/peers.c
index 4819937..94a4852 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -319,7 +319,7 @@
 		cursor += st->table->key_size;
 	}
 
-	RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
 	/* encode values */
 	for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
 
@@ -359,7 +359,7 @@
 			}
 		}
 	}
-	RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 	/* Compute datalen */
 	datalen = (cursor - datamsg);
@@ -510,7 +510,7 @@
 
 	/* peer session identified */
 	if (peer) {
-		SPIN_LOCK(PEER_LOCK, &peer->lock);
+		HA_SPIN_LOCK(PEER_LOCK, &peer->lock);
 		if (peer->appctx == appctx) {
 			/* Re-init current table pointers to force announcement on re-connect */
 			peer->remote_table = peer->last_local_table = NULL;
@@ -527,7 +527,7 @@
 			peer->flags &= PEER_TEACH_RESET;
 			peer->flags &= PEER_LEARN_RESET;
 		}
-		SPIN_UNLOCK(PEER_LOCK, &peer->lock);
+		HA_SPIN_UNLOCK(PEER_LOCK, &peer->lock);
 		task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
 	}
 }
@@ -692,7 +692,7 @@
 					goto switchstate;
 				}
 
-				SPIN_LOCK(PEER_LOCK, &curpeer->lock);
+				HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
 				if (curpeer->appctx && curpeer->appctx != appctx) {
 					if (curpeer->local) {
 						/* Local connection, reply a retry */
@@ -726,7 +726,7 @@
 
 				if (!curpeer) {
 					curpeer = appctx->ctx.peers.ptr;
-					SPIN_LOCK(PEER_LOCK, &curpeer->lock);
+					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
 					if (curpeer->appctx != appctx) {
 						appctx->st0 = PEER_SESS_ST_END;
 						goto switchstate;
@@ -787,7 +787,7 @@
 
 				if (!curpeer) {
 					curpeer = appctx->ctx.peers.ptr;
-					SPIN_LOCK(PEER_LOCK, &curpeer->lock);
+					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
 					if (curpeer->appctx != appctx) {
 						appctx->st0 = PEER_SESS_ST_END;
 						goto switchstate;
@@ -826,7 +826,7 @@
 
 				if (!curpeer) {
 					curpeer = appctx->ctx.peers.ptr;
-					SPIN_LOCK(PEER_LOCK, &curpeer->lock);
+					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
 					if (curpeer->appctx != appctx) {
 						appctx->st0 = PEER_SESS_ST_END;
 						goto switchstate;
@@ -913,7 +913,7 @@
 
 				if (!curpeer) {
 					curpeer = appctx->ctx.peers.ptr;
-					SPIN_LOCK(PEER_LOCK, &curpeer->lock);
+					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
 					if (curpeer->appctx != appctx) {
 						appctx->st0 = PEER_SESS_ST_END;
 						goto switchstate;
@@ -1252,7 +1252,7 @@
 							newts = NULL;
 						}
 
-						RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 						for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
 
@@ -1264,7 +1264,7 @@
 										data = intdecode(&msg_cur, msg_end);
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1281,7 +1281,7 @@
 										data = intdecode(&msg_cur, msg_end);
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1298,7 +1298,7 @@
 										data = intdecode(&msg_cur, msg_end);
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1320,7 +1320,7 @@
 										data.curr_tick = tick_add(now_ms, -intdecode(&msg_cur, msg_end)) & ~0x1;
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1328,7 +1328,7 @@
 										data.curr_ctr = intdecode(&msg_cur, msg_end);
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1336,7 +1336,7 @@
 										data.prev_ctr = intdecode(&msg_cur, msg_end);
 										if (!msg_cur) {
 											/* malformed message */
-											RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+											HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 											stktable_touch_remote(st->table, ts, 1);
 											appctx->st0 = PEER_SESS_ST_ERRPROTO;
 											goto switchstate;
@@ -1351,7 +1351,7 @@
 							}
 						}
 
-						RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 						stktable_touch_remote(st->table, ts, 1);
 
 					}
@@ -1463,7 +1463,7 @@
 						}
 
 						if (!(curpeer->flags & PEER_F_TEACH_PROCESS)) {
-							SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+							HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 							if (!(curpeer->flags & PEER_F_LEARN_ASSIGN) &&
 							    ((int)(st->last_pushed - st->table->localupdate) < 0)) {
 								struct eb32_node *eb;
@@ -1517,14 +1517,14 @@
 									ts = eb32_entry(eb, struct stksess, upd);
 									updateid = ts->upd.key;
 									ts->ref_cnt++;
-									SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 
 									msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, 0);
 									if (!msglen) {
 										/* internal error: message does not fit in trash */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										appctx->st0 = PEER_SESS_ST_END;
 										goto switchstate;
 									}
@@ -1533,9 +1533,9 @@
 									repl = ci_putblk(si_ic(si), trash.str, msglen);
 									if (repl <= 0) {
 										/* no more write possible */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										if (repl == -1) {
 											goto full;
 										}
@@ -1543,7 +1543,7 @@
 										goto switchstate;
 									}
 
-									SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 									ts->ref_cnt--;
 									st->last_pushed = updateid;
 									if ((int)(st->last_pushed - st->table->commitupdate) > 0)
@@ -1552,7 +1552,7 @@
 									new_pushed = 0;
 								}
 							}
-							SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+							HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 						}
 						else {
 							if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) {
@@ -1584,7 +1584,7 @@
 
 								/* We force new pushed to 1 to force identifier in update message */
 								new_pushed = 1;
-								SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+								HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 								while (1) {
 									uint32_t msglen;
 									struct stksess *ts;
@@ -1604,15 +1604,15 @@
 									ts = eb32_entry(eb, struct stksess, upd);
 									updateid = ts->upd.key;
 									ts->ref_cnt++;
-									SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 
 									use_timed = !(curpeer->flags & PEER_F_DWNGRD);
 									msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
 									if (!msglen) {
 										/* internal error: message does not fit in trash */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										appctx->st0 = PEER_SESS_ST_END;
 										goto switchstate;
 									}
@@ -1621,22 +1621,22 @@
 									repl = ci_putblk(si_ic(si), trash.str, msglen);
 									if (repl <= 0) {
 										/* no more write possible */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										if (repl == -1) {
 											goto full;
 										}
 										appctx->st0 = PEER_SESS_ST_END;
 										goto switchstate;
 									}
-									SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 									ts->ref_cnt--;
 									st->last_pushed = updateid;
 									/* identifier may not needed in next update message */
 									new_pushed = 0;
 								}
-								SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+								HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 							}
 
 							if (!(st->flags & SHTABLE_F_TEACH_STAGE2)) {
@@ -1668,7 +1668,7 @@
 
 								/* We force new pushed to 1 to force identifier in update message */
 								new_pushed = 1;
-								SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+								HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 								while (1) {
 									uint32_t msglen;
 									struct stksess *ts;
@@ -1687,15 +1687,15 @@
 									ts = eb32_entry(eb, struct stksess, upd);
 									updateid = ts->upd.key;
 									ts->ref_cnt++;
-									SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 
 									use_timed = !(curpeer->flags & PEER_F_DWNGRD);
 									msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
 									if (!msglen) {
 										/* internal error: message does not fit in trash */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										appctx->st0 = PEER_SESS_ST_END;
 										goto switchstate;
 									}
@@ -1704,9 +1704,9 @@
 									repl = ci_putblk(si_ic(si), trash.str, msglen);
 									if (repl <= 0) {
 										/* no more write possible */
-										SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 										ts->ref_cnt--;
-										SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+										HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 										if (repl == -1) {
 											goto full;
 										}
@@ -1714,13 +1714,13 @@
 										goto switchstate;
 									}
 
-									SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
+									HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
 									ts->ref_cnt--;
 									st->last_pushed = updateid;
 									/* identifier may not needed in next update message */
 									new_pushed = 0;
 								}
-								SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
+								HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
 							}
 						}
 
@@ -1803,7 +1803,7 @@
 			}
 			case PEER_SESS_ST_END: {
 				if (curpeer) {
-					SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
+					HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
 					curpeer = NULL;
 				}
 				si_shutw(si);
@@ -1817,7 +1817,7 @@
 	si_oc(si)->flags |= CF_READ_DONTWAIT;
 
 	if (curpeer)
-		SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
+		HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
 	return;
 full:
 	si_applet_cant_put(si);
@@ -1973,7 +1973,7 @@
 
 	/* Acquire lock for all peers of the section */
 	for (ps = peers->remote; ps; ps = ps->next)
-		SPIN_LOCK(PEER_LOCK, &ps->lock);
+		HA_SPIN_LOCK(PEER_LOCK, &ps->lock);
 
 	if (!stopping) {
 		/* Normal case (not soft stop)*/
@@ -2147,7 +2147,7 @@
 
 	/* Release lock for all peers of the section */
 	for (ps = peers->remote; ps; ps = ps->next)
-		SPIN_UNLOCK(PEER_LOCK, &ps->lock);
+		HA_SPIN_UNLOCK(PEER_LOCK, &ps->lock);
 
 	/* Wakeup for re-connect */
 	return task;
diff --git a/src/proto_http.c b/src/proto_http.c
index 8d813d3..bfce2cf 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -2621,9 +2621,9 @@
 
 			/* perform update */
 			/* returned code: 1=ok, 0=ko */
-			SPIN_LOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			pat_ref_delete(ref, key->str);
-			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2649,10 +2649,10 @@
 
 			/* perform update */
 			/* add entry only if it does not already exist */
-			SPIN_LOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			if (pat_ref_find_elt(ref, key->str) == NULL)
 				pat_ref_add(ref, key->str, NULL, NULL);
-			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2737,7 +2737,7 @@
 					ptr1 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
 					ptr2 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
 					if (ptr1 || ptr2) {
-						RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 						if (ptr1)
 							stktable_data_cast(ptr1, http_req_cnt)++;
@@ -2746,7 +2746,7 @@
 							update_freq_ctr_period(&stktable_data_cast(ptr2, http_req_rate),
 							                       t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
 
-						RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 					}
 
 					stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
@@ -2915,9 +2915,9 @@
 
 			/* perform update */
 			/* returned code: 1=ok, 0=ko */
-			SPIN_LOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			pat_ref_delete(ref, key->str);
-			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2980,14 +2980,14 @@
 			value->str[value->len] = '\0';
 
 			/* perform update */
-			SPIN_LOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			if (pat_ref_find_elt(ref, key->str) != NULL)
 				/* update entry if it exists */
 				pat_ref_set(ref, key->str, value->str, NULL);
 			else
 				/* insert a new entry */
 				pat_ref_add(ref, key->str, value->str, NULL);
-			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
+			HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 			free_trash_chunk(key);
 			free_trash_chunk(value);
 			break;
@@ -3015,7 +3015,7 @@
 				if (key && (ts = stktable_get_entry(t, key))) {
 					stream_track_stkctr(&s->stkctr[trk_idx(rule->action)], t, ts);
 
-					RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+					HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 					/* let's count a new HTTP request as it's the first time we do it */
 					ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
@@ -3045,7 +3045,7 @@
 									       t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
 					}
 
-					RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+					HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 					stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
 					if (sess->fe != s->be)
@@ -7755,7 +7755,7 @@
 	struct channel *chn = msg->chn;
 	int len1, len2;
 
-	SPIN_LOCK(PROXY_LOCK, &proxy->lock);
+	HA_SPIN_LOCK(PROXY_LOCK, &proxy->lock);
 	es->len = MIN(chn->buf->i, global.tune.bufsize);
 	len1 = chn->buf->data + chn->buf->size - chn->buf->p;
 	len1 = MIN(len1, es->len);
@@ -7795,7 +7795,7 @@
 	es->b_tot = chn->total;
 	es->m_clen = msg->chunk_len;
 	es->m_blen = msg->body_len;
-	SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
+	HA_SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
 }
 
 /* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of
diff --git a/src/proxy.c b/src/proxy.c
index 9169ed9..3af01ef 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -762,7 +762,7 @@
 	/* initial uuid is unassigned (-1) */
 	p->uuid = -1;
 
-	SPIN_INIT(&p->lock);
+	HA_SPIN_INIT(&p->lock);
 }
 
 /*
diff --git a/src/queue.c b/src/queue.c
index 93d3e94..481b506 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -142,8 +142,8 @@
 	struct proxy  *p = s->proxy;
 	int maxconn;
 
-	SPIN_LOCK(PROXY_LOCK,  &p->lock);
-	SPIN_LOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_LOCK(PROXY_LOCK,  &p->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
 
 	/* First, check if we can handle some connections queued at the proxy. We
 	 * will take as many as we can handle.
@@ -156,8 +156,8 @@
 			break;
 		task_wakeup(strm->task, TASK_WOKEN_RES);
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &s->lock);
-	SPIN_UNLOCK(PROXY_LOCK,  &p->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_UNLOCK(PROXY_LOCK,  &p->lock);
 }
 
 /* Adds the stream <strm> to the pending connection list of server <strm>->srv
@@ -182,17 +182,17 @@
 
 	if ((strm->flags & SF_ASSIGNED) && srv) {
 		p->srv = srv;
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		LIST_ADDQ(&srv->pendconns, &p->list);
-		SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 		count = HA_ATOMIC_ADD(&srv->nbpend, 1);
 		strm->logs.srv_queue_size += count;
 		HA_ATOMIC_UPDATE_MAX(&srv->counters.nbpend_max, count);
 	} else {
 		p->srv = NULL;
-		SPIN_LOCK(PROXY_LOCK, &strm->be->lock);
+		HA_SPIN_LOCK(PROXY_LOCK, &strm->be->lock);
 		LIST_ADDQ(&strm->be->pendconns, &p->list);
-		SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock);
+		HA_SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock);
 		count = HA_ATOMIC_ADD(&strm->be->nbpend, 1);
 		strm->logs.prx_queue_size += count;
 		HA_ATOMIC_UPDATE_MAX(&strm->be->be_counters.nbpend_max, count);
@@ -209,7 +209,7 @@
 	struct pendconn *pc, *pc_bck;
 	int xferred = 0;
 
-	SPIN_LOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
 	list_for_each_entry_safe(pc, pc_bck, &s->pendconns, list) {
 		struct stream *strm = pc->strm;
 
@@ -227,7 +227,7 @@
 			xferred++;
 		}
 	}
-	SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 	return xferred;
 }
 
@@ -243,7 +243,7 @@
 	if (!srv_currently_usable(s))
 		return 0;
 
-	SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
+	HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
 	for (xferred = 0; !s->maxconn || xferred < srv_dynamic_maxconn(s); xferred++) {
 		struct stream *strm;
 		struct pendconn *p;
@@ -256,7 +256,7 @@
 		__pendconn_free(p);
 		task_wakeup(strm->task, TASK_WOKEN_RES);
 	}
-	SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
+	HA_SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
 	return xferred;
 }
 
@@ -268,15 +268,15 @@
 void pendconn_free(struct pendconn *p)
 {
 	if (p->srv) {
-		SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
 		LIST_DEL(&p->list);
-		SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
 		HA_ATOMIC_SUB(&p->srv->nbpend, 1);
 	}
 	else {
-		SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock);
 		LIST_DEL(&p->list);
-		SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock);
 		HA_ATOMIC_SUB(&p->strm->be->nbpend, 1);
 	}
 	p->strm->pend_pos = NULL;
diff --git a/src/server.c b/src/server.c
index 1a78fb3..d79e951 100644
--- a/src/server.c
+++ b/src/server.c
@@ -881,9 +881,9 @@
 
 	srv_register_update(s);
 	for (srv = s->trackers; srv; srv = srv->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		srv_set_stopped(srv, NULL, NULL);
-		SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 }
 
@@ -923,9 +923,9 @@
 
 	srv_register_update(s);
 	for (srv = s->trackers; srv; srv = srv->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		srv_set_running(srv, NULL, NULL);
-		SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 }
 
@@ -964,9 +964,9 @@
 
 	srv_register_update(s);
 	for (srv = s->trackers; srv; srv = srv->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		srv_set_stopping(srv, NULL, NULL);
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 }
 
@@ -1007,9 +1007,9 @@
 		mode = SRV_ADMF_IDRAIN;
 
 	for (srv = s->trackers; srv; srv = srv->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		srv_set_admin_flag(srv, mode, cause);
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 }
 
@@ -1045,9 +1045,9 @@
 		mode = SRV_ADMF_IDRAIN;
 
 	for (srv = s->trackers; srv; srv = srv->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 		srv_clr_admin_flag(srv, mode);
-		SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 	}
 }
 
@@ -1062,13 +1062,13 @@
 		return;
 
 	for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) {
-		SPIN_LOCK(SERVER_LOCK, &srv2->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &srv2->lock);
 		if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT))
 			srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, NULL);
 
 		if (srv->next_admin & SRV_ADMF_DRAIN)
 			srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, NULL);
-		SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
 	}
 }
 
@@ -2028,7 +2028,7 @@
 
 			/* Copy default server settings to new server settings. */
 			srv_settings_cpy(newsrv, &curproxy->defsrv, 0);
-			SPIN_INIT(&newsrv->lock);
+			HA_SPIN_INIT(&newsrv->lock);
 			cur_arg++;
 		} else {
 			newsrv = &curproxy->defsrv;
@@ -2600,10 +2600,10 @@
 {
 	if (LIST_ISEMPTY(&srv->update_status)) {
 		THREAD_WANT_SYNC();
-		SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
+		HA_SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
 		if (LIST_ISEMPTY(&srv->update_status))
 			LIST_ADDQ(&updated_servers, &srv->update_status);
-		SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
+		HA_SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
 	}
 }
 
@@ -2789,7 +2789,7 @@
 			if (msg->len)
 				goto out;
 
-			SPIN_LOCK(SERVER_LOCK, &srv->lock);
+			HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
 			/* recover operational state and apply it to this server
 			 * and all servers tracking this one */
 			switch (srv_op_state) {
@@ -2919,7 +2919,7 @@
 
 			if (port_str)
 				srv->svc_port = port;
-			SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+			HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 
 			break;
 		default:
@@ -3696,9 +3696,9 @@
 	s = objt_server(requester->owner);
 	if (!s)
 		return 1;
-	SPIN_LOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
 	snr_update_srv_status(s, 0);
-	SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 	return 1;
 }
 
@@ -3731,18 +3731,18 @@
 		 *     one used for the server found in the backend
 		 *   * the server found in the backend is not our current server
 		 */
-		SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
 		if ((tmpsrv->hostname_dn == NULL) ||
 		    (srv->hostname_dn_len != tmpsrv->hostname_dn_len) ||
 		    (strcmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) ||
 		    (srv->puid == tmpsrv->puid)) {
-			SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
 			continue;
 		}
 
 		/* If the server has been taken down, don't consider it */
 		if (tmpsrv->next_admin & SRV_ADMF_RMAINT) {
-			SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
 			continue;
 		}
 
@@ -3754,10 +3754,10 @@
 		      memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) ||
 		     (tmpsrv->addr.ss_family == AF_INET6 &&
 		      memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) {
-			SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
 			return tmpsrv;
 		}
-		SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
 	}
 
 
@@ -3789,7 +3789,7 @@
 	int                    hostname_len, hostname_dn_len;
 
 	if (!dns_locked)
-		SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
+		HA_SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
 	/* run time DNS resolution was not active for this server
 	 * and we can't enable it at run time for now.
 	 */
@@ -3825,12 +3825,12 @@
 
   end:
 	if (!dns_locked)
-		SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
+		HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
 	return 0;
 
   err:
 	if (!dns_locked)
-		SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
+		HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
 	return -1;
 }
 
@@ -4053,7 +4053,7 @@
 	if (!sv)
 		return 1;
 
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 
 	if (strcmp(args[3], "weight") == 0) {
 		warning = server_parse_weight_change_request(sv, args[4]);
@@ -4220,7 +4220,7 @@
 		appctx->st0 = CLI_ST_PRINT;
 	}
  out_unlock:
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 1;
 }
 
@@ -4427,7 +4427,7 @@
 __attribute__((constructor))
 static void __server_init(void)
 {
-	SPIN_INIT(&updated_servers_lock);
+	HA_SPIN_INIT(&updated_servers_lock);
 	cli_register_kw(&cli_kws);
 }
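
The updated_servers hunk above uses a cheap double-check: list membership is tested before taking the lock, then re-tested once the lock is held, so two threads cannot queue the same server twice. A sketch of that idiom under the same assumptions; the queue, lock and function names below are made up for illustration:

static struct list pending_updates;   /* hypothetical queue, LIST_INIT'ed at startup */
static HA_SPINLOCK_T pending_lock;    /* hypothetical lock protecting it */

static void queue_once(struct list *node)
{
	if (!LIST_ISEMPTY(node))          /* already queued: nothing to do */
		return;
	HA_SPIN_LOCK(UPDATED_SERVERS_LOCK, &pending_lock);
	if (LIST_ISEMPTY(node))           /* re-check now that the lock is held */
		LIST_ADDQ(&pending_updates, node);
	HA_SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &pending_lock);
}
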
 
diff --git a/src/signal.c b/src/signal.c
index 14e4f1e..3409e76 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -73,7 +73,7 @@
 	struct signal_descriptor *desc;
 	sigset_t old_sig;
 
-	if (SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
+	if (HA_SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
 		return;
 
 	/* block signal delivery during processing */
@@ -102,7 +102,7 @@
 
 	/* restore signal delivery */
 	sigprocmask(SIG_SETMASK, &old_sig, NULL);
-	SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
+	HA_SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
 }
 
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
@@ -114,7 +114,7 @@
 	memset(signal_queue, 0, sizeof(signal_queue));
 	memset(signal_state, 0, sizeof(signal_state));
 
-	SPIN_INIT(&signals_lock);
+	HA_SPIN_INIT(&signals_lock);
 
 	/* Ensure signals are not blocked. Some shells or service managers may
 	 * accidently block all of our signals unfortunately, causing lots of
@@ -150,7 +150,7 @@
 			pool_free2(pool2_sig_handlers, sh);
 		}
 	}
-	SPIN_DESTROY(&signals_lock);
+	HA_SPIN_DESTROY(&signals_lock);
 }
 
 /* Register a function and an integer argument on a signal. A pointer to the
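
In the signal-queue hunk above, HA_SPIN_TRYLOCK() is preferred to HA_SPIN_LOCK() so the caller never blocks: a non-zero return means the lock is busy and processing is simply retried on a later call. A minimal sketch of that non-blocking pattern, with a hypothetical lock and work placeholder:

static HA_SPINLOCK_T queue_lock;      /* hypothetical lock guarding deferred work */

static void process_deferred(void)
{
	if (HA_SPIN_TRYLOCK(SIGNALS_LOCK, &queue_lock))
		return;                   /* non-zero: busy, try again on the next call */
	/* ... drain the deferred work here ... */
	HA_SPIN_UNLOCK(SIGNALS_LOCK, &queue_lock);
}
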
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index b1d39db..597a479 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -218,15 +218,15 @@
 {
 	if (mode & CRYPTO_LOCK) {
 		if (mode & CRYPTO_READ)
-			RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+			HA_RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
 		else
-			RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+			HA_RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
 	}
 	else {
 		if (mode & CRYPTO_READ)
-			RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+			HA_RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
 		else
-			RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+			HA_RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
 	}
 }
 
@@ -239,7 +239,7 @@
 		return -1;
 
 	for (i = 0 ; i < CRYPTO_num_locks() ; i++)
-		RWLOCK_INIT(&ssl_rwlocks[i]);
+		HA_RWLOCK_INIT(&ssl_rwlocks[i]);
 
 	CRYPTO_set_id_callback(ssl_id_function);
 	CRYPTO_set_locking_callback(ssl_locking_function);
@@ -1795,15 +1795,15 @@
 	struct lru64 *lru = NULL;
 
 	if (ssl_ctx_lru_tree) {
-		RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 		lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
 		if (lru && lru->domain) {
 			if (ssl)
 				SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data);
-			RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+			HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 			return (SSL_CTX *)lru->data;
 		}
-		RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 	}
 	return NULL;
 }
@@ -1826,16 +1826,16 @@
 	struct lru64 *lru = NULL;
 
 	if (ssl_ctx_lru_tree) {
-		RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 		lru = lru64_get(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
 		if (!lru) {
-			RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+			HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 			return -1;
 		}
 		if (lru->domain && lru->data)
 			lru->free((SSL_CTX *)lru->data);
 		lru64_commit(lru, ssl_ctx, bind_conf->ca_sign_cert, 0, (void (*)(void *))SSL_CTX_free);
-		RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 		return 0;
 	}
 	return -1;
@@ -1861,7 +1861,7 @@
 
 	key = ssl_sock_generated_cert_key(servername, strlen(servername));
 	if (ssl_ctx_lru_tree) {
-		RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 		lru = lru64_get(key, ssl_ctx_lru_tree, cacert, 0);
 		if (lru && lru->domain)
 			ssl_ctx = (SSL_CTX *)lru->data;
@@ -1870,7 +1870,7 @@
 			lru64_commit(lru, ssl_ctx, cacert, 0, (void (*)(void *))SSL_CTX_free);
 		}
 		SSL_set_SSL_CTX(ssl, ssl_ctx);
-		RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+		HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
 		return 1;
 	}
 	else {
@@ -4782,7 +4782,7 @@
 #if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
 	if (global_ssl.ctx_cache) {
 		ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache);
-		RWLOCK_INIT(&ssl_ctx_lru_rwlock);
+		HA_RWLOCK_INIT(&ssl_ctx_lru_rwlock);
 	}
 	ssl_ctx_lru_seed = (unsigned int)time(NULL);
 	ssl_ctx_serial   = now_ms;
@@ -8803,7 +8803,7 @@
 #if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
 	if (ssl_ctx_lru_tree) {
 		lru64_destroy(ssl_ctx_lru_tree);
-		RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
+		HA_RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
 	}
 #endif
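
The generated-certificate cache hunks above split accesses by intent: lookups take the LRU rwlock for reading so they can run concurrently, while insertions take it for writing. A small sketch of that split, assuming HA_RWLOCK_T is the rwlock type from the threading header; the cached pointer and both helpers are hypothetical:

static HA_RWLOCK_T cache_lock;        /* assumed rwlock type from the threading header */
static void *cached_item;             /* hypothetical shared data */

static void *cache_get(void)
{
	void *item;

	HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &cache_lock);    /* readers may share */
	item = cached_item;
	HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &cache_lock);
	return item;
}

static void cache_set(void *item)
{
	HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &cache_lock);     /* writers are exclusive */
	cached_item = item;
	HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &cache_lock);
}
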
 
diff --git a/src/stats.c b/src/stats.c
index 1e4d6ff..038c074 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -2764,7 +2764,7 @@
 					reprocess = 1;
 				}
 				else if ((sv = findserver(px, value)) != NULL) {
-					SPIN_LOCK(SERVER_LOCK, &sv->lock);
+					HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 					switch (action) {
 					case ST_ADM_ACTION_DISABLE:
 						if (!(sv->cur_admin & SRV_ADMF_FMAINT)) {
@@ -2890,7 +2890,7 @@
 						}
 						break;
 					}
-					SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+					HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 				} else {
 					/* the server name is unknown or ambiguous (duplicate names) */
 					total_servers++;
diff --git a/src/stick_table.c b/src/stick_table.c
index 4810c7f..6351143 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -61,9 +61,9 @@
  */
 void stksess_free(struct stktable *t, struct stksess *ts)
 {
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	__stksess_free(t, ts);
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 
 /*
@@ -90,11 +90,11 @@
 {
 	int ret;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	if (decrefcnt)
 		ts->ref_cnt--;
 	ret = __stksess_kill(t, ts);
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 
 	return ret;
 }
@@ -126,7 +126,7 @@
 	ts->exp.node.leaf_p = NULL;
 	ts->upd.node.leaf_p = NULL;
 	ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
-	RWLOCK_INIT(&ts->lock);
+	HA_RWLOCK_INIT(&ts->lock);
 	return ts;
 }
 
@@ -201,9 +201,9 @@
 {
 	int ret;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	ret = __stktable_trash_oldest(t, to_batch);
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 
 	return ret;
 }
@@ -249,9 +249,9 @@
 {
 	struct stksess *ts;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	ts = __stksess_new(t, key);
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 
 	return ts;
 }
@@ -287,11 +287,11 @@
 {
 	struct stksess *ts;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	ts = __stktable_lookup_key(t, key);
 	if (ts)
 		ts->ref_cnt++;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 
 	return ts;
 }
@@ -325,11 +325,11 @@
 {
 	struct stksess *lts;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	lts = __stktable_lookup(t, ts);
 	if (lts)
 		lts->ref_cnt++;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 
 	return lts;
 }
@@ -389,11 +389,11 @@
  */
 void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt)
 {
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	__stktable_touch_with_exp(t, ts, 0, ts->expire);
 	if (decrefcnt)
 		ts->ref_cnt--;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 
 /* Update the expiration timer for <ts> but do not touch its expiration node.
@@ -406,18 +406,18 @@
 {
 	int expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	__stktable_touch_with_exp(t, ts, 1, expire);
 	if (decrefcnt)
 		ts->ref_cnt--;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 /* Just decrease the ref_cnt of the current session */
 void stktable_release(struct stktable *t, struct stksess *ts)
 {
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	ts->ref_cnt--;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 
 /* Insert new sticky session <ts> in the table. It is assumed that it does not
@@ -466,11 +466,11 @@
 {
 	struct stksess *ts;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
 	ts = __stktable_get_entry(table, key);
 	if (ts)
 		ts->ref_cnt++;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
 
 	return ts;
 }
@@ -498,10 +498,10 @@
 {
 	struct stksess *ts;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
 	ts = __stktable_set_entry(table, nts);
 	ts->ref_cnt++;
-	SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
 
 	return ts;
 }
@@ -515,7 +515,7 @@
 	struct eb32_node *eb;
 	int looped = 0;
 
-	SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 	eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);
 
 	while (1) {
@@ -570,7 +570,7 @@
 	/* We have found no task to expire in any tree */
 	t->exp_next = TICK_ETERNITY;
 out_unlock:
-	SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 	return t->exp_next;
 }
 
@@ -593,7 +593,7 @@
 		t->keys = EB_ROOT_UNIQUE;
 		memset(&t->exps, 0, sizeof(t->exps));
 		t->updates = EB_ROOT_UNIQUE;
-		SPIN_INIT(&t->lock);
+		HA_SPIN_INIT(&t->lock);
 
 		t->pool = create_pool("sticktables", sizeof(struct stksess) + t->data_size + t->key_size, MEM_F_SHARED);
 
@@ -1546,7 +1546,7 @@
 		ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0_RATE);
 		ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0);
 		if (ptr1 || ptr2) {
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 			if (ptr1)
 				update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),
@@ -1555,7 +1555,7 @@
 			if (ptr2)
 				stktable_data_cast(ptr2, gpc0)++;
 
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 			/* If data was modified, we need to touch to re-schedule sync */
 			stktable_touch_local(stkctr->table, ts, 0);
@@ -1628,11 +1628,11 @@
 	/* Store the sample in the required sc, and ignore errors. */
 	ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPT0);
 	if (ptr) {
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 		stktable_data_cast(ptr, gpt0) = rule->arg.gpt.value;
 
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 		stktable_touch_local(stkctr->table, ts, 0);
 	}
@@ -1887,11 +1887,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, gpt0);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -1928,11 +1928,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, gpc0);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -1968,12 +1968,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate),
 		                  stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2012,7 +2012,7 @@
 		ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
 		ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
 		if (ptr1 || ptr2) {
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 			if (ptr1) {
 				update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),
@@ -2023,7 +2023,7 @@
 			if (ptr2)
 				smp->data.u.sint = ++stktable_data_cast(ptr2, gpc0);
 
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 			/* If data was modified, we need to touch to re-schedule sync */
 			stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
@@ -2065,12 +2065,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, gpc0);
 		stktable_data_cast(ptr, gpc0) = 0;
 
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		/* If data was modified, we need to touch to re-schedule sync */
 		stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
@@ -2105,11 +2105,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, conn_cnt);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2146,12 +2146,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2197,11 +2197,11 @@
 
 	smp->data.type = SMP_T_SINT;
 
-	RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
 	smp->data.u.sint = ++stktable_data_cast(ptr, conn_cnt);
 
-	RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 	smp->flags = SMP_F_VOL_TEST;
 
@@ -2238,11 +2238,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, conn_cur);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2277,11 +2277,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, sess_cnt);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2315,12 +2315,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, sess_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2355,11 +2355,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, http_req_cnt);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2394,12 +2394,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2434,11 +2434,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, http_err_cnt);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2473,12 +2473,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2513,11 +2513,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, bytes_in_cnt) >> 10;
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2552,12 +2552,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2592,11 +2592,11 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = stktable_data_cast(ptr, bytes_out_cnt) >> 10;
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2631,12 +2631,12 @@
 			return 0; /* parameter not stored */
 		}
 
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
 					       stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);
 
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
 
 		if (stkctr == &tmpstkctr)
 			stktable_release(stkctr->table, stkctr_entry(stkctr));
@@ -2875,13 +2875,13 @@
 			stktable_release(&px->table, ts);
 			return 0;
 		}
-		RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
 		if (!table_dump_entry_to_buffer(&trash, si, px, ts)) {
-			RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
 			stktable_release(&px->table, ts);
 			return 0;
 		}
-		RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
 		stktable_release(&px->table, ts);
 		break;
 
@@ -2910,13 +2910,13 @@
 			return 1;
 		}
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 		for (cur_arg = 5; *args[cur_arg]; cur_arg += 2) {
 			if (strncmp(args[cur_arg], "data.", 5) != 0) {
 				appctx->ctx.cli.severity = LOG_ERR;
 				appctx->ctx.cli.msg = "\"data.<type>\" followed by a value expected\n";
 				appctx->st0 = CLI_ST_PRINT;
-				RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 				stktable_touch_local(&px->table, ts, 1);
 				return 1;
 			}
@@ -2926,7 +2926,7 @@
 				appctx->ctx.cli.severity = LOG_ERR;
 				appctx->ctx.cli.msg = "Unknown data type\n";
 				appctx->st0 = CLI_ST_PRINT;
-				RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 				stktable_touch_local(&px->table, ts, 1);
 				return 1;
 			}
@@ -2935,7 +2935,7 @@
 				appctx->ctx.cli.severity = LOG_ERR;
 				appctx->ctx.cli.msg = "Data type not stored in this table\n";
 				appctx->st0 = CLI_ST_PRINT;
-				RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 				stktable_touch_local(&px->table, ts, 1);
 				return 1;
 			}
@@ -2944,7 +2944,7 @@
 				appctx->ctx.cli.severity = LOG_ERR;
 				appctx->ctx.cli.msg = "Require a valid integer value to store\n";
 				appctx->st0 = CLI_ST_PRINT;
-				RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 				stktable_touch_local(&px->table, ts, 1);
 				return 1;
 			}
@@ -2978,7 +2978,7 @@
 				break;
 			}
 		}
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 		stktable_touch_local(&px->table, ts, 1);
 		break;
 
@@ -3155,16 +3155,16 @@
 				if (appctx->ctx.table.target &&
 				    (strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
 					/* dump entries only if table explicitly requested */
-					SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+					HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 					eb = ebmb_first(&appctx->ctx.table.proxy->table.keys);
 					if (eb) {
 						appctx->ctx.table.entry = ebmb_entry(eb, struct stksess, key);
 						appctx->ctx.table.entry->ref_cnt++;
 						appctx->st2 = STAT_ST_LIST;
-						SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+						HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 						break;
 					}
-					SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+					HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 				}
 			}
 			appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
@@ -3173,7 +3173,7 @@
 		case STAT_ST_LIST:
 			skip_entry = 0;
 
-			RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
+			HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
 
 			if (appctx->ctx.table.data_type >= 0) {
 				/* we're filtering on some data contents */
@@ -3221,13 +3221,13 @@
 
 			if (show && !skip_entry &&
 			    !table_dump_entry_to_buffer(&trash, si, appctx->ctx.table.proxy, appctx->ctx.table.entry)) {
-				RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
+				HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
 				return 0;
 			}
 
-			RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
+			HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
 
-			SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+			HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 			appctx->ctx.table.entry->ref_cnt--;
 
 			eb = ebmb_next(&appctx->ctx.table.entry->key);
@@ -3239,7 +3239,7 @@
 				else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
 					__stksess_kill(&appctx->ctx.table.proxy->table, old);
 				appctx->ctx.table.entry->ref_cnt++;
-				SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+				HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 				break;
 			}
 
@@ -3249,7 +3249,7 @@
 			else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
 				__stksess_kill(&appctx->ctx.table.proxy->table, appctx->ctx.table.entry);
 
-			SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
+			HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
 
 			appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
 			appctx->st2 = STAT_ST_INFO;
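
The stick-table hunks above combine two locking levels: the table spinlock (STK_TABLE_LOCK) protects the lookup trees and reference counts, while each entry's own rwlock (STK_SESS_LOCK) protects its data. A condensed sketch of the usual sequence, reusing the helpers visible in the hunks and assuming the stick-table types from the tree; grab_entry() and bump_counter() are illustrative names:

static struct stksess *grab_entry(struct stktable *t, struct stktable_key *key)
{
	struct stksess *ts;

	HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);       /* table lock: trees and ref counts */
	ts = __stktable_lookup_key(t, key);
	if (ts)
		ts->ref_cnt++;                        /* pin the entry before dropping the lock */
	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
	return ts;
}

static void bump_counter(struct stktable *t, struct stksess *ts, void *ptr)
{
	HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);   /* entry lock: its data fields */
	stktable_data_cast(ptr, gpc0)++;
	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
	stktable_touch_local(t, ts, 1);               /* reschedule peer sync, drop the ref */
}
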
diff --git a/src/stream.c b/src/stream.c
index 12545fb..2d4f78a 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -253,9 +253,9 @@
 	s->txn = NULL;
 	s->hlua = NULL;
 
-	SPIN_LOCK(STRMS_LOCK, &streams_lock);
+	HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
 	LIST_ADDQ(&streams, &s->list);
-	SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+	HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 
 	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
 		goto out_fail_accept;
@@ -326,10 +326,10 @@
 
 	/* We may still be present in the buffer wait queue */
 	if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&s->buffer_wait.list);
 		LIST_INIT(&s->buffer_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 	if (s->req.buf->size || s->res.buf->size) {
 		b_drop(&s->req.buf);
@@ -373,7 +373,7 @@
 
 	stream_store_counters(s);
 
-	SPIN_LOCK(STRMS_LOCK, &streams_lock);
+	HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
 	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
 		/* we have to unlink all watchers. We must not relink them if
 		 * this stream was the last one in the list.
@@ -385,7 +385,7 @@
 		bref->ref = s->list.n;
 	}
 	LIST_DEL(&s->list);
-	SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+	HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 
 	si_release_endpoint(&s->si[1]);
 	si_release_endpoint(&s->si[0]);
@@ -423,18 +423,18 @@
 static int stream_alloc_work_buffer(struct stream *s)
 {
 	if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
-		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 		LIST_DEL(&s->buffer_wait.list);
 		LIST_INIT(&s->buffer_wait.list);
-		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	}
 
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
-	SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+	HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 	return 0;
 }
 
@@ -468,7 +468,7 @@
 int init_stream()
 {
 	LIST_INIT(&streams);
-	SPIN_INIT(&streams_lock);
+	HA_SPIN_INIT(&streams_lock);
 	pool2_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
 	return pool2_stream != NULL;
 }
@@ -504,7 +504,7 @@
 					continue;
 			}
 
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 			ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT);
 			if (ptr1)
 				stktable_data_cast(ptr1, bytes_in_cnt) += bytes;
@@ -513,7 +513,7 @@
 			if (ptr2)
 				update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_in_rate),
 						       stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 			/* If data was modified, we need to touch to re-schedule sync */
 			if (ptr1 || ptr2)
@@ -544,7 +544,7 @@
 					continue;
 			}
 
-			RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 			ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT);
 			if (ptr1)
 				stktable_data_cast(ptr1, bytes_out_cnt) += bytes;
@@ -553,7 +553,7 @@
 			if (ptr2)
 				update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_out_rate),
 						       stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
-			RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 			/* If data was modified, we need to touch to re-schedule sync */
 			if (ptr1 || ptr2)
@@ -1409,10 +1409,10 @@
 						void *ptr;
 
 						/* srv found in table */
-						RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
 						ptr = stktable_data_ptr(rule->table.t, ts, STKTABLE_DT_SERVER_ID);
 						node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id));
-						RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+						HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
 						if (node) {
 							struct server *srv;
 
@@ -1536,10 +1536,10 @@
 		}
 		s->store[i].ts = NULL;
 
-		RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 		ptr = stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID);
 		stktable_data_cast(ptr, server_id) = objt_server(s->target)->puid;
-		RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 		stktable_touch_local(s->store[i].table, ts, 1);
 	}
 	s->store_count = 0; /* everything is stored */
@@ -2536,12 +2536,12 @@
 		swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data);
 		swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close);
 	}
-	SPIN_LOCK(PROXY_LOCK, &s->be->lock);
+	HA_SPIN_LOCK(PROXY_LOCK, &s->be->lock);
 	swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue);
 	swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect);
 	swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data);
 	swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close);
-	SPIN_UNLOCK(PROXY_LOCK, &s->be->lock);
+	HA_SPIN_UNLOCK(PROXY_LOCK, &s->be->lock);
 }
 
 /*
@@ -3056,14 +3056,14 @@
 		 * pointer points back to the head of the streams list.
 		 */
 		LIST_INIT(&appctx->ctx.sess.bref.users);
-		SPIN_LOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
 		appctx->ctx.sess.bref.ref = streams.n;
-		SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 		appctx->st2 = STAT_ST_LIST;
 		/* fall through */
 
 	case STAT_ST_LIST:
-		SPIN_LOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
 		/* first, let's detach the back-ref from a possible previous stream */
 		if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
 			LIST_DEL(&appctx->ctx.sess.bref.users);
@@ -3084,7 +3084,7 @@
 				LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
 				/* call the proper dump() function and return if we're missing space */
 				if (!stats_dump_full_strm_to_buffer(si, curr_strm)) {
-					SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+					HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 					return 0;
 				}
 
@@ -3212,7 +3212,7 @@
 				 */
 				si_applet_cant_put(si);
 				LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
-				SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+				HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 				return 0;
 			}
 
@@ -3229,17 +3229,17 @@
 
 			if (ci_putchk(si_ic(si), &trash) == -1) {
 				si_applet_cant_put(si);
-				SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+				HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 				return 0;
 			}
 
 			appctx->ctx.sess.target = NULL;
 			appctx->ctx.sess.uid = 0;
-			SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+			HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 			return 1;
 		}
 
-		SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 		appctx->st2 = STAT_ST_FIN;
 		/* fall through */
 
@@ -3252,10 +3252,10 @@
 static void cli_release_show_sess(struct appctx *appctx)
 {
 	if (appctx->st2 == STAT_ST_LIST) {
-		SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
 		if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users))
 			LIST_DEL(&appctx->ctx.sess.bref.users);
-		SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
+		HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
 	}
 }
 
@@ -3308,11 +3308,11 @@
 		return 1;
 
 	/* kill all the stream that are on this server */
-	SPIN_LOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 	list_for_each_entry_safe(strm, strm_bck, &sv->actconns, by_srv)
 		if (strm->srv_conn == sv)
 			stream_shutdown(strm, SF_ERR_KILLED);
-	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 	return 1;
 }
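
stream_alloc_work_buffer() above follows an allocate-or-wait shape: any stale registration on the shared wait queue is dropped under BUF_WQ_LOCK, the allocation is attempted, and only on failure is the caller queued back to be woken when buffers are released. A rough sketch under the same assumptions; the parameters and try_alloc() are hypothetical:

static int try_alloc(void);                        /* hypothetical allocation attempt */

static int alloc_or_wait(struct list *wait_node, struct list *wait_queue,
                         HA_SPINLOCK_T *wq_lock)
{
	if (!LIST_ISEMPTY(wait_node)) {            /* drop any previous registration */
		HA_SPIN_LOCK(BUF_WQ_LOCK, wq_lock);
		LIST_DEL(wait_node);
		LIST_INIT(wait_node);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, wq_lock);
	}

	if (try_alloc())
		return 1;

	HA_SPIN_LOCK(BUF_WQ_LOCK, wq_lock);
	LIST_ADDQ(wait_queue, wait_node);          /* wait until buffers are released */
	HA_SPIN_UNLOCK(BUF_WQ_LOCK, wq_lock);
	return 0;
}
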
 
diff --git a/src/task.c b/src/task.c
index a466de6..4555f2f 100644
--- a/src/task.c
+++ b/src/task.c
@@ -121,7 +121,7 @@
 	int ret = TICK_ETERNITY;
 
 	while (1) {
-		SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+		HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
   lookup_next:
 		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
 		if (!eb) {
@@ -162,11 +162,11 @@
 				__task_queue(task);
 			goto lookup_next;
 		}
-		SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+		HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 		task_wakeup(task, TASK_WOKEN_TIMER);
 	}
 
-	SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 	return ret;
 }
 
@@ -251,7 +251,7 @@
 		return;
 	}
 
-	SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 	rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
 
 	do {
@@ -289,7 +289,7 @@
 		if (!local_tasks_count)
 			break;
 
-		SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
 		final_tasks_count = 0;
 		for (i = 0; i < local_tasks_count ; i++) {
@@ -305,7 +305,7 @@
 				local_tasks[final_tasks_count++] = t;
 		}
 
-		SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 		for (i = 0; i < final_tasks_count ; i++) {
 			t = local_tasks[i];
 			t->state &= ~TASK_RUNNING;
@@ -321,7 +321,7 @@
 		}
 	} while (max_processed > 0);
 
-	SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 }
 
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
@@ -329,8 +329,8 @@
 {
 	memset(&timers, 0, sizeof(timers));
 	memset(&rqueue, 0, sizeof(rqueue));
-	SPIN_INIT(&wq_lock);
-	SPIN_INIT(&rq_lock);
+	HA_SPIN_INIT(&wq_lock);
+	HA_SPIN_INIT(&rq_lock);
 	pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 	if (!pool2_task)
 		return 0;
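
In the run-queue hunks of src/task.c above, the point to note is that TASK_RQ_LOCK is dropped before the selected tasks run and re-taken afterwards to requeue them, so no task body executes with the scheduler lock held. A loose sketch of that shape; the lock variable stands in for the real one and the three helpers are hypothetical:

static HA_SPINLOCK_T rq_lock;          /* stands in for the run-queue lock above */
static struct task *pick_next(void);   /* hypothetical: pops one runnable task */
static void run_one(struct task *t);   /* hypothetical: executes its handler */
static void requeue(struct task *t);   /* hypothetical: reinserts it if needed */

static void run_batch(void)
{
	struct task *t;

	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
	while ((t = pick_next()) != NULL) {
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);   /* never run a task under the lock */
		run_one(t);
		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);     /* re-take it to requeue */
		requeue(t);
	}
	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
}
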
diff --git a/src/vars.c b/src/vars.c
index 6e8e256..fef3685 100644
--- a/src/vars.c
+++ b/src/vars.c
@@ -118,11 +118,11 @@
 	struct var *var, *tmp;
 	unsigned int size = 0;
 
-	RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
 	list_for_each_entry_safe(var, tmp, &vars->head, l) {
 		size += var_clear(var);
 	}
-	RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
 	var_accounting_diff(vars, sess, strm, -size);
 }
 
@@ -134,11 +134,11 @@
 	struct var *var, *tmp;
 	unsigned int size = 0;
 
-	RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
 	list_for_each_entry_safe(var, tmp, &vars->head, l) {
 		size += var_clear(var);
 	}
-	RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
 
 	HA_ATOMIC_SUB(&vars->size, size);
 	HA_ATOMIC_SUB(&global.vars.size, size);
@@ -151,7 +151,7 @@
 	LIST_INIT(&vars->head);
 	vars->scope = scope;
 	vars->size = 0;
-	RWLOCK_INIT(&vars->rwlock);
+	HA_RWLOCK_INIT(&vars->rwlock);
 }
 
 /* This function declares a new variable name. It returns a pointer
@@ -214,9 +214,9 @@
 	}
 
 	if (alloc)
-		RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock);
+		HA_RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock);
 	else
-		RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock);
+		HA_RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock);
 
 
 	/* Look for existing variable name. */
@@ -263,9 +263,9 @@
 
   end:
 	if (alloc)
-		RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock);
+		HA_RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock);
 	else
-		RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock);
+		HA_RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock);
 
 	return res;
 }
@@ -312,12 +312,12 @@
 	if (vars->scope != var_desc->scope)
 		return 0;
 
-	RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
 	var = var_get(vars, var_desc->name);
 
 	/* check for the variable avalaibility */
 	if (!var) {
-		RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
+		HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
 		return 0;
 	}
 
@@ -327,7 +327,7 @@
 	smp_dup(smp);
 	smp->flags |= SMP_F_CONST;
 
-	RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
 	return 1;
 }
 
@@ -438,9 +438,9 @@
 	if (vars->scope != scope)
 		return 0;
 
-	RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
 	ret = sample_store(vars, name, smp);
-	RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
 	return ret;
 }
 
@@ -463,13 +463,13 @@
 		return 0;
 
 	/* Look for existing variable name. */
-	RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
 	var = var_get(vars, name);
 	if (var) {
 		size = var_clear(var);
 		var_accounting_diff(vars, smp->sess, smp->strm, -size);
 	}
-	RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
+	HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
 	return 1;
 }
 
@@ -914,5 +914,5 @@
 	http_res_keywords_register(&http_res_kws);
 	cfg_register_keywords(&cfg_kws);
 
-	RWLOCK_INIT(&var_names_rwlock);
+	HA_RWLOCK_INIT(&var_names_rwlock);
 }
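
Finally, the variable-name hunk above chooses the rwlock mode from the caller's intent: the write lock is taken only when a new name may be allocated, otherwise the cheaper read lock covers the lookup. A minimal sketch of that choice; names_lock stands in for var_names_rwlock and both helpers are hypothetical:

static HA_RWLOCK_T names_lock;                          /* stands in for var_names_rwlock */
static const char *find_name(const char *n);            /* hypothetical lookup */
static const char *add_name(const char *n);             /* hypothetical insertion */

static const char *lookup_or_register(const char *name, int alloc)
{
	const char *res;

	if (alloc)
		HA_RWLOCK_WRLOCK(VARS_LOCK, &names_lock);   /* may insert: exclusive */
	else
		HA_RWLOCK_RDLOCK(VARS_LOCK, &names_lock);   /* pure lookup: shared */

	res = find_name(name);
	if (!res && alloc)
		res = add_name(name);

	if (alloc)
		HA_RWLOCK_WRUNLOCK(VARS_LOCK, &names_lock);
	else
		HA_RWLOCK_RDUNLOCK(VARS_LOCK, &names_lock);
	return res;
}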