BUG/MINOR: pools/threads: don't ignore DEBUG_UAF on double-word CAS capable archs

Since commit cf975d4 ("MINOR: pools/threads: Implement lockless memory
pools."), we support lockless pools. However, the parts dedicated to
detecting use-after-free are absent from the lockless implementation,
making DEBUG_UAF useless in this situation.
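
For reference, here is a minimal sketch of the technique the lockless
pools rely on. The struct mirrors the pool_free_list below; cas_dw()
and lockless_pop() are hypothetical stand-ins for the arch-specific
double-word CAS primitive and for __pool_get_first(), using GCC's
generic builtin for the sake of the example:

    #include <stdint.h>

    struct pool_free_list {
        void **free_list;   /* head of the singly-linked free list */
        uintptr_t seq;      /* bumped on each update to defeat ABA */
    };

    /* illustration only: a double-word CAS; real code would use an
     * arch-specific primitive such as x86-64's cmpxchg16b
     */
    static inline int cas_dw(struct pool_free_list *dst,
                             struct pool_free_list *old,
                             struct pool_free_list *new)
    {
        return __atomic_compare_exchange(dst, old, new, 0,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST);
    }

    static void *lockless_pop(struct pool_free_list *lst)
    {
        struct pool_free_list cmp, next;

        do {
            cmp = *lst;  /* may be torn; the CAS below then fails */
            if (!cmp.free_list)
                return NULL;                 /* empty: caller refills */
            next.free_list = *cmp.free_list; /* unlink the head */
            next.seq = cmp.seq + 1;          /* defeats ABA on reuse */
        } while (!cas_dw(lst, &cmp, &next));
        return cmp.free_list;
    }

The seq field is the reason a double-word CAS is required at all: with
a single-word CAS on the head pointer alone, an item popped and pushed
back between the read and the CAS (the ABA problem) would go unnoticed.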

The present patch sets a new define, CONFIG_HAP_LOCKLESS_POOLS, when a
compatible architecture is detected and pool debugging is not requested,
and makes use of it everywhere in the pools and buffers functions. This
way, enabling DEBUG_UAF automatically disables the lockless version.
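
The incompatibility is easy to see from what DEBUG_UAF is meant to do.
Below is a minimal sketch of the idea only (hypothetical helper names,
assuming 4kB pages), not HAProxy's exact code: each object gets its own
pages from mmap() and they are unmapped on release, so a use-after-free
faults immediately instead of silently corrupting memory. The lockless
code path simply never included these parts:

    #include <stddef.h>
    #include <sys/mman.h>

    #define UAF_PAGE_MASK 4095UL   /* assumes 4kB pages for brevity */

    /* map the object on its own page(s) */
    static void *uaf_alloc_area(size_t size)
    {
        void *area = mmap(NULL, (size + UAF_PAGE_MASK) & ~UAF_PAGE_MASK,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return (area == MAP_FAILED) ? NULL : area;
    }

    /* return the pages to the OS: any later access faults */
    static void uaf_free_area(void *area, size_t size)
    {
        munmap(area, (size + UAF_PAGE_MASK) & ~UAF_PAGE_MASK);
    }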

No backport is needed as this is purely 1.9-dev.
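
As a usage note, the gating condition added below also honours
DEBUG_NO_LOCKLESS_POOLS, so the locked implementation can be forced for
debugging even without DEBUG_UAF; in a typical build this would mean
passing something like DEBUG=-DDEBUG_UAF (or
DEBUG=-DDEBUG_NO_LOCKLESS_POOLS) on the make command line, assuming the
usual DEBUG variable.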
diff --git a/include/common/buffer.h b/include/common/buffer.h
index 20070cc..c920298 100644
--- a/include/common/buffer.h
+++ b/include/common/buffer.h
@@ -735,7 +735,7 @@
 		return *buf;
 
 	*buf = &buf_wanted;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
 
@@ -743,7 +743,7 @@
 	if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
 		b = __pool_get_first(pool_head_buffer);
 		if (likely(b)) {
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
 			b->size = pool_head_buffer->size - sizeof(struct buffer);
@@ -756,7 +756,7 @@
 	/* slow path, uses malloc() */
 	b = __pool_refill_alloc(pool_head_buffer, margin);
 
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
 #endif
 
diff --git a/include/common/config.h b/include/common/config.h
index f06ba3b..acd6b20 100644
--- a/include/common/config.h
+++ b/include/common/config.h
@@ -47,6 +47,13 @@
 #define THREAD_LOCAL
 #endif
 
+/* On architectures supporting threads and double-word CAS, we can implement
+ * lock-less memory pools. This isn't supported for debugging modes however.
+ */
+#if !defined(DEBUG_NO_LOCKLESS_POOLS) && defined(USE_THREAD) && defined(HA_HAVE_CAS_DW) && !defined(DEBUG_UAF)
+#define CONFIG_HAP_LOCKLESS_POOLS
+#endif
+
 /* CONFIG_HAP_INLINE_FD_SET
  * This makes use of inline FD_* macros instead of calling equivalent
  * functions. Benchmarks on a Pentium-M show that using functions is
diff --git a/include/common/memory.h b/include/common/memory.h
index 52ca88e..bf77f95 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -48,7 +48,7 @@
 #define POOL_LINK(pool, item) ((void **)(item))
 #endif
 
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
 struct pool_free_list {
 	void **free_list;
 	uintptr_t seq;
@@ -57,7 +57,7 @@
 
 struct pool_head {
 	void **free_list;
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
 	uintptr_t seq;
 #else
 	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
@@ -123,7 +123,7 @@
  */
 void *pool_destroy(struct pool_head *pool);
 
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
 /*
  * Returns a pointer to type <type> taken from the pool <pool_type> if
  * available, otherwise returns NULL. No malloc() is attempted, and poisonning
@@ -226,7 +226,7 @@
 	}
 }
 
-#else
+#else /* CONFIG_HAP_LOCKLESS_POOLS */
 /*
  * Returns a pointer to type <type> taken from the pool <pool_type> if
  * available, otherwise returns NULL. No malloc() is attempted, and poisonning
@@ -377,7 +377,7 @@
 		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	}
 }
-#endif /* HA_HAVE_CAS_DW */
+#endif /* CONFIG_HAP_LOCKLESS_POOLS */
 #endif /* _COMMON_MEMORY_H */
 
 /*
diff --git a/src/memory.c b/src/memory.c
index 929a04a..2d1a5e7 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -93,13 +93,13 @@
 		LIST_ADDQ(start, &pool->list);
 	}
 	pool->users++;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 	HA_SPIN_INIT(&pool->lock);
 #endif
 	return pool;
 }
 
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
 /* Allocates new entries for pool <pool> until there are at least <avail> + 1
  * available, then returns the last one for immediate use, so that at least
  * <avail> are left available in the pool upon return. NULL is returned if the
@@ -221,7 +221,7 @@
 
 	HA_ATOMIC_STORE(&recurse, 0);
 }
-#else
+#else /* CONFIG_HAP_LOCKLESS_POOLS */
 
 /* Allocates new entries for pool <pool> until there are at least <avail> + 1
  * available, then returns the last one for immediate use, so that at least
@@ -352,7 +352,7 @@
 		pool->users--;
 		if (!pool->users) {
 			LIST_DEL(&pool->list);
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 			HA_SPIN_DESTROY(&pool->lock);
 #endif
 			free(pool);
@@ -371,7 +371,7 @@
 	allocated = used = nbpools = 0;
 	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
 	list_for_each_entry(entry, &pools, list) {
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
 #endif
 		chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
@@ -382,7 +382,7 @@
 		allocated += entry->allocated * entry->size;
 		used += entry->used * entry->size;
 		nbpools++;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
 #endif
 	}