MINOR: memory: Change the flush_lock to a spinlock, and don't get it in alloc.

The flush_lock was introduced, mostly to be sure that pool_gc() will never
dereference a pointer that has been free'd. __pool_get_first() was acquiring
the lock too, the fear being that otherwise that pointer could get free'd later,
and then pool_gc() would attempt to dereference it. However, that can not
happen, because the only functions that can free a pointer, when using
lockless pools, are pool_gc() and pool_flush(), and as long as those two
are mutually exclusive, nobody will be able to free the pointer while
pool_gc() attempts to access it.
So change the flush_lock to a spinlock, and don't bother acquiring/releasing
it in __pool_get_first(); that way callers of __pool_get_first() won't have
to wait while the pool is flushed. The worst that can happen is we call
__pool_refill_alloc() while the pool is getting flushed, and memory can
get allocated just to be free'd.

This may help with github issue #552

This may be backported to 2.1, 2.0 and 1.9.

(cherry picked from commit 899fb8abdcda90e3b3c69f6945f83c4c1107c459)
Signed-off-by: Willy Tarreau <w@1wt.eu>
(cherry picked from commit 635c9a85464dacacae6ac20d4b8082288f7fbbd1)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/memory.c b/src/memory.c
index fc16339..bd2c1ba 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -142,7 +142,7 @@
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_INIT(&pool->lock);
 #else
-		HA_RWLOCK_INIT(&pool->flush_lock);
+		HA_SPIN_INIT(&pool->flush_lock);
 #endif
 	}
 	pool->users++;
@@ -225,7 +225,7 @@
 
 	if (!pool)
 		return;
-	HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
 	do {
 		cmp.free_list = pool->free_list;
 		cmp.seq = pool->seq;
@@ -233,7 +233,7 @@
 		new.seq = cmp.seq + 1;
 	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
 	__ha_barrier_atomic_store();
-	HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
 	next = cmp.free_list;
 	while (next) {
 		temp = next;
@@ -263,7 +263,7 @@
 		return;
 
 	list_for_each_entry(entry, &pools, list) {
-		HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
 		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
 			struct pool_free_list cmp, new;
 
@@ -280,7 +280,7 @@
 			free(cmp.free_list);
 			_HA_ATOMIC_SUB(&entry->allocated, 1);
 		}
-		HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
 	}
 
 	_HA_ATOMIC_STORE(&recurse, 0);