MINOR: pools: split pool_free() in the lockfree variant

This separates the validity tests from the code committing the object
to the pool, in order to ease insertion of the thread-local cache.
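
As an illustration of the intended layering (a rough sketch only:
pool_put_to_cache() and its return convention are hypothetical and not
part of this patch), a later cache-aware pool_free() could keep its
validity checks and only fall back to __pool_free() when the object
cannot be kept locally:

  static inline void pool_free(struct pool_head *pool, void *ptr)
  {
          if (likely(ptr != NULL)) {
                  /* the DEBUG_MEMORY_POOLS double-free check stays here */
                  if (!pool_put_to_cache(pool, ptr))  /* hypothetical helper */
                          __pool_free(pool, ptr);     /* cache full: commit to the shared pool */
          }
  }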
diff --git a/include/common/memory.h b/include/common/memory.h
index cf86969..af17649 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -215,6 +215,21 @@
 	return p;
 }
 
+/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
+ * Both the pool and the pointer must be valid. Use pool_free() for normal
+ * operations.
+ */
+static inline void __pool_free(struct pool_head *pool, void *ptr)
+{
+	void *free_list = pool->free_list;
+
+	do {
+		*POOL_LINK(pool, ptr) = (void *)free_list;
+		__ha_barrier_store();
+	} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr));
+	HA_ATOMIC_SUB(&pool->used, 1);
+}
+
 /*
  * Puts a memory area back to the corresponding pool.
  * Items are chained directly through a pointer that
@@ -227,19 +242,12 @@
 static inline void pool_free(struct pool_head *pool, void *ptr)
 {
         if (likely(ptr != NULL)) {
-		void *free_list;
 #ifdef DEBUG_MEMORY_POOLS
 		/* we'll get late corruption if we refill to the wrong pool or double-free */
 		if (*POOL_LINK(pool, ptr) != (void *)pool)
 			*(volatile int *)0 = 0;
 #endif
-		free_list = pool->free_list;
-		do {
-			*POOL_LINK(pool, ptr) = (void *)free_list;
-			__ha_barrier_store();
-		} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr));
-
-		HA_ATOMIC_SUB(&pool->used, 1);
+		__pool_free(pool, ptr);
 	}
 }
 
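
For reference, the loop in __pool_free() is the classic lock-free stack
push: link the object to the last observed head, then CAS the head,
retrying with the refreshed value on failure. A standalone sketch of the
same pattern using C11 atomics (not HAProxy's HA_ATOMIC_* macros, just
an illustration of the technique):

  #include <stdatomic.h>

  struct node { struct node *next; };

  static void lf_push(_Atomic(struct node *) *head, struct node *n)
  {
          struct node *old = atomic_load(head);

          do {
                  n->next = old;  /* chain to the last observed head */
          } while (!atomic_compare_exchange_weak(head, &old, n));
          /* on failure, <old> is refreshed with the current head and we retry */
  }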