CLEANUP: pools: re-merge pool_refill_alloc() and __pool_refill_alloc()

They were strictly equivalent, let's remerge them and rename them to
pool_alloc_nocache() as it's the call which performs a real allocation
which neither checks nor updates the cache. The only difference in the
past was that the former took the lock and the latter did not, but now
the lock is no longer needed at this stage since the pool's list is not
touched.

In addition, given that the "avail" argument is no longer used by the
function nor by its callers, let's drop it.
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index d01c6d7..a62a93b 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -48,8 +48,7 @@
 /* poison each newly allocated area with this byte if >= 0 */
 extern int mem_poison_byte;
 
-void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
-void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);
+void *pool_alloc_nocache(struct pool_head *pool);
 void dump_pools_to_trash();
 void dump_pools(void);
 int pool_total_failures();
@@ -279,7 +278,7 @@
 	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 #endif
 	if ((p = __pool_get_first(pool)) == NULL)
-		p = __pool_refill_alloc(pool, 0);
+		p = pool_alloc_nocache(pool);
 #if !defined(CONFIG_HAP_LOCKLESS_POOLS) && !defined(CONFIG_HAP_NO_GLOBAL_POOLS)
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 #endif
diff --git a/src/dynbuf.c b/src/dynbuf.c
index 84e1ca2..0dce210 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -49,7 +49,7 @@
 		pool_head_buffer->limit = global.tune.buf_limit;
 
 	for (done = 0; done < pool_head_buffer->minavail - 1; done++) {
-		buffer = pool_refill_alloc(pool_head_buffer, 1);
+		buffer = pool_alloc_nocache(pool_head_buffer);
 		if (!buffer)
 			return 0;
 		pool_free(pool_head_buffer, buffer);
diff --git a/src/pool.c b/src/pool.c
index cf1dd86..61b7a7a 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -146,9 +146,13 @@
 }
 #endif
 
-/* simply fall back on the default OS' allocator */
-
-void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
+/* Tries to allocate an object for the pool <pool> using the system's allocator
+ * and directly returns it. The pool's counters are updated but the object is
+ * never cached, so this is usable with and without local or shared caches.
+ * This may be called with or without the pool lock held, so it must not use
+ * the pool's lock.
+ */
+void *pool_alloc_nocache(struct pool_head *pool)
 {
 	int allocated = pool->allocated;
 	int limit = pool->limit;
@@ -182,14 +186,6 @@
 	return ptr;
 }
 
-/* legacy stuff */
-void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
-{
-	void *ptr;
-
-	ptr = __pool_refill_alloc(pool, avail);
-	return ptr;
-}
 
 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)