MINOR: pool: check for pool's fullness outside of pool_put_to_shared_cache()

Instead of letting pool_put_to_shared_cache() pass the object to the
underlying OS layer when there's no more room, let's have the caller
check if the pool is full and either call pool_put_to_shared_cache()
or call pool_free_nocache().

Doing this noticeably simplifies the code, as this function now only has
to deal with a pool and an item, and only for cases where there are
local caches and shared caches. As the code was simplified and the
calls more isolated, the function was moved to pool.c.

Note that it's only called from pool_evict_from_local_cache{,s}() and
that a part of its logic might very well move there when dealing with
batches.
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 1f9ca21..ec73cbc 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -123,14 +123,15 @@
 	/* ignored without shared pools */
 }
 
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
+static inline void pool_put_to_shared_cache(struct pool_head *pool, void *item)
 {
-	pool_free_nocache(pool, ptr);
+	/* ignored without shared pools */
 }
 
 #else /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
 void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
+void pool_put_to_shared_cache(struct pool_head *pool, void *item);
 
 /* returns true if the pool is considered to have too many free objects */
 static inline int pool_is_crowded(const struct pool_head *pool)
@@ -139,32 +140,6 @@
 	       (int)(pool->allocated - pool->used) >= pool->minavail;
 }
 
-/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
- * Both the pool and the pointer must be valid. Use pool_free() for normal
- * operations.
- */
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
-{
-	void **free_list;
-
-	if (unlikely(pool_is_crowded(pool))) {
-		pool_free_nocache(pool, ptr);
-		return;
-	}
-
-	_HA_ATOMIC_DEC(&pool->used);
-	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-	do {
-		while (unlikely(free_list == POOL_BUSY)) {
-			__ha_cpu_relax();
-			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-		}
-		_HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
-		__ha_barrier_atomic_store();
-	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-	__ha_barrier_atomic_store();
-	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-}
 
 #endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
diff --git a/src/pool.c b/src/pool.c
index 45241fe..7b7dd99 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -320,7 +320,11 @@
 		pool_cache_count--;
 		LIST_DELETE(&item->by_pool);
 		LIST_DELETE(&item->by_lru);
-		pool_put_to_shared_cache(pool, item);
+
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
 	}
 }
 
@@ -345,7 +349,10 @@
 		ph->count--;
 		pool_cache_count--;
 		pool_cache_bytes -= pool->size;
-		pool_put_to_shared_cache(pool, item);
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
 	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
 }
 
@@ -434,6 +441,29 @@
 	pool_cache_bytes += pool->size;
 }
 
+/* Adds cache item entry <item> to the shared cache. The caller is advised to
+ * first check using pool_is_crowded() if it's wise to add this object there.
+ * Both the pool and the item must be valid. Use pool_free() for normal
+ * operations.
+ */
+void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+{
+	void **free_list;
+
+	_HA_ATOMIC_DEC(&pool->used);
+	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+	do {
+		while (unlikely(free_list == POOL_BUSY)) {
+			__ha_cpu_relax();
+			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+		}
+		_HA_ATOMIC_STORE((void **)item, (void *)free_list);
+		__ha_barrier_atomic_store();
+	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
+	__ha_barrier_atomic_store();
+	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+}
+
 /*
  * This function frees whatever can be freed in pool <pool>.
  */