MINOR: pool: allocate from the shared cache through the local caches
One of the current thread scaling challenges for the pools is the
contention on the shared caches. There is no longer any situation
where we have a shared cache without a local cache, so we can
technically afford to transfer objects from the shared cache to the
local cache before returning them to the user via the regular path.
This adds a little more work per object per miss, but will permit
batch processing later.
This patch simply moves pool_get_from_shared_cache() to pool.c under
the new name pool_refill_local_from_shared(). The function no longer
returns anything; instead it places the allocated object at the head
of the local cache.
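
For context, the moved code relies on HAProxy's lock-free free_list
protocol: the list head is swapped with a POOL_BUSY marker to take
exclusive ownership of the chain, and storing the successor back
releases it. Below is a minimal standalone sketch of that protocol
using C11 atomics; the names (BUSY, free_list, shared_pop) are
illustrative stand-ins, not HAProxy's own identifiers.

    #include <stdatomic.h>
    #include <sched.h>

    #define BUSY ((void *)1)  /* never a valid object address */

    /* head of an intrusive LIFO; each object's first word is "next" */
    static _Atomic(void *) free_list;

    /* Pops one object, or returns NULL if the list is empty. While the
     * head holds BUSY, a peer owns the chain and we must wait, because
     * dereferencing an element that could be freed and recycled under
     * our feet would be unsafe.
     */
    static void *shared_pop(void)
    {
        void *ret = atomic_load(&free_list);

        do {
            while (ret == BUSY) {       /* chain locked by a peer */
                sched_yield();          /* HAProxy uses a cpu_relax hint */
                ret = atomic_load(&free_list);
            }
            if (ret == NULL)
                return NULL;            /* nothing to take */
        } while ((ret = atomic_exchange(&free_list, BUSY)) == BUSY);

        if (ret == NULL) {              /* emptied between load and xchg */
            atomic_store(&free_list, NULL);
            return NULL;
        }
        /* publish the successor; this releases the "lock" */
        atomic_store(&free_list, *(void **)ret);
        return ret;
    }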
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 896fb61..81b392f 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -120,13 +120,9 @@
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
-/* this is essentially used with local caches and a fast malloc library,
- * which may sometimes be faster than the local shared pools because it
- * will maintain its own per-thread arenas.
- */
-static inline void *pool_get_from_shared_cache(struct pool_head *pool)
+static inline void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
{
- return NULL;
+ /* ignored without shared pools */
}
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
@@ -136,44 +132,7 @@
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
-/*
- * Returns a pointer to type <type> taken from the pool <pool_type> if
- * available, otherwise returns NULL. No malloc() is attempted, and poisonning
- * is never performed. The purpose is to get the fastest possible allocation.
- */
-static inline void *pool_get_from_shared_cache(struct pool_head *pool)
-{
- void *ret;
-
- /* we'll need to reference the first element to figure the next one. We
- * must temporarily lock it so that nobody allocates then releases it,
- * or the dereference could fail.
- */
- ret = pool->free_list;
- do {
- while (unlikely(ret == POOL_BUSY)) {
- __ha_cpu_relax();
- ret = _HA_ATOMIC_LOAD(&pool->free_list);
- }
- if (ret == NULL)
- return ret;
- } while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
-
- if (unlikely(ret == NULL)) {
- _HA_ATOMIC_STORE(&pool->free_list, NULL);
- goto out;
- }
-
- /* this releases the lock */
- _HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
- _HA_ATOMIC_INC(&pool->used);
-
- /* keep track of where the element was allocated from */
- POOL_DEBUG_SET_MARK(pool, ret);
- out:
- __ha_barrier_atomic_store();
- return ret;
-}
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
* Both the pool and the pointer must be valid. Use pool_free() for normal
@@ -218,8 +177,11 @@
struct pool_cache_head *ph;
ph = &pool->cache[tid];
- if (LIST_ISEMPTY(&ph->list))
- return pool_get_from_shared_cache(pool);
+ if (unlikely(LIST_ISEMPTY(&ph->list))) {
+ pool_refill_local_from_shared(pool, ph);
+ if (LIST_ISEMPTY(&ph->list))
+ return NULL;
+ }
item = LIST_NEXT(&ph->list, typeof(item), by_pool);
ph->count--;
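
The caller-side change above is the key behavioral point: on a local
cache miss, the refill is attempted once, the cache is re-checked, and
only if it is still empty does the function return NULL so that the
regular slow path (malloc) takes over. Here is a simplified sketch of
that shape, reusing shared_pop() from the earlier sketch; local_head,
refill_local_from_shared and cache_get are illustrative names, not
HAProxy APIs.

    struct cache_item {
        struct cache_item *next;   /* stand-in for the by_pool link */
    };

    static __thread struct cache_item *local_head;

    static void refill_local_from_shared(void)
    {
        struct cache_item *item = shared_pop();

        if (item) {                /* push onto the local cache head */
            item->next = local_head;
            local_head = item;
        }
    }

    static void *cache_get(void)
    {
        struct cache_item *item = local_head;

        if (item == NULL) {
            refill_local_from_shared();   /* may still leave it empty */
            item = local_head;
            if (item == NULL)
                return NULL;              /* caller falls back to malloc */
        }
        local_head = item->next;
        return item;
    }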
diff --git a/src/pool.c b/src/pool.c
index efebb4a..45241fe 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -388,6 +388,52 @@
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
+/* Tries to refill the local cache <pch> from the shared one for pool <pool>.
+ * This is only used when pools are in use and shared pools are enabled. No
+ * malloc() is attempted, and poisoning is never performed. The purpose is to
+ * get the fastest possible refilling so that the caller can easily check if
+ * the cache has enough objects for its use.
+ */
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
+{
+ struct pool_cache_item *item;
+ void *ret;
+
+ /* we'll need to reference the first element to figure the next one. We
+ * must temporarily lock it so that nobody allocates then releases it,
+ * or the dereference could fail.
+ */
+ ret = _HA_ATOMIC_LOAD(&pool->free_list);
+ do {
+ while (unlikely(ret == POOL_BUSY)) {
+ __ha_cpu_relax();
+ ret = _HA_ATOMIC_LOAD(&pool->free_list);
+ }
+ if (ret == NULL)
+ return;
+ } while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
+
+ if (unlikely(ret == NULL)) {
+ HA_ATOMIC_STORE(&pool->free_list, NULL);
+ return;
+ }
+
+ /* this releases the lock */
+ HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
+ HA_ATOMIC_INC(&pool->used);
+
+ /* keep track of where the element was allocated from */
+ POOL_DEBUG_SET_MARK(pool, ret);
+
+ /* now store the retrieved object into the local cache */
+ item = ret;
+ LIST_INSERT(&pch->list, &item->by_pool);
+ LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+ pch->count++;
+ pool_cache_count++;
+ pool_cache_bytes += pool->size;
+}
+
/*
* This function frees whatever can be freed in pool <pool>.
*/
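
The commit message's mention of batch processing hints at the payoff:
once the head has been exchanged for POOL_BUSY, the thread owns the
entire chain, so a later version could detach several objects per lock
acquisition instead of one. Below is a hypothetical sketch of that
extension (not part of this patch), again reusing the free_list/BUSY
definitions from the first sketch.

    /* Pops up to <max> objects into <out>; returns how many were taken.
     * The whole chain is owned while the head holds BUSY, so walking it
     * is safe until the remainder is published back.
     */
    static unsigned shared_pop_batch(void **out, unsigned max)
    {
        void *ret = atomic_load(&free_list);
        unsigned n = 0;

        do {
            while (ret == BUSY) {
                sched_yield();
                ret = atomic_load(&free_list);
            }
            if (ret == NULL)
                return 0;
        } while ((ret = atomic_exchange(&free_list, BUSY)) == BUSY);

        while (ret != NULL && n < max) {
            out[n++] = ret;
            ret = *(void **)ret;       /* follow the chain we own */
        }
        atomic_store(&free_list, ret); /* publish remainder: releases */
        return n;
    }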