MINOR: pools: factor the release code into pool_put_to_os()
There are two levels of freeing to the OS:
- code that wants to keep the pool's usage counters updated uses
pool_free_area() and handles the counters itself. That's what
pool_put_to_shared_cache() does in the no-global-pools case.
- code that does not want to update the counters (they were already
  updated); that code only calls pool_free_area().
Let's extract these calls to establish the symmetry with pool_get_from_os()
and pool_alloc_nocache(), resulting in pool_put_to_os() (which only updates
the allocated counter) and pool_free_nocache() (which also updates the used
counter). This will later allow us to simplify the generic code.
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 8fce593..c1faa27 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -49,7 +49,9 @@
extern int mem_poison_byte;
void *pool_get_from_os(struct pool_head *pool);
+void pool_put_to_os(struct pool_head *pool, void *ptr);
void *pool_alloc_nocache(struct pool_head *pool);
+void pool_free_nocache(struct pool_head *pool, void *ptr);
void dump_pools_to_trash();
void dump_pools(void);
int pool_total_failures();
@@ -139,10 +141,7 @@
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
- _HA_ATOMIC_DEC(&pool->used);
- _HA_ATOMIC_DEC(&pool->allocated);
- swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
- pool_free_area(ptr, pool->size + POOL_EXTRA);
+ pool_free_nocache(pool, ptr);
}
#elif defined(CONFIG_HAP_LOCKLESS_POOLS)
@@ -190,8 +189,7 @@
_HA_ATOMIC_DEC(&pool->used);
if (unlikely(pool_is_crowded(pool))) {
- pool_free_area(ptr, pool->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&pool->allocated);
+ pool_put_to_os(pool, ptr);
} else {
do {
*POOL_LINK(pool, ptr) = (void *)free_list;
@@ -258,8 +256,7 @@
if (ptr) {
/* still not freed */
- pool_free_area(ptr, pool->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&pool->allocated);
+ pool_put_to_os(pool, ptr);
}
swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
diff --git a/src/pool.c b/src/pool.c
index f980922..0956976 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -139,6 +139,15 @@
}
+/* Releases a pool item back to the operating system and atomically updates
+ * the pool's allocated counter.
+ */
+void pool_put_to_os(struct pool_head *pool, void *ptr)
+{
+ pool_free_area(ptr, pool->size + POOL_EXTRA);
+ _HA_ATOMIC_DEC(&pool->allocated);
+}
+
#ifdef CONFIG_HAP_POOLS
/* Evicts some of the oldest objects from the local cache, pushing them to the
* global pool.
@@ -190,6 +199,17 @@
return ptr;
}
+/* Releases a pool item back to the OS and keeps the pool's counters up to
+ * date. This is always defined even when pools are not enabled, so that
+ * usage stats are still maintained.
+ */
+void pool_free_nocache(struct pool_head *pool, void *ptr)
+{
+ _HA_ATOMIC_DEC(&pool->used);
+ swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+ pool_put_to_os(pool, ptr);
+}
+
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
@@ -231,8 +251,7 @@
while (next) {
temp = next;
next = *POOL_LINK(pool, temp);
- pool_free_area(temp, pool->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&pool->allocated);
+ pool_put_to_os(pool, temp);
}
pool->free_list = next;
/* here, we should have pool->allocated == pool->used */
@@ -265,8 +284,7 @@
new.seq = cmp.seq + 1;
if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
continue;
- pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&entry->allocated);
+ pool_put_to_os(entry, cmp.free_list);
}
}
@@ -299,8 +317,7 @@
}
pool->free_list = *POOL_LINK(pool, temp);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
- pool_free_area(temp, pool->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&pool->allocated);
+ pool_put_to_os(pool, temp);
}
/* here, we should have pool->allocated == pool->used */
}
@@ -325,8 +342,7 @@
(int)(entry->allocated - entry->used) > (int)entry->minavail) {
temp = entry->free_list;
entry->free_list = *POOL_LINK(entry, temp);
- pool_free_area(temp, entry->size + POOL_EXTRA);
- _HA_ATOMIC_DEC(&entry->allocated);
+ pool_put_to_os(entry, temp);
}
}