MEDIUM: pools: move the compat code from trim_all_pools() to malloc_trim()
We already have some generic code in trim_all_pools() to implement the
equivalent of malloc_trim() on jemalloc and macos. Instead of keeping the
logic there, let's just move it to our own malloc_trim() implementation
so that we can unify the mechanism and the logic. Now any low-level code
calling malloc_trim() will either be disabled by haproxy's config if the
user decides to, or will be mapped to the equivalent mechanism if malloc()
was intercepted by a preloaded jemalloc.
trim_all_pools() preserves the benefit of serializing threads (which we
must not impose on other libs which could come with their own threads).
It means that our own code should mostly use trim_all_pools() instead of
calling malloc_trim() directly.
diff --git a/src/pool.c b/src/pool.c
index aff8581..5a515bc 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -120,44 +120,10 @@
{
int isolated = thread_isolated();
- if (disable_trim)
- return;
-
if (!isolated)
thread_isolate();
- if (my_mallctl) {
- unsigned int i, narenas = 0;
- size_t len = sizeof(narenas);
-
- if (my_mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
- for (i = 0; i < narenas; i ++) {
- char mib[32] = {0};
- snprintf(mib, sizeof(mib), "arena.%u.purge", i);
- (void)my_mallctl(mib, NULL, NULL, NULL, 0);
- }
- }
- } else {
-#if defined(HA_HAVE_MALLOC_TRIM)
- if (using_default_allocator)
- malloc_trim(0);
-#elif defined(HA_HAVE_MALLOC_ZONE)
- if (using_default_allocator) {
- vm_address_t *zones;
- unsigned int i, nzones;
-
- if (malloc_get_all_zones(0, NULL, &zones, &nzones) == KERN_SUCCESS) {
- for (i = 0; i < nzones; i ++) {
- malloc_zone_t *zone = (malloc_zone_t *)zones[i];
-
- /* we cannot purge anonymous zones */
- if (zone->zone_name)
- malloc_zone_pressure_relief(zone, 0);
- }
- }
- }
-#endif
- }
+ malloc_trim(0);
if (!isolated)
thread_release();
@@ -230,10 +196,52 @@
if (disable_trim)
return ret;
+ if (my_mallctl) {
+ /* here we're on jemalloc and malloc_trim() is called either
+ * by haproxy or another dependency (the worst case that
+ * normally crashes). Instead of just failing, we can actually
+ * emulate it so let's do it now.
+ */
+ unsigned int i, narenas = 0;
+ size_t len = sizeof(narenas);
+
- if (_malloc_trim && using_default_allocator) {
+ if (my_mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
+ for (i = 0; i < narenas; i ++) {
+ char mib[32] = {0};
+ snprintf(mib, sizeof(mib), "arena.%u.purge", i);
+ (void)my_mallctl(mib, NULL, NULL, NULL, 0);
+ ret = 1; // success
+ }
+ }
+ }
+ else if (!using_default_allocator) {
+ /* special allocators that can be LD_PRELOADed end here */
+ ret = 0; // did nothing
+ }
+ else if (_malloc_trim) {
/* we're typically on glibc and not overridden */
ret = _malloc_trim(pad);
}
+#if defined(HA_HAVE_MALLOC_ZONE)
+ else {
+ /* we're on MacOS, there's an equivalent mechanism */
+ vm_address_t *zones;
+ unsigned int i, nzones;
+
+ if (malloc_get_all_zones(0, NULL, &zones, &nzones) == KERN_SUCCESS) {
+ for (i = 0; i < nzones; i ++) {
+ malloc_zone_t *zone = (malloc_zone_t *)zones[i];
+
+ /* we cannot purge anonymous zones */
+ if (zone->zone_name) {
+ malloc_zone_pressure_relief(zone, 0);
+ ret = 1; // success
+ }
+ }
+ }
+ }
+#endif
+ /* here we have ret=0 if nothing was released, or 1 if some were */
return ret;
}