MINOR: pools: add a new debugging flag POOL_DBG_INTEGRITY

The test deciding whether or not to enforce integrity checks on cached
objects is now performed at runtime and conditioned by this new
debugging flag. While inflating the code size was not a concern when
the two functions were only compiled with DEBUG_POOL_INTEGRITY set,
keeping them static inline in pool.h would now affect every build, so
they were moved to pool.c to limit the impact. In
pool_get_from_cache(), the fast code path remains fast: both flags are
tested at once, and a slower branch is opened only when either
POOL_DBG_COLD_FIRST or POOL_DBG_INTEGRITY is set.
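
For illustration, here is a minimal standalone sketch (not part of the
patch: pool_debugging and the two flag values match the code below,
but get_object() is made up and everything else is simplified) of the
fast-path pattern described above: a single test on the OR of both
flags keeps the common case branch-free, and each flag is only
re-examined inside the cold branch.

    /* sketch: runtime debugging flags gating a slow path */
    #include <stdio.h>

    #define POOL_DBG_COLD_FIRST 0x00000004
    #define POOL_DBG_INTEGRITY  0x00000008

    static unsigned int pool_debugging; /* runtime set of POOL_DBG_* flags */

    static void get_object(void)
    {
        /* fast path: one test covers both debugging options */
        if (pool_debugging & (POOL_DBG_COLD_FIRST | POOL_DBG_INTEGRITY)) {
            /* slow branch, only entered when debugging is enabled */
            if (pool_debugging & POOL_DBG_COLD_FIRST)
                puts("picking the coldest object");
            if (pool_debugging & POOL_DBG_INTEGRITY)
                puts("checking the fill pattern");
        }
        puts("object returned");
    }

    int main(void)
    {
        get_object();                         /* fast path only */
        pool_debugging |= POOL_DBG_INTEGRITY; /* enabled at runtime */
        get_object();                         /* slow branch taken */
        return 0;
    }
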
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
index a1a9f65..217b754 100644
--- a/include/haproxy/pool-t.h
+++ b/include/haproxy/pool-t.h
@@ -44,6 +44,7 @@
 #define POOL_DBG_FAIL_ALLOC 0x00000001  // randomly fail memory allocations
 #define POOL_DBG_DONT_MERGE 0x00000002  // do not merge same-size pools
 #define POOL_DBG_COLD_FIRST 0x00000004  // pick cold objects first
+#define POOL_DBG_INTEGRITY  0x00000008  // perform integrity checks on cache
 
 
 /* This is the head of a thread-local cache */
@@ -52,9 +53,7 @@
 	unsigned int count;  /* number of objects in this pool */
 	unsigned int tid;    /* thread id, for debugging only */
 	struct pool_head *pool; /* assigned pool, for debugging only */
-#if defined(DEBUG_POOL_INTEGRITY)
 	ulong fill_pattern;  /* pattern used to fill the area on free */
-#endif
 } THREAD_ALIGNED(64);
 
 /* This represents one item stored in the thread-local cache. <by_pool> links
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 1d22424..296747a 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -143,6 +143,8 @@
 void pool_evict_from_local_cache(struct pool_head *pool, int full);
 void pool_evict_from_local_caches(void);
 void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller);
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
 
 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
 
@@ -201,64 +203,6 @@
  * cache first, then from the second level if it exists.
  */
 
-#if defined(DEBUG_POOL_INTEGRITY)
-
-/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
- * up to <size> bytes. The item part is left untouched.
- */
-static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-	ulong *ptr = (ulong *)item;
-	uint ofs;
-	ulong u;
-
-	if (size <= sizeof(*item))
-		return;
-
-	/* Upgrade the fill_pattern to change about half of the bits
-	 * (to be sure to catch static flag corruption), and apply it.
-	 */
-	u = pch->fill_pattern += ~0UL / 3; // 0x55...55
-	ofs = sizeof(*item) / sizeof(*ptr);
-	while (ofs < size / sizeof(*ptr))
-		ptr[ofs++] = u;
-}
-
-/* check for a pool_cache_item integrity after extracting it from the cache. It
- * must have been previously initialized using pool_fill_pattern(). If any
- * corruption is detected, the function provokes an immediate crash.
- */
-static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-	const ulong *ptr = (const ulong *)item;
-	uint ofs;
-	ulong u;
-
-	if (size <= sizeof(*item))
-		return;
-
-	/* let's check that all words past *item are equal */
-	ofs = sizeof(*item) / sizeof(*ptr);
-	u = ptr[ofs++];
-	while (ofs < size / sizeof(*ptr)) {
-		if (unlikely(ptr[ofs] != u))
-			ABORT_NOW();
-		ofs++;
-	}
-}
-
-#else
-
-static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-}
-
-static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-}
-
-#endif
-
 /* Tries to retrieve an object from the local pool cache corresponding to pool
  * <pool>. If none is available, tries to allocate from the shared cache, and
  * returns NULL if nothing is available.
@@ -275,19 +219,21 @@
 			return NULL;
 	}
 
-	if (unlikely(pool_debugging & POOL_DBG_COLD_FIRST)) {
+	/* allocate hottest objects first */
+	item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+
+	if (unlikely(pool_debugging & (POOL_DBG_COLD_FIRST|POOL_DBG_INTEGRITY))) {
 		/* allocate oldest objects first so as to keep them as long as possible
 		 * in the cache before being reused and maximizing the chance to detect
 		 * an overwrite.
 		 */
-		item = LIST_PREV(&ph->list, typeof(item), by_pool);
-	} else {
-		/* allocate hottest objects first */
-		item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+		if (pool_debugging & POOL_DBG_COLD_FIRST)
+			item = LIST_PREV(&ph->list, typeof(item), by_pool);
+
+		if (pool_debugging & POOL_DBG_INTEGRITY)
+			pool_check_pattern(ph, item, pool->size);
 	}
-#if defined(DEBUG_POOL_INTEGRITY)
-	pool_check_pattern(ph, item, pool->size);
-#endif
+
 	BUG_ON(&item->by_pool == &ph->list);
 	LIST_DELETE(&item->by_pool);
 	LIST_DELETE(&item->by_lru);
diff --git a/src/pool.c b/src/pool.c
index 87ab4b6..4ba7ea7 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -47,6 +47,9 @@
 #ifdef DEBUG_POOL_INTEGRITY
 	POOL_DBG_COLD_FIRST |
 #endif
+#ifdef DEBUG_POOL_INTEGRITY
+	POOL_DBG_INTEGRITY  |
+#endif
 	0;
 
 static int mem_fail_rate __read_mostly = 0;
@@ -332,6 +335,50 @@
 
 #ifdef CONFIG_HAP_POOLS
 
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched.
+ */
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+	ulong *ptr = (ulong *)item;
+	uint ofs;
+	ulong u;
+
+	if (size <= sizeof(*item))
+		return;
+
+	/* Upgrade the fill_pattern to change about half of the bits
+	 * (to be sure to catch static flag corruption), and apply it.
+	 */
+	u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+	ofs = sizeof(*item) / sizeof(*ptr);
+	while (ofs < size / sizeof(*ptr))
+		ptr[ofs++] = u;
+}
+
+/* Checks a pool_cache_item's integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function provokes an immediate crash.
+ */
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+	const ulong *ptr = (const ulong *)item;
+	uint ofs;
+	ulong u;
+
+	if (size <= sizeof(*item))
+		return;
+
+	/* let's check that all words past *item are equal */
+	ofs = sizeof(*item) / sizeof(*ptr);
+	u = ptr[ofs++];
+	while (ofs < size / sizeof(*ptr)) {
+		if (unlikely(ptr[ofs] != u))
+			ABORT_NOW();
+		ofs++;
+	}
+}
+
 /* removes up to <count> items from the end of the local pool cache <ph> for
  * pool <pool>. The shared pool is refilled with these objects in the limit
  * of the number of acceptable objects, and the rest will be released to the
@@ -351,7 +398,8 @@
 	while (released < count && !LIST_ISEMPTY(&ph->list)) {
 		item = LIST_PREV(&ph->list, typeof(item), by_pool);
 		BUG_ON(&item->by_pool == &ph->list);
-		pool_check_pattern(ph, item, pool->size);
+		if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+			pool_check_pattern(ph, item, pool->size);
 		LIST_DELETE(&item->by_pool);
 		LIST_DELETE(&item->by_lru);
 
@@ -440,7 +488,8 @@
 	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
 	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
 	ph->count++;
-	pool_fill_pattern(ph, item, pool->size);
+	if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+		pool_fill_pattern(ph, item, pool->size);
 	pool_cache_count++;
 	pool_cache_bytes += pool->size;
 
@@ -510,7 +559,8 @@
 		LIST_INSERT(&pch->list, &item->by_pool);
 		LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
 		count++;
-		pool_fill_pattern(pch, item, pool->size);
+		if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+			pool_fill_pattern(pch, item, pool->size);
 	}
 	HA_ATOMIC_ADD(&pool->used, count);
 	pch->count += count;
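
To make the pattern mechanism easier to follow, here is a minimal
standalone sketch (not part of the patch; ITEM_HDR merely stands in
for sizeof(struct pool_cache_item), and corruption is reported instead
of crashing via ABORT_NOW()) of what the two moved functions do: each
fill adds ~0UL/3 (0x55...55) to a running pattern so that about half
of the bits flip between two consecutive fills, then every word past
the item header is set to that value. On extraction, all these words
must still be equal, otherwise the area was written to while it sat
"free" in the cache.

    /* sketch: fill an object's free area with a pattern, verify it later */
    #include <assert.h>
    #include <stdio.h>

    #define ITEM_HDR (2 * sizeof(unsigned long)) /* fake item header size */

    static unsigned long fill_pattern; /* per-cache running pattern */

    static void fill(unsigned long *ptr, size_t size)
    {
        size_t ofs = ITEM_HDR / sizeof(*ptr);
        unsigned long u = fill_pattern += ~0UL / 3; /* flips ~half the bits */

        while (ofs < size / sizeof(*ptr))
            ptr[ofs++] = u;
    }

    static int check(const unsigned long *ptr, size_t size)
    {
        size_t ofs = ITEM_HDR / sizeof(*ptr);
        unsigned long u = ptr[ofs++];

        while (ofs < size / sizeof(*ptr)) {
            if (ptr[ofs] != u)
                return 0; /* corruption: words differ */
            ofs++;
        }
        return 1;
    }

    int main(void)
    {
        unsigned long obj[8] = { 0 };

        fill(obj, sizeof(obj));
        assert(check(obj, sizeof(obj)));  /* untouched object passes */
        obj[5] ^= 1;                      /* simulate a write after free */
        assert(!check(obj, sizeof(obj))); /* ...and it is caught */
        puts("OK");
        return 0;
    }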