MINOR: pools: add a new debugging flag POOL_DBG_INTEGRITY
The test to decide whether or not to enforce integrity checks on cached
objects is now performed at runtime and conditioned by this new debugging
flag. While previously it was not a concern to inflate the code size by
keeping the two functions static, they are now always built, so they were
moved to pool.c to limit the impact. In pool_get_from_cache(), the fast
code path remains fast by testing both flags at once and only opening a
slower branch when either POOL_DBG_COLD_FIRST or POOL_DBG_INTEGRITY is set.
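
For illustration only (this is not a hunk from the patch): with both flags
folded into a single test, the allocation path in pool_get_from_cache() can
look roughly like the sketch below. The variable names (ph, item, pool) are
assumed from the surrounding function and the item-selection details are
simplified; a standalone demonstration of the fill/check pattern itself is
appended after the diff.

  if (unlikely(pool_debugging & (POOL_DBG_COLD_FIRST | POOL_DBG_INTEGRITY))) {
      /* slower branch: optionally pick the coldest object and/or verify
       * its fill pattern before handing it out.
       */
      if (pool_debugging & POOL_DBG_COLD_FIRST)
          item = LIST_PREV(&ph->list, typeof(item), by_pool);
      else
          item = LIST_NEXT(&ph->list, typeof(item), by_pool);

      if (pool_debugging & POOL_DBG_INTEGRITY)
          pool_check_pattern(ph, item, pool->size);
  } else {
      /* fast path: hottest object first, no per-allocation debug test */
      item = LIST_NEXT(&ph->list, typeof(item), by_pool);
  }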
diff --git a/src/pool.c b/src/pool.c
index 87ab4b6..4ba7ea7 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -47,6 +47,9 @@
#ifdef DEBUG_POOL_INTEGRITY
POOL_DBG_COLD_FIRST |
#endif
+#ifdef DEBUG_POOL_INTEGRITY
+ POOL_DBG_INTEGRITY |
+#endif
0;
static int mem_fail_rate __read_mostly = 0;
@@ -332,6 +335,50 @@
#ifdef CONFIG_HAP_POOLS
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched.
+ */
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ ulong *ptr = (ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* Upgrade the fill_pattern to change about half of the bits
+ * (to be sure to catch static flag corruption), and apply it.
+ */
+ u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+ ofs = sizeof(*item) / sizeof(*ptr);
+ while (ofs < size / sizeof(*ptr))
+ ptr[ofs++] = u;
+}
+
+/* Check a pool_cache_item's integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function provokes an immediate crash.
+ */
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ const ulong *ptr = (const ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* let's check that all words past *item are equal */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = ptr[ofs++];
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != u))
+ ABORT_NOW();
+ ofs++;
+ }
+}
+
/* removes up to <count> items from the end of the local pool cache <ph> for
* pool <pool>. The shared pool is refilled with these objects in the limit
* of the number of acceptable objects, and the rest will be released to the
@@ -351,7 +398,8 @@
while (released < count && !LIST_ISEMPTY(&ph->list)) {
item = LIST_PREV(&ph->list, typeof(item), by_pool);
BUG_ON(&item->by_pool == &ph->list);
- pool_check_pattern(ph, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
@@ -440,7 +488,8 @@
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count++;
- pool_fill_pattern(ph, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
pool_cache_bytes += pool->size;
@@ -510,7 +559,8 @@
LIST_INSERT(&pch->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
count++;
- pool_fill_pattern(pch, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_fill_pattern(pch, item, pool->size);
}
HA_ATOMIC_ADD(&pool->used, count);
pch->count += count;
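
As a standalone illustration of the same fill-then-verify technique (not part
of this patch: the names item_head, fill_free_area() and check_free_area()
are made up for the example), the small program below fills the free area
past an item header with a pattern, then detects a later stray write:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the real pool_cache_item: only this head must stay intact
 * while the object sits in the cache.
 */
struct item_head {
    void *next;
};

static unsigned long fill_pattern;

/* Fill every word located after the item head with the current pattern.
 * Adding ~0UL/3 (0x55...55) each time flips about half of the bits.
 */
static void fill_free_area(void *obj, size_t size)
{
    unsigned long *ptr = obj;
    size_t ofs = sizeof(struct item_head) / sizeof(*ptr);
    unsigned long u;

    if (size <= sizeof(struct item_head))
        return;

    u = fill_pattern += ~0UL / 3;
    while (ofs < size / sizeof(*ptr))
        ptr[ofs++] = u;
}

/* Verify that all words past the item head are still identical; return
 * zero if a corruption is detected.
 */
static int check_free_area(const void *obj, size_t size)
{
    const unsigned long *ptr = obj;
    size_t ofs = sizeof(struct item_head) / sizeof(*ptr);
    unsigned long u;

    if (size <= sizeof(struct item_head))
        return 1;

    u = ptr[ofs++];
    while (ofs < size / sizeof(*ptr)) {
        if (ptr[ofs] != u)
            return 0;
        ofs++;
    }
    return 1;
}

int main(void)
{
    size_t size = 64;
    unsigned char *obj = malloc(size);

    if (!obj)
        return 1;

    fill_free_area(obj, size);
    printf("before stray write: %s\n",
           check_free_area(obj, size) ? "intact" : "corrupted");

    obj[size - 1] ^= 0x01; /* simulate a write after the object was released */
    printf("after stray write:  %s\n",
           check_free_area(obj, size) ? "intact" : "corrupted");

    free(obj);
    return 0;
}

As in the patch above, the checker only verifies that all trailing words are
identical, which is enough to catch a stray write without having to remember
which pattern each individual object was filled with.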