DEBUG: pools: add new build option DEBUG_POOL_INTEGRITY
When enabled, objects picked from the cache are checked for corruption
by comparing their contents against a pattern that was placed when they
were inserted into the cache. Objects are also allocated in reverse
order, from the oldest one to the most recent, so as to maximize the
ability to detect such corruption. The goal is to detect writes after
free (or possibly hardware memory corruption). Contrary to DEBUG_UAF,
this cannot detect reads after free, but it may detect later
corruptions and does not consume extra memory. CPU usage increases a
bit due to the cost of filling/checking the area and to the preference
for cold cache lines over hot ones, though not as much as with
DEBUG_UAF. This option is meant to be usable in production.
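
As an illustration only (not part of the patch, and the names below are
made up for the example), the fill/check idea can be reduced to a small
standalone C program: the area of a released object is filled with a
pattern that changes on every release (kept per pool cache head in the
patch), and the words are verified to still be identical when the
object is picked again, so any intervening write is caught:

    #include <stdio.h>

    static unsigned long fill_pattern;

    /* fill the released area with a pattern that changes on every release */
    static void fill_area(unsigned long *area, size_t words)
    {
        size_t i;

        fill_pattern += ~0UL / 3; /* flips about half of the bits each time */
        for (i = 0; i < words; i++)
            area[i] = fill_pattern;
    }

    /* all words must still be identical, otherwise something wrote here */
    static int area_is_intact(const unsigned long *area, size_t words)
    {
        size_t i;

        for (i = 1; i < words; i++)
            if (area[i] != area[0])
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned long obj[8];

        fill_area(obj, 8);           /* object released to the cache */
        obj[3] ^= 1;                 /* simulated write after free */
        if (!area_is_intact(obj, 8))
            puts("corruption detected");
        return 0;
    }

To try the option, it is enough to append it to the DEBUG variable at
build time, for example (the target name is only an example):

    make TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY"
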
(cherry picked from commit 0575d8fd760c6cd1de3d6ed66599d685a03c1873)
[wt: adjusted slightly since there is no batch refilling in 2.5; dropped
the API doc parts; tested with/without option and works fine]
Signed-off-by: Willy Tarreau <w@1wt.eu>
(cherry picked from commit 722601212a6403fede5f55d65a3d082721bf1678)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/Makefile b/Makefile
index 6a165df..ccf6b01 100644
--- a/Makefile
+++ b/Makefile
@@ -236,7 +236,7 @@
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_NO_LOCKLESS_POOLS, DEBUG_FD,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
-# DEBUG_TASK, DEBUG_MEMORY_POOLS.
+# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_INTEGRITY.
DEBUG =
#### Trace options
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
index ede1e0e..7a17aef 100644
--- a/include/haproxy/pool-t.h
+++ b/include/haproxy/pool-t.h
@@ -84,6 +84,9 @@
struct pool_cache_head {
struct list list; /* head of objects in this pool */
unsigned int count; /* number of objects in this pool */
+#if defined(DEBUG_POOL_INTEGRITY)
+ ulong fill_pattern; /* pattern used to fill the area on free */
+#endif
} THREAD_ALIGNED(64);
struct pool_cache_item {
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index cbc5421..4d3bc7f 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -232,6 +232,64 @@
* cache first, then from the second level if it exists.
*/
+#if defined(DEBUG_POOL_INTEGRITY)
+
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched.
+ */
+static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ ulong *ptr = (ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* Upgrade the fill_pattern to change about half of the bits
+ * (to be sure to catch static flag corruption), and apply it.
+ */
+ u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+ ofs = sizeof(*item) / sizeof(*ptr);
+ while (ofs < size / sizeof(*ptr))
+ ptr[ofs++] = u;
+}
+
+/* Check a pool_cache_item's integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function provokes an immediate crash.
+ */
+static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ const ulong *ptr = (const ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* let's check that all words past *item are equal */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = ptr[ofs++];
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != u))
+ ABORT_NOW();
+ ofs++;
+ }
+}
+
+#else
+
+static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+}
+
+static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+}
+
+#endif
+
/* Tries to retrieve an object from the local pool cache corresponding to pool
* <pool>. If none is available, tries to allocate from the shared cache, and
* returns NULL if nothing is available.
@@ -245,7 +303,17 @@
if (LIST_ISEMPTY(&ph->list))
return pool_get_from_shared_cache(pool);
+#if defined(DEBUG_POOL_INTEGRITY)
+ /* allocate oldest objects first so as to keep them as long as possible
+ * in the cache before being reused and to maximize the chance to detect
+ * an overwrite.
+ */
+ item = LIST_PREV(&ph->list, typeof(item), by_pool);
+ pool_check_pattern(ph, item, pool->size);
+#else
+ /* allocate hottest objects first */
item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+#endif
ph->count--;
pool_cache_bytes -= pool->size;
pool_cache_count--;
diff --git a/src/pool.c b/src/pool.c
index d90b091..f15a4a8 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -278,6 +278,7 @@
ph->count--;
pool_cache_bytes -= pool->size;
pool_cache_count--;
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
pool_put_to_shared_cache(pool, item);
@@ -300,6 +301,7 @@
*/
ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
pool = container_of(ph - tid, struct pool_head, cache);
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
ph->count--;
@@ -322,6 +324,7 @@
LIST_INSERT(&ph->list, &item->by_pool);
LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
ph->count++;
+ pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
pool_cache_bytes += pool->size;