CONTRIB: mod_defender: import the minimal number of includes

mod_defender currently depends on haproxy includes while it should be
totally autonomous, since it should build without them and not depend
on any specific haproxy version.

This imports the strict minimum set of includes required to build
it. These have been manually stripped of their exported function
prototypes and their unneeded dependencies.

In reality, defender.c mostly needs sample.h because it stores its
data this way, spoe.h for the protocol definitions, and a few intops
and tools functions to decode varints. The rest mostly comes in as
intermediate dependencies.
diff --git a/contrib/mod_defender/include/haproxy/api-t.h b/contrib/mod_defender/include/haproxy/api-t.h
new file mode 100644
index 0000000..edb33a8
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/api-t.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/api-t.h
+ * This provides definitions for all common types or type modifiers used
+ * everywhere in the code, and suitable for use in structure fields.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_TYPES_H
+#define _HAPROXY_TYPES_H
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <haproxy/compat.h>
+#include <haproxy/compiler.h>
+#include <haproxy/defaults.h>
+#include <haproxy/list-t.h>
+
+#endif /* _HAPROXY_TYPES_H */
diff --git a/contrib/mod_defender/include/haproxy/api.h b/contrib/mod_defender/include/haproxy/api.h
new file mode 100644
index 0000000..a5d7805
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/api.h
@@ -0,0 +1,35 @@
+/*
+ * include/haproxy/api.h
+ *
+ * Include wrapper that assembles all includes required by every haproxy file.
+ * Please do not add direct definitions into this file.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BASE_H
+#define _HAPROXY_BASE_H
+
+#include <haproxy/api-t.h>
+
+#endif
diff --git a/contrib/mod_defender/include/haproxy/buf-t.h b/contrib/mod_defender/include/haproxy/buf-t.h
new file mode 100644
index 0000000..3c0f8b5
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/buf-t.h
@@ -0,0 +1,62 @@
+/*
+ * include/haproxy/buf-t.h
+ * Simple buffer handling - types definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BUF_T_H
+#define _HAPROXY_BUF_T_H
+
+#include <haproxy/api-t.h>
+
+/* Structure defining a buffer's head */
+struct buffer {
+	size_t size;                /* buffer size in bytes */
+	char  *area;                /* points to <size> bytes */
+	size_t data;                /* amount of data after head including wrapping */
+	size_t head;                /* start offset of remaining data relative to area */
+};
+
+/* A buffer may be in 3 different states :
+ *   - unallocated : size == 0, area == 0  (b_is_null() is true)
+ *   - waiting     : size == 0, area != 0  (b_is_null() is true)
+ *   - allocated   : size  > 0, area  > 0  (b_is_null() is false)
+ */
+
+/* initializers for certain buffer states. It is important that the NULL buffer
+ * remains the one with all fields initialized to zero so that a calloc() or a
+ * memset() on a struct automatically sets a NULL buffer.
+ */
+#define BUF_NULL   ((struct buffer){ })
+#define BUF_WANTED ((struct buffer){ .area = (char *)1 })
+#define BUF_RING   ((struct buffer){ .area = (char *)2 })
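+/* Illustrative note (not part of the original header): since BUF_NULL is the
+ * all-zero pattern, any zeroed structure already holds NULL buffers, e.g.:
+ *
+ *   struct ctx { struct buffer req; };       // hypothetical container
+ *   struct ctx *c = calloc(1, sizeof(*c));   // c->req is BUF_NULL
+ */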
+
+#endif /* _HAPROXY_BUF_T_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/contrib/mod_defender/include/haproxy/compat.h b/contrib/mod_defender/include/haproxy/compat.h
new file mode 100644
index 0000000..39d46c2
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/compat.h
@@ -0,0 +1,294 @@
+/*
+ * include/haproxy/compat.h
+ * Operating system compatibility interface.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_COMPAT_H
+#define _HAPROXY_COMPAT_H
+
+#include <limits.h>
+#include <signal.h>
+#include <time.h>
+#include <unistd.h>
+/* This is needed on Linux for Netfilter includes */
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+
+
+/* These are a few short names for commonly used types whose size and sometimes
+ * signedness depends on the architecture. Be careful not to rely on a few
+ * common but wrong assumptions:
+ *  - char is not always signed (ARM, AARCH64, PPC)
+ *  - long is not always large enough for a pointer (Windows)
+ * These types are needed with the standard C API (string.h, printf, syscalls).
+ *
+ * When a fixed size is needed (protocol interoperability), better use the
+ * standard types provided by stdint.h:
+ *   - size_t    : unsigned int of default word size, large enough for any
+ *                 object in memory
+ *   - ssize_t   : signed int of default word size, used by some syscalls
+ *   - uintptr_t : an unsigned int large enough to store any pointer
+ *   - ptrdiff_t : a signed int large enough to hold a distance between 2 ptrs
+ *   - int<size>_t : a signed int of <size> bits (8,16,32,64 work everywhere)
+ *   - uint<size>_t : an unsigned int of <size> bits
+ */
+typedef signed char        schar;
+typedef unsigned char      uchar;
+typedef unsigned short     ushort;
+typedef unsigned int       uint;
+typedef unsigned long      ulong;
+typedef unsigned long long ullong;
+typedef long long          llong;
+
+
+/* set any optional field in a struct to this type to save ifdefs. Its address
+ * will still be valid but it will not reserve any room nor require any
+ * initialization.
+ */
+typedef struct { } empty_t;
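+/* Illustrative sketch (hypothetical struct): with gcc, sizeof(empty_t) is 0,
+ * so an optional field costs nothing while keeping a valid address:
+ *
+ *   struct foo {
+ *           int used;
+ *           empty_t maybe_timer;   // e.g. when timer_t is remapped below
+ *   };
+ */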
+
+// Redefine some limits that are not present everywhere
+#ifndef LLONG_MAX
+# define LLONG_MAX 9223372036854775807LL
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+#endif
+
+#ifndef ULLONG_MAX
+# define ULLONG_MAX	(LLONG_MAX * 2ULL + 1)
+#endif
+
+#ifndef LONGBITS
+#define LONGBITS  ((unsigned int)sizeof(long) * 8)
+#endif
+
+#ifndef BITS_PER_INT
+#define BITS_PER_INT    (8*sizeof(int))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+/* this is for libc5 for example */
+#ifndef TCP_NODELAY
+#define TCP_NODELAY     1
+#endif
+
+#ifndef SHUT_RD
+#define SHUT_RD	        0
+#endif
+
+#ifndef SHUT_WR
+#define SHUT_WR	        1
+#endif
+
+/* only Linux defines it */
+#ifndef MSG_NOSIGNAL
+#define MSG_NOSIGNAL	0
+#endif
+
+/* AIX does not define MSG_DONTWAIT. We'll define it to zero, and test it
+ * wherever appropriate.
+ */
+#ifndef MSG_DONTWAIT
+#define MSG_DONTWAIT	0
+#endif
+
+/* Only Linux defines MSG_MORE */
+#ifndef MSG_MORE
+#define MSG_MORE	0
+#endif
+
+/* On Linux 2.4 and above, MSG_TRUNC can be used on TCP sockets to drop any
+ * pending data. Let's rely on NETFILTER to detect if this is supported.
+ */
+#ifdef USE_NETFILTER
+#define MSG_TRUNC_CLEARS_INPUT
+#endif
+
+/* Maximum path length, OS-dependent */
+#ifndef MAXPATHLEN
+#define MAXPATHLEN 128
+#endif
+
+/* longest UNIX socket name */
+#ifndef UNIX_MAX_PATH
+#define UNIX_MAX_PATH 108
+#endif
+
+/* On Linux, allows pipes to be resized */
+#ifndef F_SETPIPE_SZ
+#define F_SETPIPE_SZ (1024 + 7)
+#endif
+
+/* On FreeBSD we don't have SI_TKILL but SI_LWP instead */
+#if !defined(SI_TKILL) && defined(SI_LWP)
+#define SI_TKILL SI_LWP
+#endif
+
+/* systems without such defines do not know clockid_t or timer_t */
+#if !(_POSIX_TIMERS > 0)
+#undef clockid_t
+#define clockid_t empty_t
+#undef timer_t
+#define timer_t empty_t
+#endif
+
+/* define a dummy value to designate "no timer". Use only 32 bits. */
+#ifndef TIMER_INVALID
+#define TIMER_INVALID ((timer_t)(unsigned long)(0xfffffffful))
+#endif
+
+#if defined(USE_TPROXY) && defined(USE_NETFILTER)
+#include <linux/types.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv4.h>
+#endif
+
+/* On Linux, IP_TRANSPARENT and/or IP_FREEBIND generally require a kernel patch */
+#if defined(USE_LINUX_TPROXY)
+#if !defined(IP_FREEBIND)
+#define IP_FREEBIND 15
+#endif /* !IP_FREEBIND */
+#if !defined(IP_TRANSPARENT)
+#define IP_TRANSPARENT 19
+#endif /* !IP_TRANSPARENT */
+#if !defined(IPV6_TRANSPARENT)
+#define IPV6_TRANSPARENT 75
+#endif /* !IPV6_TRANSPARENT */
+#endif /* USE_LINUX_TPROXY */
+
+#if defined(IP_FREEBIND)       \
+ || defined(IP_BINDANY)        \
+ || defined(IPV6_BINDANY)      \
+ || defined(SO_BINDANY)        \
+ || defined(IP_TRANSPARENT)    \
+ || defined(IPV6_TRANSPARENT)
+#define CONFIG_HAP_TRANSPARENT
+#endif
+
+/* We'll try to enable SO_REUSEPORT on Linux 2.4 and 2.6 if not defined.
+ * There are two families of values depending on the architecture. Those
+ * are at least valid on Linux 2.4 and 2.6, reason why we'll rely on the
+ * USE_NETFILTER define.
+ */
+#if !defined(SO_REUSEPORT) && defined(USE_NETFILTER)
+#if    (SO_REUSEADDR == 2)
+#define SO_REUSEPORT 15
+#elif  (SO_REUSEADDR == 0x0004)
+#define SO_REUSEPORT 0x0200
+#endif /* SO_REUSEADDR */
+#endif /* SO_REUSEPORT */
+
+/* only Linux defines TCP_FASTOPEN */
+#ifdef USE_TFO
+#ifndef TCP_FASTOPEN
+#define TCP_FASTOPEN 23
+#endif
+
+#ifndef TCP_FASTOPEN_CONNECT
+#define TCP_FASTOPEN_CONNECT 30
+#endif
+#endif
+
+/* If IPv6 is supported, define IN6_IS_ADDR_V4MAPPED() if missing. */
+#if defined(IPV6_TCLASS) && !defined(IN6_IS_ADDR_V4MAPPED)
+#define IN6_IS_ADDR_V4MAPPED(a) \
+((((const uint32_t *) (a))[0] == 0) \
+&& (((const uint32_t *) (a))[1] == 0) \
+&& (((const uint32_t *) (a))[2] == htonl (0xffff)))
+#endif
+
+#if defined(__dietlibc__)
+#include <strings.h>
+#endif
+
+/* crypt_r() has been present in glibc since 2.2 and on FreeBSD since 12.0
+ * (1200002). No other OS makes any mention of it for now. Feel free to add
+ * valid known combinations below if needed to relax the crypt() lock when
+ * using threads.
+ */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2)) \
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 1200002)
+#define HA_HAVE_CRYPT_R
+#endif
+
+/* some backtrace() implementations are broken or incomplete, in this case we
+ * can replace them. We must not do it all the time as some are more accurate
+ * than ours.
+ */
+#ifdef USE_BACKTRACE
+#if defined(__aarch64__)
+/* on aarch64 at least from gcc-4.7.4 to 7.4.1 we only get a single entry, which
+ * is pointless. Ours works though it misses the faulty function itself,
+ * probably due to an alternate stack for the signal handler which does not
+ * create a new frame hence doesn't store the caller's return address.
+ */
+#elif defined(__clang__) && defined(__x86_64__)
+/* this is seen on FreeBSD, where clang 4.0 to 8.0 don't go further than
+ * the signal handler.
+ */
+#else
+#define HA_HAVE_WORKING_BACKTRACE
+#endif
+#endif
+
+/* malloc_trim() can be very convenient to reclaim unused memory especially
+ * from huge pattern files. It's available (and really usable) in glibc 2.8 and
+ * above.
+ */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
+#include <malloc.h>
+#define HA_HAVE_MALLOC_TRIM
+#endif
+
+/* glibc 2.26 includes a thread-local cache which makes it fast enough in threads */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 26))
+#include <malloc.h>
+#define HA_HAVE_FAST_MALLOC
+#endif
+
+/* Max number of file descriptors we send in one sendmsg(). Linux seems to be
+ * able to send 253 fds per sendmsg(), not sure about the other OSes.
+ */
+#define MAX_SEND_FD 253
+
+/* Make the new complex name for the xxhash function easier to remember
+ * and use.
+ */
+#ifndef XXH3
+#define XXH3(data, len, seed) XXH3_64bits_withSeed(data, len, seed)
+#endif
+
+#endif /* _HAPROXY_COMPAT_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/contrib/mod_defender/include/haproxy/compiler.h b/contrib/mod_defender/include/haproxy/compiler.h
new file mode 100644
index 0000000..7255767
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/compiler.h
@@ -0,0 +1,298 @@
+/*
+ * include/haproxy/compiler.h
+ * This file contains some compiler-specific settings.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COMPILER_H
+#define _HAPROXY_COMPILER_H
+
+#ifdef DEBUG_USE_ABORT
+#include <stdlib.h>
+#endif
+
+/*
+ * Gcc before 3.0 needs [0] to declare a variable-size array
+ */
+#ifndef VAR_ARRAY
+#if defined(__GNUC__) && (__GNUC__ < 3)
+#define VAR_ARRAY	0
+#else
+#define VAR_ARRAY
+#endif
+#endif
+
+#if !defined(__GNUC__)
+/* Some versions of glibc irresponsibly redefine __attribute__() to empty for
+ * non-gcc compilers, and as such, silently break all constructors with
+ * other compilers. Let's make sure such incompatibilities are detected if any,
+ * or that the attribute is properly enforced.
+ */
+#undef __attribute__
+#define __attribute__(x) __attribute__(x)
+#endif
+
+/* By default, gcc does not inline large chunks of code, but we want it to
+ * respect our choices.
+ */
+#if !defined(forceinline)
+#if !defined(__GNUC__) || (__GNUC__ < 3)
+#define forceinline inline
+#else
+#define forceinline inline __attribute__((always_inline))
+#endif
+#endif
+
+/* silence the "unused" warnings without having to place painful #ifdefs.
+ * For use with variables or functions.
+ */
+#define __maybe_unused __attribute__((unused))
+
+/* These macros are used to declare a section name for a variable.
+ * WARNING: keep section names short, as MacOS limits them to 16 characters.
+ * The _START and _STOP attributes have to be placed after the start and stop
+ * weak symbol declarations, and are only used by MacOS.
+ */
+#if !defined(USE_OBSOLETE_LINKER)
+
+#ifdef __APPLE__
+#define HA_SECTION(s)           __attribute__((__section__("__DATA, " s)))
+#define HA_SECTION_START(s)     __asm("section$start$__DATA$" s)
+#define HA_SECTION_STOP(s)      __asm("section$end$__DATA$" s)
+#else
+#define HA_SECTION(s)           __attribute__((__section__(s)))
+#define HA_SECTION_START(s)
+#define HA_SECTION_STOP(s)
+#endif
+
+#else // obsolete linker below, let's just not force any section
+
+#define HA_SECTION(s)
+#define HA_SECTION_START(s)
+#define HA_SECTION_STOP(s)
+
+#endif // USE_OBSOLETE_LINKER
+
+/* use this attribute on a variable to move it to the read_mostly section */
+#define __read_mostly           HA_SECTION("read_mostly")
+
+/* This allows gcc to know that some locations are never reached, for example
+ * after a longjmp() in the Lua code, hence that some errors caught by such
+ * methods cannot propagate further. This is important with gcc versions 6 and
+ * above which can more aggressively detect null dereferences. The builtin
+ * below was introduced in gcc 4.5, and before it we didn't care.
+ */
+#ifdef DEBUG_USE_ABORT
+#define my_unreachable() abort()
+#else
+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#define my_unreachable() __builtin_unreachable()
+#else
+#define my_unreachable()
+#endif
+#endif
+
+/* This macro may be used to block constant propagation that lets the compiler
+ * detect a possible NULL dereference on a variable resulting from an explicit
+ * assignment in an impossible check. Sometimes a function is called which does
+ * safety checks and returns NULL if safe conditions are not met. The place
+ * where it's called cannot hit this condition and dereferencing the pointer
+ * without first checking it will make the compiler emit a warning about a
+ * "potential null pointer dereference" which is hard to work around. This
+ * macro "washes" the pointer and prevents the compiler from emitting tests
+ * branching to undefined instructions. It may only be used when the developer
+ * is absolutely certain that the conditions are guaranteed and that the
+ * pointer passed in argument cannot be NULL by design.
+ */
+#define ALREADY_CHECKED(p) do { asm("" : "=rm"(p) : "0"(p)); } while (0)
+
+/* same as above, but used to pass the input value through to the output
+ * without letting the compiler know about its initial properties.
+ */
+#define DISGUISE(v) ({ typeof(v) __v = (v); ALREADY_CHECKED(__v); __v; })
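+/* Illustrative use (find_item() is a hypothetical lookup known by design to
+ * succeed here); washing the pointer silences a spurious "potential null
+ * pointer dereference" warning:
+ *
+ *   struct item *it = find_item(name);
+ *   ALREADY_CHECKED(it);
+ *   it->refcount++;
+ */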
+
+/*
+ * Gcc >= 3 provides the ability for the program to give hints to the
+ * compiler about what branch of an if is most likely to be taken. This
+ * helps the compiler produce the most compact critical paths, which is
+ * generally better for the cache and to reduce the number of jumps.
+ */
+#if !defined(likely)
+#if !defined(__GNUC__) || (__GNUC__ < 3)
+#define __builtin_expect(x,y) (x)
+#define likely(x) (x)
+#define unlikely(x) (x)
+#else
+#define likely(x) (__builtin_expect((x) != 0, 1))
+#define unlikely(x) (__builtin_expect((x) != 0, 0))
+#endif
+#endif
+
+#ifndef __GNUC_PREREQ__
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __GNUC_PREREQ__(ma, mi) \
+        (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define __GNUC_PREREQ__(ma, mi) 0
+#endif
+#endif
+
+#ifndef offsetof
+#if __GNUC_PREREQ__(4, 1)
+#define offsetof(type, field)  __builtin_offsetof(type, field)
+#else
+#define offsetof(type, field) \
+        ((size_t)(uintptr_t)((const volatile void *)&((type *)0)->field))
+#endif
+#endif
+
+/* Some architectures have a double-word CAS, sometimes even dual-8 bytes.
+ * Some architectures support unaligned accesses, others are fine with them
+ * but only for non-atomic operations. Also mention those supporting unaligned
+ * accesses and being little endian, and those where unaligned accesses are
+ * known to be fast (almost as fast as aligned ones).
+ */
+#if defined(__x86_64__)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_LE64
+#define HA_UNALIGNED_FAST
+#define HA_UNALIGNED_ATOMIC
+#define HA_HAVE_CAS_DW
+#define HA_CAS_IS_8B
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_ATOMIC
+#elif defined (__aarch64__) || defined(__ARM_ARCH_8A)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_LE64
+#define HA_UNALIGNED_FAST
+#define HA_HAVE_CAS_DW
+#define HA_CAS_IS_8B
+#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_FAST
+#define HA_HAVE_CAS_DW
+#endif
+
+
+/* sets alignment for current field or variable */
+#ifndef ALIGNED
+#define ALIGNED(x) __attribute__((aligned(x)))
+#endif
+
+/* sets alignment only on architectures preventing unaligned atomic accesses */
+#ifndef MAYBE_ALIGNED
+#ifndef HA_UNALIGNED
+#define MAYBE_ALIGNED(x)  ALIGNED(x)
+#else
+#define MAYBE_ALIGNED(x)
+#endif
+#endif
+
+/* sets alignment only on architectures preventing unaligned atomic accesses */
+#ifndef ATOMIC_ALIGNED
+#ifndef HA_UNALIGNED_ATOMIC
+#define ATOMIC_ALIGNED(x)  ALIGNED(x)
+#else
+#define ATOMIC_ALIGNED(x)
+#endif
+#endif
+
+/* sets alignment for current field or variable only when threads are enabled.
+ * Typically used to respect cache line alignment to avoid false sharing.
+ */
+#ifndef THREAD_ALIGNED
+#ifdef USE_THREAD
+#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
+#else
+#define THREAD_ALIGNED(x)
+#endif
+#endif
+
+/* add a mandatory alignment for next fields in a structure */
+#ifndef ALWAYS_ALIGN
+#define ALWAYS_ALIGN(x)  union { } ALIGNED(x)
+#endif
+
+/* add an optional alignment for next fields in a structure, only for archs
+ * which do not support unaligned accesses.
+ */
+#ifndef MAYBE_ALIGN
+#ifndef HA_UNALIGNED
+#define MAYBE_ALIGN(x)  union { } ALIGNED(x)
+#else
+#define MAYBE_ALIGN(x)
+#endif
+#endif
+
+/* add an optional alignment for next fields in a structure, only for archs
+ * which do not support unaligned accesses for atomic operations.
+ */
+#ifndef ATOMIC_ALIGN
+#ifndef HA_UNALIGNED_ATOMIC
+#define ATOMIC_ALIGN(x)  union { } ALIGNED(x)
+#else
+#define ATOMIC_ALIGN(x)
+#endif
+#endif
+
+/* add an optional alignment for next fields in a structure, only when threads
+ * are enabled. Typically used to respect cache line alignment to avoid false
+ * sharing.
+ */
+#ifndef THREAD_ALIGN
+#ifdef USE_THREAD
+#define THREAD_ALIGN(x) union { } ALIGNED(x)
+#else
+#define THREAD_ALIGN(x)
+#endif
+#endif
+
+/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
+ * to __thread when threads are enabled or empty when disabled.
+ */
+#ifdef USE_THREAD
+#define THREAD_LOCAL __thread
+#else
+#define THREAD_LOCAL
+#endif
+
+/* The __decl_thread() statement shows its argument when threads are enabled
+ * or hides it when disabled. The purpose is to condition the presence of some
+ * variables or struct members on whether threads are enabled, without
+ * having to enclose them inside a #ifdef USE_THREAD/#endif clause.
+ */
+#ifdef USE_THREAD
+#define __decl_thread(decl) decl
+#else
+#define __decl_thread(decl)
+#endif
+
+/* clang has a __has_feature() macro which reports true/false on a number of
+ * internally supported features. Let's make sure this macro is always defined
+ * and returns zero when not supported.
+ */
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#endif /* _HAPROXY_COMPILER_H */
diff --git a/contrib/mod_defender/include/haproxy/defaults.h b/contrib/mod_defender/include/haproxy/defaults.h
new file mode 100644
index 0000000..f6a15db
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/defaults.h
@@ -0,0 +1,417 @@
+/*
+ * include/haproxy/defaults.h
+ * Miscellaneous default values.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_DEFAULTS_H
+#define _HAPROXY_DEFAULTS_H
+
+/* MAX_PROCS defines the highest limit for the global "nbproc" value. It
+ * defaults to the number of bits in a long integer but may be lowered to save
+ * resources on embedded systems.
+ */
+#ifndef MAX_PROCS
+#define MAX_PROCS LONGBITS
+#endif
+
+/* MAX_THREADS defines the highest limit for the global nbthread value. It
+ * defaults to the number of bits in a long integer when threads are enabled
+ * but may be lowered to save resources on embedded systems.
+*/
+#ifndef USE_THREAD
+/* threads disabled, 1 thread max */
+#define MAX_THREADS 1
+#define MAX_THREADS_MASK 1
+
+#else
+/* threads enabled, max_threads defaults to long bits */
+#ifndef MAX_THREADS
+#define MAX_THREADS LONGBITS
+#endif
+#define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))
+#endif
+
+/*
+ * BUFSIZE defines the size of a read and write buffer. It is the maximum
+ * amount of bytes which can be stored by the proxy for each stream. However,
+ * when reading HTTP headers, the proxy needs some spare space to add or rewrite
+ * headers if needed. The size of this spare is defined with MAXREWRITE. So it
+ * is not possible to process headers longer than BUFSIZE-MAXREWRITE bytes. By
+ * default, BUFSIZE=16384 bytes and MAXREWRITE=min(1024,BUFSIZE/2), so the
+ * maximum length of headers accepted is 15360 bytes.
+ */
+#ifndef BUFSIZE
+#define BUFSIZE	        16384
+#endif
+
+/* certain buffers may only be allocated for responses in order to avoid
+ * deadlocks caused by request queuing. 2 buffers is the absolute minimum
+ * acceptable to ensure that a request gaining access to a server can get
+ * a response buffer even if it doesn't completely flush the request buffer.
+ * The worst case is an applet making use of a request buffer that cannot
+ * completely be sent while the server starts to respond, and all unreserved
+ * buffers are allocated by request buffers from pending connections in the
+ * queue waiting for this one to flush. Both reserved buffers may
+ * thus be used at the same time.
+ */
+#ifndef RESERVED_BUFS
+#define RESERVED_BUFS   2
+#endif
+
+// reserved buffer space for header rewriting
+#ifndef MAXREWRITE
+#define MAXREWRITE      1024
+#endif
+
+#ifndef REQURI_LEN
+#define REQURI_LEN      1024
+#endif
+
+#ifndef CAPTURE_LEN
+#define CAPTURE_LEN     64
+#endif
+
+#ifndef MAX_SYSLOG_LEN
+#define MAX_SYSLOG_LEN          1024
+#endif
+
+/* 64kB to archive startup-logs seems way more than enough */
+#ifndef STARTUP_LOG_SIZE
+#define STARTUP_LOG_SIZE        65536
+#endif
+
+// maximum line size when parsing config
+#ifndef LINESIZE
+#define LINESIZE	2048
+#endif
+
+// max # args on a configuration line
+#define MAX_LINE_ARGS   64
+
+// maximum line size when parsing crt-bind-list config
+#define CRT_LINESIZE    65536
+
+// max # args on crt-bind-list configuration line
+#define MAX_CRT_ARGS  2048
+
+// max # args on a command issued on the CLI ("stats socket")
+// This should cover at least 5 + twice the # of data_types
+#define MAX_CLI_ARGS  64
+
+// max # of matches per regexp
+#define	MAX_MATCH       10
+
+// max # of headers in one HTTP request or response
+// By default, about 100 headers (+1 for the first line)
+#ifndef MAX_HTTP_HDR
+#define MAX_HTTP_HDR    101
+#endif
+
+// max # of headers in history when looking for header #-X
+#ifndef MAX_HDR_HISTORY
+#define MAX_HDR_HISTORY 10
+#endif
+
+// max # of stick counters per session (at least 3 for sc0..sc2)
+#ifndef MAX_SESS_STKCTR
+#define MAX_SESS_STKCTR 3
+#endif
+
+// max # of extra stick-table data types that can be registered at runtime
+#ifndef STKTABLE_EXTRA_DATA_TYPES
+#define STKTABLE_EXTRA_DATA_TYPES 0
+#endif
+
+// max # of stick-table filter entries that can be used during dump
+#ifndef STKTABLE_FILTER_LEN
+#define STKTABLE_FILTER_LEN 4
+#endif
+
+// max # of loops we can perform around a read() which succeeds.
+// It's very frequent that the system returns a few TCP segments at a time.
+#ifndef MAX_READ_POLL_LOOPS
+#define MAX_READ_POLL_LOOPS 4
+#endif
+
+// minimum number of bytes read at once above which we don't try to read
+// more, in order not to risk facing an EAGAIN. Most often, if we read
+// at least 10 kB, we can consider that the system has tried to read a
+// full buffer and got multiple segments (>1 MSS for jumbo frames, >7 MSS
+// for normal frames), and did not bother truncating the last segment.
+#ifndef MIN_RECV_AT_ONCE_ENOUGH
+#define MIN_RECV_AT_ONCE_ENOUGH (7*1448)
+#endif
+
+// The minimum number of bytes to be forwarded that is worth trying to splice.
+// Below 4kB, it's not worth allocating pipes nor pretending to zero-copy.
+#ifndef MIN_SPLICE_FORWARD
+#define MIN_SPLICE_FORWARD 4096
+#endif
+
+// the max number of events returned in one call to poll/epoll. Too small a
+// value will cause lots of calls, and too high a value may cause high latency.
+#ifndef MAX_POLL_EVENTS
+#define MAX_POLL_EVENTS 200
+#endif
+
+// The maximum number of connections accepted at once by a thread for a single
+// listener. It used to default to 64 divided by the number of processes but
+// the tasklet-based model is much more scalable and benefits from smaller
+// values. Experimentation has shown that 4 gives the highest accept rate for
+// all thread values, and that 3 and 5 come very close, as shown below (HTTP/1
+// connections forwarded per second at multi-accept 4 and 64):
+//
+// ac\thr|    1    2     4     8     16
+// ------+------------------------------
+//      4|   80k  106k  168k  270k  336k
+//     64|   63k   89k  145k  230k  274k
+//
+#ifndef MAX_ACCEPT
+#define MAX_ACCEPT 4
+#endif
+
+// The base max number of tasks to run at once to be used when not set by
+// tune.runqueue-depth. It will automatically be divided by the square root
+// of the number of threads for better fairness. As such, 64 threads will
+// use 35 and a single thread will use 280.
+#ifndef RUNQUEUE_DEPTH
+#define RUNQUEUE_DEPTH 280
+#endif
+
+// cookie delimiter in "prefix" mode. This character is inserted between the
+// persistence cookie and the original value. The '~' is allowed by RFC6265,
+// and should not be too common in server names.
+#ifndef COOKIE_DELIM
+#define COOKIE_DELIM    '~'
+#endif
+
+// this delimiter is used between a server's name and a last visit date in
+// cookies exchanged with the client.
+#ifndef COOKIE_DELIM_DATE
+#define COOKIE_DELIM_DATE       '|'
+#endif
+
+#define CONN_RETRIES    3
+
+#define	CHK_CONNTIME    2000
+#define	DEF_CHKINTR     2000
+#define DEF_MAILALERTTIME 10000
+#define DEF_FALLTIME    3
+#define DEF_RISETIME    2
+#define DEF_AGENT_FALLTIME    1
+#define DEF_AGENT_RISETIME    1
+#define DEF_CHECK_PATH  ""
+
+
+#define DEF_HANA_ONERR		HANA_ONERR_FAILCHK
+#define DEF_HANA_ERRLIMIT	10
+
+// X-Forwarded-For header default
+#define DEF_XFORWARDFOR_HDR	"X-Forwarded-For"
+
+// X-Original-To header default
+#define DEF_XORIGINALTO_HDR	"X-Original-To"
+
+/* Default connections limit.
+ *
+ * A system limit can be enforced at build time in order to avoid using haproxy
+ * beyond reasonable system limits. For this, just define SYSTEM_MAXCONN to the
+ * absolute limit accepted by the system. If the configuration specifies a
+ * higher value, it will be capped to SYSTEM_MAXCONN and a warning will be
+ * emitted. The only way to override this limit will be to set it via the
+ * command-line '-n' argument. If SYSTEM_MAXCONN is not set, a minimum value
+ * of 100 will be used for DEFAULT_MAXCONN which almost guarantees that a
+ * process will correctly start in any situation.
+ */
+#ifdef SYSTEM_MAXCONN
+#undef  DEFAULT_MAXCONN
+#define DEFAULT_MAXCONN SYSTEM_MAXCONN
+#elif !defined(DEFAULT_MAXCONN)
+#define DEFAULT_MAXCONN 100
+#endif
+
+/* Minimum check interval for spread health checks. Servers with intervals
+ * greater than or equal to this value will have their checks spread apart
+ * and will be considered when searching the minimal interval.
+ * Others will be ignored for the minimal interval and will have their checks
+ * scheduled on a different basis.
+ */
+#ifndef SRV_CHK_INTER_THRES
+#define SRV_CHK_INTER_THRES 1000
+#endif
+
+/* Specifies the string used to report the version and release date on the
+ * statistics page. May be defined to the empty string ("") to permanently
+ * disable the feature.
+ */
+#ifndef STATS_VERSION_STRING
+#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
+#endif
+
+/* This is the default statistics URI */
+#ifdef CONFIG_STATS_DEFAULT_URI
+#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
+#else
+#define STATS_DEFAULT_URI "/haproxy?stats"
+#endif
+
+/* This is the default statistics realm */
+#ifdef CONFIG_STATS_DEFAULT_REALM
+#define STATS_DEFAULT_REALM CONFIG_STATS_DEFAULT_REALM
+#else
+#define STATS_DEFAULT_REALM "HAProxy Statistics"
+#endif
+
+/* Maximum signal queue size, and also number of different signals we can
+ * handle.
+ */
+#ifndef MAX_SIGNAL
+#define MAX_SIGNAL 256
+#endif
+
+/* Maximum host name length */
+#ifndef MAX_HOSTNAME_LEN
+#if MAXHOSTNAMELEN
+#define MAX_HOSTNAME_LEN	MAXHOSTNAMELEN
+#else
+#define MAX_HOSTNAME_LEN	64
+#endif // MAXHOSTNAMELEN
+#endif // MAX_HOSTNAME_LEN
+
+/* Maximum health check description length */
+#ifndef HCHK_DESC_LEN
+#define HCHK_DESC_LEN	128
+#endif
+
+/* ciphers used as defaults on connect */
+#ifndef CONNECT_DEFAULT_CIPHERS
+#define CONNECT_DEFAULT_CIPHERS NULL
+#endif
+
+/* ciphers used as defaults on TLS 1.3 connect */
+#ifndef CONNECT_DEFAULT_CIPHERSUITES
+#define CONNECT_DEFAULT_CIPHERSUITES NULL
+#endif
+
+/* ciphers used as defaults on listeners */
+#ifndef LISTEN_DEFAULT_CIPHERS
+#define LISTEN_DEFAULT_CIPHERS NULL
+#endif
+
+/* cipher suites used as defaults on TLS 1.3 listeners */
+#ifndef LISTEN_DEFAULT_CIPHERSUITES
+#define LISTEN_DEFAULT_CIPHERSUITES NULL
+#endif
+
+/* named curve used as defaults for ECDHE ciphers */
+#ifndef ECDHE_DEFAULT_CURVE
+#define ECDHE_DEFAULT_CURVE "prime256v1"
+#endif
+
+/* ssl cache size */
+#ifndef SSLCACHESIZE
+#define SSLCACHESIZE 20000
+#endif
+
+/* ssl max dh param size */
+#ifndef SSL_DEFAULT_DH_PARAM
+#define SSL_DEFAULT_DH_PARAM 0
+#endif
+
+/* max memory cost per SSL session */
+#ifndef SSL_SESSION_MAX_COST
+#define SSL_SESSION_MAX_COST (16*1024)    // measured
+#endif
+
+/* max memory cost per SSL handshake (on top of session) */
+#ifndef SSL_HANDSHAKE_MAX_COST
+#define SSL_HANDSHAKE_MAX_COST (76*1024)  // measured
+#endif
+
+#ifndef DEFAULT_SSL_CTX_CACHE
+#define DEFAULT_SSL_CTX_CACHE 1000
+#endif
+
+/* approximate stream size (for maxconn estimate) */
+#ifndef STREAM_MAX_COST
+#define STREAM_MAX_COST (sizeof(struct stream) + \
+                          2 * sizeof(struct channel) + \
+                          2 * sizeof(struct connection) + \
+                          global.tune.requri_len + \
+                          2 * global.tune.cookie_len)
+#endif
+
+/* available memory estimate : count about 3% of overhead in various structures */
+#ifndef MEM_USABLE_RATIO
+#define MEM_USABLE_RATIO 0.97
+#endif
+
+/* default per-thread pool cache size when enabled */
+#ifndef CONFIG_HAP_POOL_CACHE_SIZE
+#define CONFIG_HAP_POOL_CACHE_SIZE 1048576
+#endif
+
+/* Number of samples used to compute the times reported in stats. A power of
+ * two is highly recommended, and this value multiplied by the largest response
+ * time must not overflow an unsigned int. See freq_ctr.h for more information.
+ * We consider that values are accurate to 95% with two batches of samples below,
+ * so in order to advertise accurate times across 1k samples, we effectively
+ * measure over 512.
+ */
+#ifndef TIME_STATS_SAMPLES
+#define TIME_STATS_SAMPLES 512
+#endif
+
+/* max ocsp cert id asn1 encoded length */
+#ifndef OCSP_MAX_CERTID_ASN1_LENGTH
+#define OCSP_MAX_CERTID_ASN1_LENGTH 128
+#endif
+
+#ifndef OCSP_MAX_RESPONSE_TIME_SKEW
+#define OCSP_MAX_RESPONSE_TIME_SKEW 300
+#endif
+
+/* Number of TLS tickets to check, used for rotation */
+#ifndef TLS_TICKETS_NO
+#define TLS_TICKETS_NO 3
+#endif
+
+/* pattern lookup default cache size, in number of entries :
+ * 10k entries at 10k req/s mean 1% risk of a collision after 60 years, that's
+ * already much less than the memory's reliability in most machines and more
+ * durable than most admins' life expectancy. A collision will result in a
+ * valid result to be returned for a different entry from the same list.
+ */
+#ifndef DEFAULT_PAT_LRU_SIZE
+#define DEFAULT_PAT_LRU_SIZE 10000
+#endif
+
+/* maximum number of pollers that may be registered */
+#ifndef MAX_POLLERS
+#define MAX_POLLERS	10
+#endif
+
+/* Make all xxhash functions inline, with implementations being directly
+ * included within xxhash.h.
+ */
+#ifndef XXH_INLINE_ALL
+#define XXH_INLINE_ALL
+#endif
+
+#endif /* _HAPROXY_DEFAULTS_H */
diff --git a/contrib/mod_defender/include/haproxy/http-t.h b/contrib/mod_defender/include/haproxy/http-t.h
new file mode 100644
index 0000000..81bd8ab
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/http-t.h
@@ -0,0 +1,135 @@
+/*
+ * include/haproxy/http-t.h
+ *
+ * Version-agnostic and implementation-agnostic HTTP protocol definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_HTTP_T_H
+#define _HAPROXY_HTTP_T_H
+
+#include <inttypes.h>
+#include <haproxy/buf-t.h>
+
+/*
+ * some macros mainly used when parsing header fields.
+ * from RFC7230:
+ *   CTL                 = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ *   SEP                 = one of the 17 defined separators or SP or HT
+ *   LWS                 = CR, LF, SP or HT
+ *   SPHT                = SP or HT. Use this macro and not a boolean expression for best speed.
+ *   CRLF                = CR or LF. Use this macro and not a boolean expression for best speed.
+ *   token               = any CHAR except CTL or SEP. Use this macro and not a boolean expression for best speed.
+ *
+ * added for ease of use:
+ *   ver_token           = 'H', 'P', 'T', '/', '.', and digits.
+ */
+#define HTTP_FLG_CTL  0x01
+#define HTTP_FLG_SEP  0x02
+#define HTTP_FLG_LWS  0x04
+#define HTTP_FLG_SPHT 0x08
+#define HTTP_FLG_CRLF 0x10
+#define HTTP_FLG_TOK  0x20
+#define HTTP_FLG_VER  0x40
+#define HTTP_FLG_DIG  0x80
+
+#define HTTP_IS_CTL(x)       (http_char_classes[(uint8_t)(x)] & HTTP_FLG_CTL)
+#define HTTP_IS_SEP(x)       (http_char_classes[(uint8_t)(x)] & HTTP_FLG_SEP)
+#define HTTP_IS_LWS(x)       (http_char_classes[(uint8_t)(x)] & HTTP_FLG_LWS)
+#define HTTP_IS_SPHT(x)      (http_char_classes[(uint8_t)(x)] & HTTP_FLG_SPHT)
+#define HTTP_IS_CRLF(x)      (http_char_classes[(uint8_t)(x)] & HTTP_FLG_CRLF)
+#define HTTP_IS_TOKEN(x)     (http_char_classes[(uint8_t)(x)] & HTTP_FLG_TOK)
+#define HTTP_IS_VER_TOKEN(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_VER)
+#define HTTP_IS_DIGIT(x)     (http_char_classes[(uint8_t)(x)] & HTTP_FLG_DIG)
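+/* Illustrative use, assuming the http_char_classes[] lookup table (defined
+ * in haproxy's http.c, not imported here) is available:
+ *
+ *   while (len && HTTP_IS_TOKEN(*p))
+ *           p++, len--;   // skip a header field name
+ */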
+
+/* Known HTTP methods */
+enum http_meth_t {
+	HTTP_METH_OPTIONS,
+	HTTP_METH_GET,
+	HTTP_METH_HEAD,
+	HTTP_METH_POST,
+	HTTP_METH_PUT,
+	HTTP_METH_DELETE,
+	HTTP_METH_TRACE,
+	HTTP_METH_CONNECT,
+	HTTP_METH_OTHER, /* Must be the last entry */
+} __attribute__((packed));
+
+/* Known HTTP authentication schemes */
+enum ht_auth_m {
+	HTTP_AUTH_WRONG		= -1,		/* missing or unknown */
+	HTTP_AUTH_UNKNOWN	= 0,
+	HTTP_AUTH_BASIC,
+	HTTP_AUTH_DIGEST,
+} __attribute__((packed));
+
+/* All implemented HTTP status codes */
+enum {
+	HTTP_ERR_200 = 0,
+	HTTP_ERR_400,
+	HTTP_ERR_401,
+	HTTP_ERR_403,
+	HTTP_ERR_404,
+	HTTP_ERR_405,
+	HTTP_ERR_407,
+	HTTP_ERR_408,
+	HTTP_ERR_410,
+	HTTP_ERR_413,
+	HTTP_ERR_421,
+	HTTP_ERR_425,
+	HTTP_ERR_429,
+	HTTP_ERR_500,
+	HTTP_ERR_501,
+	HTTP_ERR_502,
+	HTTP_ERR_503,
+	HTTP_ERR_504,
+	HTTP_ERR_SIZE
+};
+
+/* Note: the strings below make use of chunks. Chunks may carry an allocated
+ * size in addition to the length. The size counts from the beginning (str)
+ * to the end. If the size is unknown, it MUST be zero, in which case the
+ * sample will automatically be duplicated when a change larger than <len> has
+ * to be performed. Thus it is safe to always set size to zero.
+ */
+struct http_meth {
+	enum http_meth_t meth;
+	struct buffer str;
+};
+
+struct http_auth_data {
+	enum ht_auth_m method;                /* one of HTTP_AUTH_* */
+	/* 7 bytes unused here */
+	struct buffer method_data;            /* points to the credential part of the 'Authorization:' header */
+	char *user, *pass;                    /* extracted username & password */
+};
+
+enum http_etag_type {
+	ETAG_INVALID = 0,
+	ETAG_STRONG,
+	ETAG_WEAK
+};
+
+#endif /* _HAPROXY_HTTP_T_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/contrib/mod_defender/include/haproxy/intops.h b/contrib/mod_defender/include/haproxy/intops.h
new file mode 100644
index 0000000..40e87f9
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/intops.h
@@ -0,0 +1,469 @@
+/*
+ * include/haproxy/intops.h
+ * Functions for integer operations.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+
+#ifndef _HAPROXY_INTOPS_H
+#define _HAPROXY_INTOPS_H
+
+#include <haproxy/api.h>
+
+/* Multiply the two 32-bit operands and shift the 64-bit result right 32 bits.
+ * This is used to compute fixed ratios by setting one of the operands to
+ * (2^32*ratio).
+ */
+static inline unsigned int mul32hi(unsigned int a, unsigned int b)
+{
+	return ((unsigned long long)a * b + a) >> 32;
+}
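+
+/* Example: with b = (uint)(0.25 * (1ULL << 32)) == 0x40000000, mul32hi(a, b)
+ * yields roughly a quarter of <a> (the trailing "+ a" nudges the result up
+ * slightly).
+ */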
+
+/* gcc does not know when it can safely divide 64 bits by 32 bits. Use this
+ * function when you know for sure that the result fits in 32 bits, because
+ * it is optimal on x86 and on 64bit processors.
+ */
+static inline unsigned int div64_32(unsigned long long o1, unsigned int o2)
+{
+	unsigned long long result;
+#ifdef __i386__
+	asm("divl %2"
+	    : "=A" (result)
+	    : "A"(o1), "rm"(o2));
+#else
+	result = o1 / o2;
+#endif
+	return result;
+}
+
+/* rotate left a 64-bit integer by <bits:[0-63]> bits */
+static inline uint64_t rotl64(uint64_t v, uint8_t bits)
+{
+#if !defined(__ARM_ARCH_8A) && !defined(__x86_64__)
+	bits &= 63;
+#endif
+	v = (v << bits) | (v >> (-bits & 63));
+	return v;
+}
+
+/* rotate right a 64-bit integer by <bits:[0-63]> bits */
+static inline uint64_t rotr64(uint64_t v, uint8_t bits)
+{
+#if !defined(__ARM_ARCH_8A) && !defined(__x86_64__)
+	bits &= 63;
+#endif
+	v = (v >> bits) | (v << (-bits & 63));
+	return v;
+}
+
+/* Simple popcountl implementation. It returns the number of ones in a word.
+ * Described here : https://graphics.stanford.edu/~seander/bithacks.html
+ */
+static inline unsigned int my_popcountl(unsigned long a)
+{
+	a = a - ((a >> 1) & ~0UL/3);
+	a = (a & ~0UL/15*3) + ((a >> 2) & ~0UL/15*3);
+	a = (a + (a >> 4)) & ~0UL/255*15;
+	return (unsigned long)(a * (~0UL/255)) >> (sizeof(unsigned long) - 1) * 8;
+}
+
+/* returns non-zero if <a> has at least 2 bits set */
+static inline unsigned long atleast2(unsigned long a)
+{
+	return a & (a - 1);
+}
+
+/* Simple ffs implementation. It returns the position of the lowest bit set to
+ * one, starting at 1. It is illegal to call it with a==0 (undefined result).
+ */
+static inline unsigned int my_ffsl(unsigned long a)
+{
+	unsigned long cnt;
+
+#if defined(__x86_64__)
+	__asm__("bsf %1,%0\n" : "=r" (cnt) : "rm" (a));
+	cnt++;
+#else
+
+	cnt = 1;
+#if LONG_MAX > 0x7FFFFFFFL /* 64bits */
+	if (!(a & 0xFFFFFFFFUL)) {
+		a >>= 32;
+		cnt += 32;
+	}
+#endif
+	if (!(a & 0XFFFFU)) {
+		a >>= 16;
+		cnt += 16;
+	}
+	if (!(a & 0XFF)) {
+		a >>= 8;
+		cnt += 8;
+	}
+	if (!(a & 0xf)) {
+		a >>= 4;
+		cnt += 4;
+	}
+	if (!(a & 0x3)) {
+		a >>= 2;
+		cnt += 2;
+	}
+	if (!(a & 0x1)) {
+		cnt += 1;
+	}
+#endif /* x86_64 */
+
+	return cnt;
+}
+
+/* Simple fls implementation. It returns the position of the highest bit set to
+ * one, starting at 1. It is illegal to call it with a==0 (undefined result).
+ */
+static inline unsigned int my_flsl(unsigned long a)
+{
+	unsigned long cnt;
+
+#if defined(__x86_64__)
+	__asm__("bsr %1,%0\n" : "=r" (cnt) : "rm" (a));
+	cnt++;
+#else
+
+	cnt = 1;
+#if LONG_MAX > 0x7FFFFFFFUL /* 64bits */
+	if (a & 0xFFFFFFFF00000000UL) {
+		a >>= 32;
+		cnt += 32;
+	}
+#endif
+	if (a & 0XFFFF0000U) {
+		a >>= 16;
+		cnt += 16;
+	}
+	if (a & 0XFF00) {
+		a >>= 8;
+		cnt += 8;
+	}
+	if (a & 0xf0) {
+		a >>= 4;
+		cnt += 4;
+	}
+	if (a & 0xc) {
+		a >>= 2;
+		cnt += 2;
+	}
+	if (a & 0x2) {
+		cnt += 1;
+	}
+#endif /* x86_64 */
+
+	return cnt;
+}
+
+/* Build a word with the <bits> lower bits set (reverse of my_popcountl) */
+static inline unsigned long nbits(int bits)
+{
+	if (--bits < 0)
+		return 0;
+	else
+		return (2UL << bits) - 1;
+}
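+
+/* Illustrative values: nbits(0) == 0, nbits(3) == 0x7, nbits(LONGBITS) == ~0UL */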
+
+/* Turns 64-bit value <a> from host byte order to network byte order.
+ * The principle consists in letting the compiler detect we're playing
+ * with a union and simplify most or all operations. The asm-optimized
+ * htonl() version involving bswap (x86) / rev (arm) / other is a single
+ * operation on little endian, or a NOP on big-endian. In both cases,
+ * this lets the compiler "see" that we're rebuilding a 64-bit word from
+ * two 32-bit quantities that fit into a 32-bit register. In big endian,
+ * the whole code is optimized out. In little endian, with a decent compiler,
+ * a few bswap and 2 shifts are left, which is the minimum acceptable.
+ */
+static inline unsigned long long my_htonll(unsigned long long a)
+{
+#if defined(__x86_64__)
+	__asm__ volatile("bswapq %0" : "=r"(a) : "0"(a));
+	return a;
+#else
+	union {
+		struct {
+			unsigned int w1;
+			unsigned int w2;
+		} by32;
+		unsigned long long by64;
+	} w = { .by64 = a };
+	return ((unsigned long long)htonl(w.by32.w1) << 32) | htonl(w.by32.w2);
+#endif
+}
+
+/* Turns 64-bit value <a> from network byte order to host byte order. */
+static inline unsigned long long my_ntohll(unsigned long long a)
+{
+	return my_htonll(a);
+}
+
+/* sets bit <bit> into map <map>, which must be long-aligned */
+static inline void ha_bit_set(unsigned long bit, long *map)
+{
+	map[bit / (8 * sizeof(*map))] |= 1UL << (bit & (8 * sizeof(*map) - 1));
+}
+
+/* clears bit <bit> from map <map>, which must be long-aligned */
+static inline void ha_bit_clr(unsigned long bit, long *map)
+{
+	map[bit / (8 * sizeof(*map))] &= ~(1UL << (bit & (8 * sizeof(*map) - 1)));
+}
+
+/* flips bit <bit> from map <map>, which must be long-aligned */
+static inline void ha_bit_flip(unsigned long bit, long *map)
+{
+	map[bit / (8 * sizeof(*map))] ^= 1UL << (bit & (8 * sizeof(*map) - 1));
+}
+
+/* returns non-zero if bit <bit> from map <map> is set, otherwise 0 */
+static inline int ha_bit_test(unsigned long bit, const long *map)
+{
+	return !!(map[bit / (8 * sizeof(*map))] & 1UL << (bit & (8 * sizeof(*map) - 1)));
+}
+
+/* hash a 32-bit integer to another 32-bit integer. This code may be large when
+ * inlined, use full_hash() instead.
+ */
+static inline unsigned int __full_hash(unsigned int a)
+{
+	/* This function is one of Bob Jenkins' full avalanche hashing
+	 * functions, which provides quite a good distribution for small
+	 * input variations. The result is quite suited to fit over a 32-bit
+	 * space with enough variations so that a randomly picked number falls
+	 * equally before any server position.
+	 * Check http://burtleburtle.net/bob/hash/integer.html for more info.
+	 */
+	a = (a+0x7ed55d16) + (a<<12);
+	a = (a^0xc761c23c) ^ (a>>19);
+	a = (a+0x165667b1) + (a<<5);
+	a = (a+0xd3a2646c) ^ (a<<9);
+	a = (a+0xfd7046c5) + (a<<3);
+	a = (a^0xb55a4f09) ^ (a>>16);
+
+	/* ensure values are better spread all around the tree by multiplying
+	 * by a large prime close to 3/4 of the tree.
+	 */
+	return a * 3221225473U;
+}
+
+/*
+ * Return integer equivalent of character <c> for a hex digit (0-9, a-f, A-F),
+ * otherwise -1. This compact form helps gcc produce efficient code.
+ */
+static inline int hex2i(int c)
+{
+	if ((unsigned char)(c -= '0') > 9) {
+		if ((unsigned char)(c -= 'A' - '0') > 5 &&
+			      (unsigned char)(c -= 'a' - 'A') > 5)
+			c = -11;
+		c += 10;
+	}
+	return c;
+}
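+
+/* Illustrative values: hex2i('7') == 7, hex2i('b') == 11, hex2i('g') == -1 */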
+
+/* This one is 6 times faster than strtoul() on athlon, but does
+ * no check at all.
+ */
+static inline unsigned int __str2ui(const char *s)
+{
+	unsigned int i = 0;
+	while (*s) {
+		i = i * 10 - '0';
+		i += (unsigned char)*s++;
+	}
+	return i;
+}
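+
+/* Note: the loop above folds the usual (*s - '0') conversion into the
+ * accumulator: i * 10 - '0' + *s == i * 10 + (*s - '0').
+ */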
+
+/* This one is 5 times faster than strtoul() on athlon with checks.
+ * It returns the value of the number composed of all valid digits read.
+ */
+static inline unsigned int __str2uic(const char *s)
+{
+	unsigned int i = 0;
+	unsigned int j;
+
+	while (1) {
+		j = (*s++) - '0';
+		if (j > 9)
+			break;
+		i *= 10;
+		i += j;
+	}
+	return i;
+}
+
+/* This one is 28 times faster than strtoul() on athlon, but does
+ * no check at all!
+ */
+static inline unsigned int __strl2ui(const char *s, int len)
+{
+	unsigned int i = 0;
+
+	while (len-- > 0) {
+		i = i * 10 - '0';
+		i += (unsigned char)*s++;
+	}
+	return i;
+}
+
+/* This one is 7 times faster than strtoul() on athlon with checks.
+ * It returns the value of the number composed of all valid digits read.
+ */
+static inline unsigned int __strl2uic(const char *s, int len)
+{
+	unsigned int i = 0;
+	unsigned int j, k;
+
+	while (len-- > 0) {
+		j = (*s++) - '0';
+		k = i * 10;
+		if (j > 9)
+			break;
+		i = k + j;
+	}
+	return i;
+}
+
+/* This function reads an unsigned integer from the string pointed to by <s>
+ * and returns it. The <s> pointer is adjusted to point to the first unread
+ * char. The function automatically stops at <end>.
+ */
+static inline unsigned int __read_uint(const char **s, const char *end)
+{
+	const char *ptr = *s;
+	unsigned int i = 0;
+	unsigned int j, k;
+
+	while (ptr < end) {
+		j = *ptr - '0';
+		k = i * 10;
+		if (j > 9)
+			break;
+		i = k + j;
+		ptr++;
+	}
+	*s = ptr;
+	return i;
+}
+
+/* returns the number of bytes needed to encode <v> as a varint. Be careful, use
+ * it only with constants as it generates large code (typ. 180 bytes). Use the
+ * varint_bytes() version instead in case of doubt.
+ */
+static inline int __varint_bytes(uint64_t v)
+{
+	switch (v) {
+	case 0x0000000000000000 ... 0x00000000000000ef: return 1;
+	case 0x00000000000000f0 ... 0x00000000000008ef: return 2;
+	case 0x00000000000008f0 ... 0x00000000000408ef: return 3;
+	case 0x00000000000408f0 ... 0x00000000020408ef: return 4;
+	case 0x00000000020408f0 ... 0x00000001020408ef: return 5;
+	case 0x00000001020408f0 ... 0x00000081020408ef: return 6;
+	case 0x00000081020408f0 ... 0x00004081020408ef: return 7;
+	case 0x00004081020408f0 ... 0x00204081020408ef: return 8;
+	case 0x00204081020408f0 ... 0x10204081020408ef: return 9;
+	default: return 10;
+	}
+}
+
+/* Encode the integer <i> into a varint (variable-length integer). The encoded
+ * value is copied in <*buf>. Here is the encoding format:
+ *
+ *        0 <= X < 240        : 1 byte  (7.875 bits)  [ XXXX XXXX ]
+ *      240 <= X < 2288       : 2 bytes (11 bits)     [ 1111 XXXX ] [ 0XXX XXXX ]
+ *     2288 <= X < 264432     : 3 bytes (18 bits)     [ 1111 XXXX ] [ 1XXX XXXX ]   [ 0XXX XXXX ]
+ *   264432 <= X < 33818864   : 4 bytes (25 bits)     [ 1111 XXXX ] [ 1XXX XXXX ]*2 [ 0XXX XXXX ]
+ * 33818864 <= X < 4328786160 : 5 bytes (32 bits)     [ 1111 XXXX ] [ 1XXX XXXX ]*3 [ 0XXX XXXX ]
+ * ...
+ *
+ * On success, it returns the number of written bytes and <*buf> is moved after
+ * the encoded value. Otherwise, it returns -1. */
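+
+/* Worked example (informative): 1000 encodes on two bytes as 0xF8 0x2F: the
+ * first byte keeps the low nibble of 1000 (0x8) under the 0xF0 prefix, then
+ * (1000 - 240) >> 4 = 47 = 0x2F fits in the final byte. Decoding computes
+ * 0xF8 + (0x2F << 4) = 248 + 752 = 1000.
+ */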
+static inline int encode_varint(uint64_t i, char **buf, char *end)
+{
+	unsigned char *p = (unsigned char *)*buf;
+	int r;
+
+	if (p >= (unsigned char *)end)
+		return -1;
+
+	if (i < 240) {
+		*p++ = i;
+		*buf = (char *)p;
+		return 1;
+	}
+
+	*p++ = (unsigned char)i | 240;
+	i = (i - 240) >> 4;
+	while (i >= 128) {
+		if (p >= (unsigned char *)end)
+			return -1;
+		*p++ = (unsigned char)i | 128;
+		i = (i - 128) >> 7;
+	}
+
+	if (p >= (unsigned char *)end)
+		return -1;
+	*p++ = (unsigned char)i;
+
+	r    = ((char *)p - *buf);
+	*buf = (char *)p;
+	return r;
+}
+
+/* Decode a varint from <*buf> and save the decoded value in <*i>. See
+ * encode_varint() above for details about the varint format.
+ * On success, it returns the number of read bytes and <*buf> is moved after the
+ * varint. Otherwise, it returns -1. */
+static inline int decode_varint(char **buf, char *end, uint64_t *i)
+{
+	unsigned char *p = (unsigned char *)*buf;
+	int r;
+
+	if (p >= (unsigned char *)end)
+		return -1;
+
+	*i = *p++;
+	if (*i < 240) {
+		*buf = (char *)p;
+		return 1;
+	}
+
+	r = 4;
+	do {
+		if (p >= (unsigned char *)end)
+			return -1;
+		*i += (uint64_t)*p << r;
+		r  += 7;
+	} while (*p++ >= 128);
+
+	r    = ((char *)p - *buf);
+	*buf = (char *)p;
+	return r;
+}
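+
+/* Usage sketch (illustration only): a round-trip through a local buffer.
+ * Both functions advance their cursor, so the write and read positions
+ * are tracked separately:
+ *
+ *     char buf[10];
+ *     char *wr = buf, *rd = buf;
+ *     uint64_t out = 0;
+ *
+ *     if (encode_varint(300, &wr, buf + sizeof(buf)) > 0 &&
+ *         decode_varint(&rd, wr, &out) > 0) {
+ *             // out == 300 and rd == wr here
+ *     }
+ */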
+
+#endif /* _HAPROXY_INTOPS_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/contrib/mod_defender/include/haproxy/list-t.h b/contrib/mod_defender/include/haproxy/list-t.h
new file mode 100644
index 0000000..dd8493e
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/list-t.h
@@ -0,0 +1,73 @@
+/*
+ * include/haproxy/list-t.h
+ * Circular list manipulation types definitions
+ *
+ * Copyright (C) 2002-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_LIST_T_H
+#define _HAPROXY_LIST_T_H
+
+
+/* these are circular or bidirectional lists only. Each list pointer points to
+ * another list pointer in a structure, and not the structure itself. The
+ * pointer to the next element MUST be the first one so that the list can
+ * easily be cast as a singly linked list or pointer.
+ */
+struct list {
+    struct list *n;	/* next */
+    struct list *p;	/* prev */
+};
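+
+/* Usage sketch (illustration only): lists are intrusive, so the linkage is
+ * embedded in the owning structure instead of being allocated separately:
+ *
+ *     struct task {              // hypothetical owner structure
+ *             struct list list;  // linkage into a task queue
+ *             int id;
+ *     };
+ */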
+
+/* This is similar to struct list, but we want to be sure the compiler will
+ * yell at you if you use the macros for one when you're using the other. You
+ * have to explicitly cast if that's really what you want to do.
+ */
+struct mt_list {
+    struct mt_list *next;
+    struct mt_list *prev;
+};
+
+
+/* a back-ref is a pointer to a target list entry. It is used to detect when an
+ * element being deleted is currently being tracked by another user. The best
+ * example is a user dumping the session table. The table does not fit in the
+ * output buffer so we have to set a mark on a session and go on later. But if
+ * that marked session gets deleted, we don't want the user's pointer to go in
+ * the wild. So we can simply link this user's request to the list of this
+ * session's users, and put a pointer to the list element in ref, that will be
+ * used as the mark for next iteration.
+ */
+struct bref {
+	struct list users;
+	struct list *ref; /* pointer to the target's list entry */
+};
+
+/* a word list is a generic list with a pointer to a string in each element. */
+struct wordlist {
+	struct list list;
+	char *s;
+};
+
+/* this is the same as above with an additional pointer to a condition. */
+struct cond_wordlist {
+	struct list list;
+	void *cond;
+	char *s;
+};
+
+#endif /* _HAPROXY_LIST_T_H */
diff --git a/contrib/mod_defender/include/haproxy/list.h b/contrib/mod_defender/include/haproxy/list.h
new file mode 100644
index 0000000..0efe4e9
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/list.h
@@ -0,0 +1,804 @@
+/*
+ * include/haproxy/list.h
+ * Circular list manipulation macros and functions.
+ *
+ * Copyright (C) 2002-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_LIST_H
+#define _HAPROXY_LIST_H
+
+#include <haproxy/api.h>
+
+/* First undefine some macros which happen to also be defined on OpenBSD,
+ * in sys/queue.h, used by sys/event.h
+ */
+#undef LIST_HEAD
+#undef LIST_INIT
+#undef LIST_NEXT
+
+/* ILH = Initialized List Head : used to prevent gcc from moving an empty
+ * list to BSS. Some older versions tend to trim the whole array and cause
+ * corruption.
+ */
+#define ILH		{ .n = (struct list *)1, .p = (struct list *)2 }
+
+#define LIST_HEAD(a)	((void *)(&(a)))
+
+#define LIST_INIT(l) ((l)->n = (l)->p = (l))
+
+#define LIST_HEAD_INIT(l) { &l, &l }
+
+/* adds an element at the beginning of a list ; returns the element */
+#define LIST_ADD(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); (el); })
+
+/* adds an element at the end of a list ; returns the element */
+#define LIST_ADDQ(lh, el) ({ (el)->p = (lh)->p; (el)->p->n = (lh)->p = (el); (el)->n = (lh); (el); })
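+
+/* Usage sketch (illustration only), reusing the hypothetical struct task
+ * from list-t.h:
+ *
+ *     struct list queue = LIST_HEAD_INIT(queue);
+ *     struct task t = { .id = 1 };
+ *
+ *     LIST_ADDQ(&queue, &t.list);   // append: queue now holds [t]
+ */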
+
+/* adds the contents of a list <old> at the beginning of another list <new>. The old list head remains untouched. */
+#define LIST_SPLICE(new, old) do {				     \
+		if (!LIST_ISEMPTY(old)) {			     \
+			(old)->p->n = (new)->n; (old)->n->p = (new); \
+			(new)->n->p = (old)->p; (new)->n = (old)->n; \
+		}						     \
+	} while (0)
+
+/* adds the contents of a list whose first element is <old> and last one is
+ * <old->prev> at the end of another list <new>. The old list DOES NOT have
+ * any head here.
+ */
+#define LIST_SPLICE_END_DETACHED(new, old) do {              \
+		typeof(new) __t;                             \
+		(new)->p->n = (old);                         \
+		(old)->p->n = (new);                         \
+		__t = (old)->p;                              \
+		(old)->p = (new)->p;                         \
+		(new)->p = __t;                              \
+	} while (0)
+
+/* removes an element from a list and returns it */
+#define LIST_DEL(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; (__ret); })
+
+/* removes an element from a list, initializes it and returns it.
+ * This is faster than LIST_DEL+LIST_INIT as we avoid reloading the pointers.
+ */
+#define LIST_DEL_INIT(el) ({ \
+	typeof(el) __ret = (el);                        \
+	typeof(__ret->n) __n = __ret->n;                \
+	typeof(__ret->p) __p = __ret->p;                \
+	__n->p = __p; __p->n = __n;                     \
+	__ret->n = __ret->p = __ret;                    \
+	__ret;                                          \
+})
+
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define LIST_ISEMPTY(lh) ((lh)->n == (lh))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using LIST_DEL_INIT)
+ */
+#define LIST_ADDED(el) ((el)->n != (el))
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: LIST_NEXT(args, struct node *, list)
+ */
+#define LIST_NEXT(lh, pt, el) (LIST_ELEM((lh)->n, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef LIST_PREV
+#define LIST_PREV(lh, pt, el) (LIST_ELEM((lh)->p, pt, el))
+
+/*
+ * Simpler FOREACH_ITEM macro inspired by the Linux sources.
+ * Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct list" member named <member>. A pointer to the head of
+ * the list is passed in <list_head>. No temporary variable is needed. Note
+ * that <item> must not be modified during the loop.
+ * Example: list_for_each_entry(cur_acl, known_acl, list) { ... };
+ */
+#define list_for_each_entry(item, list_head, member)                      \
+	for (item = LIST_ELEM((list_head)->n, typeof(item), member);     \
+	     &item->member != (list_head);                                \
+	     item = LIST_ELEM(item->member.n, typeof(item), member))
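+
+/* Usage sketch (illustration only): walking the hypothetical task queue
+ * built above:
+ *
+ *     struct task *cur;
+ *
+ *     list_for_each_entry(cur, &queue, list)
+ *             printf("task %d\n", cur->id);
+ */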
+
+/*
+ * Same as list_for_each_entry but starting from current point
+ * Iterates <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_from(item, list_head, member) \
+	for ( ; &item->member != (list_head); \
+	     item = LIST_ELEM(item->member.n, typeof(item), member))
+
+/*
+ * Simpler FOREACH_ITEM_SAFE macro inspired by the Linux sources.
+ * Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct list" member named <member>. A pointer to the head of
+ * the list is passed in <list_head>. A temporary variable <back> of the same
+ * type as <item> is needed so that <item> may safely be deleted if needed.
+ * Example: list_for_each_entry_safe(cur_acl, tmp, known_acl, list) { ... };
+ */
+#define list_for_each_entry_safe(item, back, list_head, member)           \
+	for (item = LIST_ELEM((list_head)->n, typeof(item), member),     \
+	     back = LIST_ELEM(item->member.n, typeof(item), member);     \
+	     &item->member != (list_head);                                \
+	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
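+
+/* Usage sketch (illustration only): the <back> variable makes it safe to
+ * delete the element being visited:
+ *
+ *     struct task *cur, *back;
+ *
+ *     list_for_each_entry_safe(cur, back, &queue, list) {
+ *             if (cur->id == 1)
+ *                     LIST_DEL(&cur->list);
+ *     }
+ */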
+
+
+/*
+ * Same as list_for_each_entry_safe but starting from current point
+ * Iterates <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_safe_from(item, back, list_head, member) \
+	for (back = LIST_ELEM(item->member.n, typeof(item), member);     \
+	     &item->member != (list_head);                                \
+	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
+
+/*
+ * Iterate backwards <item> through a list of items of type "typeof(*item)"
+ * which are linked via a "struct list" member named <member>. A pointer to
+ * the head of the list is passed in <list_head>. No temporary variable is
+ * needed. Note that <item> must not be modified during the loop.
+ * Example: list_for_each_entry_rev(cur_acl, known_acl, list) { ... };
+ */
+#define list_for_each_entry_rev(item, list_head, member)                 \
+	for (item = LIST_ELEM((list_head)->p, typeof(item), member);     \
+	     &item->member != (list_head);                               \
+	     item = LIST_ELEM(item->member.p, typeof(item), member))
+
+/*
+ * Same as list_for_each_entry_rev but starting from current point
+ * Iterate backwards <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_from_rev(item, list_head, member) \
+	for ( ; &item->member != (list_head); \
+	     item = LIST_ELEM(item->member.p, typeof(item), member))
+
+/*
+ * Iterate backwards <item> through a list of items of type "typeof(*item)"
+ * which are linked via a "struct list" member named <member>. A pointer to
+ * the head of the list is passed in <list_head>. A temporary variable <back>
+ * of same type as <item> is needed so that <item> may safely be deleted
+ * if needed.
+ * Example: list_for_each_entry_safe_rev(cur_acl, tmp, known_acl, list) { ... };
+ */
+#define list_for_each_entry_safe_rev(item, back, list_head, member)      \
+	for (item = LIST_ELEM((list_head)->p, typeof(item), member),     \
+	     back = LIST_ELEM(item->member.p, typeof(item), member);     \
+	     &item->member != (list_head);                               \
+	     item = back, back = LIST_ELEM(back->member.p, typeof(back), member))
+
+/*
+ * Same as list_for_each_entry_safe_rev but starting from current point
+ * Iterate backwards <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_safe_from_rev(item, back, list_head, member) \
+	for (back = LIST_ELEM(item->member.p, typeof(item), member);     \
+	     &item->member != (list_head);                               \
+	     item = back, back = LIST_ELEM(back->member.p, typeof(back), member))
+
+
+/*
+ * Locked version of list manipulation macros.
+ * It is OK to use those concurrently from multiple threads, as long as the
+ * list is only used with the locked variants.
+ */
+#define MT_LIST_BUSY ((struct mt_list *)1)
+
+/*
+ * Add an item at the beginning of a list.
+ * Returns 1 if we added the item, 0 otherwise (because it was already in a
+ * list).
+ */
+#define MT_LIST_TRY_ADD(_lh, _el)                                              \
+     ({                                                                    \
+        int _ret = 0;                                                      \
+	struct mt_list *lh = (_lh), *el = (_el);                           \
+	for (;;__ha_cpu_relax()) {                                         \
+		struct mt_list *n, *n2;                                    \
+		struct mt_list *p, *p2;                                    \
+		n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);            \
+		if (n == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);               \
+		if (p == MT_LIST_BUSY) {                                   \
+			(lh)->next = n;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY);             \
+		if (n2 != el) { /* element already linked */               \
+			if (n2 != MT_LIST_BUSY)                            \
+				el->next = n2;                             \
+			n->prev = p;                                       \
+			__ha_barrier_store();                              \
+			lh->next = n;                                      \
+			__ha_barrier_store();                              \
+			if (n2 == MT_LIST_BUSY)                            \
+				continue;                                  \
+			break;                                             \
+		}                                                          \
+		p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY);             \
+		if (p2 != el) {                                            \
+			if (p2 != MT_LIST_BUSY)                            \
+				el->prev = p2;                             \
+			n->prev = p;                                       \
+			el->next = el;                                     \
+			__ha_barrier_store();                              \
+			lh->next = n;                                      \
+			__ha_barrier_store();                              \
+			if (p2 == MT_LIST_BUSY)                            \
+				continue;                                  \
+			break;                                             \
+		}                                                          \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		__ha_barrier_store();                                      \
+		n->prev = (el);                                            \
+		__ha_barrier_store();                                      \
+		p->next = (el);                                            \
+		__ha_barrier_store();                                      \
+		_ret = 1;                                                  \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+     })
+
+/*
+ * Add an item at the end of a list.
+ * Returns 1 if we added the item, 0 otherwise (because it was already in a
+ * list).
+ */
+#define MT_LIST_TRY_ADDQ(_lh, _el)                                             \
+    ({                                                                     \
+	int _ret = 0;                                                      \
+	struct mt_list *lh = (_lh), *el = (_el);                           \
+	for (;;__ha_cpu_relax()) {                                         \
+		struct mt_list *n, *n2;                                    \
+		struct mt_list *p, *p2;                                    \
+		p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);            \
+		if (p == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);               \
+		if (n == MT_LIST_BUSY) {                                   \
+			(lh)->prev = p;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY);             \
+		if (p2 != el) {                                            \
+			if (p2 != MT_LIST_BUSY)                            \
+				el->prev = p2;                             \
+			p->next = n;                                       \
+			__ha_barrier_store();                              \
+			lh->prev = p;                                      \
+			__ha_barrier_store();                              \
+			if (p2 == MT_LIST_BUSY)                            \
+				continue;                                  \
+			break;                                             \
+		}                                                          \
+		n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY);             \
+		if (n2 != el) { /* element already linked */               \
+			if (n2 != MT_LIST_BUSY)                            \
+				el->next = n2;                             \
+			p->next = n;                                       \
+			el->prev = el;                                     \
+			__ha_barrier_store();                              \
+			lh->prev = p;                                      \
+			__ha_barrier_store();                              \
+			if (n2 == MT_LIST_BUSY)                            \
+				continue;                                  \
+			break;                                             \
+		}                                                          \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		__ha_barrier_store();                                      \
+		p->next = (el);                                            \
+		__ha_barrier_store();                                      \
+		n->prev = (el);                                            \
+		__ha_barrier_store();                                      \
+		_ret = 1;                                                  \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+    })
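+
+/* Usage sketch (illustration only): a producer thread appending a job to a
+ * shared queue. The element must be initialized first (MT_LIST_INIT and
+ * MT_LIST_HEAD_INIT are defined further down); the return value tells
+ * whether it was actually linked:
+ *
+ *     struct job { struct mt_list list; int id; };   // hypothetical type
+ *     struct mt_list work_queue = MT_LIST_HEAD_INIT(work_queue);
+ *     struct job j = { .id = 7 };
+ *
+ *     MT_LIST_INIT(&j.list);
+ *     if (!MT_LIST_TRY_ADDQ(&work_queue, &j.list))
+ *             ;   // was already in a list, nothing done
+ */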
+
+/*
+ * Add an item at the beginning of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked.
+ */
+#define MT_LIST_ADD(_lh, _el)                                              \
+     ({                                                                    \
+        int _ret = 0;                                                      \
+	struct mt_list *lh = (_lh), *el = (_el);                           \
+	for (;;__ha_cpu_relax()) {                                         \
+		struct mt_list *n;                                         \
+		struct mt_list *p;                                         \
+		n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);            \
+		if (n == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);               \
+		if (p == MT_LIST_BUSY) {                                   \
+			(lh)->next = n;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		__ha_barrier_store();                                      \
+		n->prev = (el);                                            \
+		__ha_barrier_store();                                      \
+		p->next = (el);                                            \
+		__ha_barrier_store();                                      \
+		_ret = 1;                                                  \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+     })
+
+/*
+ * Add an item at the end of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked
+ */
+#define MT_LIST_ADDQ(_lh, _el)                                     \
+    ({                                                                     \
+	int _ret = 0;                                                      \
+	struct mt_list *lh = (_lh), *el = (_el);                           \
+	for (;;__ha_cpu_relax()) {                                         \
+		struct mt_list *n;                                         \
+		struct mt_list *p;                                         \
+		p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);            \
+		if (p == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);               \
+		if (n == MT_LIST_BUSY) {                                   \
+			(lh)->prev = p;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		__ha_barrier_store();                                      \
+		p->next = (el);                                            \
+		__ha_barrier_store();                                      \
+		n->prev = (el);                                            \
+		__ha_barrier_store();                                      \
+		_ret = 1;                                                  \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+    })
+
+/*
+ * Detach a list from its head. A pointer to the first element is returned
+ * and the list is closed. If the list was empty, NULL is returned. This must
+ * only be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ, and
+ * is incompatible with MT_LIST_DEL running concurrently.
+ * If there's at least one element, the next of the last element will always
+ * be NULL.
+ */
+#define MT_LIST_BEHEAD(_lh) ({                                      \
+        struct mt_list *lh = (_lh);                                 \
+	struct mt_list *_n;                                         \
+	struct mt_list *_p;                                         \
+	for (;;__ha_cpu_relax()) {                                  \
+		_p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);    \
+		if (_p == MT_LIST_BUSY)                             \
+		        continue;                                   \
+		if (_p == (lh)) {                                   \
+			(lh)->prev = _p;                            \
+			__ha_barrier_store();                       \
+			_n = NULL;                                  \
+			break;                                      \
+		}                                                   \
+		_n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);    \
+		if (_n == MT_LIST_BUSY) {                           \
+			(lh)->prev = _p;                            \
+			__ha_barrier_store();                       \
+			continue;                                   \
+		}                                                   \
+		if (_n == (lh)) {                                   \
+			(lh)->next = _n;                            \
+			(lh)->prev = _p;                            \
+			__ha_barrier_store();                       \
+			_n = NULL;                                  \
+			break;                                      \
+		}                                                   \
+		(lh)->next = (lh);                                  \
+		(lh)->prev = (lh);                                  \
+		__ha_barrier_store();                               \
+		_n->prev = _p;                                      \
+		__ha_barrier_store();                               \
+		_p->next = NULL;                                    \
+		__ha_barrier_store();                               \
+		break;                                              \
+	}                                                           \
+	(_n);                                                       \
+})
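+
+/* Usage sketch (illustration only): draining the whole hypothetical
+ * work_queue in one atomic operation, then walking the detached chain. As
+ * documented above, the last element's next pointer is NULL:
+ *
+ *     struct mt_list *elt = MT_LIST_BEHEAD(&work_queue);
+ *
+ *     while (elt) {
+ *             struct mt_list *next = elt->next;
+ *             // process MT_LIST_ELEM(elt, struct job *, list)
+ *             elt = next;
+ *     }
+ */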
+
+
+/* Remove an item from a list.
+ * Returns 1 if we removed the item, 0 otherwise (because it was in no list).
+ */
+#define MT_LIST_DEL(_el)                                                   \
+    ({                                                                     \
+        int _ret = 0;                                                      \
+	struct mt_list *el = (_el);                                        \
+	for (;;__ha_cpu_relax()) {                                         \
+		struct mt_list *n, *n2;                                    \
+		struct mt_list *p, *p2 = NULL;                             \
+		n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY);            \
+		if (n == MT_LIST_BUSY)                                     \
+		        continue;                                          \
+		p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY);            \
+		if (p == MT_LIST_BUSY) {                                   \
+			(el)->next = n;                                    \
+			__ha_barrier_store();                              \
+			continue;                                          \
+		}                                                          \
+		if (p != (el)) {                                           \
+		        p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);      \
+		        if (p2 == MT_LIST_BUSY) {                          \
+		                (el)->prev = p;                            \
+				(el)->next = n;                            \
+				__ha_barrier_store();                      \
+				continue;                                  \
+			}                                                  \
+		}                                                          \
+		if (n != (el)) {                                           \
+		        n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);      \
+			if (n2 == MT_LIST_BUSY) {                          \
+				if (p2 != NULL)                            \
+					p->next = p2;                      \
+				(el)->prev = p;                            \
+				(el)->next = n;                            \
+				__ha_barrier_store();                      \
+				continue;                                  \
+			}                                                  \
+		}                                                          \
+		n->prev = p;                                               \
+		p->next = n;                                               \
+		if (p != (el) && n != (el))                                \
+			_ret = 1;                                          \
+		__ha_barrier_store();                                      \
+		(el)->prev = (el);                                         \
+		(el)->next = (el);                                         \
+		__ha_barrier_store();                                      \
+		break;                                                     \
+	}                                                                  \
+	(_ret);                                                            \
+    })
+
+
+/* Remove the first element from the list, and return it */
+#define MT_LIST_POP(_lh, pt, el)                                           \
+	({                                                                 \
+		 void *_ret;                                               \
+		 struct mt_list *lh = (_lh);                               \
+		 for (;;__ha_cpu_relax()) {                                \
+			 struct mt_list *n, *n2;                           \
+			 struct mt_list *p, *p2;                           \
+			 n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);   \
+			 if (n == MT_LIST_BUSY)                            \
+			         continue;                                 \
+			 if (n == (lh)) {                                  \
+				 (lh)->next = lh;                          \
+				 __ha_barrier_store();                     \
+				 _ret = NULL;                              \
+				 break;                                    \
+			 }                                                 \
+			 p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);      \
+			 if (p == MT_LIST_BUSY) {                          \
+				 (lh)->next = n;                           \
+				 __ha_barrier_store();                     \
+				 continue;                                 \
+			 }                                                 \
+			 n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY);     \
+			 if (n2 == MT_LIST_BUSY) {                         \
+				 n->prev = p;                              \
+				 __ha_barrier_store();                     \
+				 (lh)->next = n;                           \
+				 __ha_barrier_store();                     \
+				 continue;                                 \
+			 }                                                 \
+			 p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY);    \
+			 if (p2 == MT_LIST_BUSY) {                         \
+				 n->next = n2;                             \
+				 n->prev = p;                              \
+				 __ha_barrier_store();                     \
+				 (lh)->next = n;                           \
+				 __ha_barrier_store();                     \
+				 continue;                                 \
+			 }                                                 \
+			 (lh)->next = n2;                                  \
+			 (n2)->prev = (lh);                                \
+			 __ha_barrier_store();                             \
+			 (n)->prev = (n);                                  \
+			 (n)->next = (n);	                           \
+			 __ha_barrier_store();                             \
+			 _ret = MT_LIST_ELEM(n, pt, el);                   \
+			 break;                                            \
+		 }                                                         \
+		 (_ret);                                                   \
+	 })
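+
+/* Usage sketch (illustration only): a consumer popping one element at a
+ * time; NULL means the queue was empty at that instant:
+ *
+ *     struct job *j = MT_LIST_POP(&work_queue, struct job *, list);
+ *
+ *     if (j)
+ *             handle_job(j);   // hypothetical handler
+ */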
+
+#define MT_LIST_HEAD(a)	((void *)(&(a)))
+
+#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))
+
+#define MT_LIST_HEAD_INIT(l) { &l, &l }
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define MT_LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: MT_LIST_NEXT(args, struct node *, list)
+ */
+#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef MT_LIST_PREV
+#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using LIST_DEL_INIT)
+ */
+#define MT_LIST_ADDED(el) ((el)->next != (el))
+
+/* Lock an element in the list, to make sure it won't be removed.
+ * Some external synchronization is still needed to make sure it is not
+ * removed from the list in the meantime.
+ * This returns a struct mt_list that will be needed at unlock time.
+ */
+#define MT_LIST_LOCK_ELT(_el)                                              \
+	({                                                                 \
+		struct mt_list ret;                                        \
+		struct mt_list *el = (_el);                                \
+		for (;;__ha_cpu_relax()) {                                 \
+			struct mt_list *n, *n2;                            \
+			struct mt_list *p, *p2 = NULL;                     \
+			n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY);    \
+			if (n == MT_LIST_BUSY)                             \
+			        continue;                                  \
+			p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY);    \
+			if (p == MT_LIST_BUSY) {                           \
+				(el)->next = n;                            \
+				__ha_barrier_store();                      \
+				continue;                                  \
+			}                                                  \
+			if (p != (el)) {                                   \
+			        p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+			        if (p2 == MT_LIST_BUSY) {                  \
+			                (el)->prev = p;                    \
+					(el)->next = n;                    \
+					__ha_barrier_store();              \
+					continue;                          \
+				}                                          \
+			}                                                  \
+			if (n != (el)) {                                   \
+			        n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+				if (n2 == MT_LIST_BUSY) {                  \
+					if (p2 != NULL)                    \
+						p->next = p2;              \
+					(el)->prev = p;                    \
+					(el)->next = n;                    \
+					__ha_barrier_store();              \
+					continue;                          \
+				}                                          \
+			}                                                  \
+			ret.next = n;                                      \
+			ret.prev = p;                                      \
+			break;                                             \
+		}                                                          \
+		ret;                                                       \
+	})
+
+/* Unlock an element previously locked by MT_LIST_LOCK_ELT. "np" is the
+ * struct mt_list returned by MT_LIST_LOCK_ELT().
+ */
+#define MT_LIST_UNLOCK_ELT(_el, np)                                        \
+	do {                                                               \
+		struct mt_list *n = (np).next, *p = (np).prev;             \
+		struct mt_list *el = (_el);                                \
+		(el)->next = n;                                            \
+		(el)->prev = p;                                            \
+		if (n != (el))                                             \
+			n->prev = (el);                                    \
+		if (p != (el))                                             \
+			p->next = (el);                                    \
+	} while (0)
+
+/* Internal macros for the foreach macros */
+#define _MT_LIST_UNLOCK_NEXT(el, np)                                       \
+	do {                                                               \
+		struct mt_list *n = (np);                                  \
+		(el)->next = n;                                            \
+		if (n != (el))                                             \
+		        n->prev = (el);                                    \
+	} while (0)
+
+/* Internal macros for the foreach macros */
+#define _MT_LIST_UNLOCK_PREV(el, np)                                       \
+	do {                                                               \
+		struct mt_list *p = (np);                                  \
+		(el)->prev = p;                                            \
+		if (p != (el))                                             \
+		        p->next = (el);                                    \
+	} while (0)
+
+#define _MT_LIST_LOCK_NEXT(el)                                             \
+	({                                                                 \
+	        struct mt_list *n = NULL;                                  \
+		for (;;__ha_cpu_relax()) {                                 \
+			struct mt_list *n2;                                \
+			n = _HA_ATOMIC_XCHG(&((el)->next), MT_LIST_BUSY);  \
+			if (n == MT_LIST_BUSY)                             \
+			        continue;                                  \
+			if (n != (el)) {                                   \
+			        n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+				if (n2 == MT_LIST_BUSY) {                  \
+					(el)->next = n;                    \
+					__ha_barrier_store();              \
+					continue;                          \
+				}                                          \
+			}                                                  \
+			break;                                             \
+		}                                                          \
+		n;                                                         \
+	})
+
+#define _MT_LIST_LOCK_PREV(el)                                             \
+	({                                                                 \
+	        struct mt_list *p = NULL;                                  \
+		for (;;__ha_cpu_relax()) {                                 \
+			struct mt_list *p2;                                \
+			p = _HA_ATOMIC_XCHG(&((el)->prev), MT_LIST_BUSY);  \
+			if (p == MT_LIST_BUSY)                             \
+			        continue;                                  \
+			if (p != (el)) {                                   \
+			        p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+				if (p2 == MT_LIST_BUSY) {                  \
+					(el)->prev = p;                    \
+					__ha_barrier_store();              \
+					continue;                          \
+				}                                          \
+			}                                                  \
+			break;                                             \
+		}                                                          \
+		p;                                                         \
+	})
+
+#define _MT_LIST_RELINK_DELETED(elt2)                                      \
+    do {                                                                   \
+	    struct mt_list *n = elt2.next, *p = elt2.prev;                 \
+	    ALREADY_CHECKED(p);                                            \
+	    n->prev = p;                                                   \
+	    p->next = n;                                                   \
+    } while (0)
+
+/* Equivalent of MT_LIST_DEL(), to be used when parsing the list with
+ * mt_list_for_each_entry_safe(). <_el> should be the element currently
+ * being parsed (tmpelt).
+ */
+#define MT_LIST_DEL_SAFE(_el)                                              \
+	do {                                                               \
+		struct mt_list *el = (_el);                                \
+		(el)->prev = (el);                                         \
+		(el)->next = (el);                                         \
+		(_el) = NULL;                                              \
+	} while (0)
+
+/* Same as MT_LIST_DEL_SAFE, but it won't reinitialize the element */
+#define MT_LIST_DEL_SAFE_NOINIT(_el)                                       \
+	do {                                                               \
+		(_el) = NULL;                                              \
+	} while (0)
+
+/* Simpler FOREACH_ITEM_SAFE macro inspired by the Linux sources.
+ * Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct mt_list" member named <member>. A pointer to the head
+ * of the list is passed in <list_head>. <tmpelt> is a temporary
+ * struct mt_list *, and <tmpelt2> is a temporary struct mt_list; both are
+ * used internally and are needed for MT_LIST_DEL_SAFE.
+ * Example: mt_list_for_each_entry_safe(cur_acl, known_acl, list, elt1, elt2)
+ * { ... };
+ * If you want to remove the current element, please use MT_LIST_DEL_SAFE.
+ */
+#define mt_list_for_each_entry_safe(item, list_head, member, tmpelt, tmpelt2)           \
+        for ((tmpelt) = NULL; (tmpelt) != MT_LIST_BUSY; ({                    \
+					if (tmpelt) {                         \
+					if (tmpelt2.prev)                     \
+						MT_LIST_UNLOCK_ELT(tmpelt, tmpelt2);           \
+					else                                  \
+						_MT_LIST_UNLOCK_NEXT(tmpelt, tmpelt2.next); \
+				} else                                        \
+				_MT_LIST_RELINK_DELETED(tmpelt2);             \
+				(tmpelt) = MT_LIST_BUSY;                      \
+				}))                                           \
+	for ((tmpelt) = (list_head), (tmpelt2).prev = NULL, (tmpelt2).next = _MT_LIST_LOCK_NEXT(tmpelt); ({ \
+	              (item) = MT_LIST_ELEM((tmpelt2.next), typeof(item), member);  \
+		      if (&item->member != (list_head)) {                     \
+		                if (tmpelt2.prev != &item->member)            \
+					tmpelt2.next = _MT_LIST_LOCK_NEXT(&item->member); \
+				else \
+					tmpelt2.next = tmpelt;                \
+				if (tmpelt != NULL) {                         \
+					if (tmpelt2.prev)                     \
+						_MT_LIST_UNLOCK_PREV(tmpelt, tmpelt2.prev); \
+					tmpelt2.prev = tmpelt;                \
+				}                                             \
+				(tmpelt) = &item->member;                     \
+			}                                                     \
+	    }),                                                               \
+	     &item->member != (list_head);)
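+
+/* Usage sketch (illustration only): safe iteration over the hypothetical
+ * work_queue, deleting the element currently visited:
+ *
+ *     struct job *j;
+ *     struct mt_list *elt1;
+ *     struct mt_list elt2;
+ *
+ *     mt_list_for_each_entry_safe(j, &work_queue, list, elt1, elt2) {
+ *             if (job_done(j))                // hypothetical predicate
+ *                     MT_LIST_DEL_SAFE(elt1);
+ *     }
+ */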
+
+static __inline struct list *mt_list_to_list(struct mt_list *list)
+{
+	union {
+		struct mt_list *mt_list;
+		struct list *list;
+	} mylist;
+
+	mylist.mt_list = list;
+	return mylist.list;
+}
+
+static __inline struct mt_list *list_to_mt_list(struct list *list)
+{
+	union {
+		struct mt_list *mt_list;
+		struct list *list;
+	} mylist;
+
+	mylist.list = list;
+	return mylist.mt_list;
+}
+
+#endif /* _HAPROXY_LIST_H */
diff --git a/contrib/mod_defender/include/haproxy/sample-t.h b/contrib/mod_defender/include/haproxy/sample-t.h
new file mode 100644
index 0000000..7fb9f5e
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/sample-t.h
@@ -0,0 +1,309 @@
+/*
+ * include/haproxy/sample-t.h
+ * Macros, variables and structures for sample management.
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2012-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_SAMPLE_T_H
+#define _HAPROXY_SAMPLE_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/sample_data-t.h>
+
+/* input and output sample types */
+enum {
+	SMP_T_ANY = 0,   /* any type */
+	SMP_T_BOOL,      /* boolean */
+	SMP_T_SINT,      /* signed 64bits integer type */
+	SMP_T_ADDR,      /* ipv4 or ipv6, only used for input type compatibility */
+	SMP_T_IPV4,      /* ipv4 type */
+	SMP_T_IPV6,      /* ipv6 type */
+	SMP_T_STR,       /* char string type */
+	SMP_T_BIN,       /* buffer type */
+	SMP_T_METH,      /* contain method */
+	SMP_TYPES        /* number of types, must always be last */
+};
+
+/* Sample sources are used to establish a relation between fetch keywords and
+ * the location where they're about to be used. They're reserved for internal
+ * use and are not meant to be known outside the sample management code.
+ */
+enum {
+	SMP_SRC_CONST,  /* constant elements known at configuration time */
+	SMP_SRC_INTRN,  /* internal context-less information */
+	SMP_SRC_LISTN,  /* listener which accepted the connection */
+	SMP_SRC_FTEND,  /* frontend which accepted the connection */
+	SMP_SRC_L4CLI,  /* L4 information about the client */
+	SMP_SRC_L5CLI,  /* fetch uses client information from embryonic session */
+	SMP_SRC_TRACK,  /* fetch involves track counters */
+	SMP_SRC_L6REQ,  /* fetch uses raw information from the request buffer */
+	SMP_SRC_HRQHV,  /* fetch uses volatile information about HTTP request headers (eg: value) */
+	SMP_SRC_HRQHP,  /* fetch uses persistent information about HTTP request headers (eg: meth) */
+	SMP_SRC_HRQBO,  /* fetch uses information about HTTP request body */
+	SMP_SRC_BKEND,  /* fetch uses information about the backend */
+	SMP_SRC_SERVR,  /* fetch uses information about the selected server */
+	SMP_SRC_L4SRV,  /* fetch uses information about the server L4 connection */
+	SMP_SRC_L5SRV,  /* fetch uses information about the server L5 connection */
+	SMP_SRC_L6RES,  /* fetch uses raw information from the response buffer */
+	SMP_SRC_HRSHV,  /* fetch uses volatile information about HTTP response headers (eg: value) */
+	SMP_SRC_HRSHP,  /* fetch uses persistent information about HTTP response headers (eg: status) */
+	SMP_SRC_HRSBO,  /* fetch uses information about HTTP response body */
+	SMP_SRC_RQFIN,  /* final information about request buffer (eg: tot bytes) */
+	SMP_SRC_RSFIN,  /* final information about response buffer (eg: tot bytes) */
+	SMP_SRC_TXFIN,  /* final information about the transaction (eg: #comp rate) */
+	SMP_SRC_SSFIN,  /* final information about the stream (eg: #requests, final flags) */
+	SMP_SRC_ENTRIES /* nothing after this */
+};
+
+/* Sample checkpoints are a list of places where samples may be used. This is
+ * an internal enum used only to build SMP_VAL_*.
+ */
+enum {
+	SMP_CKP_FE_CON_ACC,  /* FE connection accept rules ("tcp request connection") */
+	SMP_CKP_FE_SES_ACC,  /* FE stream accept rules (to come soon) */
+	SMP_CKP_FE_REQ_CNT,  /* FE request content rules ("tcp request content") */
+	SMP_CKP_FE_HRQ_HDR,  /* FE HTTP request headers (rules, headers, monitor, stats, redirect) */
+	SMP_CKP_FE_HRQ_BDY,  /* FE HTTP request body */
+	SMP_CKP_FE_SET_BCK,  /* FE backend switching rules ("use_backend") */
+	SMP_CKP_BE_REQ_CNT,  /* BE request content rules ("tcp request content") */
+	SMP_CKP_BE_HRQ_HDR,  /* BE HTTP request headers (rules, headers, monitor, stats, redirect) */
+	SMP_CKP_BE_HRQ_BDY,  /* BE HTTP request body */
+	SMP_CKP_BE_SET_SRV,  /* BE server switching rules ("use_server", "balance", "force-persist", "stick", ...) */
+	SMP_CKP_BE_SRV_CON,  /* BE server connect (eg: "source") */
+	SMP_CKP_BE_RES_CNT,  /* BE response content rules ("tcp response content") */
+	SMP_CKP_BE_HRS_HDR,  /* BE HTTP response headers (rules, headers) */
+	SMP_CKP_BE_HRS_BDY,  /* BE HTTP response body (stick-store rules are there) */
+	SMP_CKP_BE_STO_RUL,  /* BE stick-store rules */
+	SMP_CKP_FE_RES_CNT,  /* FE response content rules ("tcp response content") */
+	SMP_CKP_FE_HRS_HDR,  /* FE HTTP response headers (rules, headers) */
+	SMP_CKP_FE_HRS_BDY,  /* FE HTTP response body */
+	SMP_CKP_FE_LOG_END,  /* FE log at the end of the txn/stream */
+	SMP_CKP_BE_CHK_RUL,  /* BE tcp-check rules */
+	SMP_CKP_CFG_PARSER,  /* config parser (i.e. before boot) */
+	SMP_CKP_CLI_PARSER,  /* command line parser */
+	SMP_CKP_ENTRIES /* nothing after this */
+};
+
+/* SMP_USE_* are flags used to declare fetch keywords. Fetch methods are
+ * associated with bitfields composed of these values, generally only one, to
+ * indicate where the contents may be sampled. Some fetches are ambiguous as
+ * they apply to either the request or the response depending on the context,
+ * so they will have 2 of these bits (eg: hdr(), payload(), ...). These are
+ * stored in smp->use.
+ */
+enum {
+	SMP_USE_CONST = 1 << SMP_SRC_CONST,  /* constant values known at config time */
+	SMP_USE_INTRN = 1 << SMP_SRC_INTRN,  /* internal context-less information */
+	SMP_USE_LISTN = 1 << SMP_SRC_LISTN,  /* listener which accepted the connection */
+	SMP_USE_FTEND = 1 << SMP_SRC_FTEND,  /* frontend which accepted the connection */
+	SMP_USE_L4CLI = 1 << SMP_SRC_L4CLI,  /* L4 information about the client */
+	SMP_USE_L5CLI = 1 << SMP_SRC_L5CLI,  /* fetch uses client information from embryonic session */
+	SMP_USE_TRACK = 1 << SMP_SRC_TRACK,  /* fetch involves track counters */
+	SMP_USE_L6REQ = 1 << SMP_SRC_L6REQ,  /* fetch uses raw information from the request buffer */
+	SMP_USE_HRQHV = 1 << SMP_SRC_HRQHV,  /* fetch uses volatile information about HTTP request headers (eg: value) */
+	SMP_USE_HRQHP = 1 << SMP_SRC_HRQHP,  /* fetch uses persistent information about HTTP request headers (eg: meth) */
+	SMP_USE_HRQBO = 1 << SMP_SRC_HRQBO,  /* fetch uses information about HTTP request body */
+	SMP_USE_BKEND = 1 << SMP_SRC_BKEND,  /* fetch uses information about the backend */
+	SMP_USE_SERVR = 1 << SMP_SRC_SERVR,  /* fetch uses information about the selected server */
+	SMP_USE_L4SRV = 1 << SMP_SRC_L4SRV,  /* fetch uses information about the server L4 connection */
+	SMP_USE_L5SRV = 1 << SMP_SRC_L5SRV,  /* fetch uses information about the server L5 connection */
+	SMP_USE_L6RES = 1 << SMP_SRC_L6RES,  /* fetch uses raw information from the response buffer */
+	SMP_USE_HRSHV = 1 << SMP_SRC_HRSHV,  /* fetch uses volatile information about HTTP response headers (eg: value) */
+	SMP_USE_HRSHP = 1 << SMP_SRC_HRSHP,  /* fetch uses persistent information about HTTP response headers (eg: status) */
+	SMP_USE_HRSBO = 1 << SMP_SRC_HRSBO,  /* fetch uses information about HTTP response body */
+	SMP_USE_RQFIN = 1 << SMP_SRC_RQFIN,  /* final information about request buffer (eg: tot bytes) */
+	SMP_USE_RSFIN = 1 << SMP_SRC_RSFIN,  /* final information about response buffer (eg: tot bytes) */
+	SMP_USE_TXFIN = 1 << SMP_SRC_TXFIN,  /* final information about the transaction (eg: #comp rate) */
+	SMP_USE_SSFIN = 1 << SMP_SRC_SSFIN,  /* final information about the stream (eg: #requests, final flags) */
+
+	/* This composite one is useful to detect if an http_txn needs to be allocated */
+	SMP_USE_HTTP_ANY = SMP_USE_HRQHV | SMP_USE_HRQHP | SMP_USE_HRQBO |
+	                   SMP_USE_HRSHV | SMP_USE_HRSHP | SMP_USE_HRSBO,
+};
+
+/* Sample validity is computed from the fetch sources above when keywords
+ * are registered. Each fetch method may be used at different locations. The
+ * configuration parser will check whether the fetches are compatible with the
+ * location where they're used. These are stored in smp->val.
+ */
+enum {
+	SMP_VAL___________ = 0,        /* Just used as a visual marker */
+	SMP_VAL_FE_CON_ACC = 1 << SMP_CKP_FE_CON_ACC,  /* FE connection accept rules ("tcp request connection") */
+	SMP_VAL_FE_SES_ACC = 1 << SMP_CKP_FE_SES_ACC,  /* FE stream accept rules (to come soon) */
+	SMP_VAL_FE_REQ_CNT = 1 << SMP_CKP_FE_REQ_CNT,  /* FE request content rules ("tcp request content") */
+	SMP_VAL_FE_HRQ_HDR = 1 << SMP_CKP_FE_HRQ_HDR,  /* FE HTTP request headers (rules, headers, monitor, stats, redirect) */
+	SMP_VAL_FE_HRQ_BDY = 1 << SMP_CKP_FE_HRQ_BDY,  /* FE HTTP request body */
+	SMP_VAL_FE_SET_BCK = 1 << SMP_CKP_FE_SET_BCK,  /* FE backend switching rules ("use_backend") */
+	SMP_VAL_BE_REQ_CNT = 1 << SMP_CKP_BE_REQ_CNT,  /* BE request content rules ("tcp request content") */
+	SMP_VAL_BE_HRQ_HDR = 1 << SMP_CKP_BE_HRQ_HDR,  /* BE HTTP request headers (rules, headers, monitor, stats, redirect) */
+	SMP_VAL_BE_HRQ_BDY = 1 << SMP_CKP_BE_HRQ_BDY,  /* BE HTTP request body */
+	SMP_VAL_BE_SET_SRV = 1 << SMP_CKP_BE_SET_SRV,  /* BE server switching rules ("use_server", "balance", "force-persist", "stick", ...) */
+	SMP_VAL_BE_SRV_CON = 1 << SMP_CKP_BE_SRV_CON,  /* BE server connect (eg: "source") */
+	SMP_VAL_BE_RES_CNT = 1 << SMP_CKP_BE_RES_CNT,  /* BE response content rules ("tcp response content") */
+	SMP_VAL_BE_HRS_HDR = 1 << SMP_CKP_BE_HRS_HDR,  /* BE HTTP response headers (rules, headers) */
+	SMP_VAL_BE_HRS_BDY = 1 << SMP_CKP_BE_HRS_BDY,  /* BE HTTP response body (stick-store rules are there) */
+	SMP_VAL_BE_STO_RUL = 1 << SMP_CKP_BE_STO_RUL,  /* BE stick-store rules */
+	SMP_VAL_FE_RES_CNT = 1 << SMP_CKP_FE_RES_CNT,  /* FE response content rules ("tcp response content") */
+	SMP_VAL_FE_HRS_HDR = 1 << SMP_CKP_FE_HRS_HDR,  /* FE HTTP response headers (rules, headers) */
+	SMP_VAL_FE_HRS_BDY = 1 << SMP_CKP_FE_HRS_BDY,  /* FE HTTP response body */
+	SMP_VAL_FE_LOG_END = 1 << SMP_CKP_FE_LOG_END,  /* FE log at the end of the txn/stream */
+	SMP_VAL_BE_CHK_RUL = 1 << SMP_CKP_BE_CHK_RUL,  /* BE tcp-check rule */
+	SMP_VAL_CFG_PARSER = 1 << SMP_CKP_CFG_PARSER,  /* within config parser */
+	SMP_VAL_CLI_PARSER = 1 << SMP_CKP_CLI_PARSER,  /* within command line parser */
+
+	/* a few combinations to decide what direction to try to fetch (useful for logs) */
+	SMP_VAL_REQUEST    = SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+	                     SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+	                     SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+	                     SMP_VAL_BE_SET_SRV | SMP_VAL_BE_CHK_RUL,
+
+	SMP_VAL_RESPONSE   = SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT | SMP_VAL_BE_HRS_HDR |
+	                     SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL | SMP_VAL_FE_RES_CNT |
+	                     SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY | SMP_VAL_FE_LOG_END |
+	                     SMP_VAL_BE_CHK_RUL,
+};
+
+/* Sample fetch options are passed to sample fetch functions to add precision
+ * about what is desired :
+ *   - fetch direction (req/resp)
+ *   - intermediary / final fetch
+ */
+enum {
+	SMP_OPT_DIR_REQ = 0,    /* direction = request */
+	SMP_OPT_DIR_RES = 1,    /* direction = response */
+	SMP_OPT_DIR     = (SMP_OPT_DIR_REQ|SMP_OPT_DIR_RES), /* mask to get direction */
+	SMP_OPT_FINAL   = 2,    /* final fetch, contents won't change anymore */
+	SMP_OPT_ITERATE = 4,    /* fetches may be iterated if supported (for ACLs) */
+};
+
+/* Flags used to describe fetched samples. MAY_CHANGE indicates that the result
+ * of the fetch might still evolve, for instance because of more data expected,
+ * even if the fetch has failed. VOL_* indicates how long a result may be cached.
+ */
+enum {
+	SMP_F_NOT_LAST   = 1 << 0, /* other occurrences might exist for this sample */
+	SMP_F_MAY_CHANGE = 1 << 1, /* sample is unstable and might change (eg: request length) */
+	SMP_F_VOL_TEST   = 1 << 2, /* result must not survive longer than the test (eg: time) */
+	SMP_F_VOL_1ST    = 1 << 3, /* result sensitive to changes in first line (eg: URI) */
+	SMP_F_VOL_HDR    = 1 << 4, /* result sensitive to changes in headers */
+	SMP_F_VOL_TXN    = 1 << 5, /* result sensitive to new transaction (eg: HTTP version) */
+	SMP_F_VOL_SESS   = 1 << 6, /* result sensitive to new session (eg: src IP) */
+	SMP_F_VOLATILE   = (1<<2)|(1<<3)|(1<<4)|(1<<5)|(1<<6), /* any volatility condition */
+	SMP_F_CONST      = 1 << 7, /* this sample uses constant memory; it may need to be duplicated before changes */
+};
+
+/* needed below */
+struct session;
+struct stream;
+struct arg;
+
+/* a sample context might be used by any sample fetch function in order to
+ * store information needed across multiple calls (eg: restart point for the
+ * next occurrence). By definition it may store up to 8 pointers, or any
+ * scalar (double, int, long long).
+ */
+union smp_ctx {
+	void *p;        /* any pointer */
+	int i;          /* any integer */
+	long long ll;   /* any long long or smaller */
+	double d;       /* any float or double */
+	void *a[8];     /* any array of up to 8 pointers */
+};
+
+/* a sample is a typed data element extracted from a stream. It has a type,
+ * contents, validity constraints, and a context for use in iterative calls.
+ */
+struct sample {
+	unsigned int flags;       /* SMP_F_* */
+	struct sample_data data;
+	union smp_ctx ctx;
+
+	/* Some sample analyzers (sample fetches or converters) need to
+	 * know the attached proxy, session and stream. The sample-fetch
+	 * and converter function pointers cannot be called without
+	 * these 3 pointers filled.
+	 */
+	struct proxy *px;
+	struct session *sess;
+	struct stream *strm; /* WARNING! MAY BE NULL! (eg: tcp-request connection) */
+	unsigned int opt; /* fetch options (SMP_OPT_*) */
+};
+
+/* Descriptor for a sample conversion */
+struct sample_conv {
+	const char *kw;                           /* configuration keyword  */
+	int (*process)(const struct arg *arg_p,
+	               struct sample *smp,
+	               void *private);            /* process function */
+	uint64_t arg_mask;                        /* arguments (ARG*()) */
+	int (*val_args)(struct arg *arg_p,
+	                struct sample_conv *smp_conv,
+	                const char *file, int line,
+			char **err_msg);          /* argument validation function */
+	unsigned int in_type;                     /* expected input sample type */
+	unsigned int out_type;                    /* output sample type */
+	void *private;                            /* private values. only used by maps and Lua */
+};
+
+/* sample conversion expression */
+struct sample_conv_expr {
+	struct list list;                         /* member of a sample_expr */
+	struct sample_conv *conv;                 /* sample conversion used */
+	struct arg *arg_p;                        /* optional arguments */
+};
+
+/* Descriptor for a sample fetch method */
+struct sample_fetch {
+	const char *kw;                           /* configuration keyword */
+	int (*process)(const struct arg *arg_p,
+	               struct sample *smp,
+	               const char *kw,            /* fetch processing function */
+	               void *private);            /* private value. */
+	uint64_t arg_mask;                        /* arguments (ARG*()) */
+	int (*val_args)(struct arg *arg_p,
+			char **err_msg);          /* argument validation function */
+	unsigned long out_type;                   /* output sample type */
+	unsigned int use;                         /* fetch source (SMP_USE_*) */
+	unsigned int val;                         /* fetch validity (SMP_VAL_*) */
+	void *private;                            /* private values. only used by Lua */
+};
+
+/* sample expression */
+struct sample_expr {
+	struct list list;                         /* member of list of sample, currently not used */
+	struct sample_fetch *fetch;               /* sample fetch method */
+	struct arg *arg_p;                        /* optional pointer to arguments to fetch function */
+	struct list conv_exprs;                   /* list of conversion expressions to apply */
+};
+
+/* sample fetch keywords list */
+struct sample_fetch_kw_list {
+	struct list list;                         /* head of sample fetch keyword list */
+	struct sample_fetch kw[VAR_ARRAY];        /* array of sample fetch descriptors */
+};
+
+/* sample conversion keywords list */
+struct sample_conv_kw_list {
+	struct list list;                         /* head of sample conversion keyword list */
+	struct sample_conv kw[VAR_ARRAY];         /* array of sample conversion descriptors */
+};
+
+typedef int (*sample_cast_fct)(struct sample *smp);
+
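+/* Usage sketch (illustrative, not part of the imported API): a consumer
+ * holding a filled sample typically dispatches on data.type and reads the
+ * matching union member:
+ *
+ *     switch (smp->data.type) {
+ *     case SMP_T_SINT:
+ *             printf("sint: %lld\n", smp->data.u.sint);
+ *             break;
+ *     case SMP_T_STR:
+ *             // str.area is not necessarily NUL-terminated, hence the bound
+ *             printf("str: %.*s\n", (int)smp->data.u.str.data,
+ *                    smp->data.u.str.area);
+ *             break;
+ *     default:
+ *             break;
+ *     }
+ */
+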
+#endif /* _HAPROXY_SAMPLE_T_H */
diff --git a/contrib/mod_defender/include/haproxy/sample_data-t.h b/contrib/mod_defender/include/haproxy/sample_data-t.h
new file mode 100644
index 0000000..2546028
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/sample_data-t.h
@@ -0,0 +1,51 @@
+/*
+ * include/haproxy/sample_data-t.h
+ * Definitions of sample data
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_SAMPLE_DATA_T_H
+#define _HAPROXY_SAMPLE_DATA_T_H
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/http-t.h>
+
+/* Note: the strings below make use of chunks. Chunks may carry an allocated
+ * size in addition to the length. The size counts from the beginning (str)
+ * to the end. If the size is unknown, it MUST be zero, in which case the
+ * sample will automatically be duplicated when a change larger than <len> has
+ * to be performed. Thus it is safe to always set size to zero.
+ */
+union sample_value {
+	long long int    sint;  /* used for signed 64-bit integers */
+	struct in_addr   ipv4;  /* used for ipv4 addresses */
+	struct in6_addr  ipv6;  /* used for ipv6 addresses */
+	struct buffer    str;   /* used for char strings or buffers */
+	struct http_meth meth;  /* used for http method */
+};
+
+/* Used to store sample constant */
+struct sample_data {
+	int type;                 /* SMP_T_* */
+	union sample_value u;     /* sample data */
+};
+
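+/* Usage note (illustrative): following the chunk remark above, a caller
+ * wrapping borrowed memory can leave the allocated size at zero so that any
+ * in-place change first duplicates the data:
+ *
+ *     struct sample_data d;
+ *     d.type = SMP_T_STR;
+ *     d.u.str.area = (char *)"hello";   // borrowed, not owned
+ *     d.u.str.data = 5;                 // current length
+ *     d.u.str.size = 0;                 // unknown allocation: always safe
+ */
+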
+#endif /* _HAPROXY_SAMPLE_DATA_T_H */
diff --git a/contrib/mod_defender/include/haproxy/spoe-t.h b/contrib/mod_defender/include/haproxy/spoe-t.h
new file mode 100644
index 0000000..62ad547
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/spoe-t.h
@@ -0,0 +1,191 @@
+/*
+ * include/haproxy/spoe-t.h
+ * Macros, variables and structures for the SPOE filter.
+ *
+ * Copyright (C) 2017 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_SPOE_T_H
+#define _HAPROXY_SPOE_T_H
+
+
+/* Type of list of messages */
+#define SPOE_MSGS_BY_EVENT 0x01
+#define SPOE_MSGS_BY_GROUP 0x02
+
+/* Flags set on the SPOE agent */
+#define SPOE_FL_CONT_ON_ERR       0x00000001 /* Do not stop event processing when an error occurs */
+#define SPOE_FL_PIPELINING        0x00000002 /* Set when SPOE agent supports pipelining (set by default) */
+#define SPOE_FL_ASYNC             0x00000004 /* Set when SPOE agent supports async (set by default) */
+#define SPOE_FL_SND_FRAGMENTATION 0x00000008 /* Set when SPOE agent supports sending fragmented payload */
+#define SPOE_FL_RCV_FRAGMENTATION 0x00000010 /* Set when SPOE agent supports receiving fragmented payload */
+#define SPOE_FL_FORCE_SET_VAR     0x00000020 /* Set when SPOE agent will set all variables from agent (and not only known variables) */
+
+/* Flags set on the SPOE context */
+#define SPOE_CTX_FL_CLI_CONNECTED 0x00000001 /* Set once the on-client-session event was processed */
+#define SPOE_CTX_FL_SRV_CONNECTED 0x00000002 /* Set once the on-server-session event was processed */
+#define SPOE_CTX_FL_REQ_PROCESS   0x00000004 /* Set when SPOE is processing the request */
+#define SPOE_CTX_FL_RSP_PROCESS   0x00000008 /* Set when SPOE is processing the response */
+#define SPOE_CTX_FL_FRAGMENTED    0x00000010 /* Set when a fragmented frame is being processed */
+
+#define SPOE_CTX_FL_PROCESS (SPOE_CTX_FL_REQ_PROCESS|SPOE_CTX_FL_RSP_PROCESS)
+
+/* Flags set on the SPOE applet */
+#define SPOE_APPCTX_FL_PIPELINING    0x00000001 /* Set if pipelining is supported */
+#define SPOE_APPCTX_FL_ASYNC         0x00000002 /* Set if asynchronous frames are supported */
+#define SPOE_APPCTX_FL_FRAGMENTATION 0x00000004 /* Set if fragmentation is supported */
+
+#define SPOE_APPCTX_ERR_NONE    0x00000000 /* no error yet, leave it to zero */
+#define SPOE_APPCTX_ERR_TOUT    0x00000001 /* SPOE applet timeout */
+
+/* Flags set on the SPOE frame */
+#define SPOE_FRM_FL_FIN         0x00000001
+#define SPOE_FRM_FL_ABRT        0x00000002
+
+/* Masks to get data type or flags value */
+#define SPOE_DATA_T_MASK  0x0F
+#define SPOE_DATA_FL_MASK 0xF0
+
+/* Flags to set Boolean values */
+#define SPOE_DATA_FL_FALSE 0x00
+#define SPOE_DATA_FL_TRUE  0x10
+
+/* All possible states for a SPOE context */
+enum spoe_ctx_state {
+	SPOE_CTX_ST_NONE = 0,
+	SPOE_CTX_ST_READY,
+	SPOE_CTX_ST_ENCODING_MSGS,
+	SPOE_CTX_ST_SENDING_MSGS,
+	SPOE_CTX_ST_WAITING_ACK,
+	SPOE_CTX_ST_DONE,
+	SPOE_CTX_ST_ERROR,
+};
+
+/* All possible states for a SPOE applet */
+enum spoe_appctx_state {
+	SPOE_APPCTX_ST_CONNECT = 0,
+	SPOE_APPCTX_ST_CONNECTING,
+	SPOE_APPCTX_ST_IDLE,
+	SPOE_APPCTX_ST_PROCESSING,
+	SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY,
+	SPOE_APPCTX_ST_WAITING_SYNC_ACK,
+	SPOE_APPCTX_ST_DISCONNECT,
+	SPOE_APPCTX_ST_DISCONNECTING,
+	SPOE_APPCTX_ST_EXIT,
+	SPOE_APPCTX_ST_END,
+};
+
+/* All supported SPOE actions */
+enum spoe_action_type {
+	SPOE_ACT_T_SET_VAR = 1,
+	SPOE_ACT_T_UNSET_VAR,
+	SPOE_ACT_TYPES,
+};
+
+/* All supported SPOE events */
+enum spoe_event {
+	SPOE_EV_NONE = 0,
+
+	/* Request events */
+	SPOE_EV_ON_CLIENT_SESS = 1,
+	SPOE_EV_ON_TCP_REQ_FE,
+	SPOE_EV_ON_TCP_REQ_BE,
+	SPOE_EV_ON_HTTP_REQ_FE,
+	SPOE_EV_ON_HTTP_REQ_BE,
+
+	/* Response events */
+	SPOE_EV_ON_SERVER_SESS,
+	SPOE_EV_ON_TCP_RSP,
+	SPOE_EV_ON_HTTP_RSP,
+
+	SPOE_EV_EVENTS
+};
+
+/* Errors triggered by streams */
+enum spoe_context_error {
+	SPOE_CTX_ERR_NONE = 0,
+	SPOE_CTX_ERR_TOUT,
+	SPOE_CTX_ERR_RES,
+	SPOE_CTX_ERR_TOO_BIG,
+	SPOE_CTX_ERR_FRAG_FRAME_ABRT,
+	SPOE_CTX_ERR_INTERRUPT,
+	SPOE_CTX_ERR_UNKNOWN = 255,
+	SPOE_CTX_ERRS,
+};
+
+/* Errors triggered by SPOE applet */
+enum spoe_frame_error {
+	SPOE_FRM_ERR_NONE = 0,
+	SPOE_FRM_ERR_IO,
+	SPOE_FRM_ERR_TOUT,
+	SPOE_FRM_ERR_TOO_BIG,
+	SPOE_FRM_ERR_INVALID,
+	SPOE_FRM_ERR_NO_VSN,
+	SPOE_FRM_ERR_NO_FRAME_SIZE,
+	SPOE_FRM_ERR_NO_CAP,
+	SPOE_FRM_ERR_BAD_VSN,
+	SPOE_FRM_ERR_BAD_FRAME_SIZE,
+	SPOE_FRM_ERR_FRAG_NOT_SUPPORTED,
+	SPOE_FRM_ERR_INTERLACED_FRAMES,
+	SPOE_FRM_ERR_FRAMEID_NOTFOUND,
+	SPOE_FRM_ERR_RES,
+	SPOE_FRM_ERR_UNKNOWN = 99,
+	SPOE_FRM_ERRS,
+};
+
+/* Scopes used for variables set by agents. It is a way to stay agnostic to the
+ * vars scope. */
+enum spoe_vars_scope {
+	SPOE_SCOPE_PROC = 0, /* <=> SCOPE_PROC  */
+	SPOE_SCOPE_SESS,     /* <=> SCOPE_SESS */
+	SPOE_SCOPE_TXN,      /* <=> SCOPE_TXN  */
+	SPOE_SCOPE_REQ,      /* <=> SCOPE_REQ  */
+	SPOE_SCOPE_RES,      /* <=> SCOPE_RES  */
+};
+
+/* Frame Types sent by HAProxy and by agents */
+enum spoe_frame_type {
+	SPOE_FRM_T_UNSET = 0,
+
+	/* Frames sent by HAProxy */
+	SPOE_FRM_T_HAPROXY_HELLO = 1,
+	SPOE_FRM_T_HAPROXY_DISCON,
+	SPOE_FRM_T_HAPROXY_NOTIFY,
+
+	/* Frames sent by the agents */
+	SPOE_FRM_T_AGENT_HELLO = 101,
+	SPOE_FRM_T_AGENT_DISCON,
+	SPOE_FRM_T_AGENT_ACK
+};
+
+/* All supported data types */
+enum spoe_data_type {
+	SPOE_DATA_T_NULL = 0,
+	SPOE_DATA_T_BOOL,
+	SPOE_DATA_T_INT32,
+	SPOE_DATA_T_UINT32,
+	SPOE_DATA_T_INT64,
+	SPOE_DATA_T_UINT64,
+	SPOE_DATA_T_IPV4,
+	SPOE_DATA_T_IPV6,
+	SPOE_DATA_T_STR,
+	SPOE_DATA_T_BIN,
+	SPOE_DATA_TYPES
+};
+
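+/* Illustrative note: a data type byte carries the type in its low nibble and
+ * the flags in its high nibble (see SPOE_DATA_T_MASK / SPOE_DATA_FL_MASK), so
+ * a boolean "true" fits in a single byte:
+ *
+ *     unsigned char c = SPOE_DATA_T_BOOL | SPOE_DATA_FL_TRUE;      // 0x11
+ *     int type = c & SPOE_DATA_T_MASK;               // SPOE_DATA_T_BOOL
+ *     int val  = (c & SPOE_DATA_FL_MASK) == SPOE_DATA_FL_TRUE;     // 1
+ */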
+
+#endif /* _HAPROXY_SPOE_T_H */
diff --git a/contrib/mod_defender/include/haproxy/spoe.h b/contrib/mod_defender/include/haproxy/spoe.h
new file mode 100644
index 0000000..b7b981a
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/spoe.h
@@ -0,0 +1,352 @@
+/*
+ * include/haproxy/spoe.h
+ * Encoding/Decoding functions for the SPOE filters (and other helpers).
+ *
+ * Copyright (C) 2017 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_SPOE_H
+#define _HAPROXY_SPOE_H
+
+#include <string.h>
+#include <haproxy/api.h>
+#include <haproxy/intops.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/spoe-t.h>
+
+
+/* Encode a buffer. Its length <len> is encoded as a varint, followed by a copy
+ * of <str>. There must be enough space in <*buf> to encode the whole buffer,
+ * otherwise an error is triggered.
+ * On success, it returns <len> and <*buf> is moved after the encoded value. If
+ * an error occurred, it returns -1. */
+static inline int
+spoe_encode_buffer(const char *str, size_t len, char **buf, char *end)
+{
+	char *p = *buf;
+	int   ret;
+
+	if (p >= end)
+		return -1;
+
+	if (!len) {
+		*p++ = 0;
+		*buf = p;
+		return 0;
+	}
+
+	ret = encode_varint(len, &p, end);
+	if (ret == -1 || p + len > end)
+		return -1;
+
+	memcpy(p, str, len);
+	*buf = p + len;
+	return len;
+}
+
+/* Encode a buffer, possibly partially. It does the same thing as
+ * 'spoe_encode_buffer', but it does not fail when there is not enough space.
+ * On success, it returns the number of copied bytes and <*buf> is moved after
+ * the encoded value. If an error occurred, it returns -1. */
+static inline int
+spoe_encode_frag_buffer(const char *str, size_t len, char **buf, char *end)
+{
+	char *p = *buf;
+	int   ret;
+
+	if (p >= end)
+		return -1;
+
+	if (!len) {
+		*p++ = 0;
+		*buf = p;
+		return 0;
+	}
+
+	ret = encode_varint(len, &p, end);
+	if (ret == -1 || p >= end)
+		return -1;
+
+	ret = (p+len < end) ? len : (end - p);
+	memcpy(p, str, ret);
+	*buf = p + ret;
+	return ret;
+}
+
+/* Decode a buffer. The buffer length is decoded and saved in <*len>. <*str>
+ * points to the first byte of the buffer.
+ * On success, it returns the buffer length and <*buf> is moved after the
+ * encoded buffer. Otherwise, it returns -1. */
+static inline int
+spoe_decode_buffer(char **buf, char *end, char **str, uint64_t *len)
+{
+	char    *p = *buf;
+	uint64_t sz;
+	int      ret;
+
+	*str = NULL;
+	*len = 0;
+
+	ret = decode_varint(&p, end, &sz);
+	if (ret == -1 || p + sz > end)
+		return -1;
+
+	*str = p;
+	*len = sz;
+	*buf = p + sz;
+	return sz;
+}
+
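+/* Usage sketch for the two helpers above (illustrative only): a round-trip
+ * through the same memory area:
+ *
+ *     char frame[256], *w = frame, *r = frame;
+ *     char *str; uint64_t len;
+ *
+ *     if (spoe_encode_buffer("agent", 5, &w, frame + sizeof(frame)) == -1)
+ *             goto error;                       // not enough room
+ *     if (spoe_decode_buffer(&r, w, &str, &len) == -1)
+ *             goto error;                       // truncated or malformed
+ *     // here len == 5 and str points inside <frame> at the copied bytes
+ */
+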
+/* Encode a typed data using value in <smp>. On success, it returns the number
+ * of copied bytes and <*buf> is moved after the encoded value. If an error
+ * occurred, it returns -1.
+ *
+ * If the value is too big to be encoded then, depending on its type, either
+ * the encoding fails or the value is partially encoded. Only strings and
+ * binaries can be partially encoded. */
+static inline int
+spoe_encode_data(struct sample *smp, char **buf, char *end)
+{
+	char *p = *buf;
+	int   ret;
+
+	if (p >= end)
+		return -1;
+
+	if (smp == NULL) {
+		*p++ = SPOE_DATA_T_NULL;
+		goto end;
+	}
+
+	switch (smp->data.type) {
+		case SMP_T_BOOL:
+			*p    = SPOE_DATA_T_BOOL;
+			*p++ |= ((!smp->data.u.sint) ? SPOE_DATA_FL_FALSE : SPOE_DATA_FL_TRUE);
+			break;
+
+		case SMP_T_SINT:
+			*p++ = SPOE_DATA_T_INT64;
+			if (encode_varint(smp->data.u.sint, &p, end) == -1)
+				return -1;
+			break;
+
+		case SMP_T_IPV4:
+			if (p + 5 > end)
+				return -1;
+			*p++ = SPOE_DATA_T_IPV4;
+			memcpy(p, &smp->data.u.ipv4, 4);
+			p += 4;
+			break;
+
+		case SMP_T_IPV6:
+			if (p + 17 > end)
+				return -1;
+			*p++ = SPOE_DATA_T_IPV6;
+			memcpy(p, &smp->data.u.ipv6, 16);
+			p += 16;
+			break;
+
+		case SMP_T_STR:
+		case SMP_T_BIN: {
+			/* If defined, get the length and offset of the sample by reading the
+			 * sample context. ctx.a[0] is the pointer to the length and ctx.a[1]
+			 * is the pointer to the offset. If the offset is greater than 0, the
+			 * sample is partially encoded and we only need to encode the remaining
+			 * data. When the whole sample is encoded, the offset is reset to 0,
+			 * so the caller knows it can try to encode the next sample. */
+			struct buffer *chk = &smp->data.u.str;
+			unsigned int *len  = smp->ctx.a[0];
+			unsigned int *off  = smp->ctx.a[1];
+
+			if (!*off) {
+				/* First evaluation of the sample: encode the
+				 * type (string or binary), the buffer length
+				 * (as a varint) and at least 1 byte of the
+				 * buffer. */
+				*p++ = (smp->data.type == SMP_T_STR)
+					? SPOE_DATA_T_STR
+					: SPOE_DATA_T_BIN;
+				ret = spoe_encode_frag_buffer(chk->area,
+							      chk->data, &p,
+							      end);
+				if (ret == -1)
+					return -1;
+				*len = chk->data;
+			}
+			else {
+				/* The sample has been fragmented, encode remaining data */
+				ret = MIN(*len - *off, end - p);
+				memcpy(p, chk->area + *off, ret);
+				p += ret;
+			}
+			/* Now update <*off> */
+			if (ret + *off != *len)
+				*off += ret;
+			else
+				*off = 0;
+			break;
+		}
+
+		case SMP_T_METH: {
+			char   *m;
+			size_t  len;
+
+			*p++ = SPOE_DATA_T_STR;
+			switch (smp->data.u.meth.meth) {
+				case HTTP_METH_OPTIONS: m = "OPTIONS"; len = 7; break;
+				case HTTP_METH_GET    : m = "GET";     len = 3; break;
+				case HTTP_METH_HEAD   : m = "HEAD";    len = 4; break;
+				case HTTP_METH_POST   : m = "POST";    len = 4; break;
+				case HTTP_METH_PUT    : m = "PUT";     len = 3; break;
+				case HTTP_METH_DELETE : m = "DELETE";  len = 6; break;
+				case HTTP_METH_TRACE  : m = "TRACE";   len = 5; break;
+				case HTTP_METH_CONNECT: m = "CONNECT"; len = 7; break;
+
+				default :
+					m   = smp->data.u.meth.str.area;
+					len = smp->data.u.meth.str.data;
+			}
+			if (spoe_encode_buffer(m, len, &p, end) == -1)
+				return -1;
+			break;
+		}
+
+		default:
+			*p++ = SPOE_DATA_T_NULL;
+			break;
+	}
+
+  end:
+	ret  = (p - *buf);
+	*buf = p;
+	return ret;
+}
+
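+/* Usage sketch (illustrative): for fragmented STR/BIN encoding, the caller
+ * wires the length/offset bookkeeping through the sample context and calls
+ * the function until the offset falls back to zero:
+ *
+ *     unsigned int len = 0, off = 0;
+ *     smp->ctx.a[0] = &len;
+ *     smp->ctx.a[1] = &off;
+ *     do {
+ *             // flush or refill <buf>..<end> between the calls
+ *             if (spoe_encode_data(smp, &buf, end) == -1)
+ *                     goto error;
+ *     } while (off);
+ */
+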
+/* Skip a typed data. If an error occurred, -1 is returned, otherwise the number
+ * of skipped bytes is returned and <*buf> is moved after the skipped data.
+ *
+ * A typed data is composed of a type (1 byte) and the corresponding data:
+ *  - boolean: no additional data (0 bytes)
+ *  - integers: a variable-length integer (see decode_varint)
+ *  - ipv4: 4 bytes
+ *  - ipv6: 16 bytes
+ *  - binary and string: a buffer prefixed by its size, a variable-length
+ *    integer (see spoe_decode_buffer) */
+static inline int
+spoe_skip_data(char **buf, char *end)
+{
+	char    *str, *p = *buf;
+	int      type, ret;
+	uint64_t v, sz;
+
+	if (p >= end)
+		return -1;
+
+	type = *p++;
+	switch (type & SPOE_DATA_T_MASK) {
+		case SPOE_DATA_T_BOOL:
+			break;
+		case SPOE_DATA_T_INT32:
+		case SPOE_DATA_T_INT64:
+		case SPOE_DATA_T_UINT32:
+		case SPOE_DATA_T_UINT64:
+			if (decode_varint(&p, end, &v) == -1)
+				return -1;
+			break;
+		case SPOE_DATA_T_IPV4:
+			if (p+4 > end)
+				return -1;
+			p += 4;
+			break;
+		case SPOE_DATA_T_IPV6:
+			if (p+16 > end)
+				return -1;
+			p += 16;
+			break;
+		case SPOE_DATA_T_STR:
+		case SPOE_DATA_T_BIN:
+			/* The whole buffer must be skipped */
+			if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+				return -1;
+			break;
+	}
+
+	ret  = (p - *buf);
+	*buf = p;
+	return ret;
+}
+
+/* Decode a typed data and fill <smp>. If an error occurred, -1 is returned,
+ * otherwise the number of read bytes is returned and <*buf> is moved after the
+ * decoded data. See spoe_skip_data for details. */
+static inline int
+spoe_decode_data(char **buf, char *end, struct sample *smp)
+{
+	char  *str, *p = *buf;
+	int    type, r = 0;
+	uint64_t sz;
+
+	if (p >= end)
+		return -1;
+
+	type = *p++;
+	switch (type & SPOE_DATA_T_MASK) {
+		case SPOE_DATA_T_BOOL:
+			smp->data.u.sint = ((type & SPOE_DATA_FL_MASK) == SPOE_DATA_FL_TRUE);
+			smp->data.type = SMP_T_BOOL;
+			break;
+		case SPOE_DATA_T_INT32:
+		case SPOE_DATA_T_INT64:
+		case SPOE_DATA_T_UINT32:
+		case SPOE_DATA_T_UINT64:
+			if (decode_varint(&p, end, (uint64_t *)&smp->data.u.sint) == -1)
+				return -1;
+			smp->data.type = SMP_T_SINT;
+			break;
+		case SPOE_DATA_T_IPV4:
+			if (p+4 > end)
+				return -1;
+			smp->data.type = SMP_T_IPV4;
+			memcpy(&smp->data.u.ipv4, p, 4);
+			p += 4;
+			break;
+		case SPOE_DATA_T_IPV6:
+			if (p+16 > end)
+				return -1;
+			memcpy(&smp->data.u.ipv6, p, 16);
+			smp->data.type = SMP_T_IPV6;
+			p += 16;
+			break;
+		case SPOE_DATA_T_STR:
+		case SPOE_DATA_T_BIN:
+			/* The whole buffer must be decoded */
+			if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+				return -1;
+			smp->data.u.str.area = str;
+			smp->data.u.str.data = sz;
+			smp->data.type = ((type & SPOE_DATA_T_MASK) == SPOE_DATA_T_STR) ? SMP_T_STR : SMP_T_BIN;
+			break;
+	}
+
+	r    = (p - *buf);
+	*buf = p;
+	return r;
+}
+
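+/* Usage sketch (illustrative): an agent typically walks the key/value list of
+ * a NOTIFY frame payload with one spoe_decode_buffer() for the key followed by
+ * one spoe_decode_data() for its value, until <buf> reaches <end>:
+ *
+ *     char *name; uint64_t name_len;
+ *     struct sample smp;
+ *
+ *     while (buf < end) {
+ *             if (spoe_decode_buffer(&buf, end, &name, &name_len) == -1)
+ *                     break;                    // malformed key
+ *             memset(&smp, 0, sizeof(smp));
+ *             if (spoe_decode_data(&buf, end, &smp) == -1)
+ *                     break;                    // malformed value
+ *             // dispatch on smp.data.type here
+ *     }
+ */
+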
+#endif /* _HAPROXY_SPOE_H */
diff --git a/contrib/mod_defender/include/haproxy/tools.h b/contrib/mod_defender/include/haproxy/tools.h
new file mode 100644
index 0000000..7bd0c51
--- /dev/null
+++ b/contrib/mod_defender/include/haproxy/tools.h
@@ -0,0 +1,350 @@
+/*
+ * include/haproxy/tools.h
+ * This file contains some general purpose functions and macros.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _HAPROXY_TOOLS_H
+#define _HAPROXY_TOOLS_H
+
+#include <string.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdarg.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+/****** string-specific macros and functions ******/
+/* if a > max, then bound <a> to <max>. The macro returns the new <a> */
+#define UBOUND(a, max)	({ typeof(a) b = (max); if ((a) > b) (a) = b; (a); })
+
+/* if a < min, then bound <a> to <min>. The macro returns the new <a> */
+#define LBOUND(a, min)	({ typeof(a) b = (min); if ((a) < b) (a) = b; (a); })
+
+#define SWAP(a, b) do { typeof(a) t; t = a; a = b; b = t; } while(0)
+
+/* Cuts the string at the first CR or LF, replacing it with a NUL. It returns
+ * the pointer to the next character, or to the trailing NUL if no CR/LF was
+ * found.
+ */
+static inline char *cut_crlf(char *s) {
+
+	while (*s != '\r' && *s != '\n') {
+		char *p = s++;
+
+		if (!*p)
+			return p;
+	}
+
+	*s++ = '\0';
+
+	return s;
+}
+
+static inline char *ltrim(char *s, char c) {
+
+	if (c)
+		while (*s == c)
+			s++;
+
+	return s;
+}
+
+static inline char *rtrim(char *s, char c) {
+
+	char *p = s + strlen(s);
+
+	while (p-- > s)
+		if (*p == c)
+			*p = '\0';
+		else
+			break;
+
+	return s;
+}
+
+static inline char *alltrim(char *s, char c) {
+
+	rtrim(s, c);
+
+	return ltrim(s, c);
+}
+
+/* This function converts the time_t value <now> into a broken out struct tm
+ * which must be allocated by the caller. It is highly recommended to use this
+ * function instead of localtime() because that one requires a time_t* which
+ * is not always compatible with tv_sec depending on OS/hardware combinations.
+ */
+static inline void get_localtime(const time_t now, struct tm *tm)
+{
+	localtime_r(&now, tm);
+}
+
+/* This function converts the time_t value <now> into a broken out struct tm
+ * which must be allocated by the caller. It is highly recommended to use this
+ * function instead of gmtime() because that one requires a time_t* which
+ * is not always compatible with tv_sec depending on OS/hardware combinations.
+ */
+static inline void get_gmtime(const time_t now, struct tm *tm)
+{
+	gmtime_r(&now, tm);
+}
+
+/* Counts a number of elapsed days since 01/01/0000 based solely on elapsed
+ * years and assuming the regular rule for leap years applies. It's fake but
+ * serves as a temporary origin. It's worth remembering that it's the first
+ * year of each period that is leap and not the last one, so for instance year
+ * 1 sees 366 days since year 0 was leap. For this reason we have to apply
+ * modular arithmetic which is why we offset the year by 399 before
+ * subtracting the excess at the end. No overflow here before ~11.7 million
+ * years.
+ */
+static inline unsigned int days_since_zero(unsigned int y)
+{
+	return y * 365 + (y + 399) / 4 - (y + 399) / 100 + (y + 399) / 400
+	       - 399 / 4 + 399 / 100;
+}
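+
+/* Quick sanity check of the formula above: days_since_zero(0) yields 0, and
+ * days_since_zero(1) = 365 + 400/4 - 400/100 + 400/400 - 99 + 3 = 366, which
+ * matches year 0 being leap.
+ */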
+
+/* sets the address family to AF_UNSPEC so that is_addr() does not match */
+static inline void clear_addr(struct sockaddr_storage *addr)
+{
+	addr->ss_family = AF_UNSPEC;
+}
+
+/* returns non-zero if addr has a valid and non-null IPv4 or IPv6 address,
+ * otherwise zero.
+ */
+static inline int is_inet_addr(const struct sockaddr_storage *addr)
+{
+	int i;
+
+	switch (addr->ss_family) {
+	case AF_INET:
+		return *(int *)&((struct sockaddr_in *)addr)->sin_addr;
+	case AF_INET6:
+		for (i = 0; i < sizeof(struct in6_addr) / sizeof(int); i++)
+			if (((int *)&((struct sockaddr_in6 *)addr)->sin6_addr)[i] != 0)
+				return ((int *)&((struct sockaddr_in6 *)addr)->sin6_addr)[i];
+	}
+	return 0;
+}
+
+/* returns port in network byte order */
+static inline int get_net_port(struct sockaddr_storage *addr)
+{
+	switch (addr->ss_family) {
+	case AF_INET:
+		return ((struct sockaddr_in *)addr)->sin_port;
+	case AF_INET6:
+		return ((struct sockaddr_in6 *)addr)->sin6_port;
+	}
+	return 0;
+}
+
+/* returns port in host byte order */
+static inline int get_host_port(struct sockaddr_storage *addr)
+{
+	switch (addr->ss_family) {
+	case AF_INET:
+		return ntohs(((struct sockaddr_in *)addr)->sin_port);
+	case AF_INET6:
+		return ntohs(((struct sockaddr_in6 *)addr)->sin6_port);
+	}
+	return 0;
+}
+
+/* returns address len for <addr>'s family, 0 for unknown families */
+static inline int get_addr_len(const struct sockaddr_storage *addr)
+{
+	switch (addr->ss_family) {
+	case AF_INET:
+		return sizeof(struct sockaddr_in);
+	case AF_INET6:
+		return sizeof(struct sockaddr_in6);
+	case AF_UNIX:
+		return sizeof(struct sockaddr_un);
+	}
+	return 0;
+}
+
+/* sets the port, which must already be in network byte order */
+static inline int set_net_port(struct sockaddr_storage *addr, int port)
+{
+	switch (addr->ss_family) {
+	case AF_INET:
+		((struct sockaddr_in *)addr)->sin_port = port;
+		break;
+	case AF_INET6:
+		((struct sockaddr_in6 *)addr)->sin6_port = port;
+		break;
+	}
+	return 0;
+}
+
+/* sets the port from a host byte order value, converting it to network byte order */
+static inline int set_host_port(struct sockaddr_storage *addr, int port)
+{
+	switch (addr->ss_family) {
+	case AF_INET:
+		((struct sockaddr_in *)addr)->sin_port = htons(port);
+		break;
+	case AF_INET6:
+		((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+		break;
+	}
+	return 0;
+}
+
+/* After increasing a pointer value, it can exceed the first buffer
+ * size. This function transforms the value of <ptr> according to
+ * the expected position. <chunks> is an array describing the one or
+ * two available chunks. The first value is the start of the first
+ * chunk, the second value is the end+1 of the first chunk. The third
+ * value is NULL or the start of the second chunk and the fourth value
+ * is the end+1 of the second chunk. The function returns 1 if it does
+ * a wrap, else it returns 0.
+ */
+static inline int fix_pointer_if_wrap(const char **chunks, const char **ptr)
+{
+	if (*ptr < chunks[1])
+		return 0;
+	if (!chunks[2])
+		return 0;
+	*ptr = chunks[2] + ( *ptr - chunks[1] );
+	return 1;
+}
+
+/************************* Composite address manipulation *********************
+ * Composite addresses are simply unsigned long data in which the higher bits
+ * represent a pointer, and the two lower bits are flags. There are several
+ * places where we just want to associate one or two flags to a pointer (eg,
+ * to type it), and these functions permit this. The pointer is necessarily a
+ * 32-bit aligned pointer, as its two lower bits will be cleared and replaced
+ * with the flags.
+ *****************************************************************************/
+
+/* Masks the two lower bits of a composite address and converts it to a
+ * pointer. This is used to mix some bits with some aligned pointers to
+ * structs and to retrieve the original (32-bit aligned) pointer.
+ */
+static inline void *caddr_to_ptr(unsigned long caddr)
+{
+	return (void *)(caddr & ~3UL);
+}
+
+/* Only retrieves the two lower bits of a composite address. This is used to mix
+ * some bits with some aligned pointers to structs and to retrieve the original
+ * data (2 bits).
+ */
+static inline unsigned int caddr_to_data(unsigned long caddr)
+{
+	return (caddr & 3UL);
+}
+
+/* Combines the aligned pointer whose 2 lower bits will be masked with the bits
+ * from <data> to form a composite address. This is used to attach one or two
+ * flag bits to an aligned struct pointer, from which the original (32-bit
+ * aligned) pointer may later be retrieved.
+ */
+static inline unsigned long caddr_from_ptr(void *ptr, unsigned int data)
+{
+	return (((unsigned long)ptr) & ~3UL) + (data & 3);
+}
+
+/* sets the 2 bits of <data> in the <caddr> composite address */
+static inline unsigned long caddr_set_flags(unsigned long caddr, unsigned int data)
+{
+	return caddr | (data & 3);
+}
+
+/* clears the 2 bits of <data> in the <caddr> composite address */
+static inline unsigned long caddr_clr_flags(unsigned long caddr, unsigned int data)
+{
+	return caddr & ~(unsigned long)(data & 3);
+}
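+
+/* Usage sketch (illustrative): tagging an aligned struct pointer with a 2-bit
+ * value and getting both parts back later:
+ *
+ *     struct foo *f = ...;                            // any aligned struct
+ *     unsigned long caddr = caddr_from_ptr(f, 0x2);   // attach the tag
+ *     struct foo *back = caddr_to_ptr(caddr);         // original pointer
+ *     unsigned int tag = caddr_to_data(caddr);        // 0x2
+ */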
+
+static inline unsigned char utf8_return_code(unsigned int code)
+{
+	return code & 0xf0;
+}
+
+static inline unsigned char utf8_return_length(unsigned char code)
+{
+	return code & 0x0f;
+}
+
+/* returns a 64-bit timestamp with the finest resolution available. The
+ * unit is intentionally not specified. It's mostly used to compare dates.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+static inline unsigned long long rdtsc()
+{
+     unsigned int a, d;
+     asm volatile("rdtsc" : "=a" (a), "=d" (d));
+     return a + ((unsigned long long)d << 32);
+}
+#else
+static inline unsigned long long rdtsc()
+{
+	struct timeval tv;
+	gettimeofday(&tv, NULL);
+	return tv.tv_sec * 1000000 + tv.tv_usec;
+}
+#endif
+
+/* Note that this may result in loading libgcc on the first call, so it may
+ * need to have been called once before chrooting.
+ */
+static forceinline int my_backtrace(void **buffer, int max)
+{
+#if !defined(USE_BACKTRACE)
+	return 0;
+#elif defined(HA_HAVE_WORKING_BACKTRACE)
+	return backtrace(buffer, max);
+#else
+	const struct frame {
+		const struct frame *next;
+		void *ra;
+	} *frame;
+	int count;
+
+	frame = __builtin_frame_address(0);
+	for (count = 0; count < max && may_access(frame) && may_access(frame->ra);) {
+		buffer[count++] = frame->ra;
+		frame = frame->next;
+	}
+	return count;
+#endif
+}
+
+/* same as realloc() except that ptr is also freed upon failure */
+static inline void *my_realloc2(void *ptr, size_t size)
+{
+	void *ret;
+
+	ret = realloc(ptr, size);
+	if (!ret && size)
+		free(ptr);
+	return ret;
+}
+
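+/* Usage note (illustrative): unlike plain realloc(), the common pattern
+ *
+ *     buf = my_realloc2(buf, newsize);
+ *
+ * cannot leak: on allocation failure the old block is freed and NULL is
+ * returned, so no second pointer to the old area needs to be kept.
+ */
+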
+#endif /* _HAPROXY_TOOLS_H */