REORG: ebtree: move the include files from ebtree to include/import/

This is where other imported components are located. All files which
used to include the ebtree headers directly were updated so that the
ebtree-related includes are now prefixed with "import/".

The ebtree.h file was slightly adjusted to read compiler.h from the
common/ subdirectory (this is the only change).

A build issue was encountered when eb32sctree.h was included before
eb32tree.h, because only the former checked for the latter's include
guard before defining the u32 type. This was addressed by adding the
reverse ifdef to eb32tree.h.

No further cleanup was done yet in order to keep changes minimal.
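
For reference, the complementary guards now read as follows (excerpted
from the headers added below); whichever file is included first provides
the u32/s32 typedefs and the other one skips them:

    /* in eb32sctree.h */
    #ifndef _EB32TREE_H
    typedef unsigned int u32;
    typedef   signed int s32;
    #endif

    /* in eb32tree.h (the newly added reverse check) */
    #ifndef _EB32SCTREE_H
    typedef unsigned int u32;
    typedef   signed int s32;
    #endif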
diff --git a/include/common/namespace.h b/include/common/namespace.h
index 41931ff..596aac3 100644
--- a/include/common/namespace.h
+++ b/include/common/namespace.h
@@ -2,7 +2,7 @@
 #define _NAMESPACE_H
 
 #include <stdlib.h>
-#include <ebistree.h>
+#include <import/ebistree.h>
 
 #ifdef USE_NS
 
diff --git a/include/common/standard.h b/include/common/standard.h
index 40991fa..3f6f954 100644
--- a/include/common/standard.h
+++ b/include/common/standard.h
@@ -41,8 +41,8 @@
 #include <common/chunk.h>
 #include <common/config.h>
 #include <common/namespace.h>
-#include <eb32tree.h>
-#include <eb32sctree.h>
+#include <import/eb32tree.h>
+#include <import/eb32sctree.h>
 #include <types/protocol.h>
 
 /* size used for max length of decimal representation of long long int. */
diff --git a/include/import/eb32sctree.h b/include/import/eb32sctree.h
new file mode 100644
index 0000000..7061308
--- /dev/null
+++ b/include/import/eb32sctree.h
@@ -0,0 +1,145 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6 with backports from v7-dev
+ * (C) 2002-2017 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32SCTREE_H
+#define _EB32SCTREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32sc_entry(ptr, type, member) container_of(ptr, type, member)
+
+/* These types may sometimes already be defined */
+#ifndef _EB32TREE_H
+typedef unsigned int u32;
+typedef   signed int s32;
+#endif
+
+/* This structure carries a node, a leaf, a scope, and a key. It must start
+ * with the eb_node so that it can be cast into an eb_node. We could also
+ * have put some sort of transparent union here to reduce the indirection
+ * level, but the fact is, the end user is not meant to manipulate internals,
+ * so this is pointless.
+ * In case sizeof(void*)>=sizeof(long), we know there will be some padding after
+ * the leaf if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb32sc_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u32));
+	u32 key;
+	ALWAYS_ALIGN(sizeof(void*));
+	unsigned long node_s; /* visibility of this node's branches */
+	unsigned long leaf_s; /* visibility of this node's leaf */
+} ALIGNED(sizeof(void*));
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32sctree.c, which simply relies on their inline version.
+ */
+struct eb32sc_node *eb32sc_lookup_ge(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_lookup_ge_or_first(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_insert(struct eb_root *root, struct eb32sc_node *new, unsigned long scope);
+void eb32sc_delete(struct eb32sc_node *node);
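+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * each node is inserted with a scope mask and is only visible to lookups
+ * whose mask intersects it. The structure <item> below is hypothetical.
+ *
+ *	struct item {
+ *		struct eb32sc_node node;
+ *	};
+ *
+ *	struct eb_root root = EB_ROOT;
+ *	struct item itm = { .node.key = 42 };
+ *	unsigned long scope = 1UL << 3;          // only visible to scope bit 3
+ *
+ *	eb32sc_insert(&root, &itm.node, scope);
+ *	struct eb32sc_node *n = eb32sc_first(&root, scope);    // &itm.node
+ *	n = eb32sc_first(&root, 1UL << 4);                      // NULL: no match
+ *	eb32sc_delete(&itm.node);
+ */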
+
+/* Walks down left starting at root pointer <start>, and follow the leftmost
+ * branch whose scope matches <scope>. It either returns the node hosting the
+ * first leaf on that side, or NULL if no leaf is found. <start> may either be
+ * NULL or a branch pointer. The pointer to the leaf (or NULL) is returned.
+ */
+static inline struct eb32sc_node *eb32sc_walk_down_left(eb_troot_t *start, unsigned long scope)
+{
+	struct eb_root *root;
+	struct eb_node *node;
+	struct eb32sc_node *eb32;
+
+	if (unlikely(!start))
+		return NULL;
+
+	while (1) {
+		if (eb_gettag(start) == EB_NODE) {
+			root = eb_untag(start, EB_NODE);
+			node = eb_root_to_node(root);
+			eb32 = container_of(node, struct eb32sc_node, node);
+			if (eb32->node_s & scope) {
+				start = node->branches.b[EB_LEFT];
+				continue;
+			}
+			start = node->node_p;
+		}
+		else {
+			root = eb_untag(start, EB_LEAF);
+			node = eb_root_to_node(root);
+			eb32 = container_of(node, struct eb32sc_node, node);
+			if (eb32->leaf_s & scope)
+				return eb32;
+			start = node->leaf_p;
+		}
+
+		/* here we're on a node that doesn't match the scope. We have
+		 * to walk to the closest right location.
+		 */
+		while (eb_gettag(start) != EB_LEFT)
+			/* Walking up from right branch, so we cannot be below root */
+			start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+		/* Note that <start> cannot be NULL at this stage */
+		root = eb_untag(start, EB_LEFT);
+		start = root->b[EB_RGHT];
+		if (eb_clrtag(start) == NULL)
+			return NULL;
+	}
+}
+
+/* Return next node in the tree, starting with tagged parent <start>, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next_with_parent(eb_troot_t *start, unsigned long scope)
+{
+	while (eb_gettag(start) != EB_LEFT)
+		/* Walking up from right branch, so we cannot be below root */
+		start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+	/* Note that <start> cannot be NULL at this stage */
+	start = (eb_untag(start, EB_LEFT))->b[EB_RGHT];
+	if (eb_clrtag(start) == NULL)
+		return NULL;
+
+	return eb32sc_walk_down_left(start, scope);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next(struct eb32sc_node *eb32, unsigned long scope)
+{
+	return eb32sc_next_with_parent(eb32->node.leaf_p, scope);
+}
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_first(struct eb_root *root, unsigned long scope)
+{
+	return eb32sc_walk_down_left(root->b[0], scope);
+}
+
+#endif /* _EB32SCTREE_H */
diff --git a/include/import/eb32tree.h b/include/import/eb32tree.h
new file mode 100644
index 0000000..76f9eaa
--- /dev/null
+++ b/include/import/eb32tree.h
@@ -0,0 +1,502 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32TREE_H
+#define _EB32TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define EB32_ROOT	EB_ROOT
+#define EB32_TREE_HEAD	EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+#ifndef _EB32SCTREE_H
+typedef unsigned int u32;
+typedef   signed int s32;
+#endif
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ */
+struct eb32_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u32));
+	u32 key;
+} ALIGNED(sizeof(void*));
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_first(struct eb_root *root)
+{
+	return eb32_entry(eb_first(root), struct eb32_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_last(struct eb_root *root)
+{
+	return eb32_entry(eb_last(root), struct eb32_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_next(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_next(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_prev(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_prev(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_next_dup(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_next_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_prev_dup(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_prev_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_next_unique(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_next_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_prev_unique(struct eb32_node *eb32)
+{
+	return eb32_entry(eb_prev_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb32_delete(struct eb32_node *eb32)
+{
+	eb_delete(&eb32->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32tree.c, which simply relies on their inline version.
+ */
+struct eb32_node *eb32_lookup(struct eb_root *root, u32 x);
+struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x);
+struct eb32_node *eb32_lookup_le(struct eb_root *root, u32 x);
+struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x);
+struct eb32_node *eb32_insert(struct eb_root *root, struct eb32_node *new);
+struct eb32_node *eb32i_insert(struct eb_root *root, struct eb32_node *new);
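+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * the key is a plain 32-bit integer carried by the node itself. The
+ * structure <entry> below is hypothetical; EB_ROOT comes from ebtree.h
+ * (use EB_ROOT_UNIQUE to reject duplicate keys at insertion).
+ *
+ *	struct entry {
+ *		struct eb32_node node;
+ *	};
+ *
+ *	struct eb_root root = EB_ROOT;
+ *	struct entry e = { .node.key = 1000 };
+
+ *	eb32_insert(&root, &e.node);
+ *	struct eb32_node *n = eb32_lookup(&root, 1000);         // &e.node
+ *	struct entry *ent = eb32_entry(n, struct entry, node);  // back to <e>
+ *
+ *	for (n = eb32_first(&root); n; n = eb32_next(n))
+ *		;                              // visits keys in ascending order
+ *	eb32_delete(&e.node);
+ */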
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb32_delete(struct eb32_node *eb32)
+{
+	__eb_delete(&eb32->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
+{
+	struct eb32_node *node;
+	eb_troot_t *troot;
+	u32 y;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb32_node, node.branches);
+			if (node->key == x)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct eb32_node, node.branches);
+		node_bit = node->node.bit;
+
+		y = node->key ^ x;
+		if (!y) {
+			/* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+			 * walk it down left to get the first entry.
+			 */
+			if (node_bit < 0) {
+				troot = node->node.branches.b[EB_LEFT];
+				while (eb_gettag(troot) != EB_LEAF)
+					troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+				node = container_of(eb_untag(troot, EB_LEAF),
+						    struct eb32_node, node.branches);
+			}
+			return node;
+		}
+
+		if ((y >> node_bit) >= EB_NODE_BRANCHES)
+			return NULL; /* no more common bits */
+
+		troot = node->node.branches.b[(x >> node_bit) & EB_NODE_BRANCH_MASK];
+	}
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
+{
+	struct eb32_node *node;
+	eb_troot_t *troot;
+	u32 key = x ^ 0x80000000;
+	u32 y;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb32_node, node.branches);
+			if (node->key == (u32)x)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct eb32_node, node.branches);
+		node_bit = node->node.bit;
+
+		y = node->key ^ x;
+		if (!y) {
+			/* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+			 * walk it down left to get the first entry.
+			 */
+			if (node_bit < 0) {
+				troot = node->node.branches.b[EB_LEFT];
+				while (eb_gettag(troot) != EB_LEAF)
+					troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+				node = container_of(eb_untag(troot, EB_LEAF),
+						    struct eb32_node, node.branches);
+			}
+			return node;
+		}
+
+		if ((y >> node_bit) >= EB_NODE_BRANCHES)
+			return NULL; /* no more common bits */
+
+		troot = node->node.branches.b[(key >> node_bit) & EB_NODE_BRANCH_MASK];
+	}
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The eb32_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32_insert(struct eb_root *root, struct eb32_node *new) {
+	struct eb32_node *old;
+	unsigned int side;
+	eb_troot_t *troot, **up_ptr;
+	u32 newkey; /* caching the key saves approximately one cycle */
+	eb_troot_t *root_right;
+	eb_troot_t *new_left, *new_rght;
+	eb_troot_t *new_leaf;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached. <newkey> carries the key being inserted.
+	 */
+	newkey = new->key;
+
+	while (1) {
+		if (eb_gettag(troot) == EB_LEAF) {
+			/* insert above a leaf */
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb32_node, node.branches);
+			new->node.node_p = old->node.leaf_p;
+			up_ptr = &old->node.leaf_p;
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				    struct eb32_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above.
+		 */
+
+		if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+		    (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+			/* The tree did not contain the key, so we insert <new> before the node
+			 * <old>, and set ->bit to designate the lowest bit position in <new>
+			 * which applies to ->branches.b[].
+			 */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+		troot = root->b[side];
+	}
+
+	new_left = eb_dotag(&new->node.branches, EB_LEFT);
+	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+	/* We need the common higher bits between new->key and old->key.
+	 * What differences are there between new->key and the node here ?
+	 * NOTE that bit(new) is always < bit(root) because highest
+	 * bit of new->key and old->key are identical here (otherwise they
+	 * would sit on different branches).
+	 */
+
+	// note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+	new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+	if (new->key == old->key) {
+		new->node.bit = -1; /* mark as new dup tree, just in case */
+
+		if (likely(eb_gettag(root_right))) {
+			/* we refuse to duplicate this key if the tree is
+			 * tagged as containing only unique keys.
+			 */
+			return old;
+		}
+
+		if (eb_gettag(troot) != EB_LEAF) {
+			/* there was already a dup tree below */
+			struct eb_node *ret;
+			ret = eb_insert_dup(&old->node, &new->node);
+			return container_of(ret, struct eb32_node, node);
+		}
+		/* otherwise fall through */
+	}
+
+	if (new->key >= old->key) {
+		new->node.branches.b[EB_LEFT] = troot;
+		new->node.branches.b[EB_RGHT] = new_leaf;
+		new->node.leaf_p = new_rght;
+		*up_ptr = new_left;
+	}
+	else {
+		new->node.branches.b[EB_LEFT] = new_leaf;
+		new->node.branches.b[EB_RGHT] = troot;
+		new->node.leaf_p = new_left;
+		*up_ptr = new_rght;
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs to be set with the key. The eb32_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32i_insert(struct eb_root *root, struct eb32_node *new) {
+	struct eb32_node *old;
+	unsigned int side;
+	eb_troot_t *troot, **up_ptr;
+	int newkey; /* caching the key saves approximately one cycle */
+	eb_troot_t *root_right;
+	eb_troot_t *new_left, *new_rght;
+	eb_troot_t *new_leaf;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached. <newkey> carries a high bit shift of the key being
+	 * inserted in order to have negative keys stored before positive
+	 * ones.
+	 */
+	newkey = new->key + 0x80000000;
+
+	while (1) {
+		if (eb_gettag(troot) == EB_LEAF) {
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb32_node, node.branches);
+			new->node.node_p = old->node.leaf_p;
+			up_ptr = &old->node.leaf_p;
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				    struct eb32_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above.
+		 */
+
+		if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+		    (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+			/* The tree did not contain the key, so we insert <new> before the node
+			 * <old>, and set ->bit to designate the lowest bit position in <new>
+			 * which applies to ->branches.b[].
+			 */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+		troot = root->b[side];
+	}
+
+	new_left = eb_dotag(&new->node.branches, EB_LEFT);
+	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+	/* We need the common higher bits between new->key and old->key.
+	 * What differences are there between new->key and the node here ?
+	 * NOTE that bit(new) is always < bit(root) because highest
+	 * bit of new->key and old->key are identical here (otherwise they
+	 * would sit on different branches).
+	 */
+
+	// note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+	new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+	if (new->key == old->key) {
+		new->node.bit = -1; /* mark as new dup tree, just in case */
+
+		if (likely(eb_gettag(root_right))) {
+			/* we refuse to duplicate this key if the tree is
+			 * tagged as containing only unique keys.
+			 */
+			return old;
+		}
+
+		if (eb_gettag(troot) != EB_LEAF) {
+			/* there was already a dup tree below */
+			struct eb_node *ret;
+			ret = eb_insert_dup(&old->node, &new->node);
+			return container_of(ret, struct eb32_node, node);
+		}
+		/* otherwise fall through */
+	}
+
+	if ((s32)new->key >= (s32)old->key) {
+		new->node.branches.b[EB_LEFT] = troot;
+		new->node.branches.b[EB_RGHT] = new_leaf;
+		new->node.leaf_p = new_rght;
+		*up_ptr = new_left;
+	}
+	else {
+		new->node.branches.b[EB_LEFT] = new_leaf;
+		new->node.branches.b[EB_RGHT] = troot;
+		new->node.leaf_p = new_left;
+		*up_ptr = new_rght;
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+#endif /* _EB32TREE_H */
diff --git a/include/import/eb64tree.h b/include/import/eb64tree.h
new file mode 100644
index 0000000..ff2feee
--- /dev/null
+++ b/include/import/eb64tree.h
@@ -0,0 +1,589 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 64bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB64TREE_H
+#define _EB64TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb64_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define EB64_ROOT	EB_ROOT
+#define EB64_TREE_HEAD	EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+typedef unsigned long long u64;
+typedef   signed long long s64;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * In case sizeof(void*)>=sizeof(u64), we know there will be some padding after
+ * the key if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb64_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u64));
+	ALWAYS_ALIGN(sizeof(void*));
+	u64 key;
+} ALIGNED(sizeof(void*));
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_first(struct eb_root *root)
+{
+	return eb64_entry(eb_first(root), struct eb64_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_last(struct eb_root *root)
+{
+	return eb64_entry(eb_last(root), struct eb64_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_next(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_next(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_prev(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_prev(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_next_dup(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_next_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_prev_dup(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_prev_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_next_unique(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_next_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_prev_unique(struct eb64_node *eb64)
+{
+	return eb64_entry(eb_prev_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb64_delete(struct eb64_node *eb64)
+{
+	eb_delete(&eb64->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb64tree.c, which simply relies on their inline version.
+ */
+struct eb64_node *eb64_lookup(struct eb_root *root, u64 x);
+struct eb64_node *eb64i_lookup(struct eb_root *root, s64 x);
+struct eb64_node *eb64_lookup_le(struct eb_root *root, u64 x);
+struct eb64_node *eb64_lookup_ge(struct eb_root *root, u64 x);
+struct eb64_node *eb64_insert(struct eb_root *root, struct eb64_node *new);
+struct eb64_node *eb64i_insert(struct eb_root *root, struct eb64_node *new);
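+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * same model as eb32 with 64-bit keys; the "i" variants order and compare
+ * keys as signed values. The structure <rec> below is hypothetical.
+ *
+ *	struct rec {
+ *		struct eb64_node node;
+ *	};
+ *
+ *	struct eb_root root = EB_ROOT;
+ *	struct rec r = { .node.key = (u64)-5 };  // negative key, signed ordering
+ *
+ *	eb64i_insert(&root, &r.node);
+ *	struct eb64_node *n = eb64i_lookup(&root, -5);          // &r.node
+ *	eb64_delete(&r.node);
+ */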
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb64_delete(struct eb64_node *eb64)
+{
+	__eb_delete(&eb64->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
+{
+	struct eb64_node *node;
+	eb_troot_t *troot;
+	u64 y;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb64_node, node.branches);
+			if (node->key == x)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct eb64_node, node.branches);
+
+		y = node->key ^ x;
+		if (!y) {
+			/* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+			 * walk it down left to get the first entry.
+			 */
+			if (node->node.bit < 0) {
+				troot = node->node.branches.b[EB_LEFT];
+				while (eb_gettag(troot) != EB_LEAF)
+					troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+				node = container_of(eb_untag(troot, EB_LEAF),
+						    struct eb64_node, node.branches);
+			}
+			return node;
+		}
+
+		if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+			return NULL; /* no more common bits */
+
+		troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+	}
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
+{
+	struct eb64_node *node;
+	eb_troot_t *troot;
+	u64 key = x ^ (1ULL << 63);
+	u64 y;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb64_node, node.branches);
+			if (node->key == (u64)x)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct eb64_node, node.branches);
+
+		y = node->key ^ x;
+		if (!y) {
+			/* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+			 * walk it down left to get the first entry.
+			 */
+			if (node->node.bit < 0) {
+				troot = node->node.branches.b[EB_LEFT];
+				while (eb_gettag(troot) != EB_LEAF)
+					troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+				node = container_of(eb_untag(troot, EB_LEAF),
+						    struct eb64_node, node.branches);
+			}
+			return node;
+		}
+
+		if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+			return NULL; /* no more common bits */
+
+		troot = node->node.branches.b[(key >> node->node.bit) & EB_NODE_BRANCH_MASK];
+	}
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The eb64_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64_insert(struct eb_root *root, struct eb64_node *new) {
+	struct eb64_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	u64 newkey; /* caching the key saves approximately one cycle */
+	eb_troot_t *root_right;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached. <newkey> carries the key being inserted.
+	 */
+	newkey = new->key;
+
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb64_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			   - the tree does not contain the key, and we have
+			     new->key < old->key. We insert new above old, on
+			     the left ;
+
+			   - the tree does not contain the key, and we have
+			     new->key > old->key. We insert new above old, on
+			     the right ;
+
+			   - the tree does contain the key, which implies it
+			     is alone. We add the new key next to it as a
+			     first duplicate.
+
+			   The last two cases can easily be partially merged.
+			*/
+			 
+			if (new->key < old->key) {
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if ((new->key == old->key) && eb_gettag(root_right))
+					return old;
+
+				/* new->key >= old->key, new goes to the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+
+				if (new->key == old->key) {
+					new->node.bit = -1;
+					root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+					return new;
+				}
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				    struct eb64_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above.
+		 */
+
+		if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+		    (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+			/* The tree did not contain the key, so we insert <new> before the node
+			 * <old>, and set ->bit to designate the lowest bit position in <new>
+			 * which applies to ->branches.b[].
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			if (new->key < old->key) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else if (new->key > old->key) {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			else {
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct eb64_node, node);
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+#if BITS_PER_LONG >= 64
+		side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+#else
+		side = newkey;
+		side >>= old_node_bit;
+		if (old_node_bit >= 32) {
+			side = newkey >> 32;
+			side >>= old_node_bit & 0x1F;
+		}
+		side &= EB_NODE_BRANCH_MASK;
+#endif
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * What differences are there between new->key and the node here ?
+	 * NOTE that bit(new) is always < bit(root) because highest
+	 * bit of new->key and old->key are identical here (otherwise they
+	 * would sit on different branches).
+	 */
+	// note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+	new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+	return new;
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs to be set with the key. The eb64_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64i_insert(struct eb_root *root, struct eb64_node *new) {
+	struct eb64_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	u64 newkey; /* caching the key saves approximately one cycle */
+	eb_troot_t *root_right;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached. <newkey> carries a high bit shift of the key being
+	 * inserted in order to have negative keys stored before positive
+	 * ones.
+	 */
+	newkey = new->key ^ (1ULL << 63);
+
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct eb64_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			   - the tree does not contain the key, and we have
+			     new->key < old->key. We insert new above old, on
+			     the left ;
+
+			   - the tree does not contain the key, and we have
+			     new->key > old->key. We insert new above old, on
+			     the right ;
+
+			   - the tree does contain the key, which implies it
+			     is alone. We add the new key next to it as a
+			     first duplicate.
+
+			   The last two cases can easily be partially merged.
+			*/
+			 
+			if ((s64)new->key < (s64)old->key) {
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if ((new->key == old->key) && eb_gettag(root_right))
+					return old;
+
+				/* new->key >= old->key, new goes to the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+
+				if (new->key == old->key) {
+					new->node.bit = -1;
+					root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+					return new;
+				}
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				    struct eb64_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above.
+		 */
+
+		if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+		    (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+			/* The tree did not contain the key, so we insert <new> before the node
+			 * <old>, and set ->bit to designate the lowest bit position in <new>
+			 * which applies to ->branches.b[].
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			if ((s64)new->key < (s64)old->key) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else if ((s64)new->key > (s64)old->key) {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			else {
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct eb64_node, node);
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+#if BITS_PER_LONG >= 64
+		side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+#else
+		side = newkey;
+		side >>= old_node_bit;
+		if (old_node_bit >= 32) {
+			side = newkey >> 32;
+			side >>= old_node_bit & 0x1F;
+		}
+		side &= EB_NODE_BRANCH_MASK;
+#endif
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * What differences are there between new->key and the node here ?
+	 * NOTE that bit(new) is always < bit(root) because highest
+	 * bit of new->key and old->key are identical here (otherwise they
+	 * would sit on different branches).
+	 */
+	// note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+	new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+	return new;
+}
+
+#endif /* _EB64TREE_H */
diff --git a/include/import/ebimtree.h b/include/import/ebimtree.h
new file mode 100644
index 0000000..8b12a54
--- /dev/null
+++ b/include/import/ebimtree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros for Indirect Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBIMTREE_H
+#define _EBIMTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebimtree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebim_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebpt_node *ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len);
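+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * the ebpt_node only stores a pointer to the key, so the caller owns the
+ * key storage and must keep it valid while the node is in the tree. The
+ * buffer <id> below is hypothetical.
+ *
+ *	struct eb_root root = EB_ROOT;
+ *	struct ebpt_node n;
+ *	unsigned char id[4] = { 0xde, 0xad, 0xbe, 0xef };
+ *
+ *	n.key = id;                               // indirect key
+ *	ebim_insert(&root, &n, sizeof(id));
+ *	struct ebpt_node *p = ebim_lookup(&root, "\xde\xad\xbe\xef", 4);  // &n
+ *	eb_delete(&n.node);
+ */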
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * lookup string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. Returns first node if <len> is zero.
+ */
+static forceinline struct ebpt_node *
+__ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+	struct ebpt_node *node;
+	eb_troot_t *troot;
+	int pos, side;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		goto ret_null;
+
+	if (unlikely(len == 0))
+		goto walk_down;
+
+	pos = 0;
+	while (1) {
+		if (eb_gettag(troot) == EB_LEAF) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebpt_node, node.branches);
+			if (memcmp(node->key + pos, x, len) != 0)
+				goto ret_null;
+			else
+				goto ret_node;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebpt_node, node.branches);
+
+		node_bit = node->node.bit;
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (memcmp(node->key + pos, x, len) != 0)
+				goto ret_null;
+			else
+				goto walk_left;
+		}
+
+		/* OK, normal data node, let's walk down. We check if all full
+		 * bytes are equal, and we start from the last one we did not
+		 * completely check. We stop as soon as we reach the last byte,
+		 * because we must decide to go left/right or abort.
+		 */
+		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+		if (node_bit < 0) {
+			/* This surprising construction gives better performance
+			 * because gcc does not try to reorder the loop. Tested to
+			 * be fine with 2.95 to 4.2.
+			 */
+			while (1) {
+				if (*(unsigned char*)(node->key + pos++) ^ *(unsigned char*)(x++))
+					goto ret_null; /* more than one full byte is different */
+				if (--len == 0)
+					goto walk_left; /* return first node if all bytes matched */
+				node_bit += 8;
+				if (node_bit >= 0)
+					break;
+			}
+		}
+
+		/* here we know that only the last byte differs, so node_bit < 8.
+		 * We have 2 possibilities :
+		 *   - more than the last bit differs => return NULL
+		 *   - walk down on side = (x[pos] >> node_bit) & 1
+		 */
+		side = *(unsigned char *)x >> node_bit;
+		if (((*(unsigned char*)(node->key + pos) >> node_bit) ^ side) > 1)
+			goto ret_null;
+		side &= 1;
+		troot = node->node.branches.b[side];
+	}
+ walk_left:
+	troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+	while (eb_gettag(troot) != EB_LEAF)
+		troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+	node = container_of(eb_untag(troot, EB_LEAF),
+			    struct ebpt_node, node.branches);
+ ret_node:
+	return node;
+ ret_null:
+	return NULL;
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The ebpt_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebpt_node *
+__ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len)
+{
+	struct ebpt_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	len <<= 3;
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebpt_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			 * - the tree does not contain the key, and we have
+			 *   new->key < old->key. We insert new above old, on
+			 *   the left ;
+			 *
+			 * - the tree does not contain the key, and we have
+			 *   new->key > old->key. We insert new above old, on
+			 *   the right ;
+			 *
+			 * - the tree does contain the key, which implies it
+			 *   is alone. We add the new key next to it as a
+			 *   first duplicate.
+			 *
+			 * The last two cases can easily be partially merged.
+			 */
+			bit = equal_bits(new->key, old->key, bit, len);
+
+			/* Note: we can compare more bits than the current node's because as
+			 * long as they are identical, we know we descend along the correct
+			 * side. However we don't want to start to compare past the end.
+			 */
+			diff = 0;
+			if (((unsigned)bit >> 3) < len)
+				diff = cmp_bits(new->key, old->key, bit);
+
+			if (diff < 0) {
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if (diff == 0 && eb_gettag(root_right))
+					return old;
+
+				/* new->key >= old->key, new goes to the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+
+				if (diff == 0) {
+					new->node.bit = -1;
+					root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+					return new;
+				}
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				   struct ebpt_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above. Note: we can compare more bits than
+		 * the current node's because as long as they are identical, we
+		 * know we descend along the correct side.
+		 */
+		if (old_node_bit < 0) {
+			/* we're above a duplicate tree, we must compare till the end */
+			bit = equal_bits(new->key, old->key, bit, len);
+			goto dup_tree;
+		}
+		else if (bit < old_node_bit) {
+			bit = equal_bits(new->key, old->key, bit, old_node_bit);
+		}
+
+		if (bit < old_node_bit) { /* we don't have all bits in common */
+			/* The tree did not contain the key, so we insert <new> before the node
+			 * <old>, and set ->bit to designate the lowest bit position in <new>
+			 * which applies to ->branches.b[].
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+		dup_tree:
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			/* Note: we can compare more bits than the current node's because as
+			 * long as they are identical, we know we descend along the correct
+			 * side. However we don't want to start to compare past the end.
+			 */
+			diff = 0;
+			if (((unsigned)bit >> 3) < len)
+				diff = cmp_bits(new->key, old->key, bit);
+
+			if (diff < 0) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else if (diff > 0) {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			else {
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct ebpt_node, node);
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * This number of bits is already in <bit>.
+	 */
+	new->node.bit = bit;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+#endif /* _EBIMTREE_H */
diff --git a/include/import/ebistree.h b/include/import/ebistree.h
new file mode 100644
index 0000000..a438fa1
--- /dev/null
+++ b/include/import/ebistree.h
@@ -0,0 +1,329 @@
+/*
+ * Elastic Binary Trees - macros to manipulate Indirect String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBISTREE_H
+#define _EBISTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+#include "ebimtree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebistree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebis_lookup(struct eb_root *root, const char *x);
+struct ebpt_node *ebis_insert(struct eb_root *root, struct ebpt_node *new);
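+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * keys are NUL-terminated strings referenced by pointer; the string must
+ * outlive the node's presence in the tree.
+ *
+ *	struct eb_root root = EB_ROOT;
+ *	struct ebpt_node n;
+ *
+ *	n.key = "example";
+ *	ebis_insert(&root, &n);
+ *	struct ebpt_node *p = ebis_lookup(&root, "example");      // &n
+ *	p = ebis_lookup_len(&root, "example.com", 7);             // also &n
+ *	eb_delete(&n.node);
+ */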
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and to ensure that no null character is present
+ * in string <x> in the first <len> chars. If none can be found, return NULL.
+ */
+static forceinline struct ebpt_node *
+ebis_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+	struct ebpt_node *node;
+
+	node = ebim_lookup(root, x, len);
+	if (!node || ((const char *)node->key)[len] != 0)
+		return NULL;
+	return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+static forceinline struct ebpt_node *__ebis_lookup(struct eb_root *root, const void *x)
+{
+	struct ebpt_node *node;
+	eb_troot_t *troot;
+	int bit;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	bit = 0;
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebpt_node, node.branches);
+			if (strcmp(node->key, x) == 0)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebpt_node, node.branches);
+		node_bit = node->node.bit;
+
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (strcmp(node->key, x) != 0)
+				return NULL;
+
+			troot = node->node.branches.b[EB_LEFT];
+			while (eb_gettag(troot) != EB_LEAF)
+				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebpt_node, node.branches);
+			return node;
+		}
+
+		/* OK, normal data node, let's walk down but don't compare data
+		 * if we already reached the end of the key.
+		 */
+		if (likely(bit >= 0)) {
+			bit = string_equal_bits(x, node->key, bit);
+			if (likely(bit < node_bit)) {
+				if (bit >= 0)
+					return NULL; /* no more common bits */
+
+				/* bit < 0 : we reached the end of the key. If we
+				 * are in a tree with unique keys, we can return
+				 * this node. Otherwise we have to walk it down
+				 * and stop comparing bits.
+				 */
+				if (eb_gettag(root->b[EB_RGHT]))
+					return node;
+			}
+			/* if the bit is larger than the node's, we must bound it
+			 * because we might have compared too many bytes with an
+			 * inappropriate leaf. For a test, build a tree from "0",
+			 * "WW", "W", "S" inserted in this exact sequence and lookup
+			 * "W" => "S" is returned without this assignment.
+			 */
+			else
+				bit = node_bit;
+		}
+
+		troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+					       (~node_bit & 7)) & 1];
+	}
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebpt_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebpt_node *
+__ebis_insert(struct eb_root *root, struct ebpt_node *new)
+{
+	struct ebpt_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebpt_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			 * - the tree does not contain the key, and we have
+			 *   new->key < old->key. We insert new above old, on
+			 *   the left ;
+			 *
+			 * - the tree does not contain the key, and we have
+			 *   new->key > old->key. We insert new above old, on
+			 *   the right ;
+			 *
+			 * - the tree does contain the key, which implies it
+			 *   is alone. We add the new key next to it as a
+			 *   first duplicate.
+			 *
+			 * The last two cases can easily be partially merged.
+			 */
+			if (bit >= 0)
+				bit = string_equal_bits(new->key, old->key, bit);
+
+			if (bit < 0) {
+				/* key was already there */
+
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if (eb_gettag(root_right))
+					return old;
+
+				/* new arbitrarily goes to the right and tops the dup tree */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+				new->node.bit = -1;
+				root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+				return new;
+			}
+
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				/* new->key < old->key, new takes the left */
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* new->key > old->key, new takes the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				   struct ebpt_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above. Note: we can compare more bits than
+		 * the current node's because as long as they are identical, we
+		 * know we descend along the correct side.
+		 */
+		if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+			bit = string_equal_bits(new->key, old->key, bit);
+
+		if (unlikely(bit < 0)) {
+			/* Perfect match, we must only stop on head of dup tree
+			 * or walk down to a leaf.
+			 */
+			if (old_node_bit < 0) {
+				/* We know here that string_equal_bits matched all
+				 * bits and that we're on top of a dup tree, then
+				 * we can perform the dup insertion and return.
+				 */
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct ebpt_node, node);
+			}
+			/* OK so let's walk down */
+		}
+		else if (bit < old_node_bit || old_node_bit < 0) {
+			/* The tree did not contain the key, or we stopped on top of a dup
+			 * tree, possibly containing the key. In the former case, we insert
+			 * <new> before the node <old>, and set ->bit to designate the lowest
+			 * bit position in <new> which applies to ->branches.b[]. In the latter
+			 * case, we add the key to the existing dup tree. Note that we cannot
+			 * enter here if we match an intermediate node's key that is not the
+			 * head of a dup tree.
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			/* we can never match all bits here */
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * This number of bits is already in <bit>.
+	 * NOTE: we can't get here with bit < 0 since we would have found a dup!
+	 */
+	new->node.bit = bit;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+#endif /* _EBISTREE_H */
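
For reference, a minimal usage sketch of the ebis_* API declared above. It is purely illustrative and not part of the diff: the structure and function names are hypothetical, and the tree root is assumed to be initialized to EB_ROOT from ebtree.h.

#include <import/ebistree.h>

/* hypothetical entry carrying its own zero-terminated key storage */
struct named_entry {
	struct ebpt_node node;   /* node.key will point to <name> below */
	char name[32];
};

static struct eb_root name_tree = EB_ROOT;

static void name_tree_add(struct named_entry *e)
{
	/* the key is an indirect pointer to a zero-terminated string */
	e->node.key = e->name;
	ebis_insert(&name_tree, &e->node);
}

static struct named_entry *name_tree_find(const char *name)
{
	struct ebpt_node *n = ebis_lookup(&name_tree, name);

	return n ? ebpt_entry(n, struct named_entry, node) : NULL;
}
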
diff --git a/include/import/ebmbtree.h b/include/import/ebmbtree.h
new file mode 100644
index 0000000..8262ec6
--- /dev/null
+++ b/include/import/ebmbtree.h
@@ -0,0 +1,814 @@
+/*
+ * Elastic Binary Trees - macros and structures for Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBMBTREE_H
+#define _EBMBTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebmb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define EBMB_ROOT	EB_ROOT
+#define EBMB_TREE_HEAD	EB_TREE_HEAD
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * The 'node.bit' value here works differently from scalar types, as it contains
+ * the number of identical bits between the two branches.
+ * Note that we take great care to make sure the key is located exactly at
+ * the end of the struct even if that involves holes before it, so that it
+ * always aliases any external key a user would append after. This is why the
+ * key uses the same alignment as the struct.
+ */
+struct ebmb_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	ALWAYS_ALIGN(sizeof(void*));
+	unsigned char key[0]; /* the key, its size depends on the application */
+} ALIGNED(sizeof(void*));
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_first(struct eb_root *root)
+{
+	return ebmb_entry(eb_first(root), struct ebmb_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_last(struct eb_root *root)
+{
+	return ebmb_entry(eb_last(root), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_next(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_prev(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_next_dup(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_next_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_prev_dup(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_prev_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next_unique(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_next_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev_unique(struct ebmb_node *ebmb)
+{
+	return ebmb_entry(eb_prev_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebmb_delete(struct ebmb_node *ebmb)
+{
+	eb_delete(&ebmb->node);
+}
+
+/* The following functions are not inlined by default. They are declared
+ * in ebmbtree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebmb_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebmb_node *ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+struct ebmb_node *ebmb_lookup_longest(struct eb_root *root, const void *x);
+struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
+struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+
+/* The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __ebmb_delete(struct ebmb_node *ebmb)
+{
+	__eb_delete(&ebmb->node);
+}
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * lookup string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. Returns first node if <len> is zero.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+	struct ebmb_node *node;
+	eb_troot_t *troot;
+	int pos, side;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		goto ret_null;
+
+	if (unlikely(len == 0))
+		goto walk_down;
+
+	pos = 0;
+	while (1) {
+		if (eb_gettag(troot) == EB_LEAF) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			if (memcmp(node->key + pos, x, len) != 0)
+				goto ret_null;
+			else
+				goto ret_node;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+
+		node_bit = node->node.bit;
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (memcmp(node->key + pos, x, len) != 0)
+				goto ret_null;
+			else
+				goto walk_left;
+		}
+
+		/* OK, normal data node, let's walk down. We check if all full
+		 * bytes are equal, and we start from the last one we did not
+		 * completely check. We stop as soon as we reach the last byte,
+		 * because we must decide to go left/right or abort.
+		 */
+		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+		if (node_bit < 0) {
+			/* This surprising construction gives better performance
+			 * because gcc does not try to reorder the loop. Tested to
+			 * be fine with 2.95 to 4.2.
+			 */
+			while (1) {
+				if (node->key[pos++] ^ *(unsigned char*)(x++))
+					goto ret_null;  /* more than one full byte is different */
+				if (--len == 0)
+					goto walk_left; /* return first node if all bytes matched */
+				node_bit += 8;
+				if (node_bit >= 0)
+					break;
+			}
+		}
+
+		/* here we know that only the last byte differs, so node_bit < 8.
+		 * We have 2 possibilities :
+		 *   - more than the last bit differs => return NULL
+		 *   - walk down on side = (x[pos] >> node_bit) & 1
+		 */
+		side = *(unsigned char *)x >> node_bit;
+		if (((node->key[pos] >> node_bit) ^ side) > 1)
+			goto ret_null;
+		side &= 1;
+		troot = node->node.branches.b[side];
+	}
+ walk_left:
+	troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+	while (eb_gettag(troot) != EB_LEAF)
+		troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+	node = container_of(eb_untag(troot, EB_LEAF),
+			    struct ebmb_node, node.branches);
+ ret_node:
+	return node;
+ ret_null:
+	return NULL;
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes. It is absolutely mandatory that this length
+ * is the same for all keys in the tree. This function cannot be used to
+ * insert strings.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+	struct ebmb_node *old;
+	unsigned int side;
+	eb_troot_t *troot, **up_ptr;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	eb_troot_t *new_left, *new_rght;
+	eb_troot_t *new_leaf;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			/* insert above a leaf */
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			new->node.node_p = old->node.leaf_p;
+			up_ptr = &old->node.leaf_p;
+			goto check_bit_and_break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				   struct ebmb_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		if (unlikely(old->node.bit < 0)) {
+			/* We're above a duplicate tree, so we must compare the whole value */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+		check_bit_and_break:
+			bit = equal_bits(new->key, old->key, bit, len << 3);
+			break;
+		}
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above. Note: we can compare more bits than
+		 * the current node's because as long as they are identical, we
+		 * know we descend along the correct side.
+		 */
+
+		bit = equal_bits(new->key, old->key, bit, old_node_bit);
+		if (unlikely(bit < old_node_bit)) {
+			/* The tree did not contain the key, so we insert <new> before the
+			 * node <old>, and set ->bit to designate the lowest bit position in
+			 * <new> which applies to ->branches.b[].
+			 */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+			break;
+		}
+		/* we don't want to skip bits for further comparisons, so we must limit <bit>.
+		 * However, since we're going down around <old_node_bit>, we know it will be
+		 * properly matched, so we can skip this bit.
+		 */
+		bit = old_node_bit + 1;
+
+		/* walk down */
+		root = &old->node.branches;
+		side = old_node_bit & 7;
+		side ^= 7;
+		side = (new->key[old_node_bit >> 3] >> side) & 1;
+		troot = root->b[side];
+	}
+
+	new_left = eb_dotag(&new->node.branches, EB_LEFT);
+	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+	new->node.bit = bit;
+
+	/* Note: we can compare more bits than the current node's because as
+	 * long as they are identical, we know we descend along the correct
+	 * side. However we don't want to start to compare past the end.
+	 */
+	diff = 0;
+	if (((unsigned)bit >> 3) < len)
+		diff = cmp_bits(new->key, old->key, bit);
+
+	if (diff == 0) {
+		new->node.bit = -1; /* mark as new dup tree, just in case */
+
+		if (likely(eb_gettag(root_right))) {
+			/* we refuse to duplicate this key if the tree is
+			 * tagged as containing only unique keys.
+			 */
+			return old;
+		}
+
+		if (eb_gettag(troot) != EB_LEAF) {
+			/* there was already a dup tree below */
+			struct eb_node *ret;
+			ret = eb_insert_dup(&old->node, &new->node);
+			return container_of(ret, struct ebmb_node, node);
+		}
+		/* otherwise fall through */
+	}
+
+	if (diff >= 0) {
+		new->node.branches.b[EB_LEFT] = troot;
+		new->node.branches.b[EB_RGHT] = new_leaf;
+		new->node.leaf_p = new_rght;
+		*up_ptr = new_left;
+	}
+	else if (diff < 0) {
+		new->node.branches.b[EB_LEFT] = new_leaf;
+		new->node.branches.b[EB_RGHT] = troot;
+		new->node.leaf_p = new_left;
+		*up_ptr = new_rght;
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+
+/* Find the first occurrence of the longest prefix matching a key <x> in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_longest(struct eb_root *root, const void *x)
+{
+	struct ebmb_node *node;
+	eb_troot_t *troot, *cover;
+	int pos, side;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	cover = NULL;
+	pos = 0;
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			if (check_bits(x - pos, node->key, pos, node->node.pfx))
+				goto not_found;
+
+			return node;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+
+		node_bit = node->node.bit;
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (check_bits(x - pos, node->key, pos, node->node.pfx))
+				goto not_found;
+
+			troot = node->node.branches.b[EB_LEFT];
+			while (eb_gettag(troot) != EB_LEAF)
+				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			return node;
+		}
+
+		node_bit >>= 1; /* strip cover bit */
+		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+		if (node_bit < 0) {
+			/* This uncommon construction gives better performance
+			 * because gcc does not try to reorder the loop. Tested to
+			 * be fine with 2.95 to 4.2.
+			 */
+			while (1) {
+				x++; pos++;
+				if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+					goto not_found; /* more than one full byte is different */
+				node_bit += 8;
+				if (node_bit >= 0)
+					break;
+			}
+		}
+
+		/* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+		 * We have 2 possibilities :
+		 *   - more than the last bit differs => data does not match
+		 *   - walk down on side = (x[pos] >> node_bit) & 1
+		 */
+		side = *(unsigned char *)x >> node_bit;
+		if (((node->key[pos] >> node_bit) ^ side) > 1)
+			goto not_found;
+
+		if (!(node->node.bit & 1)) {
+			/* This is a cover node, let's keep a reference to it
+			 * for later. The covering subtree is on the left, and
+			 * the covered subtree is on the right, so we have to
+			 * walk down right.
+			 */
+			cover = node->node.branches.b[EB_LEFT];
+			troot = node->node.branches.b[EB_RGHT];
+			continue;
+		}
+		side &= 1;
+		troot = node->node.branches.b[side];
+	}
+
+ not_found:
+	/* Walk down the last cover tree if it exists. It does not matter if cover is NULL */
+	return ebmb_entry(eb_walk_down(cover, EB_LEFT), struct ebmb_node, node);
+}
+
+
+/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
+{
+	struct ebmb_node *node;
+	eb_troot_t *troot;
+	int pos, side;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	pos = 0;
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			if (node->node.pfx != pfx)
+				return NULL;
+			if (check_bits(x - pos, node->key, pos, node->node.pfx))
+				return NULL;
+			return node;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+
+		node_bit = node->node.bit;
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (node->node.pfx != pfx)
+				return NULL;
+			if (check_bits(x - pos, node->key, pos, node->node.pfx))
+				return NULL;
+
+			troot = node->node.branches.b[EB_LEFT];
+			while (eb_gettag(troot) != EB_LEAF)
+				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			return node;
+		}
+
+		node_bit >>= 1; /* strip cover bit */
+		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+		if (node_bit < 0) {
+			/* This uncommon construction gives better performance
+			 * because gcc does not try to reorder the loop. Tested to
+			 * be fine with 2.95 to 4.2.
+			 */
+			while (1) {
+				x++; pos++;
+				if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+					return NULL; /* more than one full byte is different */
+				node_bit += 8;
+				if (node_bit >= 0)
+					break;
+			}
+		}
+
+		/* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+		 * We have 2 possibilities :
+		 *   - more than the last bit differs => data does not match
+		 *   - walk down on side = (x[pos] >> node_bit) & 1
+		 */
+		side = *(unsigned char *)x >> node_bit;
+		if (((node->key[pos] >> node_bit) ^ side) > 1)
+			return NULL;
+
+		if (!(node->node.bit & 1)) {
+			/* This is a cover node, it may be the entry we're
+			 * looking for. We already know that it matches all the
+			 * bits, let's compare prefixes and descend the cover
+			 * subtree if they match.
+			 */
+			if ((unsigned short)node->node.bit >> 1 == pfx)
+				troot = node->node.branches.b[EB_LEFT];
+			else
+				troot = node->node.branches.b[EB_RGHT];
+			continue;
+		}
+		side &= 1;
+		troot = node->node.branches.b[side];
+	}
+}
+
+
+/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
+ * Only new->key and new->pfx need be set with the key and its prefix length.
+ * Note that bits between <pfx> and <len> are theoretically ignored and should be
+ * zero, as it is not certain yet that they will always be ignored everywhere
+ * (eg in bit compare functions).
+ * The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+	struct ebmb_node *old;
+	unsigned int side;
+	eb_troot_t *troot, **up_ptr;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	eb_troot_t *new_left, *new_rght;
+	eb_troot_t *new_leaf;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	len <<= 3;
+	if (len > new->node.pfx)
+		len = new->node.pfx;
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			/* Insert above a leaf. Note that this leaf could very
+			 * well be part of a cover node.
+			 */
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			new->node.node_p = old->node.leaf_p;
+			up_ptr = &old->node.leaf_p;
+			goto check_bit_and_break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				   struct ebmb_node, node.branches);
+		old_node_bit = old->node.bit;
+		/* Note that old_node_bit can be :
+		 *   < 0    : dup tree
+		 *   = 2N   : cover node for N bits
+		 *   = 2N+1 : normal node at N bits
+		 */
+
+		if (unlikely(old_node_bit < 0)) {
+			/* We're above a duplicate tree, so we must compare the whole value */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+		check_bit_and_break:
+			/* No need to compare everything if the leaves are shorter than the new one. */
+			if (len > old->node.pfx)
+				len = old->node.pfx;
+			bit = equal_bits(new->key, old->key, bit, len);
+			break;
+		}
+
+		/* WARNING: for the two blocks below, <bit> is counted in half-bits */
+
+		bit = equal_bits(new->key, old->key, bit, old_node_bit >> 1);
+		bit = (bit << 1) + 1; // assume comparisons with normal nodes
+
+		/* we must always check that our prefix is larger than the nodes
+		 * we visit, otherwise we have to stop going down. The following
+		 * test is able to stop before both normal and cover nodes.
+		 */
+		if (bit >= (new->node.pfx << 1) && (new->node.pfx << 1) < old_node_bit) {
+			/* insert cover node here on the left */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+			new->node.bit = new->node.pfx << 1;
+			diff = -1;
+			goto insert_above;
+		}
+
+		if (unlikely(bit < old_node_bit)) {
+			/* The tree did not contain the key, so we insert <new> before the
+			 * node <old>, and set ->bit to designate the lowest bit position in
+			 * <new> which applies to ->branches.b[]. We know that the bit is not
+			 * greater than the prefix length thanks to the test above.
+			 */
+			new->node.node_p = old->node.node_p;
+			up_ptr = &old->node.node_p;
+			new->node.bit = bit;
+			diff = cmp_bits(new->key, old->key, bit >> 1);
+			goto insert_above;
+		}
+
+		if (!(old_node_bit & 1)) {
+			/* if we encounter a cover node with our exact prefix length, it's
+			 * necessarily the same value, so we insert there as a duplicate on
+			 * the left. For that, we go down on the left and the leaf detection
+			 * code will finish the job.
+			 */
+			if ((new->node.pfx << 1) == old_node_bit) {
+				root = &old->node.branches;
+				side = EB_LEFT;
+				troot = root->b[side];
+				continue;
+			}
+
+			/* cover nodes are always walked through on the right */
+			side = EB_RGHT;
+			bit = old_node_bit >> 1; /* recheck that bit */
+			root = &old->node.branches;
+			troot = root->b[side];
+			continue;
+		}
+
+		/* we don't want to skip bits for further comparisons, so we must limit <bit>.
+		 * However, since we're going down around <old_node_bit>, we know it will be
+		 * properly matched, so we can skip this bit.
+		 */
+		old_node_bit >>= 1;
+		bit = old_node_bit + 1;
+
+		/* walk down */
+		root = &old->node.branches;
+		side = old_node_bit & 7;
+		side ^= 7;
+		side = (new->key[old_node_bit >> 3] >> side) & 1;
+		troot = root->b[side];
+	}
+
+	/* Right here, we have 4 possibilities :
+	 * - the tree does not contain any leaf matching the
+	 *   key, and we have new->key < old->key. We insert
+	 *   new above old, on the left ;
+	 *
+	 * - the tree does not contain any leaf matching the
+	 *   key, and we have new->key > old->key. We insert
+	 *   new above old, on the right ;
+	 *
+	 * - the tree does contain the key with the same prefix
+	 *   length. We add the new key next to it as a first
+	 *   duplicate (since it was alone).
+	 *
+	 * The last two cases can easily be partially merged.
+	 *
+	 * - the tree contains a leaf matching the key, we have
+	 *   to insert above it as a cover node. The leaf with
+	 *   the shortest prefix becomes the left subtree and
+	 *   the leaf with the longest prefix becomes the right
+	 *   one. The cover node gets the min of both prefixes
+	 *   as its new bit.
+	 */
+
+	/* first we want to ensure that we compare the correct bit, which means
+	 * the largest common to both nodes.
+	 */
+	if (bit > new->node.pfx)
+		bit = new->node.pfx;
+	if (bit > old->node.pfx)
+		bit = old->node.pfx;
+
+	new->node.bit = (bit << 1) + 1; /* assume normal node by default */
+
+	/* if one prefix is included in the second one, we don't compare bits
+	 * because they won't necessarily match, we just proceed with a cover
+	 * node insertion.
+	 */
+	diff = 0;
+	if (bit < old->node.pfx && bit < new->node.pfx)
+		diff = cmp_bits(new->key, old->key, bit);
+
+	if (diff == 0) {
+		/* Both keys match. Either it's a duplicate entry or we have to
+		 * put the shortest prefix left and the largest one right below
+		 * a new cover node. By default, diff==0 means we'll be inserted
+		 * on the right.
+		 */
+		new->node.bit--; /* anticipate cover node insertion */
+		if (new->node.pfx == old->node.pfx) {
+			new->node.bit = -1; /* mark as new dup tree, just in case */
+
+			if (unlikely(eb_gettag(root_right))) {
+				/* we refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				return old;
+			}
+
+			if (eb_gettag(troot) != EB_LEAF) {
+				/* there was already a dup tree below */
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct ebmb_node, node);
+			}
+			/* otherwise fall through to insert first duplicate */
+		}
+		/* otherwise we just rely on the tests below to select the right side */
+		else if (new->node.pfx < old->node.pfx)
+			diff = -1; /* force insertion to left side */
+	}
+
+ insert_above:
+	new_left = eb_dotag(&new->node.branches, EB_LEFT);
+	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+	if (diff >= 0) {
+		new->node.branches.b[EB_LEFT] = troot;
+		new->node.branches.b[EB_RGHT] = new_leaf;
+		new->node.leaf_p = new_rght;
+		*up_ptr = new_left;
+	}
+	else {
+		new->node.branches.b[EB_LEFT] = new_leaf;
+		new->node.branches.b[EB_RGHT] = troot;
+		new->node.leaf_p = new_left;
+		*up_ptr = new_rght;
+	}
+
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+
+
+#endif /* _EBMBTREE_H */
+
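
Similarly, a small sketch of how fixed-size binary keys are typically attached to the ebmb_node above, using the trailing key[0] member as storage. Illustrative only, not part of the diff; KEY_LEN and the function names are hypothetical, and the root is assumed to start as EBMB_ROOT.

#include <stdlib.h>
#include <string.h>
#include <import/ebmbtree.h>

#define KEY_LEN 16                 /* every key in this tree is 16 bytes */

static struct eb_root blob_tree = EBMB_ROOT;

/* allocate a node with room for the key right after it, copy the key in
 * and insert it; returns the inserted node, or the already present one if
 * the tree only accepts unique keys
 */
static struct ebmb_node *blob_add(const unsigned char *key)
{
	struct ebmb_node *n = malloc(sizeof(*n) + KEY_LEN);

	if (!n)
		return NULL;
	memcpy(n->key, key, KEY_LEN);
	return ebmb_insert(&blob_tree, n, KEY_LEN);
}

static struct ebmb_node *blob_find(const unsigned char *key)
{
	return ebmb_lookup(&blob_tree, key, KEY_LEN);
}
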
diff --git a/include/import/ebpttree.h b/include/import/ebpttree.h
new file mode 100644
index 0000000..6cd6659
--- /dev/null
+++ b/include/import/ebpttree.h
@@ -0,0 +1,180 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on pointer nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBPTTREE_H
+#define _EBPTTREE_H
+
+#include "ebtree.h"
+#include "eb32tree.h"
+#include "eb64tree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebpt_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define EBPT_ROOT	EB_ROOT
+#define EBPT_TREE_HEAD	EB_TREE_HEAD
+
+/* on *almost* all platforms, a pointer can be cast into a size_t which is unsigned */
+#ifndef PTR_INT_TYPE
+#define PTR_INT_TYPE	size_t
+#endif
+
+typedef PTR_INT_TYPE ptr_t;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * Internally, it is automatically cast as an eb32_node or eb64_node.
+ * We always align the key since the struct itself will be padded to the same
+ * size anyway.
+ */
+struct ebpt_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	ALWAYS_ALIGN(sizeof(void*));
+	void *key;
+} ALIGNED(sizeof(void*));
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_first(struct eb_root *root)
+{
+	return ebpt_entry(eb_first(root), struct ebpt_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_last(struct eb_root *root)
+{
+	return ebpt_entry(eb_last(root), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_next(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_prev(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_next_dup(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_next_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_prev_dup(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_prev_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next_unique(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_next_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev_unique(struct ebpt_node *ebpt)
+{
+	return ebpt_entry(eb_prev_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebpt_delete(struct ebpt_node *ebpt)
+{
+	eb_delete(&ebpt->node);
+}
+
+/*
+ * The following functions are inlined but derived from the integer versions.
+ */
+static forceinline struct ebpt_node *ebpt_lookup(struct eb_root *root, void *x)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+	else
+		return (struct ebpt_node *)eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_lookup_le(struct eb_root *root, void *x)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)eb32_lookup_le(root, (u32)(PTR_INT_TYPE)x);
+	else
+		return (struct ebpt_node *)eb64_lookup_le(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_lookup_ge(struct eb_root *root, void *x)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)eb32_lookup_ge(root, (u32)(PTR_INT_TYPE)x);
+	else
+		return (struct ebpt_node *)eb64_lookup_ge(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)eb32_insert(root, (struct eb32_node *)new);
+	else
+		return (struct ebpt_node *)eb64_insert(root, (struct eb64_node *)new);
+}
+
+/*
+ * The following functions are less likely to be used directly, because
+ * their code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __ebpt_delete(struct ebpt_node *ebpt)
+{
+	__eb_delete(&ebpt->node);
+}
+
+static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)__eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+	else
+		return (struct ebpt_node *)__eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *__ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+	if (sizeof(void *) == 4)
+		return (struct ebpt_node *)__eb32_insert(root, (struct eb32_node *)new);
+	else
+		return (struct ebpt_node *)__eb64_insert(root, (struct eb64_node *)new);
+}
+
+#endif /* _EBPTTREE_H */
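
A sketch of the pointer-keyed API above, where the pointer value itself is the key and ebpt_insert()/ebpt_lookup() dispatch to the 32-bit or 64-bit integer trees depending on the platform's pointer size. Illustrative only, not part of the diff; the struct name is hypothetical and the root is assumed to start as EBPT_ROOT.

#include <import/ebpttree.h>

/* hypothetical object indexed by its own address */
struct tracked_obj {
	struct ebpt_node by_addr;
	/* ... payload ... */
};

static struct eb_root obj_tree = EBPT_ROOT;

static void obj_track(struct tracked_obj *obj)
{
	obj->by_addr.key = obj;              /* the pointer is the key */
	ebpt_insert(&obj_tree, &obj->by_addr);
}

static struct tracked_obj *obj_find(void *addr)
{
	struct ebpt_node *n = ebpt_lookup(&obj_tree, addr);

	return n ? ebpt_entry(n, struct tracked_obj, by_addr) : NULL;
}
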
diff --git a/include/import/ebsttree.h b/include/import/ebsttree.h
new file mode 100644
index 0000000..db2267b
--- /dev/null
+++ b/include/import/ebsttree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros to manipulate String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBSTTREE_H
+#define _EBSTTREE_H
+
+#include "ebtree.h"
+#include "ebmbtree.h"
+
+/* The following functions are not inlined by default. They are declared
+ * in ebsttree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebst_lookup(struct eb_root *root, const char *x);
+struct ebmb_node *ebst_insert(struct eb_root *root, struct ebmb_node *new);
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and to ensure that no null character is present
+ * in string <x> in the first <len> chars. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *
+ebst_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+	struct ebmb_node *node;
+
+	node = ebmb_lookup(root, x, len);
+	if (!node || node->key[len] != 0)
+		return NULL;
+	return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebst_lookup(struct eb_root *root, const void *x)
+{
+	struct ebmb_node *node;
+	eb_troot_t *troot;
+	int bit;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	bit = 0;
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			if (strcmp((char *)node->key, x) == 0)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+		node_bit = node->node.bit;
+
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (strcmp((char *)node->key, x) != 0)
+				return NULL;
+
+			troot = node->node.branches.b[EB_LEFT];
+			while (eb_gettag(troot) != EB_LEAF)
+				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			return node;
+		}
+
+		/* OK, normal data node, let's walk down but don't compare data
+		 * if we already reached the end of the key.
+		 */
+		if (likely(bit >= 0)) {
+			bit = string_equal_bits(x, node->key, bit);
+			if (likely(bit < node_bit)) {
+				if (bit >= 0)
+					return NULL; /* no more common bits */
+
+				/* bit < 0 : we reached the end of the key. If we
+				 * are in a tree with unique keys, we can return
+				 * this node. Otherwise we have to walk it down
+				 * and stop comparing bits.
+				 */
+				if (eb_gettag(root->b[EB_RGHT]))
+					return node;
+			}
+			/* if the bit is larger than the node's, we must bound it
+			 * because we might have compared too many bytes with an
+			 * inappropriate leaf. For a test, build a tree from "0",
+			 * "WW", "W", "S" inserted in this exact sequence and lookup
+			 * "W" => "S" is returned without this assignment.
+			 */
+			else
+				bit = node_bit;
+		}
+
+		troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+					       (~node_bit & 7)) & 1];
+	}
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebmb_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebmb_node *
+__ebst_insert(struct eb_root *root, struct ebmb_node *new)
+{
+	struct ebmb_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			 * - the tree does not contain the key, and we have
+			 *   new->key < old->key. We insert new above old, on
+			 *   the left ;
+			 *
+			 * - the tree does not contain the key, and we have
+			 *   new->key > old->key. We insert new above old, on
+			 *   the right ;
+			 *
+			 * - the tree does contain the key, which implies it
+			 *   is alone. We add the new key next to it as a
+			 *   first duplicate.
+			 *
+			 * The last two cases can easily be partially merged.
+			 */
+			if (bit >= 0)
+				bit = string_equal_bits(new->key, old->key, bit);
+
+			if (bit < 0) {
+				/* key was already there */
+
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if (eb_gettag(root_right))
+					return old;
+
+				/* new arbitrarily goes to the right and tops the dup tree */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+				new->node.bit = -1;
+				root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+				return new;
+			}
+
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				/* new->key < old->key, new takes the left */
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* new->key > old->key, new takes the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				   struct ebmb_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above. Note: we can compare more bits than
+		 * the current node's because as long as they are identical, we
+		 * know we descend along the correct side.
+		 */
+		if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+			bit = string_equal_bits(new->key, old->key, bit);
+
+		if (unlikely(bit < 0)) {
+			/* Perfect match, we must only stop on head of dup tree
+			 * or walk down to a leaf.
+			 */
+			if (old_node_bit < 0) {
+				/* We know here that string_equal_bits matched all
+				 * bits and that we're on top of a dup tree, then
+				 * we can perform the dup insertion and return.
+				 */
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct ebmb_node, node);
+			}
+			/* OK so let's walk down */
+		}
+		else if (bit < old_node_bit || old_node_bit < 0) {
+			/* The tree did not contain the key, or we stopped on top of a dup
+			 * tree, possibly containing the key. In the former case, we insert
+			 * <new> before the node <old>, and set ->bit to designate the lowest
+			 * bit position in <new> which applies to ->branches.b[]. In the latter
+			 * case, we add the key to the existing dup tree. Note that we cannot
+			 * enter here if we match an intermediate node's key that is not the
+			 * head of a dup tree.
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			/* we can never match all bits here */
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (new->key[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * This number of bits is already in <bit>.
+	 * NOTE: we can't get here with bit < 0 since we would have found a dup!
+	 */
+	new->node.bit = bit;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+#endif /* _EBSTTREE_H */
+
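
Finally, a sketch of the string tree above. Unlike ebistree, which keeps an indirect pointer to an external string, ebst stores the zero-terminated string right behind the ebmb_node. Illustrative only, not part of the diff; the function names are hypothetical and the root is assumed to be initialized to EB_ROOT.

#include <stdlib.h>
#include <string.h>
#include <import/ebsttree.h>

static struct eb_root word_tree = EB_ROOT;

/* allocate a node large enough to hold the string including its trailing
 * zero, copy it in place and insert it
 */
static struct ebmb_node *word_add(const char *word)
{
	size_t len = strlen(word) + 1;
	struct ebmb_node *n = malloc(sizeof(*n) + len);

	if (!n)
		return NULL;
	memcpy(n->key, word, len);
	return ebst_insert(&word_tree, n);
}

static struct ebmb_node *word_find(const char *word)
{
	return ebst_lookup(&word_tree, word);
}
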
diff --git a/include/import/ebtree.h b/include/import/ebtree.h
new file mode 100644
index 0000000..9e5daca
--- /dev/null
+++ b/include/import/ebtree.h
@@ -0,0 +1,919 @@
+/*
+ * Elastic Binary Trees - generic macros and structures.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+/*
+  General idea:
+  -------------
+  In a radix binary tree, we may have up to 2N-1 nodes for N keys if all of
+  them are leaves. If we find a way to differentiate intermediate nodes (later
+  called "nodes") and final nodes (later called "leaves"), and we associate
+  them by two, it is possible to build sort of a self-contained radix tree with
+  intermediate nodes always present. It will not be as cheap as the ultree for
+  optimal cases as shown below, but the optimal case almost never happens :
+
+  Eg, to store 8, 10, 12, 13, 14 :
+
+             ultree          this theoretical tree
+
+               8                   8
+              / \                 / \
+             10 12               10 12
+               /  \                /  \
+              13  14              12  14
+                                 / \
+                                12 13
+
+   Note that on real-world tests (with a scheduler), it was verified that the
+   case with data on an intermediate node never happens. This is because the
+   data spectrum is too large for such coincidences to happen. It would require
+   for instance that a task has its expiration time at an exact second, with
+   other tasks sharing that second. This is too rare to try to optimize for it.
+
+   What is interesting is that the node will only be added above the leaf when
+   necessary, which implies that it will always remain somewhere above it. So
+   both the leaf and the node can share the exact value of the leaf, because
+   when going down the node, the bit mask will be applied to comparisons. So we
+   are tempted to have one single key shared between the node and the leaf.
+
+   The bit only serves the nodes, and the dups only serve the leaves. So we can
+   put a lot of information in common. This results in one single entity with
+   two branch pointers and two parent pointers, one for the node part, and one
+   for the leaf part :
+
+              node's         leaf's
+              parent         parent
+                |              |
+              [node]         [leaf]
+               / \
+           left   right
+         branch   branch
+
+   The node may very well refer to its leaf counterpart in one of its branches,
+   indicating that its own leaf is just below it :
+
+              node's
+              parent
+                |
+              [node]
+               / \
+           left  [leaf]
+         branch
+
+   Adding keys in such a tree simply consists in inserting nodes between
+   other nodes and/or leaves :
+
+                [root]
+                  |
+               [node2]
+                 / \
+          [leaf1]   [node3]
+                      / \
+               [leaf2]   [leaf3]
+
+   On this diagram, we notice that [node2] and [leaf2] have been pulled away
+   from each other due to the insertion of [node3], just as if an elastic were
+   stretched between both parts. This elastic-like behaviour gave its name to
+   the tree : "Elastic Binary Tree", or "EBtree". The entity which associates a
+   node part and a leaf part will be called an "EB node".
+
+   We also notice on the diagram that there is a root entity required to attach
+   the tree. It only contains two branches and there is nothing above it. This
+   is an "EB root". Some will note that [leaf1] has no [node1]. One property of
+   the EBtree is that all nodes have their branches filled, and that if a node
+   has only one branch, it does not need to exist. Here, [leaf1] was added
+   below [root] and did not need any node.
+
+   An EB node contains :
+     - a pointer to the node's parent (node_p)
+     - a pointer to the leaf's parent (leaf_p)
+     - two branches pointing to lower nodes or leaves (branches)
+     - a bit position (bit)
+     - an optional key.
+
+   The key here is optional because it's used only during insertion, in order
+   to classify the nodes. Nothing else in the tree structure requires knowledge
+   of the key. This makes it possible to write type-agnostic primitives for
+   everything, and type-specific insertion primitives. This has led to consider
+   two types of EB nodes. The type-agnostic ones will serve as a header for the
+   other ones, and will simply be called "struct eb_node". The other ones will
+   have their type indicated in the structure name. Eg: "struct eb32_node" for
+   nodes carrying 32 bit keys.
+
+   We will also note that the two branches in a node serve exactly the same
+   purpose as an EB root. For this reason, a "struct eb_root" will be used as
+   well inside the struct eb_node. In order to ease pointer manipulation and
+   ROOT detection when walking upwards, all the pointers inside an eb_node will
+   point to the eb_root part of the referenced EB nodes, relying on the same
+   principle as the linked lists in Linux.
+
+   Another important point to note, is that when walking inside a tree, it is
+   very convenient to know where a node is attached in its parent, and what
+   type of branch it has below it (leaf or node). In order to simplify the
+   operations and to speed up the processing, it was decided in this specific
+   implementation to use the lowest bit from the pointer to designate the side
+   of the upper pointers (left/right) and the type of a branch (leaf/node).
+   This practice is not mandatory by design, but an implementation-specific
+   optimisation permitted on all platforms on which data must be aligned. All
+   known 32 bit platforms align their integers and pointers to 32 bits, leaving
+   the two lower bits unused. So, we say that the pointers are "tagged". And
+   since they designate pointers to root parts, we simply call them
+   "tagged root pointers", or "eb_troot" in the code.
+
+   Duplicate keys are stored in a special manner. When inserting a key, if
+   the same one is found, then an incremental binary tree is built at this
+   place from these keys. This ensures that no special case has to be written
+   to handle duplicates when walking through the tree or when deleting entries.
+   It also guarantees that duplicates will be walked in the exact same order
+   they were inserted. This is very important when trying to achieve fair
+   processing distribution for instance.
+
+   Algorithmic complexity can be derived from 3 variables :
+     - the number of possible different keys in the tree : P
+     - the number of entries in the tree : N
+     - the number of duplicates for one key : D
+
+   Note that this tree is deliberately NOT balanced. For this reason, the worst
+   case may happen with a small tree (eg: 32 distinct keys of one bit). BUT,
+   the operations required to manage such data are so cheap that they make
+   it worth using even under such conditions. For instance, a balanced tree
+   may require only 6 levels to store those 32 keys while this tree will
+   require 32. But if per-level operations are 5 times cheaper, it wins.
+
+   Minimal, Maximal and Average times are specified in number of operations.
+   Minimal is given for best condition, Maximal for worst condition, and the
+   average is reported for a tree containing random keys. An operation
+   generally consists in jumping from one node to the other.
+
+   Complexity :
+     - lookup              : min=1, max=log(P), avg=log(N)
+     - insertion from root : min=1, max=log(P), avg=log(N)
+     - insertion of dups   : min=1, max=log(D), avg=log(D)/2 after lookup
+     - deletion            : min=1, max=1,      avg=1
+     - prev/next           : min=1, max=log(P), avg=2 :
+       N/2 nodes need 1 hop  => 1*N/2
+       N/4 nodes need 2 hops => 2*N/4
+       N/8 nodes need 3 hops => 3*N/8
+       ...
+       N/2^k nodes need k hops => k*N/2^k
+       Total cost for all N nodes : sum[k=1..log2(N)](k*N/2^k) < 2*N
+       Average cost across N nodes = total / N < 2, hence the avg=2 above
+
+   This design is currently limited to only two branches per node. Most of the
+   tree descent algorithm would be compatible with more branches (eg: 4, to cut
+   the height in half), but this would probably require more complex operations
+   and the deletion algorithm would be problematic.
+
+   Useful properties :
+     - a node is always added above the leaf it is tied to, and can never get
+       below it nor into another branch. This implies that leaves directly attached
+       to the root do not use their node part, which is indicated by a NULL
+       value in node_p. This also enhances the cache efficiency when walking
+       down the tree, because when the leaf is reached, its node part will
+       already have been visited (unless it's the first leaf in the tree).
+
+     - pointers to lower nodes or leaves are stored in "branch" pointers. Only
+       the root node may have a NULL in either branch; this is not possible for
+       other branches. Since the nodes are attached to the left branch of the
+       root, it is not possible to see a NULL left branch when walking up a
+       tree. Thus, an empty tree is immediately identified by a NULL left
+       branch at the root. Conversely, the one and only way to identify the
+       root node is to check that its right branch is NULL. Note that the
+       NULL pointer may have a few low-order bits set.
+
+     - a node connected to its own leaf will have branch[0|1] pointing to
+       itself, and leaf_p pointing to itself.
+
+     - a node can never have node_p pointing to itself.
+
+     - a node is linked in a tree if and only if it has a non-null leaf_p.
+
+     - a node can never have both branches equal, except for the root which can
+       have them both NULL.
+
+     - deletion only applies to leaves. When a leaf is deleted, its parent must
+       be released too (unless it's the root), and its sibling must attach to
+       the grand-parent, replacing the parent. Also, when a leaf is deleted,
+       the node tied to this leaf will be removed and must be released too. If
+       this node is different from the leaf's parent, the freshly released
+       leaf's parent will be used to replace the node which must go. A released
+       node will never be used anymore, so there's no point in tracking it.
+
+     - the bit index in a node indicates the bit position in the key which is
+       represented by the branches. That means that a node with (bit == 0) is
+       just above two leaves. Negative bit values are used to build a duplicate
+       tree. The first node above two identical leaves gets (bit == -1). This
+       value logarithmically decreases as the duplicate tree grows. During
+       duplicate insertion, a node is inserted above the highest bit value (the
+       lowest absolute value) in the tree during the right-sided walk. If bit
+       -1 is not encountered (highest < -1), we insert above the last leaf.
+       Otherwise, we insert above the node with the highest value which was not
+       equal to the one of its parent + 1.
+
+     - the "eb_next" primitive walks from left to right, which means from lower
+       to higher keys. It returns duplicates in the order they were inserted.
+       The "eb_first" primitive returns the left-most entry.
+
+     - the "eb_prev" primitive walks from right to left, which means from
+       higher to lower keys. It returns duplicates in the opposite order they
+       were inserted. The "eb_last" primitive returns the right-most entry.
+
+     - a tree which has 1 in the lower bit of its root's right branch is a
+       tree with unique nodes. This means that a node inserted with a key
+       which already exists will not be inserted, and the previous entry
+       will be returned instead.
+
+ */
+
+#ifndef _EBTREE_H
+#define _EBTREE_H
+
+#include <stdlib.h>
+#include <common/compiler.h>
+
+static inline int flsnz8_generic(unsigned int x)
+{
+	int ret = 0;
+	if (x >> 4) { x >>= 4; ret += 4; }
+	return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
+}
+
+/* Note: we never need to run fls on null keys, so we can optimize the fls
+ * function by removing a conditional jump.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+/* this code is similar on 32 and 64 bit */
+static inline int flsnz(int x)
+{
+	int r;
+	__asm__("bsrl %1,%0\n"
+	        : "=r" (r) : "rm" (x));
+	return r+1;
+}
+
+static inline int flsnz8(unsigned char x)
+{
+	int r;
+	__asm__("movzbl %%al, %%eax\n"
+		"bsrl %%eax,%0\n"
+	        : "=r" (r) : "a" (x));
+	return r+1;
+}
+
+#else
+// returns 1 to 32 for 1<<0 to 1<<31. Undefined for 0.
+#define flsnz(___a) ({ \
+	register int ___x, ___bits = 0; \
+	___x = (___a); \
+	if (___x & 0xffff0000) { ___x &= 0xffff0000; ___bits += 16;} \
+	if (___x & 0xff00ff00) { ___x &= 0xff00ff00; ___bits +=  8;} \
+	if (___x & 0xf0f0f0f0) { ___x &= 0xf0f0f0f0; ___bits +=  4;} \
+	if (___x & 0xcccccccc) { ___x &= 0xcccccccc; ___bits +=  2;} \
+	if (___x & 0xaaaaaaaa) { ___x &= 0xaaaaaaaa; ___bits +=  1;} \
+	___bits + 1; \
+	})
+
+static inline int flsnz8(unsigned int x)
+{
+	return flsnz8_generic(x);
+}
+
+
+#endif
+
+static inline int fls64(unsigned long long x)
+{
+	unsigned int h;
+	unsigned int bits = 32;
+
+	h = x >> 32;
+	if (!h) {
+		h = x;
+		bits = 0;
+	}
+	return flsnz(h) + bits;
+}
+
+#define fls_auto(x) ((sizeof(x) > 4) ? fls64(x) : flsnz(x))
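+
+/* Illustrative examples only (not part of the imported code): flsnz() returns
+ * the 1-based position of the highest set bit, so flsnz(0x10) == 5 and
+ * flsnz(0x80000000) == 32. fls64() extends this to 64-bit values, e.g.
+ * fls64(1ULL << 40) == 41. The result is undefined for a zero argument.
+ */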
+
+/* Linux-like "container_of". It returns a pointer to the structure of type
+ * <type> which has its member <name> stored at address <ptr>.
+ */
+#ifndef container_of
+#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
+#endif
+
+/* returns a pointer to the structure of type <type> which has its member <name>
+ * stored at address <ptr>, unless <ptr> is 0, in which case 0 is returned.
+ */
+#ifndef container_of_safe
+#define container_of_safe(ptr, type, name) \
+	({ void *__p = (ptr); \
+		__p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
+	})
+#endif
+
+/* Number of bits per node, and number of leaves per node */
+#define EB_NODE_BITS          1
+#define EB_NODE_BRANCHES      (1 << EB_NODE_BITS)
+#define EB_NODE_BRANCH_MASK   (EB_NODE_BRANCHES - 1)
+
+/* Be careful not to tweak those values. The walking code is optimized for NULL
+ * detection on the assumption that the following values are intact.
+ */
+#define EB_LEFT     0
+#define EB_RGHT     1
+#define EB_LEAF     0
+#define EB_NODE     1
+
+/* Tags to set in root->b[EB_RGHT] :
+ * - EB_NORMAL is a normal tree which stores duplicate keys.
+ * - EB_UNIQUE is a tree which stores unique keys.
+ */
+#define EB_NORMAL   0
+#define EB_UNIQUE   1
+
+/* This is the same as an eb_node pointer, except that the lower bit embeds
+ * a tag. See eb_dotag()/eb_untag()/eb_gettag(). This tag has two meanings :
+ *  - 0=left, 1=right to designate the parent's branch for leaf_p/node_p
+ *  - 0=link, 1=leaf  to designate the branch's type for branch[]
+ */
+typedef void eb_troot_t;
+
+/* The eb_root connects the node which contains it, to two nodes below it, one
+ * of which may be the same node. At the top of the tree, we use an eb_root
+ * too, which always has its right branch NULL (aside from the low-order tag bit).
+ */
+struct eb_root {
+	eb_troot_t    *b[EB_NODE_BRANCHES]; /* left and right branches */
+};
+
+/* The eb_node contains the two parts, one for the leaf, which always exists,
+ * and one for the node, which remains unused in the very first node inserted
+ * into the tree. This structure is 20 bytes per node on 32-bit machines. Do
+ * not change the order, benchmarks have shown that it's optimal this way.
+ * Note: be careful about this struct's alignment if it gets included into
+ * another struct and some atomic ops are expected on the keys or the node.
+ */
+struct eb_node {
+	struct eb_root branches; /* branches, must be at the beginning */
+	eb_troot_t    *node_p;  /* link node's parent */
+	eb_troot_t    *leaf_p;  /* leaf node's parent */
+	short int      bit;     /* link's bit position. */
+	short unsigned int pfx; /* data prefix length, always related to leaf */
+} __attribute__((packed));
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb_entry(ptr, type, member) container_of(ptr, type, member)
+
+/* The root of a tree is an eb_root initialized with both pointers NULL.
+ * During its life, only the left pointer will change. The right one will
+ * always remain NULL, which is the way we detect it.
+ */
+#define EB_ROOT						\
+	(struct eb_root) {				\
+		.b = {[0] = NULL, [1] = NULL },		\
+	}
+
+#define EB_ROOT_UNIQUE					\
+	(struct eb_root) {				\
+		.b = {[0] = NULL, [1] = (void *)1 },	\
+	}
+
+#define EB_TREE_HEAD(name)				\
+	struct eb_root name = EB_ROOT
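+
+/* Usage sketch (illustrative only, the names below are made up for the
+ * example and are not part of the imported code):
+ *
+ *     EB_TREE_HEAD(my_tree);        // empty tree declared at file scope
+ *
+ *     struct my_index {
+ *         struct eb_root root;      // reset to EB_ROOT (or EB_ROOT_UNIQUE
+ *     };                            // for unique keys) before first use
+ */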
+
+
+/***************************************\
+ * Private functions. Not for end-user *
+\***************************************/
+
+/* Converts a root pointer to its equivalent eb_troot_t pointer,
+ * ready to be stored in ->branch[], leaf_p or node_p. NULL is not
+ * conserved. To be used with EB_LEAF, EB_NODE, EB_LEFT or EB_RGHT in <tag>.
+ */
+static inline eb_troot_t *eb_dotag(const struct eb_root *root, const int tag)
+{
+	return (eb_troot_t *)((void *)root + tag);
+}
+
+/* Converts an eb_troot_t pointer to its equivalent eb_root pointer,
+ * for use with pointers from ->branch[], leaf_p or node_p. NULL is conserved
+ * as long as the tree is not corrupted. To be used with EB_LEAF, EB_NODE,
+ * EB_LEFT or EB_RGHT in <tag>.
+ */
+static inline struct eb_root *eb_untag(const eb_troot_t *troot, const int tag)
+{
+	return (struct eb_root *)((void *)troot - tag);
+}
+
+/* returns the tag associated with an eb_troot_t pointer */
+static inline int eb_gettag(eb_troot_t *troot)
+{
+	return (unsigned long)troot & 1;
+}
+
+/* Converts an eb_troot_t pointer to its equivalent eb_root pointer and clears
+ * the tag, no matter what its value was.
+ */
+static inline struct eb_root *eb_clrtag(const eb_troot_t *troot)
+{
+	return (struct eb_root *)((unsigned long)troot & ~1UL);
+}
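+
+/* Illustrative note (not part of the imported code): as long as the same tag
+ * is used, eb_dotag() and eb_untag() are exact inverses, and eb_gettag()
+ * recovers the tag from bit 0, e.g. for any root pointer <r> and tag <t>
+ * in {EB_LEFT, EB_RGHT} or {EB_LEAF, EB_NODE} :
+ *
+ *     eb_troot_t *tr = eb_dotag(r, t);
+ *     eb_gettag(tr) == t;
+ *     eb_untag(tr, t) == r;
+ */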
+
+/* Returns a pointer to the eb_node holding <root> */
+static inline struct eb_node *eb_root_to_node(struct eb_root *root)
+{
+	return container_of(root, struct eb_node, branches);
+}
+
+/* Walks down starting at root pointer <start>, and always walking on side
+ * <side>. It either returns the node hosting the first leaf on that side,
+ * or NULL if no leaf is found. <start> may either be NULL or a branch pointer.
+ * The pointer to the leaf (or NULL) is returned.
+ */
+static inline struct eb_node *eb_walk_down(eb_troot_t *start, unsigned int side)
+{
+	/* A NULL pointer on an empty tree root will be returned as-is */
+	while (eb_gettag(start) == EB_NODE)
+		start = (eb_untag(start, EB_NODE))->b[side];
+	/* NULL is left untouched (root==eb_node, EB_LEAF==0) */
+	return eb_root_to_node(eb_untag(start, EB_LEAF));
+}
+
+/* This function is used to build a tree of duplicates by adding a new node to
+ * a subtree of at least 2 entries. It will probably never need to be inlined,
+ * and it is not for the end-user.
+ */
+static forceinline struct eb_node *
+__eb_insert_dup(struct eb_node *sub, struct eb_node *new)
+{
+	struct eb_node *head = sub;
+
+	eb_troot_t *new_left = eb_dotag(&new->branches, EB_LEFT);
+	eb_troot_t *new_rght = eb_dotag(&new->branches, EB_RGHT);
+	eb_troot_t *new_leaf = eb_dotag(&new->branches, EB_LEAF);
+
+	/* first, identify the deepest hole on the right branch */
+	while (eb_gettag(head->branches.b[EB_RGHT]) != EB_LEAF) {
+		struct eb_node *last = head;
+		head = container_of(eb_untag(head->branches.b[EB_RGHT], EB_NODE),
+				    struct eb_node, branches);
+		if (head->bit > last->bit + 1)
+			sub = head;     /* there's a hole here */
+	}
+
+	/* Here we have a leaf attached to (head)->b[EB_RGHT] */
+	if (head->bit < -1) {
+		/* A hole exists just before the leaf, we insert there */
+		new->bit = -1;
+		sub = container_of(eb_untag(head->branches.b[EB_RGHT], EB_LEAF),
+				   struct eb_node, branches);
+		head->branches.b[EB_RGHT] = eb_dotag(&new->branches, EB_NODE);
+
+		new->node_p = sub->leaf_p;
+		new->leaf_p = new_rght;
+		sub->leaf_p = new_left;
+		new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_LEAF);
+		new->branches.b[EB_RGHT] = new_leaf;
+		return new;
+	} else {
+		int side;
+		/* No hole was found before a leaf. We have to insert above
+		 * <sub>. Note that we cannot be certain that <sub> is attached
+		 * to the right of its parent, as this is only true if <sub>
+		 * is inside the dup tree, not at the head.
+		 */
+		new->bit = sub->bit - 1; /* install at the lowest level */
+		side = eb_gettag(sub->node_p);
+		head = container_of(eb_untag(sub->node_p, side), struct eb_node, branches);
+		head->branches.b[side] = eb_dotag(&new->branches, EB_NODE);
+
+		new->node_p = sub->node_p;
+		new->leaf_p = new_rght;
+		sub->node_p = new_left;
+		new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_NODE);
+		new->branches.b[EB_RGHT] = new_leaf;
+		return new;
+	}
+}
+
+
+/**************************************\
+ * Public functions, for the end-user *
+\**************************************/
+
+/* Return non-zero if the tree is empty, otherwise zero */
+static inline int eb_is_empty(const struct eb_root *root)
+{
+	return !root->b[EB_LEFT];
+}
+
+/* Return non-zero if the node is a duplicate, otherwise zero */
+static inline int eb_is_dup(const struct eb_node *node)
+{
+	return node->bit < 0;
+}
+
+/* Return the first leaf in the tree starting at <root>, or NULL if none */
+static inline struct eb_node *eb_first(struct eb_root *root)
+{
+	return eb_walk_down(root->b[0], EB_LEFT);
+}
+
+/* Return the last leaf in the tree starting at <root>, or NULL if none */
+static inline struct eb_node *eb_last(struct eb_root *root)
+{
+	return eb_walk_down(root->b[0], EB_RGHT);
+}
+
+/* Return previous leaf node before an existing leaf node, or NULL if none. */
+static inline struct eb_node *eb_prev(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (eb_gettag(t) == EB_LEFT) {
+		/* Walking up from left branch. We must ensure that we never
+		 * walk beyond root.
+		 */
+		if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+			return NULL;
+		t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+	}
+	/* Note that <t> cannot be NULL at this stage */
+	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+	return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node after an existing leaf node, or NULL if none. */
+static inline struct eb_node *eb_next(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (eb_gettag(t) != EB_LEFT)
+		/* Walking up from right branch, so we cannot be below root */
+		t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+
+	/* Note that <t> cannot be NULL at this stage */
+	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+	if (eb_clrtag(t) == NULL)
+		return NULL;
+	return eb_walk_down(t, EB_LEFT);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb_node *eb_prev_dup(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (eb_gettag(t) == EB_LEFT) {
+		/* Walking up from left branch. We must ensure that we never
+		 * walk beyond root.
+		 */
+		if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+			return NULL;
+		/* if the current node leaves a dup tree, quit */
+		if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
+			return NULL;
+		t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+	}
+	/* Note that <t> cannot be NULL at this stage */
+	if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
+		return NULL;
+	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+	return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb_node *eb_next_dup(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (eb_gettag(t) != EB_LEFT) {
+		/* Walking up from right branch, so we cannot be below root */
+		/* if the current node leaves a dup tree, quit */
+		if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
+			return NULL;
+		t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+	}
+
+	/* Note that <t> cannot be NULL at this stage */
+	if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
+		return NULL;
+	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+	if (eb_clrtag(t) == NULL)
+		return NULL;
+	return eb_walk_down(t, EB_LEFT);
+}
+
+/* Return previous leaf node before an existing leaf node, skipping duplicates,
+ * or NULL if none. */
+static inline struct eb_node *eb_prev_unique(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (1) {
+		if (eb_gettag(t) != EB_LEFT) {
+			node = eb_root_to_node(eb_untag(t, EB_RGHT));
+			/* if we're right and not in duplicates, stop here */
+			if (node->bit >= 0)
+				break;
+			t = node->node_p;
+		}
+		else {
+			/* Walking up from left branch. We must ensure that we never
+			 * walk beyond root.
+			 */
+			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+				return NULL;
+			t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+		}
+	}
+	/* Note that <t> cannot be NULL at this stage */
+	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+	return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node after an existing leaf node, skipping duplicates, or
+ * NULL if none.
+ */
+static inline struct eb_node *eb_next_unique(struct eb_node *node)
+{
+	eb_troot_t *t = node->leaf_p;
+
+	while (1) {
+		if (eb_gettag(t) == EB_LEFT) {
+			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+				return NULL;	/* we reached root */
+			node = eb_root_to_node(eb_untag(t, EB_LEFT));
+			/* if we're left and not in duplicates, stop here */
+			if (node->bit >= 0)
+				break;
+			t = node->node_p;
+		}
+		else {
+			/* Walking up from right branch, so we cannot be below root */
+			t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+		}
+	}
+
+	/* Note that <t> cannot be NULL at this stage */
+	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+	if (eb_clrtag(t) == NULL)
+		return NULL;
+	return eb_walk_down(t, EB_LEFT);
+}
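+
+/* Illustrative sketch only (not part of the imported API): the usual way to
+ * visit all leaves in ascending key order is to chain eb_first() and
+ * eb_next(), e.g. to count the entries currently held in a tree :
+ */
+static inline unsigned int eb_example_count_entries(struct eb_root *root)
+{
+	struct eb_node *node;
+	unsigned int count = 0;
+
+	for (node = eb_first(root); node; node = eb_next(node))
+		count++;
+	return count;
+}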
+
+
+/* Removes a leaf node from the tree if it was still in it. Marks the node
+ * as unlinked.
+ */
+static forceinline void __eb_delete(struct eb_node *node)
+{
+	__label__ delete_unlink;
+	unsigned int pside, gpside, sibtype;
+	struct eb_node *parent;
+	struct eb_root *gparent;
+
+	if (!node->leaf_p)
+		return;
+
+	/* we need the parent, our side, and the grand parent */
+	pside = eb_gettag(node->leaf_p);
+	parent = eb_root_to_node(eb_untag(node->leaf_p, pside));
+
+	/* We likely have to release the parent link, unless it's the root,
+	 * in which case we only set our branch to NULL. Note that we can
+	 * only be attached to the root by its left branch.
+	 */
+
+	if (eb_clrtag(parent->branches.b[EB_RGHT]) == NULL) {
+		/* we're just below the root, it's trivial. */
+		parent->branches.b[EB_LEFT] = NULL;
+		goto delete_unlink;
+	}
+
+	/* To release our parent, we have to identify our sibling, and reparent
+	 * it directly to/from the grand parent. Note that the sibling can
+	 * either be a link or a leaf.
+	 */
+
+	gpside = eb_gettag(parent->node_p);
+	gparent = eb_untag(parent->node_p, gpside);
+
+	gparent->b[gpside] = parent->branches.b[!pside];
+	sibtype = eb_gettag(gparent->b[gpside]);
+
+	if (sibtype == EB_LEAF) {
+		eb_root_to_node(eb_untag(gparent->b[gpside], EB_LEAF))->leaf_p =
+			eb_dotag(gparent, gpside);
+	} else {
+		eb_root_to_node(eb_untag(gparent->b[gpside], EB_NODE))->node_p =
+			eb_dotag(gparent, gpside);
+	}
+	/* Mark the parent unused. Note that we do not check if the parent is
+	 * our own node, but that's not a problem because if it is, it will be
+	 * marked unused at the same time, which we'll use below to know we can
+	 * safely remove it.
+	 */
+	parent->node_p = NULL;
+
+	/* The parent node has been detached, and is currently unused. It may
+	 * belong to another node, so we cannot remove it that way. Also, our
+	 * own node part might still be in use, so we can use this spare node
+	 * to replace ours if needed.
+	 */
+
+	/* If our link part is unused, we can safely exit now */
+	if (!node->node_p)
+		goto delete_unlink;
+
+	/* From now on, <node> and <parent> are necessarily different, and the
+	 * <node>'s node part is in use. By definition, <parent> is at least
+	 * below <node>, so keeping its key for the bit string is OK.
+	 */
+
+	parent->node_p = node->node_p;
+	parent->branches = node->branches;
+	parent->bit = node->bit;
+
+	/* We must now update the new node's parent... */
+	gpside = eb_gettag(parent->node_p);
+	gparent = eb_untag(parent->node_p, gpside);
+	gparent->b[gpside] = eb_dotag(&parent->branches, EB_NODE);
+
+	/* ... and its branches */
+	for (pside = 0; pside <= 1; pside++) {
+		if (eb_gettag(parent->branches.b[pside]) == EB_NODE) {
+			eb_root_to_node(eb_untag(parent->branches.b[pside], EB_NODE))->node_p =
+				eb_dotag(&parent->branches, pside);
+		} else {
+			eb_root_to_node(eb_untag(parent->branches.b[pside], EB_LEAF))->leaf_p =
+				eb_dotag(&parent->branches, pside);
+		}
+	}
+ delete_unlink:
+	/* Now the node has been completely unlinked */
+	node->leaf_p = NULL;
+	return; /* tree is not empty yet */
+}
+
+/* Compare blocks <a> and <b> byte-to-byte, from bit <ignore> to bit <len-1>.
+ * Return the number of equal bits between strings, assuming that the first
+ * <ignore> bits are already identical. It is possible to return slightly more
+ * than <len> bits if <len> does not stop on a byte boundary and we find exact
+ * bytes. Note that parts or all of <ignore> bits may be rechecked. It is only
+ * passed here as a hint to speed up the check.
+ */
+static forceinline int equal_bits(const unsigned char *a,
+				  const unsigned char *b,
+				  int ignore, int len)
+{
+	for (ignore >>= 3, a += ignore, b += ignore, ignore <<= 3;
+	     ignore < len; ) {
+		unsigned char c;
+
+		a++; b++;
+		ignore += 8;
+		c = b[-1] ^ a[-1];
+
+		if (c) {
+			/* OK now we know that <a> and <b> differ at the current byte and that <c> holds
+			 * the bit differences. We have to find what bit is differing and report
+			 * it as the number of identical bits. Note that low bit numbers are
+			 * assigned to high positions in the byte, as we compare them as strings.
+			 */
+			ignore -= flsnz8(c);
+			break;
+		}
+	}
+	return ignore;
+}
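+
+/* Worked example (illustrative only): with a = "abc", b = "abd", ignore = 0
+ * and len = 24, the first two bytes match (16 bits), then 'c' ^ 'd' = 0x07,
+ * whose highest set bit is reported by flsnz8() as 3, so the function
+ * returns 24 - 3 = 21 identical leading bits.
+ */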
+
+/* check that the two blocks <a> and <b> are equal on <len> bits. If it is known
+ * they already are on some bytes, this number of equal bytes to be skipped may
+ * be passed in <skip>. It returns 0 if they match, otherwise non-zero.
+ */
+static forceinline int check_bits(const unsigned char *a,
+				  const unsigned char *b,
+				  int skip,
+				  int len)
+{
+	int bit, ret;
+
+	/* This uncommon construction gives the best performance on x86 because
+	 * it makes heavy use of multiple-index addressing and parallel instructions,
+	 * and it prevents gcc from reordering the loop since it is already
+	 * properly oriented. Tested to be fine with 2.95 to 4.2.
+	 */
+	bit = ~len + (skip << 3) + 9; // = (skip << 3) + (8 - len)
+	ret = a[skip] ^ b[skip];
+	if (unlikely(bit >= 0))
+		return ret >> bit;
+	while (1) {
+		skip++;
+		if (ret)
+			return ret;
+		ret = a[skip] ^ b[skip];
+		bit += 8;
+		if (bit >= 0)
+			return ret >> bit;
+	}
+}
+
+
+/* Compare strings <a> and <b> byte-to-byte, from bit <ignore> to the last 0.
+ * Return the number of equal bits between strings, assuming that the first
+ * <ignore> bits are already identical. Note that parts or all of <ignore> bits
+ * may be rechecked. It is only passed here as a hint to speed up the check.
+ * The caller is responsible for not passing an <ignore> value larger than any
+ * of the two strings. However, referencing any bit from the trailing zero is
+ * permitted. Equal strings are reported as a negative number of bits, which
+ * indicates the end was reached.
+ */
+static forceinline int string_equal_bits(const unsigned char *a,
+					 const unsigned char *b,
+					 int ignore)
+{
+	int beg;
+	unsigned char c;
+
+	beg = ignore >> 3;
+
+	/* skip known and identical bits. We stop at the first different byte
+	 * or at the first zero we encounter on either side.
+	 */
+	while (1) {
+		unsigned char d;
+
+		c = a[beg];
+		d = b[beg];
+		beg++;
+
+		c ^= d;
+		if (c)
+			break;
+		if (!d)
+			return -1;
+	}
+	/* OK now we know that a and b differ at byte <beg>, or that both are zero.
+	 * We have to find what bit is differing and report it as the number of
+	 * identical bits. Note that low bit numbers are assigned to high positions
+	 * in the byte, as we compare them as strings.
+	 */
+	return (beg << 3) - flsnz8(c);
+}
+
+static forceinline int cmp_bits(const unsigned char *a, const unsigned char *b, unsigned int pos)
+{
+	unsigned int ofs;
+	unsigned char bit_a, bit_b;
+
+	ofs = pos >> 3;
+	pos = ~pos & 7;
+
+	bit_a = (a[ofs] >> pos) & 1;
+	bit_b = (b[ofs] >> pos) & 1;
+
+	return bit_a - bit_b; /* -1: a<b; 0: a=b; 1: a>b */
+}
+
+static forceinline int get_bit(const unsigned char *a, unsigned int pos)
+{
+	unsigned int ofs;
+
+	ofs = pos >> 3;
+	pos = ~pos & 7;
+	return (a[ofs] >> pos) & 1;
+}
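+
+/* Illustrative examples only: bit positions are counted from the most
+ * significant bit of the first byte since keys are compared as strings.
+ * With a = "A" (0x41 = 0100 0001), get_bit(a, 0) == 0, get_bit(a, 1) == 1
+ * and get_bit(a, 7) == 1; cmp_bits(a, b, pos) compares that single bit.
+ */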
+
+/* These functions are declared in ebtree.c */
+void eb_delete(struct eb_node *node);
+struct eb_node *eb_insert_dup(struct eb_node *sub, struct eb_node *new);
+
+#endif /* _EBTREE_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
diff --git a/include/import/lru.h b/include/import/lru.h
index 7427fd6..d674e53 100644
--- a/include/import/lru.h
+++ b/include/import/lru.h
@@ -22,7 +22,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <eb64tree.h>
+#include <import/eb64tree.h>
 
 /* The LRU supports a global cache shared between multiple domains and multiple
  * versions of their datasets. The purpose is not to have to flush the whole
diff --git a/include/proto/task.h b/include/proto/task.h
index 07da99d..52b284d 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -32,8 +32,8 @@
 #include <common/ticks.h>
 #include <common/hathreads.h>
 
-#include <eb32sctree.h>
-#include <eb32tree.h>
+#include <import/eb32sctree.h>
+#include <import/eb32tree.h>
 
 #include <types/global.h>
 #include <types/task.h>
diff --git a/include/types/acl.h b/include/types/acl.h
index 04318ea..f5d3858 100644
--- a/include/types/acl.h
+++ b/include/types/acl.h
@@ -32,7 +32,7 @@
 #include <types/proxy.h>
 #include <types/server.h>
 
-#include <ebmbtree.h>
+#include <import/ebmbtree.h>
 
 /* ACL test result.
  *
diff --git a/include/types/checks.h b/include/types/checks.h
index 27ce72d..a23091c 100644
--- a/include/types/checks.h
+++ b/include/types/checks.h
@@ -13,7 +13,7 @@
 #ifndef _TYPES_CHECKS_H
 #define _TYPES_CHECKS_H
 
-#include <ebpttree.h>
+#include <import/ebpttree.h>
 
 #include <common/standard.h>
 #include <common/config.h>
diff --git a/include/types/dict.h b/include/types/dict.h
index 006e915..30953f6 100644
--- a/include/types/dict.h
+++ b/include/types/dict.h
@@ -2,7 +2,7 @@
 #define _TYPES_DICT_H
 
 #include <common/hathreads.h>
-#include <ebpttree.h>
+#include <import/ebpttree.h>
 
 struct dict_entry {
 	struct ebpt_node value;
diff --git a/include/types/dns.h b/include/types/dns.h
index 9926de0..06afda8 100644
--- a/include/types/dns.h
+++ b/include/types/dns.h
@@ -22,7 +22,7 @@
 #ifndef _TYPES_DNS_H
 #define _TYPES_DNS_H
 
-#include <eb32tree.h>
+#include <import/eb32tree.h>
 
 #include <common/mini-clist.h>
 #include <common/hathreads.h>
diff --git a/include/types/fcgi-app.h b/include/types/fcgi-app.h
index f9e399f..d7beb26 100644
--- a/include/types/fcgi-app.h
+++ b/include/types/fcgi-app.h
@@ -28,7 +28,7 @@
 #include <common/mini-clist.h>
 #include <common/regex.h>
 
-#include <ebistree.h>
+#include <import/ebistree.h>
 
 #include <types/acl.h>
 #include <types/filters.h>
diff --git a/include/types/http_htx.h b/include/types/http_htx.h
index 42b3c39..301034c 100644
--- a/include/types/http_htx.h
+++ b/include/types/http_htx.h
@@ -23,7 +23,7 @@
 #ifndef _TYPES_HTTP_HTX_H
 #define _TYPES_HTTP_HTX_H
 
-#include <ebistree.h>
+#include <import/ebistree.h>
 
 #include <common/buf.h>
 #include <common/http.h>
diff --git a/include/types/lb_chash.h b/include/types/lb_chash.h
index 5991ce9..9486064 100644
--- a/include/types/lb_chash.h
+++ b/include/types/lb_chash.h
@@ -23,8 +23,8 @@
 #define _TYPES_LB_CHASH_H
 
 #include <common/config.h>
-#include <ebtree.h>
-#include <eb32tree.h>
+#include <import/ebtree.h>
+#include <import/eb32tree.h>
 
 struct lb_chash {
 	struct eb_root act;	/* weighted chash entries of active servers */
diff --git a/include/types/lb_fas.h b/include/types/lb_fas.h
index 4590a96..e20d70a 100644
--- a/include/types/lb_fas.h
+++ b/include/types/lb_fas.h
@@ -23,7 +23,7 @@
 #define _TYPES_LB_FAS_H
 
 #include <common/config.h>
-#include <ebtree.h>
+#include <import/ebtree.h>
 
 struct lb_fas {
 	struct eb_root act;	/* weighted least conns on the active servers */
diff --git a/include/types/lb_fwlc.h b/include/types/lb_fwlc.h
index 170eb24..f20659a 100644
--- a/include/types/lb_fwlc.h
+++ b/include/types/lb_fwlc.h
@@ -23,7 +23,7 @@
 #define _TYPES_LB_FWLC_H
 
 #include <common/config.h>
-#include <ebtree.h>
+#include <import/ebtree.h>
 
 struct lb_fwlc {
 	struct eb_root act;	/* weighted least conns on the active servers */
diff --git a/include/types/lb_fwrr.h b/include/types/lb_fwrr.h
index 731f055..754d0c6 100644
--- a/include/types/lb_fwrr.h
+++ b/include/types/lb_fwrr.h
@@ -23,7 +23,7 @@
 #define _TYPES_LB_FWRR_H
 
 #include <common/config.h>
-#include <ebtree.h>
+#include <import/ebtree.h>
 
 /* This structure is used to apply fast weighted round robin on a server group */
 struct fwrr_group {
diff --git a/include/types/listener.h b/include/types/listener.h
index b815cc3..d035a90 100644
--- a/include/types/listener.h
+++ b/include/types/listener.h
@@ -34,7 +34,7 @@
 #include <common/hathreads.h>
 
 #include <types/obj_type.h>
-#include <eb32tree.h>
+#include <import/eb32tree.h>
 
 /* Some pointer types reference below */
 struct task;
diff --git a/include/types/pattern.h b/include/types/pattern.h
index 9ed6b4b..f4c0a13 100644
--- a/include/types/pattern.h
+++ b/include/types/pattern.h
@@ -29,7 +29,7 @@
 
 #include <types/sample.h>
 
-#include <ebmbtree.h>
+#include <import/ebmbtree.h>
 
 /* Pattern matching function result.
  *
diff --git a/include/types/peers.h b/include/types/peers.h
index 89962a3..78270bf 100644
--- a/include/types/peers.h
+++ b/include/types/peers.h
@@ -31,7 +31,7 @@
 #include <common/mini-clist.h>
 #include <common/regex.h>
 #include <common/tools.h>
-#include <eb32tree.h>
+#include <import/eb32tree.h>
 
 #include <types/dict.h>
 
diff --git a/include/types/protocol.h b/include/types/protocol.h
index 39f99ca..10437a7 100644
--- a/include/types/protocol.h
+++ b/include/types/protocol.h
@@ -27,7 +27,7 @@
 
 #include <common/config.h>
 #include <common/mini-clist.h>
-#include <eb32tree.h>
+#include <import/eb32tree.h>
 
 /* some pointer types referenced below */
 struct listener;
diff --git a/include/types/proxy.h b/include/types/proxy.h
index c136ecb..a9adce0 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -35,8 +35,8 @@
 #include <common/tools.h>
 #include <common/hathreads.h>
 
-#include <eb32tree.h>
-#include <ebistree.h>
+#include <import/eb32tree.h>
+#include <import/ebistree.h>
 
 #include <types/acl.h>
 #include <types/backend.h>
diff --git a/include/types/server.h b/include/types/server.h
index 7b1ae5f..376fc03 100644
--- a/include/types/server.h
+++ b/include/types/server.h
@@ -30,7 +30,7 @@
 #include <common/hathreads.h>
 #include <common/openssl-compat.h>
 
-#include <eb32tree.h>
+#include <import/eb32tree.h>
 
 #include <types/connection.h>
 #include <types/counters.h>
diff --git a/include/types/ssl_crtlist.h b/include/types/ssl_crtlist.h
index 095a127..2879d74 100644
--- a/include/types/ssl_crtlist.h
+++ b/include/types/ssl_crtlist.h
@@ -23,7 +23,7 @@
 #define _TYPES_SSL_CRTLIST_H
 #ifdef USE_OPENSSL
 
-#include <ebmbtree.h>
+#include <import/ebmbtree.h>
 
 #include <common/mini-clist.h>
 
diff --git a/include/types/ssl_sock.h b/include/types/ssl_sock.h
index 78639ac..aed6e6c 100644
--- a/include/types/ssl_sock.h
+++ b/include/types/ssl_sock.h
@@ -23,9 +23,9 @@
 #define _TYPES_SSL_SOCK_H
 #ifdef USE_OPENSSL
 
-#include <ebpttree.h>
-#include <ebmbtree.h>
-#include <eb64tree.h>
+#include <import/ebpttree.h>
+#include <import/ebmbtree.h>
+#include <import/eb64tree.h>
 
 #include <types/connection.h> /* struct wait_event */
 #include <types/ssl_ckch.h>
diff --git a/include/types/stick_table.h b/include/types/stick_table.h
index 5e15aaa..832ecd2 100644
--- a/include/types/stick_table.h
+++ b/include/types/stick_table.h
@@ -26,9 +26,9 @@
 #include <sys/socket.h>
 #include <netinet/in.h>
 
-#include <ebtree.h>
-#include <ebmbtree.h>
-#include <eb32tree.h>
+#include <import/ebtree.h>
+#include <import/ebmbtree.h>
+#include <import/eb32tree.h>
 #include <common/memory.h>
 #include <types/dict.h>
 #include <types/freq_ctr.h>
diff --git a/include/types/task.h b/include/types/task.h
index 6ca9767..0fba898 100644
--- a/include/types/task.h
+++ b/include/types/task.h
@@ -26,8 +26,8 @@
 
 #include <common/config.h>
 #include <common/mini-clist.h>
-#include <eb32sctree.h>
-#include <eb32tree.h>
+#include <import/eb32sctree.h>
+#include <import/eb32tree.h>
 
 /* values for task->state */
 #define TASK_SLEEPING     0x0000  /* task sleeping */