MAJOR: threads/map: Make acls/maps thread safe

Locks have been added to the pat_ref and pattern_expr structures to
protect all accesses to an instance of one of them. Moreover, a global
lock has been added to protect the LRU cache used for pattern matching.
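
As an illustration, here is a minimal sketch of how a writer is
expected to use the new pat_ref lock; the ref/key/value/err variables
are assumed to be set up by the caller, as in the CLI and http-rule
handlers updated below:

	/* serialize any update of the reference with the PATREF lock */
	SPIN_LOCK(PATREF_LOCK, &ref->lock);
	if (!pat_ref_set(ref, key, value, &err)) {
		send_log(NULL, LOG_NOTICE, "%s", err);
		free(err);
	}
	SPIN_UNLOCK(PATREF_LOCK, &ref->lock);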

Patterns are now duplicated after a successful match, to avoid their
modification by other threads while the result is being used.
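
The duplication is performed in pattern_exec_match() while the
expression's read lock is still held; roughly (simplified sketch, the
sample data copy and error paths are omitted):

	RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
	pat = head->match(smp, list->expr, fill);
	if (pat) {
		/* copy the result into thread-local storage so it
		 * remains valid once the lock is released
		 */
		if (pat != &static_pattern) {
			memcpy(&static_pattern, pat, sizeof(struct pattern));
			pat = &static_pattern;
		}
		RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
		return pat;
	}
	RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);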

Finally, the function that reloads a pattern list has been modified to
be thread-safe.
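
The locking order used by pat_ref_reload() can be summarized as
follows (sketch only, element freeing and re-indexing details are in
the patch below):

	SPIN_LOCK(PATREF_LOCK, &ref->lock);
	list_for_each_entry(expr, &ref->pat, list)
		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);

	/* drop the old pat_ref_elt list and move <replace>'s list in */

	list_for_each_entry(expr, &ref->pat, list) {
		expr->pat_head->prune(expr);
		/* re-parse and re-index every element of the new list */
		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	}
	SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
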
diff --git a/include/common/hathreads.h b/include/common/hathreads.h
index e23b687..1a14a62 100644
--- a/include/common/hathreads.h
+++ b/include/common/hathreads.h
@@ -160,6 +160,9 @@
 	STRMS_LOCK,
 	SSL_LOCK,
 	SSL_GEN_CERTS_LOCK,
+	PATREF_LOCK,
+	PATEXP_LOCK,
+	PATLRU_LOCK,
 	LOCK_LABELS
 };
 struct lock_stat {
@@ -246,7 +249,8 @@
 					   "TASK_RQ", "TASK_WQ", "POOL",
 					   "LISTENER", "LISTENER_QUEUE", "PROXY", "SERVER",
 					   "UPDATED_SERVERS", "LBPRM", "SIGNALS", "STK_TABLE", "STK_SESS",
-					   "APPLETS", "PEER", "BUF_WQ", "STREAMS", "SSL", "SSL_GEN_CERTS"};
+					   "APPLETS", "PEER", "BUF_WQ", "STREAMS", "SSL", "SSL_GEN_CERTS",
+					   "PATREF", "PATEXP", "PATLRU" };
 	int lbl;
 
 	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
diff --git a/include/types/pattern.h b/include/types/pattern.h
index d7ce613..d5c340a 100644
--- a/include/types/pattern.h
+++ b/include/types/pattern.h
@@ -107,6 +107,9 @@
 	char *display; /* String displayed to identify the pattern origin. */
 	struct list head; /* The head of the list of struct pat_ref_elt. */
 	struct list pat; /* The head of the list of struct pattern_expr. */
+#ifdef USE_THREAD
+	HA_SPINLOCK_T lock; /* Lock used to protect pat ref elements */
+#endif
 };
 
 /* This is a part of struct pat_ref. Each entry contain one
@@ -199,6 +202,9 @@
 	struct eb_root pattern_tree;  /* may be used for lookup in large datasets */
 	struct eb_root pattern_tree_2;  /* may be used for different types */
 	int mflags;                     /* flags relative to the parsing or matching method. */
+#ifdef USE_THREAD
+	HA_RWLOCK_T lock;               /* lock used to protect patterns */
+#endif
 };
 
 /* This is a list of expression. A struct pattern_expr can be used by
diff --git a/src/map.c b/src/map.c
index dc4dd95..99ea334 100644
--- a/src/map.c
+++ b/src/map.c
@@ -325,12 +325,17 @@
 		 * this pointer. We know we have reached the end when this
 		 * pointer points back to the head of the streams list.
 		 */
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		LIST_INIT(&appctx->ctx.map.bref.users);
 		appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n;
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_LIST;
 		/* fall through */
 
 	case STAT_ST_LIST:
+
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+
 		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
 			LIST_DEL(&appctx->ctx.map.bref.users);
 			LIST_INIT(&appctx->ctx.map.bref.users);
@@ -354,15 +359,16 @@
 				/* let's try again later from this stream. We add ourselves into
 				 * this stream's users so that it can remove us upon termination.
 				 */
-				si_applet_cant_put(si);
 				LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users);
+				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
+				si_applet_cant_put(si);
 				return 0;
 			}
 
 			/* get next list entry and check the end of the list */
 			appctx->ctx.map.bref.ref = elt->list.n;
 		}
-
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_FIN;
 		/* fall through */
 
@@ -450,6 +456,7 @@
 		/* fall through */
 
 	case STAT_ST_LIST:
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		/* for each lookup type */
 		while (appctx->ctx.map.expr) {
 			/* initialise chunk to build new message */
@@ -460,6 +467,7 @@
 			sample.flags = SMP_F_CONST;
 			sample.data.u.str.len = appctx->ctx.map.chunk.len;
 			sample.data.u.str.str = appctx->ctx.map.chunk.str;
+
 			if (appctx->ctx.map.expr->pat_head->match &&
 			    sample_convert(&sample, appctx->ctx.map.expr->pat_head->expect_type))
 				pat = appctx->ctx.map.expr->pat_head->match(&sample, appctx->ctx.map.expr, 1);
@@ -534,6 +542,7 @@
 				/* let's try again later from this stream. We add ourselves into
 				 * this stream's users so that it can remove us upon termination.
 				 */
+				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				si_applet_cant_put(si);
 				return 0;
 			}
@@ -542,7 +551,7 @@
 			appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr,
 			                                         &appctx->ctx.map.ref->pat);
 		}
-
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		appctx->st2 = STAT_ST_FIN;
 		/* fall through */
 
@@ -619,8 +628,10 @@
 static void cli_release_show_map(struct appctx *appctx)
 {
 	if (appctx->st2 == STAT_ST_LIST) {
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users))
 			LIST_DEL(&appctx->ctx.map.bref.users);
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 }
 
@@ -717,26 +728,32 @@
 
 			/* Try to delete the entry. */
 			err = NULL;
+			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) {
+				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				if (err)
 					memprintf(&err, "%s.\n", err);
 				appctx->ctx.cli.err = err;
 				appctx->st0 = CLI_ST_PRINT_FREE;
 				return 1;
 			}
+			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		}
 		else {
 			/* Else, use the entry identifier as pattern
 			 * string, and update the value.
 			 */
 			err = NULL;
+			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) {
+				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 				if (err)
 					memprintf(&err, "%s.\n", err);
 				appctx->ctx.cli.err = err;
 				appctx->st0 = CLI_ST_PRINT_FREE;
 				return 1;
 			}
+			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		}
 
 		/* The set is done, send message. */
@@ -808,10 +825,12 @@
 
 		/* Add value. */
 		err = NULL;
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (appctx->ctx.map.display_flags == PAT_REF_MAP)
 			ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err);
 		else
 			ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err);
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!ret) {
 			if (err)
 				memprintf(&err, "%s.\n", err);
@@ -891,25 +910,31 @@
 		}
 
 		/* Try to delete the entry. */
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) {
+			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			/* The entry is not found, send message. */
 			appctx->ctx.cli.severity = LOG_ERR;
 			appctx->ctx.cli.msg = "Key not found.\n";
 			appctx->st0 = CLI_ST_PRINT;
 			return 1;
 		}
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 	else {
 		/* Else, use the entry identifier as pattern
 		 * string and try to delete the entry.
 		 */
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) {
+			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 			/* The entry is not found, send message. */
 			appctx->ctx.cli.severity = LOG_ERR;
 			appctx->ctx.cli.msg = "Key not found.\n";
 			appctx->st0 = CLI_ST_PRINT;
 			return 1;
 		}
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 	}
 
 	/* The deletion is done, send message. */
@@ -958,7 +983,9 @@
 		}
 
 		/* Clear all. */
+		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 		pat_ref_prune(appctx->ctx.map.ref);
+		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
 
 		/* return response */
 		appctx->st0 = CLI_ST_PROMPT;
diff --git a/src/pattern.c b/src/pattern.c
index a8b56ca..61ef5f0 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -148,12 +148,16 @@
 };
 
 /* this struct is used to return information */
-static struct pattern static_pattern;
+static THREAD_LOCAL struct pattern static_pattern;
+static THREAD_LOCAL struct sample_data static_sample_data;
 
 /* This is the root of the list of all pattern_ref avalaibles. */
 struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);
 
 static struct lru64_head *pat_lru_tree;
+#ifdef USE_THREAD
+HA_SPINLOCK_T pat_lru_tree_lock;
+#endif
 static unsigned long long pat_lru_seed;
 
 /*
@@ -485,12 +489,20 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
+
 	list_for_each_entry(lst, &expr->patterns, list) {
 		pattern = &lst->pat;
 
@@ -505,8 +517,10 @@
 		}
 	}
 
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -522,10 +536,17 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
 	list_for_each_entry(lst, &expr->patterns, list) {
@@ -540,8 +561,10 @@
 		}
 	}
 
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -583,10 +606,17 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
 	list_for_each_entry(lst, &expr->patterns, list) {
@@ -598,8 +628,10 @@
 		}
 	}
 
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -643,10 +675,17 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
 	list_for_each_entry(lst, &expr->patterns, list) {
@@ -664,8 +703,10 @@
 		break;
 	}
 
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -682,10 +723,17 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
 	list_for_each_entry(lst, &expr->patterns, list) {
@@ -703,8 +751,10 @@
 		break;
 	}
 
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -725,10 +775,17 @@
 	if (pat_lru_tree) {
 		unsigned long long seed = pat_lru_seed ^ (long)expr;
 
+		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
 		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
 				pat_lru_tree, expr, expr->revision);
-		if (lru && lru->domain)
-			return lru->data;
+		if (!lru) {
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+		}
+		else if (lru->domain) {
+			ret = lru->data;
+			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+			return ret;
+		}
 	}
 
 	list_for_each_entry(lst, &expr->patterns, list) {
@@ -760,8 +817,10 @@
 		}
 	}
  leave:
-	if (lru)
-	    lru64_commit(lru, ret, expr, expr->revision, NULL);
+	if (lru) {
+		lru64_commit(lru, ret, expr, expr->revision, NULL);
+		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
+	}
 
 	return ret;
 }
@@ -1600,6 +1659,8 @@
 			list_for_each_entry(expr, &ref->pat, list)
 				pattern_delete(expr, elt);
 
+			/* the pat_ref_elt is trashed once all exprs
+			   are cleaned and no reference on it remains */
 			LIST_DEL(&elt->list);
 			free(elt->sample);
 			free(elt->pattern);
@@ -1638,6 +1699,8 @@
 			list_for_each_entry(expr, &ref->pat, list)
 				pattern_delete(expr, elt);
 
+			/* the pat_ref_elt is trashed once all exprs
+			   are cleaned and no reference on it remains */
 			LIST_DEL(&elt->list);
 			free(elt->sample);
 			free(elt->pattern);
@@ -1695,9 +1758,6 @@
 		memprintf(err, "out of memory error");
 		return 0;
 	}
-	free(elt->sample);
-	elt->sample = sample;
-
 	/* Load sample in each reference. All the conversion are tested
 	 * below, normally these calls dosn't fail.
 	 */
@@ -1705,11 +1765,18 @@
 		if (!expr->pat_head->parse_smp)
 			continue;
 
+		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 		data = pattern_find_smp(expr, elt);
 		if (data && *data && !expr->pat_head->parse_smp(sample, *data))
 			*data = NULL;
+		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	}
 
+	/* free old sample only when all exprs are updated */
+	free(elt->sample);
+	elt->sample = sample;
+
+
 	return 1;
 }
 
@@ -1805,7 +1872,7 @@
 
 	LIST_INIT(&ref->head);
 	LIST_INIT(&ref->pat);
-
+	SPIN_INIT(&ref->lock);
 	LIST_ADDQ(&pattern_reference, &ref->list);
 
 	return ref;
@@ -1924,11 +1991,14 @@
 		return 0;
 	}
 
+	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 	/* index pattern */
 	if (!expr->pat_head->index(expr, &pattern, err)) {
+		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 		free(data);
 		return 0;
 	}
+	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 
 	return 1;
 }
@@ -1983,6 +2053,7 @@
 			return 0;
 		}
 	}
+
 	return 1;
 }
 
@@ -1995,23 +2066,88 @@
 void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
 {
 	struct pattern_expr *expr;
-	struct pat_ref_elt *elt;
 	char *err = NULL;
+	struct pat_ref_elt *elt, *safe;
+	struct bref *bref, *back;
+	struct sample_data *data;
+	struct pattern pattern;
 
-	pat_ref_prune(ref);
 
+	SPIN_LOCK(PATREF_LOCK, &ref->lock);
+	list_for_each_entry(expr, &ref->pat, list) {
+		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+	}
+
+	/* all exprs are locked, we can safely remove all pat_ref_elt */
+	list_for_each_entry_safe(elt, safe, &ref->head, list) {
+		list_for_each_entry_safe(bref, back, &elt->back_refs, users) {
+			/*
+			 * we have to unlink all watchers. We must not relink them if
+			 * this elt  was the last one in the list.
+			 */
+			LIST_DEL(&bref->users);
+			LIST_INIT(&bref->users);
+			if (elt->list.n != &ref->head)
+				LIST_ADDQ(&LIST_ELEM(elt->list.n, struct stream *, list)->back_refs, &bref->users);
+			bref->ref = elt->list.n;
+		}
+		LIST_DEL(&elt->list);
+		free(elt->pattern);
+		free(elt->sample);
+		free(elt);
+	}
+
+	/* switch pat_ref_elt lists */
 	LIST_ADD(&replace->head, &ref->head);
 	LIST_DEL(&replace->head);
 
-	list_for_each_entry(elt, &ref->head, list) {
-		list_for_each_entry(expr, &ref->pat, list) {
-			if (!pat_ref_push(elt, expr, 0, &err)) {
+	list_for_each_entry(expr, &ref->pat, list) {
+		expr->pat_head->prune(expr);
+		list_for_each_entry(elt, &ref->head, list) {
+			/* Create sample */
+			if (elt->sample && expr->pat_head->parse_smp) {
+				/* New sample. */
+				data = malloc(sizeof(*data));
+				if (!data)
+					continue;
+
+				/* Parse value. */
+				if (!expr->pat_head->parse_smp(elt->sample, data)) {
+					memprintf(&err, "unable to parse '%s'", elt->sample);
+					send_log(NULL, LOG_NOTICE, "%s", err);
+					free(err);
+					free(data);
+					continue;
+				}
+
+			}
+			else
+				data = NULL;
+
+			/* initialise pattern */
+			memset(&pattern, 0, sizeof(pattern));
+			pattern.data = data;
+			pattern.ref = elt;
+
+			/* parse pattern */
+			if (!expr->pat_head->parse(elt->pattern, &pattern, expr->mflags, &err)) {
 				send_log(NULL, LOG_NOTICE, "%s", err);
 				free(err);
-				err = NULL;
+				free(data);
+				continue;
+			}
+
+			/* index pattern */
+			if (!expr->pat_head->index(expr, &pattern, &err)) {
+				send_log(NULL, LOG_NOTICE, "%s", err);
+				free(err);
+				free(data);
+				continue;
 			}
 		}
+		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	}
+	SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 }
 
 /* This function prune all entries of <ref>. This function
@@ -2023,6 +2159,14 @@
 	struct pattern_expr *expr;
 	struct bref *bref, *back;
 
+	list_for_each_entry(expr, &ref->pat, list) {
+		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+		expr->pat_head->prune(expr);
+		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+	}
+
+	/* the pat_ref_elt entries are trashed in a second pass to ensure
+	   that the data is freed only once no reference on it remains */
 	list_for_each_entry_safe(elt, safe, &ref->head, list) {
 		list_for_each_entry_safe(bref, back, &elt->back_refs, users) {
 			/*
@@ -2041,8 +2185,7 @@
 		free(elt);
 	}
 
-	list_for_each_entry(expr, &ref->pat, list)
-		expr->pat_head->prune(expr);
+
 }
 
 /* This function lookup for existing reference <ref> in pattern_head <head>. */
@@ -2124,6 +2267,8 @@
 
 		expr->ref = ref;
 
+		RWLOCK_INIT(&expr->lock);
+
 		/* We must free this pattern if it is no more used. */
 		list->do_free = 1;
 	}
@@ -2434,9 +2579,43 @@
 		return NULL;
 
 	list_for_each_entry(list, &head->head, list) {
+		RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
 		pat = head->match(smp, list->expr, fill);
-		if (pat)
+		if (pat) {
+			/* We duplicate the pattern because it could be modified
+			   by another thread */
+			if (pat != &static_pattern) {
+				memcpy(&static_pattern, pat, sizeof(struct pattern));
+				pat = &static_pattern;
+			}
+
+			/* We also duplicate the sample data for the
+			   same reason */
+			if (pat->data && (pat->data != &static_sample_data)) {
+				switch(pat->type) {
+					case SMP_T_STR:
+						static_sample_data.type = SMP_T_STR;
+						static_sample_data.u.str = *get_trash_chunk();
+						static_sample_data.u.str.len = pat->data->u.str.len;
+						if (static_sample_data.u.str.len >= static_sample_data.u.str.size)
+							static_sample_data.u.str.len = static_sample_data.u.str.size - 1;
+						memcpy(static_sample_data.u.str.str, pat->data->u.str.str, static_sample_data.u.str.len);
+						static_sample_data.u.str.str[static_sample_data.u.str.len] = 0;
+						break;
+					case SMP_T_IPV4:
+					case SMP_T_IPV6:
+					case SMP_T_SINT:
+						memcpy(&static_sample_data, pat->data, sizeof(struct sample_data));
+						break;
+					default:
+						pat->data = NULL;
+				}
+				pat->data = &static_sample_data;
+			}
+			RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
 			return pat;
+		}
+		RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
 	}
 	return NULL;
 }
@@ -2450,7 +2627,9 @@
 		LIST_DEL(&list->list);
 		if (list->do_free) {
 			LIST_DEL(&list->expr->list);
+			RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
 			head->prune(list->expr);
+			RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
 			free(list->expr);
 		}
 		free(list);
@@ -2497,7 +2676,9 @@
  */
 int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref)
 {
+	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
 	expr->pat_head->delete(expr, ref);
+	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
 	return 1;
 }
 
@@ -2511,8 +2692,10 @@
 	struct list pr = LIST_HEAD_INIT(pr);
 
 	pat_lru_seed = random();
-	if (global.tune.pattern_cache)
+	if (global.tune.pattern_cache) {
 		pat_lru_tree = lru64_new(global.tune.pattern_cache);
+		SPIN_INIT(&pat_lru_tree_lock);
+	}
 
 	list_for_each_entry(ref, &pattern_reference, list) {
 		if (ref->unique_id == -1) {
diff --git a/src/proto_http.c b/src/proto_http.c
index 724916f..390de89 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -2621,7 +2621,9 @@
 
 			/* perform update */
 			/* returned code: 1=ok, 0=ko */
+			SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			pat_ref_delete(ref, key->str);
+			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2647,8 +2649,10 @@
 
 			/* perform update */
 			/* add entry only if it does not already exist */
+			SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			if (pat_ref_find_elt(ref, key->str) == NULL)
 				pat_ref_add(ref, key->str, NULL, NULL);
+			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2911,7 +2915,9 @@
 
 			/* perform update */
 			/* returned code: 1=ok, 0=ko */
+			SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			pat_ref_delete(ref, key->str);
+			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 
 			free_trash_chunk(key);
 			break;
@@ -2974,13 +2980,14 @@
 			value->str[value->len] = '\0';
 
 			/* perform update */
+			SPIN_LOCK(PATREF_LOCK, &ref->lock);
 			if (pat_ref_find_elt(ref, key->str) != NULL)
 				/* update entry if it exists */
 				pat_ref_set(ref, key->str, value->str, NULL);
 			else
 				/* insert a new entry */
 				pat_ref_add(ref, key->str, value->str, NULL);
-
+			SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
 			free_trash_chunk(key);
 			free_trash_chunk(value);
 			break;