MINOR: tasks: Change the task API so that the callback takes 3 arguments.

In preparation for thread-specific runqueues, change the task API so that
the callback takes 3 arguments: the task itself, the context, and the state,
which were previously retrieved from the task by the callback. This allows
these elements to be changed atomically in the scheduler while the
application works on the copied values, and will later even make it
possible to have NULL tasks.
diff --git a/src/checks.c b/src/checks.c
index d07a82f..3f4c0e1 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -1434,9 +1434,9 @@
  * reached, the task automatically stops. Note that any server status change
  * must have updated s->last_change accordingly.
  */
-static struct task *server_warmup(struct task *t)
+static struct task *server_warmup(struct task *t, void *context, unsigned short state)
 {
-	struct server *s = t->context;
+	struct server *s = context;
 
 	/* by default, plan on stopping the task */
 	t->expire = TICK_ETERNITY;
@@ -1967,9 +1967,9 @@
  * Please do NOT place any return statement in this function and only leave
  * via the out_unlock label.
  */
-static struct task *process_chk_proc(struct task *t)
+static struct task *process_chk_proc(struct task *t, void *context, unsigned short state)
 {
-	struct check *check = t->context;
+	struct check *check = context;
 	struct server *s = check->server;
 	int rv;
 	int ret;
@@ -2099,9 +2099,9 @@
  * Please do NOT place any return statement in this function and only leave
  * via the out_unlock label.
  */
-static struct task *process_chk_conn(struct task *t)
+static struct task *process_chk_conn(struct task *t, void *context, unsigned short state)
 {
-	struct check *check = t->context;
+	struct check *check = context;
 	struct server *s = check->server;
 	struct conn_stream *cs = check->cs;
 	struct connection *conn = cs_conn(cs);
@@ -2272,13 +2272,13 @@
  * manages a server health-check. Returns
  * the time the task accepts to wait, or TIME_ETERNITY for infinity.
  */
-static struct task *process_chk(struct task *t)
+static struct task *process_chk(struct task *t, void *context, unsigned short state)
 {
-	struct check *check = t->context;
+	struct check *check = context;
 
 	if (check->type == PR_O2_EXT_CHK)
-		return process_chk_proc(t);
-	return process_chk_conn(t);
+		return process_chk_proc(t, context, state);
+	return process_chk_conn(t, context, state);
 
 }
 
@@ -3126,9 +3126,9 @@
 	pool_free(pool_head_email_alert, alert);
 }
 
-static struct task *process_email_alert(struct task *t)
+static struct task *process_email_alert(struct task *t, void *context, unsigned short state)
 {
-	struct check        *check = t->context;
+	struct check        *check = context;
 	struct email_alertq *q;
 	struct email_alert  *alert;
 
@@ -3153,7 +3153,7 @@
 			check->state         |= CHK_ST_ENABLED;
 		}
 
-		process_chk(t);
+		process_chk(t, context, state);
 		if (check->state & CHK_ST_INPROGRESS)
 			break;
 
diff --git a/src/dns.c b/src/dns.c
index 9984e2c..8d6a6d6 100644
--- a/src/dns.c
+++ b/src/dns.c
@@ -1698,9 +1698,9 @@
  * resolutions and retry them if possible. Else a timeout is reported. Then, it
  * checks the wait list to trigger new resolutions.
  */
-static struct task *dns_process_resolvers(struct task *t)
+static struct task *dns_process_resolvers(struct task *t, void *context, unsigned short state)
 {
-	struct dns_resolvers  *resolvers = t->context;
+	struct dns_resolvers  *resolvers = context;
 	struct dns_resolution *res, *resback;
 	int exp;
 
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index f96a94f..856050d 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -1205,9 +1205,9 @@
 /* Callback function that catches applet timeouts. If a timeout occurred, we set
  * <appctx->st1> flag and the SPOE applet is woken up. */
 static struct task *
-spoe_process_appctx(struct task * task)
+spoe_process_appctx(struct task * task, void *context, unsigned short state)
 {
-	struct appctx *appctx = task->context;
+	struct appctx *appctx = context;
 
 	appctx->st1 = SPOE_APPCTX_ERR_NONE;
 	if (tick_is_expired(task->expire, now_ms)) {
diff --git a/src/haproxy.c b/src/haproxy.c
index 48a77e8..6fd2e83 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -211,7 +211,7 @@
 /* list of the temporarily limited listeners because of lack of resource */
 struct list global_listener_queue = LIST_HEAD_INIT(global_listener_queue);
 struct task *global_listener_queue_task;
-static struct task *manage_global_listener_queue(struct task *t);
+static struct task *manage_global_listener_queue(struct task *t, void *context, unsigned short state);
 
 /* bitfield of a few warnings to emit just once (WARN_*) */
 unsigned int warned = 0;
@@ -2476,7 +2476,7 @@
  * for global resources when there are enough free resource, or at least once in
  * a while. It is designed to be called as a task.
  */
-static struct task *manage_global_listener_queue(struct task *t)
+static struct task *manage_global_listener_queue(struct task *t, void *context, unsigned short state)
 {
 	int next = TICK_ETERNITY;
 	/* queue is empty, nothing to do */
diff --git a/src/hlua.c b/src/hlua.c
index 78d9cad..4de5db5 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5517,9 +5517,9 @@
  * Task wrapper are longjmp safe because the only one Lua code
  * executed is the safe hlua_ctx_resume();
  */
-static struct task *hlua_process_task(struct task *task)
+static struct task *hlua_process_task(struct task *task, void *context, unsigned short state)
 {
-	struct hlua *hlua = task->context;
+	struct hlua *hlua = context;
 	enum hlua_exec status;
 
 	if (task->thread_mask == MAX_THREADS_MASK)
@@ -6216,9 +6216,9 @@
 	}
 }
 
-struct task *hlua_applet_wakeup(struct task *t)
+struct task *hlua_applet_wakeup(struct task *t, void *context, unsigned short state)
 {
-	struct appctx *ctx = t->context;
+	struct appctx *ctx = context;
 	struct stream_interface *si = ctx->owner;
 
 	/* If the applet is wake up without any expected work, the sheduler
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 62a9f04..9c7b828 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -215,7 +215,7 @@
 	.id        = 0,
 };
 
-static struct task *h2_timeout_task(struct task *t);
+static struct task *h2_timeout_task(struct task *t, void *context, unsigned short state);
 
 /*****************************************************/
 /* functions below are for dynamic buffer management */
@@ -2324,9 +2324,9 @@
  * immediately killed. If it's allocatable and empty, we attempt to send a
  * GOAWAY frame.
  */
-static struct task *h2_timeout_task(struct task *t)
+static struct task *h2_timeout_task(struct task *t, void *context, unsigned short state)
 {
-	struct h2c *h2c = t->context;
+	struct h2c *h2c = context;
 	int expired = tick_is_expired(t->expire, now_ms);
 
 	if (!expired && h2c)
diff --git a/src/peers.c b/src/peers.c
index c56ed3a..6d5a556 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -1958,9 +1958,9 @@
  * Task processing function to manage re-connect and peer session
  * tasks wakeup on local update.
  */
-static struct task *process_peer_sync(struct task * task)
+static struct task *process_peer_sync(struct task * task, void *context, unsigned short state)
 {
-	struct peers *peers = task->context;
+	struct peers *peers = context;
 	struct peer *ps;
 	struct shared_table *st;
 
diff --git a/src/proxy.c b/src/proxy.c
index 6f71b4b..c262966 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -834,9 +834,9 @@
  * called as a task which is woken up upon stopping or when rate limiting must
  * be enforced.
  */
-struct task *manage_proxy(struct task *t)
+struct task *manage_proxy(struct task *t, void *context, unsigned short state)
 {
-	struct proxy *p = t->context;
+	struct proxy *p = context;
 	int next = TICK_ETERNITY;
 	unsigned int wait;
 
@@ -934,7 +934,7 @@
 	return 0;
 }
 
-struct task *hard_stop(struct task *t)
+struct task *hard_stop(struct task *t, void *context, unsigned short state)
 {
 	struct proxy *p;
 	struct stream *s;
diff --git a/src/session.c b/src/session.c
index 318c171..c1bd2d6 100644
--- a/src/session.c
+++ b/src/session.c
@@ -31,7 +31,7 @@
 struct pool_head *pool_head_session;
 
 static int conn_complete_session(struct connection *conn);
-static struct task *session_expire_embryonic(struct task *t);
+static struct task *session_expire_embryonic(struct task *t, void *context, unsigned short state);
 
 /* Create a a new session and assign it to frontend <fe>, listener <li>,
  * origin <origin>, set the current date and clear the stick counters pointers.
@@ -381,9 +381,9 @@
 /* Manages the embryonic session timeout. It is only called when the timeout
  * strikes and performs the required cleanup.
  */
-static struct task *session_expire_embryonic(struct task *t)
+static struct task *session_expire_embryonic(struct task *t, void *context, unsigned short state)
 {
-	struct session *sess = t->context;
+	struct session *sess = context;
 
 	if (!(t->state & TASK_WOKEN_TIMER))
 		return t;
diff --git a/src/stick_table.c b/src/stick_table.c
index 3e44747..fcc6fe6 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -578,9 +578,9 @@
  * Task processing function to trash expired sticky sessions. A pointer to the
  * task itself is returned since it never dies.
  */
-static struct task *process_table_expire(struct task *task)
+static struct task *process_table_expire(struct task *task, void *context, unsigned short state)
 {
-	struct stktable *t = task->context;
+	struct stktable *t = context;
 
 	task->expire = stktable_trash_expired(t);
 	return task;
diff --git a/src/stream.c b/src/stream.c
index 1d0b22c..3ea8953 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -1615,10 +1615,10 @@
  * and each function is called only if at least another function has changed at
  * least one flag it is interested in.
  */
-struct task *process_stream(struct task *t)
+struct task *process_stream(struct task *t, void *context, unsigned short state)
 {
 	struct server *srv;
-	struct stream *s = t->context;
+	struct stream *s = context;
 	struct session *sess = s->sess;
 	unsigned int rqf_last, rpf_last;
 	unsigned int rq_prod_last, rq_cons_last;
@@ -1655,7 +1655,7 @@
 	si_b->flags |= SI_FL_DONT_WAKE;
 
 	/* update pending events */
-	s->pending_events |= (t->state & TASK_WOKEN_ANY);
+	s->pending_events |= (state & TASK_WOKEN_ANY);
 
 	/* 1a: Check for low level timeouts if needed. We just set a flag on
 	 * stream interfaces when their timeouts have expired.
diff --git a/src/task.c b/src/task.c
index 3d021bb..23e310b 100644
--- a/src/task.c
+++ b/src/task.c
@@ -226,10 +226,10 @@
 			 * predictor take this most common call.
 			 */
 			if (likely(t->process == process_stream))
-				t = process_stream(t);
+				t = process_stream(t, t->context, t->state);
 			else {
 				if (t->process != NULL)
-					t = t->process(t);
+					t = t->process(t, t->context, t->state);
 				else {
 					__task_free(t);
 					t = NULL;
@@ -314,10 +314,10 @@
 			 */
 			curr_task = t;
 			if (likely(t->process == process_stream))
-				t = process_stream(t);
+				t = process_stream(t, t->context, t->state);
 			else {
 				if (t->process != NULL)
-					t = t->process(t);
+					t = t->process(t, t->context, t->state);
 				else {
 					__task_free(t);
 					t = NULL;