MEDIUM: muxes: mark idle conns tasklets with TASK_F_USR1
The muxes are touching the idle_conns_lock all the time now because
they need to be careful that no other thread has stolen their tasklet's
context.
This patch changes this a little bit by setting the TASK_F_USR1 flag on
the tasklet before marking a connection idle, and removing it once it's
not idle anymore. Thanks to this we have the guarantee that a tasklet
without this flag cannot be present in an idle list and does not need
to go through this costly lock. This is especially true for front
connections.
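
For illustration only, the idle/in-use transitions end up wrapped like the
sketch below (a condensed view of the hunks further down, assuming HAProxy's
internal helpers; error handling and tracing are trimmed):

    /* about to park the connection into the server's idle list: from this
     * point on another thread may steal it, so tag the tasklet first
     */
    HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
    if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1))
        fconn->conn->mux->destroy(fconn);

    /* ... later, once a stream is attached again, the connection is private
     * to this thread, so drop the flag and skip the takeover check
     */
    HA_ATOMIC_AND(&fconn->wait_event.tasklet->state, ~TASK_F_USR1);

The atomic OR/AND are used because other bits of the tasklet's state word may
be updated concurrently by the scheduler.
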
(cherry picked from commit e388f2fbca40197590bd15dce0f4eb4d6cded20a)
[wt: backported as really needed to address the high contention issues
in multi-threaded environments: all I/O tasklets queue up on the
takeover lock as soon as there's some activity on the reuse part,
sometimes causing "reuse always" to be slower than "reuse never"!
The context differs quite a bit due to the changes in tasks and idle
conns in 2.4, but the main principle is to bypass the lock when
TASK_F_USR1 is not set. ]
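
In practice the bypass in the backported I/O callbacks follows the pattern
below (a simplified sketch using the 2.3-era lock names from the diff; the
function name and the trimmed body are illustrative, not the exact code):

    struct task *mux_io_cb(struct task *t, void *ctx, unsigned short status)
    {
        struct tasklet *tl = (struct tasklet *)t;

        if (status & TASK_F_USR1) {
            /* the connection might sit in an idle list and may have been
             * taken over, so the context must be checked under the lock
             */
            HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
            if (tl->context == NULL) {
                /* stolen by another thread: just release the tasklet */
                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
                tasklet_free(tl);
                return NULL;
            }
            /* ... unlink from the idle list as in the hunk below ... */
            HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
        }
        /* without TASK_F_USR1 the lock is never touched: this is the fast
         * path taken in particular by front connections
         */
        /* ... the real handlers then proceed with send/receive processing ... */
        return NULL;
    }

Everything after the if/else runs identically whether or not the lock was
taken.
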
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 837c894..fef8952 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -2930,32 +2930,39 @@
struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
{
struct connection *conn;
- struct fcgi_conn *fconn;
+ struct fcgi_conn *fconn = ctx;
struct tasklet *tl = (struct tasklet *)t;
int conn_in_list;
int ret = 0;
-
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
- if (tl->context == NULL) {
- /* The connection has been taken over by another thread,
- * we're no longer responsible for it, so just free the
- * tasklet, and do nothing.
+ if (status & TASK_F_USR1) {
+ /* the tasklet was idling on an idle connection, it might have
+ * been stolen, let's be careful!
*/
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
- tasklet_free(tl);
- return NULL;
-
- }
- fconn = ctx;
- conn = fconn->conn;
- TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
+ HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ if (tl->context == NULL) {
+ /* The connection has been taken over by another thread,
+ * we're no longer responsible for it, so just free the
+ * tasklet, and do nothing.
+ */
+ HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ tasklet_free(tl);
+ return NULL;
+ }
+ conn = fconn->conn;
+ TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
- conn_in_list = conn->flags & CO_FL_LIST_MASK;
- if (conn_in_list)
- MT_LIST_DEL(&conn->list);
+ conn_in_list = conn->flags & CO_FL_LIST_MASK;
+ if (conn_in_list)
+ MT_LIST_DEL(&conn->list);
+ HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ } else {
+ /* we're certain the connection was not in an idle list */
+ conn = fconn->conn;
+ TRACE_ENTER(FCGI_EV_FCONN_WAKE, conn);
+ conn_in_list = 0;
+ }
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
if (!(fconn->wait_event.events & SUB_RETRY_SEND))
@@ -3465,6 +3472,10 @@
cs_free(cs);
return NULL;
}
+
+ /* the connection is not idle anymore, let's mark this */
+ HA_ATOMIC_AND(&fconn->wait_event.tasklet->state, ~TASK_F_USR1);
+
TRACE_LEAVE(FCGI_EV_FSTRM_NEW, conn, fstrm);
return cs;
}
@@ -3587,6 +3598,10 @@
fconn->conn->owner = NULL;
}
+ /* mark that the tasklet may lose its context to another thread and
+ * that the handler needs to check it under the idle conns lock.
+ */
+ HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1)) {
/* The server doesn't want it, let's kill the connection right away */
fconn->conn->mux->destroy(fconn);