MEDIUM: connection: protect idle conn lists with locks
This is preparation work for connection reuse with SNI, PROXY protocol,
or specific src-dst addresses.
Protect every access to the idle conn lists with a lock. Strictly
speaking, this is not needed yet because the lists are only accessed
through atomic operations. However, to be able to reuse connections with
specific parameters, the list storage will be converted to eb-trees. As
this structure does not support atomic operations, it is mandatory to
protect it with a lock.
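For instance, today an idle connection is parked with a single lock-free
MT_LIST operation, while an eb-tree insert rewrites several node pointers
non-atomically and will need the whole update to run under the lock. A
minimal sketch of the future pattern (idle_conn_tree, hash_node and
hash_key are hypothetical names, nothing in this patch creates them):

    /* today: a single atomic MT_LIST operation, no lock needed */
    MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);

    /* with eb-trees: the insert updates parent/branch pointers
     * non-atomically, so the whole operation must hold the lock
     */
    HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
    ebmb_insert(&srv->idle_conn_tree[tid], &conn->hash_node,
                sizeof(conn->hash_key));
    HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);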
For this, the takeover lock is reused. Its role was to protect
connections during takeover. As it is now extended to general idle conns
usage, it is renamed to idle_conns_lock. A new lock label named
IDLE_CONNS_LOCK is also added to isolate its impact on performance.
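With a dedicated label, DEBUG_THREAD builds report contention for this
lock under its own entry in the lock statistics instead of folding it
into OTHER_LOCK. All the mux I/O callbacks touched below follow the same
locking sequence; condensed from the hunks that follow (mux specifics
stripped), it looks like this:

    HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
    if (!tl->context) {
        /* another thread took the connection over; only the
         * tasklet remains ours to free
         */
        HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
        tasklet_free(tl);
        return NULL;
    }
    conn_in_list = conn->flags & CO_FL_LIST_MASK;
    if (conn_in_list)
        MT_LIST_DEL(&conn->list);
    HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

    /* ... perform the I/O without holding the lock ... */

    if (!ret && conn_in_list) {
        /* connection is still idle: put it back in its list */
        HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
        if (conn_in_list == CO_FL_SAFE_LIST)
            MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
        else
            MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
        HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
    }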
diff --git a/include/haproxy/connection-t.h b/include/haproxy/connection-t.h
index 7fa0611..2d65240 100644
--- a/include/haproxy/connection-t.h
+++ b/include/haproxy/connection-t.h
@@ -617,7 +617,7 @@
struct idle_conns {
struct mt_list toremove_conns;
struct task *cleanup_task;
- __decl_thread(HA_SPINLOCK_T takeover_lock);
+ __decl_thread(HA_SPINLOCK_T idle_conns_lock);
} THREAD_ALIGNED(64);
#endif /* _HAPROXY_CONNECTION_T_H */
diff --git a/include/haproxy/server.h b/include/haproxy/server.h
index 00036b1..c162e99 100644
--- a/include/haproxy/server.h
+++ b/include/haproxy/server.h
@@ -36,6 +36,7 @@
__decl_thread(extern HA_SPINLOCK_T idle_conn_srv_lock);
+extern struct idle_conns idle_conns[MAX_THREADS];
extern struct eb_root idle_conn_srv;
extern struct task *idle_conn_task;
extern struct dict server_key_dict;
@@ -271,7 +272,9 @@
}
/* Remove the connection from any list (safe, idle or available) */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
/* This adds an idle connection to the server's list if the connection is
@@ -304,7 +307,10 @@
return 0;
}
_HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
+
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL(&conn->list);
+
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
@@ -314,6 +320,7 @@
MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
__ha_barrier_full();
diff --git a/include/haproxy/thread.h b/include/haproxy/thread.h
index bdabcac..a519e77 100644
--- a/include/haproxy/thread.h
+++ b/include/haproxy/thread.h
@@ -400,6 +400,7 @@
SNI_LOCK,
SSL_SERVER_LOCK,
SFT_LOCK, /* sink forward target */
+ IDLE_CONNS_LOCK,
OTHER_LOCK,
LOCK_LABELS
};
@@ -444,6 +445,7 @@
case SNI_LOCK: return "SNI";
case SSL_SERVER_LOCK: return "SSL_SERVER";
case SFT_LOCK: return "SFT";
+ case IDLE_CONNS_LOCK: return "IDLE_CONNS";
case OTHER_LOCK: return "OTHER";
case LOCK_LABELS: break; /* keep compiler happy */
};
diff --git a/src/backend.c b/src/backend.c
index 7a1d179..577b38f 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1113,7 +1113,7 @@
* to end up with two threads using the same connection.
*/
i = tid;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);
/* If we failed to pick a connection from the idle list, let's try again with
@@ -1126,7 +1126,7 @@
mt_list = srv->safe_conns;
}
}
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* If we found a connection in our own list, and we don't have to
* steal one from another thread, then we're done.
@@ -1161,7 +1161,7 @@
if (!srv->curr_idle_thr[i] || i == tid)
continue;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
MT_LIST_DEL_SAFE(elt1);
@@ -1185,7 +1185,7 @@
}
}
}
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
} while (!found && (i = (i + 1 == global.nbthread) ? 0 : i + 1) != stop);
if (!found)
@@ -1335,12 +1335,15 @@
* acceptable, attempt to kill an idling connection
*/
/* First, try from our own idle list */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tokill_conn = MT_LIST_POP(&srv->idle_conns[tid],
struct connection *, list);
if (tokill_conn)
tokill_conn->mux->destroy(tokill_conn->ctx);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+
/* If not, iterate over other thread's idling pool, and try to grab one */
- else {
+ if (!tokill_conn) {
int i;
for (i = tid; (i = ((i + 1 == global.nbthread) ? 0 : i + 1)) != tid;) {
@@ -1350,12 +1353,13 @@
// see it possibly larger.
ALREADY_CHECKED(i);
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
struct connection *, list);
if (!tokill_conn)
tokill_conn = MT_LIST_POP(&srv->safe_conns[i],
struct connection *, list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (tokill_conn) {
/* We got one, put it into the concerned thread's to kill list, and wake it's kill task */
@@ -1363,10 +1367,8 @@
MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
(struct mt_list *)&tokill_conn->list);
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
break;
}
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
}
}
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 12eae67..c3159f5 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -3575,7 +3575,7 @@
goto err;
idle_conns[i].cleanup_task->process = srv_cleanup_toremove_connections;
idle_conns[i].cleanup_task->context = NULL;
- HA_SPIN_INIT(&idle_conns[i].takeover_lock);
+ HA_SPIN_INIT(&idle_conns[i].idle_conns_lock);
MT_LIST_INIT(&idle_conns[i].toremove_conns);
}
}
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index 2cc5177..0d3ab78 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -2984,13 +2984,13 @@
int ret = 0;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
@@ -3004,7 +3004,7 @@
if (conn_in_list)
MT_LIST_DEL(&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(fconn->wait_event.events & SUB_RETRY_SEND))
ret = fcgi_send(fconn);
@@ -3021,10 +3021,12 @@
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}
@@ -3153,19 +3155,19 @@
TRACE_ENTER(FCGI_EV_FCONN_WAKE, (fconn ? fconn->conn : NULL));
if (fconn) {
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
fconn = NULL;
goto do_leave;
}
if (!expired) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", FCGI_EV_FCONN_WAKE, fconn->conn);
return t;
}
@@ -3176,7 +3178,7 @@
if (fconn->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&fconn->conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:
diff --git a/src/mux_h1.c b/src/mux_h1.c
index a3aa388..ab639c6 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -2808,13 +2808,13 @@
int ret = 0;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
}
@@ -2830,7 +2830,7 @@
if (conn_in_list)
MT_LIST_DEL(&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(h1c->wait_event.events & SUB_RETRY_SEND))
ret = h1_send(h1c);
@@ -2846,10 +2846,12 @@
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}
@@ -2889,19 +2891,19 @@
if (h1c) {
/* Make sure nobody stole the connection from us */
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
h1c = NULL;
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
goto do_leave;
}
if (!expired) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
return t;
}
@@ -2910,7 +2912,7 @@
* stream's timeout
*/
if (h1c->flags & H1C_F_ST_READY) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
t->expire = TICK_ETERNITY;
TRACE_DEVEL("leaving (CS still attached)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
return t;
@@ -2924,7 +2926,7 @@
h1_send(h1c);
if (b_data(&h1c->obuf) || (h1c->flags & H1C_F_ERR_PENDING)) {
h1_refresh_timeout(h1c);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
return t;
}
}
@@ -2935,7 +2937,7 @@
h1c->h1s->cs->flags |= (CS_FL_EOS|CS_FL_ERROR);
h1_alert(h1c->h1s);
h1_refresh_timeout(h1c);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("waiting to release the CS before releasing the connection", H1_EV_H1C_WAKE);
return t;
}
@@ -2946,7 +2948,7 @@
if (h1c->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&h1c->conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 4d71422..093ab16 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -3782,13 +3782,13 @@
int ret = 0;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (t->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
goto leave;
}
@@ -3805,7 +3805,7 @@
if (conn_in_list)
MT_LIST_DEL(&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(h2c->wait_event.events & SUB_RETRY_SEND))
ret = h2_send(h2c);
@@ -3822,10 +3822,12 @@
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
leave:
@@ -3902,15 +3904,15 @@
}
/* connections in error must be removed from the idle lists */
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
else if (h2c->st0 == H2_CS_ERROR) {
/* connections in error must be removed from the idle lists */
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
if (!b_data(&h2c->dbuf))
@@ -3966,20 +3968,20 @@
if (h2c) {
/* Make sure nobody stole the connection from us */
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
h2c = NULL;
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
goto do_leave;
}
if (!expired) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
return t;
}
@@ -3988,7 +3990,7 @@
/* we do still have streams but all of them are idle, waiting
* for the data layer, so we must not enforce the timeout here.
*/
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
t->expire = TICK_ETERNITY;
return t;
}
@@ -3999,7 +4001,7 @@
if (h2c->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&h2c->conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:
@@ -4047,9 +4049,9 @@
}
/* in any case this connection must not be considered idle anymore */
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&h2c->conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* either we can release everything now or it will be done later once
* the last stream closes.
diff --git a/src/server.c b/src/server.c
index 453c598..2e49949 100644
--- a/src/server.c
+++ b/src/server.c
@@ -5277,6 +5277,8 @@
/* Move toremove_nb connections from idle_list to toremove_list, -1 means
* moving them all.
* Returns the number of connections moved.
+ *
+ * Must be called with idle_conns_lock held.
*/
static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list *toremove_list, int toremove_nb)
{
@@ -5308,10 +5310,12 @@
/* check all threads starting with ours */
for (i = tid;;) {
did_remove = 0;
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
if (srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
@@ -5381,12 +5385,14 @@
max_conn = (exceed_conns * srv->curr_idle_thr[i]) /
curr_idle + 1;
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
j = srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, max_conn);
if (j > 0)
did_remove = 1;
if (max_conn - j > 0 &&
srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
did_remove = 1;
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index db5b019..2ca47d5 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -5808,9 +5808,9 @@
int conn_in_list;
int ret = 0;
- HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
}
@@ -5818,7 +5818,7 @@
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
MT_LIST_DEL(&conn->list);
- HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* First if we're doing an handshake, try that */
if (ctx->conn->flags & CO_FL_SSL_WAIT_HS)
ssl_sock_handshake(ctx->conn, CO_FL_SSL_WAIT_HS);
@@ -5866,10 +5866,12 @@
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}