MINOR: server/idle: make the next_takeover index per-tgroup

In order to evenly pick idle connections from other threads, the server
keeps a "next_takeover" index, which is advanced each time a connection
is picked from another thread, and which indicates which thread to
start from next time.

With thread groups this doesn't work well because the index is the same
regardless of the group, and if one group has more threads than another,
there's even a risk of reintroducing an imbalance.

This patch introduces a new per-tgroup storage in servers which, for
now, only contains an instance of this next_takeover index. This way,
each thread only manipulates the index specific to its own group, and
the takeover becomes fair again. More entries may come soon.
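
As an illustration only, the per-group rotation can be modeled with the
following self-contained sketch (toy code, not HAProxy's actual API: the
structures, the pick_thread() helper and the group layout below are made
up for the example):

    #include <stdio.h>

    struct tgroup { unsigned int base, count; }; /* first thread ID, nb threads */
    struct srv_per_tgroup { unsigned int next_takeover; };

    /* Pick the thread of group <tg> to steal from, starting where the
     * previous takeover in this group left off, and advance the
     * per-group cursor, wrapping within the group's own threads.
     */
    static unsigned int pick_thread(struct srv_per_tgroup *pt,
                                    const struct tgroup *tg)
    {
        unsigned int i = pt->next_takeover;

        if (i < tg->base || i >= tg->base + tg->count)
            i = tg->base;               /* never leave the group */
        pt->next_takeover = (i + 1 == tg->base + tg->count) ? tg->base : i + 1;
        return i;
    }

    int main(void)
    {
        struct tgroup tg1 = { .base = 0, .count = 4 };
        struct tgroup tg2 = { .base = 4, .count = 2 };
        struct srv_per_tgroup per_tgrp[2] = { { 0 }, { 0 } };
        int n;

        /* each group cycles over its own threads independently of the
         * other group's size: tg1 prints 0,1,2,3,0,1 while tg2 prints
         * 4,5,4,5,4,5, so a larger group no longer skews a smaller one.
         */
        for (n = 0; n < 6; n++)
            printf("tg1 -> %u, tg2 -> %u\n",
                   pick_thread(&per_tgrp[0], &tg1),
                   pick_thread(&per_tgrp[1], &tg2));
        return 0;
    }
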
diff --git a/include/haproxy/server-t.h b/include/haproxy/server-t.h
index ff5faae..1ff7d18 100644
--- a/include/haproxy/server-t.h
+++ b/include/haproxy/server-t.h
@@ -212,6 +212,11 @@
 	struct eb_root avail_conns;             /* Connections in use, but with still new streams available */
 };
 
+/* Each server will have one occurrence of this structure per thread group */
+struct srv_per_tgroup {
+	unsigned int next_takeover;             /* thread ID to try to steal connections from next time */
+};
+
 /* Configure the protocol selection for websocket */
 enum __attribute__((__packed__)) srv_ws_mode {
 	SRV_WS_AUTO = 0,
@@ -239,6 +244,7 @@
 	const struct mux_proto_list *mux_proto;       /* the mux to use for all outgoing connections (specified by the "proto" keyword) */
 	unsigned maxconn, minconn;		/* max # of active sessions (0 = unlimited), min# for dynamic limit. */
 	struct srv_per_thread *per_thr;         /* array of per-thread stuff such as connections lists */
+	struct srv_per_tgroup *per_tgrp;        /* array of per-tgroup stuff such as idle conns */
 	unsigned int *curr_idle_thr;            /* Current number of orphan idling connections per thread */
 
 	unsigned int pool_purge_delay;          /* Delay before starting to purge the idle conns pool */
@@ -282,7 +288,6 @@
 	unsigned int curr_used_conns;           /* Current number of used connections */
 	unsigned int max_used_conns;            /* Max number of used connections (the counter is reset at each connection purges */
 	unsigned int est_need_conns;            /* Estimate on the number of needed connections (max of curr and previous max_used) */
-	unsigned int next_takeover;             /* thread ID to try to steal connections from next time */
 
 	struct queue queue;			/* pending connections */
 
diff --git a/src/backend.c b/src/backend.c
index 4b1ce5c..bac8a08 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1201,7 +1201,7 @@
 	/* Lookup all other threads for an idle connection, starting from last
 	 * unvisited thread, but always staying in the same group.
 	 */
-	stop = srv->next_takeover;
+	stop = srv->per_tgrp[tgid - 1].next_takeover;
 	if (stop >= tg->count)
 		stop %= tg->count;
 
@@ -1246,7 +1246,7 @@
 		conn = NULL;
  done:
 	if (conn) {
-		_HA_ATOMIC_STORE(&srv->next_takeover, (i + 1 == tg->base + tg->count) ? tg->base : i + 1);
+		_HA_ATOMIC_STORE(&srv->per_tgrp[tgid - 1].next_takeover, (i + 1 == tg->base + tg->count) ? tg->base : i + 1);
 
 		srv_use_conn(srv, conn);
 
diff --git a/src/server.c b/src/server.c
index 699113e..51ca0cd 100644
--- a/src/server.c
+++ b/src/server.c
@@ -2381,6 +2381,7 @@
 	free(srv->hostname_dn);
 	free((char*)srv->conf.file);
 	free(srv->per_thr);
+	free(srv->per_tgrp);
 	free(srv->curr_idle_thr);
 	free(srv->resolvers_id);
 	free(srv->addr_node.key);
@@ -4650,7 +4651,8 @@
 	int i;
 
 	srv->per_thr = calloc(global.nbthread, sizeof(*srv->per_thr));
-	if (!srv->per_thr)
+	srv->per_tgrp = calloc(global.nbtgroups, sizeof(*srv->per_tgrp));
+	if (!srv->per_thr || !srv->per_tgrp)
 		return -1;
 
 	for (i = 0; i < global.nbthread; i++) {
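
Note on the allocation above: the per_tgrp array is sized by
global.nbtgroups and indexed with tgid - 1 since thread group IDs are
1-based, and calloc() already zeroes it, so next_takeover implicitly
starts at 0 in every group with no explicit init loop. If a future
per-tgroup field needed non-trivial initialization, a loop of this
shape (hypothetical sketch) could follow the allocation:

    for (i = 0; i < global.nbtgroups; i++)
        srv->per_tgrp[i].next_takeover = 0; /* redundant today: calloc zeroes it */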