MEDIUM: connections: Add a way to control the number of idling connections.

By default we add all keepalive connections to the idle pool, so in a
pathological case where the clients don't do keepalive but the server does,
and haproxy is configured to only reuse "safe" connections, we can quickly
end up with lots of idle connections that are unusable for new sessions,
while no file descriptors are left to create new connections.

To fix this, add two new global settings, "tune.pool-low-fd-ratio" and
"tune.pool-high-fd-ratio". pool-low-fd-ratio is the percentage of fds we're
allowed to use (against the maximum number of fds available to haproxy)
before we stop adding connections to the idle pool and destroy them instead;
it defaults to 20. pool-high-fd-ratio is the percentage of fds we're allowed
to use (against the same maximum) before we start killing idle connections
when we have to create a new outgoing connection and no reuse is possible;
it defaults to 25.
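
For illustration only, here is how the new knobs would be set in the global
section; the maxconn value is an arbitrary example and the two ratios are
simply the built-in defaults spelled out:

    global
        maxconn 20000
        # example values only; the ratios shown are the defaults
        tune.pool-low-fd-ratio  20
        tune.pool-high-fd-ratio 25

Assuming global.maxsock ends up at, say, 50000, the thresholds are derived by
rounding up: pool_low_count = (50000 * 20 + 99) / 100 = 10000 and
pool_high_count = (50000 * 25 + 99) / 100 = 12500. Past 10000 open fds, server
keepalive connections are destroyed instead of being parked for reuse; past
12500, an idle connection is killed before a new outgoing one is created.
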
diff --git a/doc/configuration.txt b/doc/configuration.txt
index 357a67e..c07961a 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -1673,6 +1673,21 @@
   performed. This has an impact on the kernel's memory footprint, so this must
   not be changed if impacts are not understood.
 
+tune.pool-low-fd-ratio <number>
+  This setting sets the max number of file descriptors (in percentage) used by
+  haproxy globally against the maximum number of file descriptors haproxy can
+  use before we stop putting connections into the idle pool for reuse. The
+  default is 20.
+
+tune.pool-high-fd-ratio <number>
+  This setting sets the max number of file descriptors (in percentage) used by
+  haproxy globally against the maximum number of file descriptors haproxy can
+  use before we start killing idle connections when we can't reuse a connection
+  and we have to create a new one. The default is 25 (one quarter of the file
+  descriptors will mean that roughly half of the maximum front connections can
+  keep an idle connection behind; anything beyond this probably doesn't make
+  much sense in the general case when targeting connection reuse).
+
 tune.rcvbuf.client <number>
 tune.rcvbuf.server <number>
   Forces the kernel socket receive buffer size on the client or the server side
diff --git a/include/proto/server.h b/include/proto/server.h
index 77a5312..d0b925c 100644
--- a/include/proto/server.h
+++ b/include/proto/server.h
@@ -251,7 +251,8 @@
 	    (srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
 	    !(conn->flags & CO_FL_PRIVATE) &&
 	    ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
-	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
+	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn) &&
+	    ha_used_fds < global.tune.pool_low_count) {
 		int retadd;
 
 		retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
diff --git a/include/types/global.h b/include/types/global.h
index f1db95e..ba3738b 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -161,6 +161,10 @@
 		int pattern_cache; /* max number of entries in the pattern cache. */
 		int sslcachesize;  /* SSL cache size in session, defaults to 20000 */
 		int comp_maxlevel;    /* max HTTP compression level */
+		int pool_low_ratio;   /* max ratio of FDs used before we stop using new idle connections */
+		int pool_high_ratio;  /* max ratio of FDs used before we start killing idle connections when creating new connections */
+		int pool_low_count;   /* max number of opened fd before we stop using new idle connections */
+		int pool_high_count;  /* max number of opened fd before we start killing idle connections when creating new connections */
 		unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
 	} tune;
 	struct {
diff --git a/src/backend.c b/src/backend.c
index abe7308..40ce949 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1338,6 +1338,39 @@
 				reuse = 0;
 		}
 	}
+	if ((!reuse || (srv_conn && !(srv_conn->flags & CO_FL_CONNECTED)))
+	    && ha_used_fds > global.tune.pool_high_count) {
+		struct connection *tokill_conn;
+
+		/* We can't reuse a connection, and we have more FDs than deemed
+		 * acceptable, attempt to kill an idle connection.
+		 */
+		/* First, try from our own idle list */
+		tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[tid],
+		    struct connection *, list);
+		if (tokill_conn)
+			tokill_conn->mux->destroy(tokill_conn->ctx);
+		/* If not, iterate over the other threads' idle pools and try to grab one */
+		else {
+			int i;
+
+			for (i = 0; i < global.nbthread; i++) {
+				if (i == tid)
+					continue;
+				tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i],
+				    struct connection *, list);
+				if (tokill_conn) {
+					/* We got one, put it into the concerned thread's to-kill list and wake its kill task */
+
+					LIST_ADDQ_LOCKED(&toremove_connections[i],
+					    &tokill_conn->list);
+					task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+					break;
+				}
+			}
+		}
+
+	}
 	/* If we're really reusing the connection, remove it from the orphan
 	 * list and add it back to the idle list.
 	 */
diff --git a/src/haproxy.c b/src/haproxy.c
index fb6481b..4c5c839 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -164,6 +164,8 @@
 		.chksize = (BUFSIZE + 2*sizeof(void *) - 1) & -(2*sizeof(void *)),
 		.reserved_bufs = RESERVED_BUFS,
 		.pattern_cache = DEFAULT_PAT_LRU_SIZE,
+		.pool_low_ratio  = 20,
+		.pool_high_ratio = 25,
 #ifdef USE_OPENSSL
 		.sslcachesize = SSLCACHESIZE,
 #endif
@@ -1937,6 +1939,10 @@
 		global.maxsock += global.maxconn * sides * global.ssl_used_async_engines;
 	}
 
+	/* update connection pool thresholds */
+	global.tune.pool_low_count  = ((long long)global.maxsock * global.tune.pool_low_ratio  + 99) / 100;
+	global.tune.pool_high_count = ((long long)global.maxsock * global.tune.pool_high_ratio + 99) / 100;
+
 	proxy_adjust_all_maxconn();
 
 	if (global.tune.maxpollevents <= 0)
diff --git a/src/server.c b/src/server.c
index d2473c5..12a14ad 100644
--- a/src/server.c
+++ b/src/server.c
@@ -5390,6 +5390,41 @@
 
 	return task;
 }
+
+/* config parser for global "tune.pool-{low,high}-fd-ratio" */
+static int cfg_parse_pool_fd_ratio(char **args, int section_type, struct proxy *curpx,
+                                   struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	int arg = -1;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (*(args[1]) != 0)
+		arg = atoi(args[1]);
+
+	if (arg < 0 || arg > 100) {
+		memprintf(err, "'%s' expects an integer argument between 0 and 100.", args[0]);
+		return -1;
+	}
+
+	if (args[0][10] == 'h')
+		global.tune.pool_high_ratio = arg;
+	else
+		global.tune.pool_low_ratio = arg;
+	return 0;
+}
+
+/* config keyword parsers */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.pool-high-fd-ratio",     cfg_parse_pool_fd_ratio },
+	{ CFG_GLOBAL, "tune.pool-low-fd-ratio",      cfg_parse_pool_fd_ratio },
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
 /*
  * Local variables:
  *  c-indent-level: 8