Merge branch 'abort-close' into merge
diff --git a/ROADMAP b/ROADMAP
index 9c0c95f..c960c9b 100644
--- a/ROADMAP
+++ b/ROADMAP
@@ -22,7 +22,7 @@
 
  - separate timeout controls
 
- - option 'abortonclose' : if the session is queued or being connecting
+ + option 'abortonclose' : if the session is queued or still connecting
    to the server, and the client sends a shutdown(), then decide to abort
    the session early because in most situations, this will be caused by
    a client hitting the 'Stop' button, so there's no reason to overload
@@ -30,7 +30,7 @@
    and might cause little trouble to some very specific clients used to
    close immediately after sending the request (no support for KA, which ones?)
 
- - minconn : makes the server's maxconn dynamic, which will be computed as a
+ + minconn : makes the server's maxconn dynamic, which will be computed as a
    ratio of the proxy's sessions :
      srv->effective_maxconn =
           max(srv->maxconn * px->nbsess / px->maxconn, srv->minconn)
diff --git a/doc/haproxy-en.txt b/doc/haproxy-en.txt
index 27ac4f0..4f4dec3 100644
--- a/doc/haproxy-en.txt
+++ b/doc/haproxy-en.txt
@@ -1218,6 +1218,41 @@
         server web-backup1 192.168.2.1:80 cookie s4 check maxconn 200 backup
         server web-excuse 192.168.3.1:80 check backup
 
+
+This was so efficient at reducing the servers' response time that some users
+wanted to use low values to improve their servers' performance. However, they
+could then no longer handle very large loads because it was no longer
+possible to saturate the servers. For this reason, version 1.2.14 introduced
+dynamic limitation with the addition of the 'minconn' parameter. When this
+parameter is set along with maxconn, it enables dynamic limitation based on
+the instance's load. The maximum number of concurrent sessions on a server
+will be proportional to the number of sessions on the instance relative to
+its maxconn. A minimum of <minconn> will always be allowed, regardless of the
+load. This ensures that servers perform at their best level under normal
+loads, while still being able to handle surges when needed. The dynamic limit
+is computed like this :
+
+    srv.dyn_limit = max(srv.minconn, srv.maxconn * inst.sess / inst.maxconn)
+
+Example :
+---------
+    # be nice with P3 which only has 256 MB of RAM.
+    listen web_appl 0.0.0.0:80
+        maxconn 10000
+        mode http
+        cookie SERVERID insert nocache indirect
+        balance roundrobin
+        server pentium3-800 192.168.1.1:80 cookie s1 weight  8 minconn 10 maxconn 100 check
+        server opteron-2.0G 192.168.1.2:80 cookie s2 weight 20 minconn 30 maxconn 300 check
+        server opteron-2.4G 192.168.1.3:80 cookie s3 weight 24 minconn 30 maxconn 300 check
+        server web-backup1 192.168.2.1:80 cookie s4 check maxconn 200 backup
+        server web-excuse 192.168.3.1:80 check backup
+
+In the example above, the server 'pentium3-800' will receive at most 100
+simultaneous sessions when the proxy instance reaches 10000 sessions, and
+will receive only 10 simultaneous sessions when the proxy handles fewer than
+1000 sessions.
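+
+For instance, at an intermediate load of 5000 concurrent sessions on the
+instance (half of its maxconn), 'pentium3-800' will accept up to
+max(10, 100 * 5000 / 10000) = 50 sessions, and 'opteron-2.0G' up to
+max(30, 300 * 5000 / 10000) = 150 sessions.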
+
 Notes :
 -------
   - The requests will not stay indefinitely in the queue, they follow the
@@ -1225,10 +1260,54 @@
     timeout because the server is saturated or because the queue is filled,
     the session will expire with a 503 error.
 
+  - if only <minconn> is specified, it has the same effect as <maxconn>
+
   - setting too low values for maxconn might improve performance but might also
     allow slow users to block access to the server for other users.
 
 
+3.5) Dropping aborted requests
+------------------------------
+Under very high loads, the servers will take some time to respond. The
+per-proxy connection queue will inflate, and the response time will grow by
+roughly the size of the queue times the average per-session response time.
+When clients wait for more than a few seconds, they will often hit the 'STOP'
+button on their browser, leaving a useless request in the queue and slowing
+down other users.
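+
+For instance, with a single connection slot and an average response time of
+100 ms per request, a queue of 100 pending requests means that the last
+queued client waits about 100 * 100 ms = 10 seconds, which is more than
+enough to make it hit 'STOP'.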
+
+As there is no way to distinguish between a full STOP and a simple
+shutdown(SHUT_WR) on the client side, HTTP agents should be conservative and
+consider that the client might only have closed its output channel while
+still waiting for the response. However, this conservative behaviour risks
+congestion when lots of users do the same, and brings little benefit nowadays
+because hardly any client will half-close the session while waiting for the
+response. Some HTTP agents handle this half-close (Squid, Apache, HAProxy),
+and others do not (TUX, most hardware-based load balancers). So the
+probability that a closed input channel represents a user hitting the 'STOP'
+button is close to 100%, and it is very tempting to abort the session early
+without polluting the servers.
+
+For this reason, a new option "abortonclose" was introduced in version 1.2.14.
+By default (without the option), the behaviour is HTTP-compliant. But when the
+option is specified, a session whose incoming channel has been closed will be
+aborted while it is still possible, that is, while it is either waiting for a
+connect() to establish or queued waiting for a connection slot. This
+considerably reduces the queue size and the load on saturated servers when
+users are tempted to click on STOP, which in turn reduces the response time
+for other users.
+
+Example :
+---------
+    listen web_appl 0.0.0.0:80
+        maxconn 10000
+        mode http
+        cookie SERVERID insert nocache indirect
+        balance roundrobin
+        server web1 192.168.1.1:80 cookie s1 weight 10 maxconn 100 check
+        server web2 192.168.1.2:80 cookie s2 weight 10 maxconn 100 check
+        server web3 192.168.1.3:80 cookie s3 weight 10 maxconn 100 check
+        server bck1 192.168.2.1:80 cookie s4 check maxconn 200 backup
+        option abortonclose
+
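+In the example above, a client which closes its connection while its request
+is still queued, or while the connection to one of the servers is still being
+established, will immediately be aborted instead of needlessly loading the
+servers, which in turn leaves more room for other waiting users.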
+
 4) Additionnal features
 =======================
 
diff --git a/haproxy.c b/haproxy.c
index 5e08965..586978a 100644
--- a/haproxy.c
+++ b/haproxy.c
@@ -379,6 +379,7 @@
 #define PR_O_FORCE_CLO	0x00200000	/* enforce the connection close immediately after server response */
 #define PR_O_BALANCE_SH	0x00400000	/* balance on source IP hash */
 #define PR_O_BALANCE	(PR_O_BALANCE_RR | PR_O_BALANCE_SH)
+#define PR_O_ABRT_CLOSE	0x00800000	/* immediately abort request when client closes */
 
 /* various session flags, bits values 0x01 to 0x20 (shift 0) */
 #define SN_DIRECT	0x00000001	/* connection made on the server matching the client cookie */
@@ -586,7 +587,7 @@
     unsigned int wscore;		/* weight score, used during srv map computation */
     int cur_sess, cur_sess_max;		/* number of currently active sessions (including syn_sent) */
     unsigned int cum_sess;		/* cumulated number of sessions really sent to this server */
-    unsigned int maxconn;		/* max # of active sessions. 0 = unlimited. */
+    unsigned int maxconn, minconn;	/* max # of active sessions (0 = unlimited), min# for dynamic limit. */
     unsigned failed_checks, down_trans;	/* failed checks and up-down transitions */
     unsigned failed_conns, failed_resp;	/* failed connect() and responses */
     unsigned failed_secu;		/* blocked responses because of security concerns */
@@ -694,9 +695,9 @@
     struct list pendconns;		/* pending connections with no server assigned yet */
     int nbpend, nbpend_max;		/* number of pending connections with no server assigned yet */
     int totpend;			/* total number of pending connections on this instance (for stats) */
-    int nbconn, nbconn_max;		/* # of active sessions */
+    unsigned int nbconn, nbconn_max;	/* # of active sessions */
     unsigned int cum_conn;		/* cumulated number of processed sessions */
-    int maxconn;			/* max # of active sessions */
+    unsigned int maxconn;		/* max # of active sessions */
     unsigned failed_conns, failed_resp;	/* failed connect() and responses */
     unsigned failed_secu;		/* blocked responses because of security concerns */
     int conn_retries;			/* maximum number of connect retries */
@@ -1982,12 +1983,22 @@
     return p;
 }
 
+/* returns the effective dynamic maxconn for a server, considering the minconn
+ * and the proxy's usage relative to its saturation.
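+ * For example, with maxconn=100, minconn=10 and a proxy running at 5000 of
+ * its 10000 maxconn sessions, this returns 100 * 5000 / 10000 = 50.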
+ */
+static unsigned int srv_dynamic_maxconn(struct server *s) {
+    unsigned int max;
+
+    if (!s->minconn)
+	return s->maxconn;
+
+    /* scale maxconn with the proxy's load, never going below minconn */
+    max = s->maxconn * s->proxy->nbconn / s->proxy->maxconn;
+    return (max < s->minconn) ? s->minconn : max;
+}
+
 /* returns 0 if nothing has to be done for server <s> regarding queued connections,
  * and non-zero otherwise. Suited for and if/else usage.
  */
 static inline int may_dequeue_tasks(struct server *s, struct proxy *p) {
     return (s && (s->nbpend || p->nbpend) &&
-	    s->maxconn && s->cur_sess < s->maxconn && s->queue_mgt);
+	    (!s->maxconn || s->cur_sess < srv_dynamic_maxconn(s)) &&
+	    s->queue_mgt);
 }
 
 
@@ -2156,7 +2167,7 @@
 
     do {
 	srv = px->srv_map[newidx++];
-	if (!srv->maxconn || srv->cur_sess < srv->maxconn) {
+	if (!srv->maxconn || srv->cur_sess < srv_dynamic_maxconn(srv)) {
 	    px->srv_rr_idx = newidx;
 	    return srv;
 	}
@@ -2348,7 +2359,7 @@
 	 * is not needed.
 	 */
 	if (s->srv &&
-	    s->srv->maxconn && s->srv->cur_sess >= s->srv->maxconn) {
+	    s->srv->maxconn && s->srv->cur_sess >= srv_dynamic_maxconn(s->srv)) {
 	    p = pendconn_add(s);
 	    if (p)
 		return SRV_STATUS_QUEUED;
@@ -2363,8 +2374,8 @@
     switch (err) {
     case SRV_STATUS_OK:
 	/* in balance mode, we might have servers with connection limits */
-	if (s->srv != NULL &&
-	    s->srv->maxconn && s->srv->cur_sess >= s->srv->maxconn) {
+	if (s->srv &&
+	    s->srv->maxconn && s->srv->cur_sess >= srv_dynamic_maxconn(s->srv)) {
 	    p = pendconn_add(s);
 	    if (p)
 		return SRV_STATUS_QUEUED;
@@ -5399,9 +5410,9 @@
     if (s == SV_STIDLE) {
 	if (c == CL_STHEADERS)
 	    return 0;	/* stay in idle, waiting for data to reach the client side */
-	else if (c == CL_STCLOSE ||
-		 c == CL_STSHUTW ||
-		 (c == CL_STSHUTR && t->req->l == 0)) { /* give up */
+	else if (c == CL_STCLOSE || c == CL_STSHUTW ||
+		 (c == CL_STSHUTR &&
+		  (t->req->l == 0 || t->proxy->options & PR_O_ABRT_CLOSE))) { /* give up */
 	    tv_eternity(&t->cnexpire);
 	    if (t->pend_pos)
 		t->logs.t_queue = tv_diff(&t->logs.tv_accept, &now);
@@ -5451,6 +5462,20 @@
 	}
     }
     else if (s == SV_STCONN) { /* connection in progress */
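+	/* the client has closed or aborted before the connection to the server
+	 * could be established : give up, release the connection attempt and
+	 * the server's session slot, and flag the session as a client abort.
+	 */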
+	if (c == CL_STCLOSE || c == CL_STSHUTW ||
+	    (c == CL_STSHUTR &&
+	     (t->req->l == 0 || t->proxy->options & PR_O_ABRT_CLOSE))) { /* give up */
+	    tv_eternity(&t->cnexpire);
+	    fd_delete(t->srv_fd);
+	    if (t->srv)
+		t->srv->cur_sess--;
+
+	    /* note that this must not return any error to the client, because
+	     * it could overwrite the client_retnclose() output.
+	     */
+	    srv_close_with_err(t, SN_ERR_CLICL, SN_FINST_C, 0, 0, NULL);
+	    return 1;
+	}
 	if (t->res_sw == RES_SILENT && tv_cmp2_ms(&t->cnexpire, &now) > 0) {
 	    //fprintf(stderr,"1: c=%d, s=%d, now=%d.%06d, exp=%d.%06d\n", c, s, now.tv_sec, now.tv_usec, t->cnexpire.tv_sec, t->cnexpire.tv_usec);
 	    return 0; /* nothing changed */
@@ -6722,7 +6747,7 @@
 		    /* check if we can handle some connections queued at the proxy. We
 		     * will take as many as we can handle.
 		     */
-		    for (xferred = 0; !s->maxconn || xferred < s->maxconn; xferred++) {
+		    for (xferred = 0; !s->maxconn || xferred < srv_dynamic_maxconn(s); xferred++) {
 			struct session *sess;
 			struct pendconn *p;
 
@@ -6795,7 +6820,7 @@
     /* First, check if we can handle some connections queued at the proxy. We
      * will take as many as we can handle.
      */
-    for (xferred = 0; s->cur_sess + xferred < s->maxconn; xferred++) {
+    for (xferred = 0; s->cur_sess + xferred < srv_dynamic_maxconn(s); xferred++) {
 	struct session *sess;
 
 	sess = pendconn_get_next_sess(s, p);
@@ -8317,6 +8342,9 @@
 	else if (!strcmp(args[1], "logasap"))
 	    /* log as soon as possible, without waiting for the session to complete */
 	    curproxy->options |= PR_O_LOGASAP;
+	else if (!strcmp(args[1], "abortonclose"))
+	    /* abort connection if client closes during queue or connect() */
+	    curproxy->options |= PR_O_ABRT_CLOSE;
 	else if (!strcmp(args[1], "httpclose"))
 	    /* force connection: close in both directions in HTTP mode */
 	    curproxy->options |= PR_O_HTTP_CLOSE;
@@ -8538,6 +8566,10 @@
 		newsrv->uweight = w - 1;
 		cur_arg += 2;
 	    }
+	    else if (!strcmp(args[cur_arg], "minconn")) {
+		newsrv->minconn = atol(args[cur_arg + 1]);
+		cur_arg += 2;
+	    }
 	    else if (!strcmp(args[cur_arg], "maxconn")) {
 		newsrv->maxconn = atol(args[cur_arg + 1]);
 		cur_arg += 2;
@@ -8558,7 +8590,7 @@
 		cur_arg += 2;
 	    }
 	    else {
-		Alert("parsing [%s:%d] : server %s only supports options 'backup', 'cookie', 'check', 'inter', 'rise', 'fall', 'port', 'source', and 'weight'.\n",
+		Alert("parsing [%s:%d] : server %s only supports options 'backup', 'cookie', 'check', 'inter', 'rise', 'fall', 'port', 'source', 'minconn', 'maxconn' and 'weight'.\n",
 		      file, linenum, newsrv->id);
 		return -1;
 	    }
@@ -9433,6 +9465,12 @@
 	 */
 	newsrv = curproxy->srv;
 	while (newsrv != NULL) {
+	    if (newsrv->minconn && !newsrv->maxconn) {
+		/* only 'minconn' was specified. Let's turn this into maxconn */
+		newsrv->maxconn = newsrv->minconn;
+		newsrv->minconn = 0;
+	    }
+
 	    if (newsrv->maxconn > 0) {
 		struct task *t;