BUG/MEDIUM: mux-h1: Refresh H1 connection timeout after a synchronous send

The H1 multiplexer is able to perform synchronous sends. When a large body is
transferred, if nothing is received and no error or shutdown occurs, it is
possible to not go down to the H1 connection level to perform I/O for a long
time. When this happens, we must still take care to refresh the H1 connection
timeout. Otherwise the connection timeout may be hit during the transfer even
though it should not expire.
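
As an illustration of the failure mode, here is a minimal, self-contained
sketch (not mux-h1 code; conn_task, io_callback and snd_buf are made-up
names): the expiry is only recomputed on the regular I/O path, so a transfer
driven purely by synchronous sends eventually crosses it.

    /* toy model: the expiry is refreshed only by the I/O callback */
    #include <stdio.h>

    #define CONN_TIMEOUT 10000u        /* 10s connection timeout, in ms */

    struct conn_task { unsigned int expire; };

    static unsigned int now_ms;        /* simplified clock */

    static void io_callback(struct conn_task *t)
    {
        t->expire = now_ms + CONN_TIMEOUT;   /* regular I/O path refreshes */
    }

    static void snd_buf(struct conn_task *t)
    {
        (void)t;                       /* sync send: expiry left untouched */
    }

    int main(void)
    {
        struct conn_task task;

        io_callback(&task);            /* last pass through the I/O callback */
        for (int i = 0; i < 30; i++) { /* body forwarded by sync sends only */
            now_ms += 1000;
            snd_buf(&task);
            if (now_ms >= task.expire) {
                printf("timeout hit at %ums while still sending\n", now_ms);
                return 0;
            }
        }
        printf("transfer finished without hitting the timeout\n");
        return 0;
    }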

This bug exists because only h1_process() refreshes the H1 connection timeout.
To fix it, h1_snd_buf() must also refresh this timeout. To keep things
readable, a dedicated function, h1_refresh_timeout(), has been introduced and
is called wherever the timeout must be refreshed.
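
For completeness, the same toy model with the fix pattern applied: both the
I/O path and the send path end by calling a single refresh helper, which is
the role h1_refresh_timeout() plays in the patch below. This is still a
simplified sketch with made-up names, not the actual mux-h1 code.

    #include <stdio.h>

    #define CONN_TIMEOUT 10000u        /* 10s connection timeout, in ms */

    struct conn_task { unsigned int expire; };

    static unsigned int now_ms;        /* simplified clock */

    /* single place where the expiry is recomputed */
    static void refresh_timeout(struct conn_task *t)
    {
        t->expire = now_ms + CONN_TIMEOUT;
    }

    static void io_callback(struct conn_task *t) { refresh_timeout(t); }

    /* the fix: refresh after a synchronous send too */
    static void snd_buf(struct conn_task *t) { refresh_timeout(t); }

    int main(void)
    {
        struct conn_task task;

        io_callback(&task);
        for (int i = 0; i < 30; i++) {
            now_ms += 1000;
            snd_buf(&task);
            if (now_ms >= task.expire) {
                printf("timeout hit at %ums\n", now_ms);
                return 0;
            }
        }
        printf("transfer finished, expiry kept ahead of the clock\n");
        return 0;
    }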

This bug exists in all HTX versions, but it is harder to hit on 2.1 and below
because, when an H1 mux is initialized, we actively try to read data instead of
subscribing for receives. So there is at least one call to h1_process().

This patch should fix issue #790. It must be backported as far as 2.0.

diff --git a/src/mux_h1.c b/src/mux_h1.c
index 5c68d09..280ed82 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -456,7 +456,23 @@
 	return 1 - h1_used_streams(conn);
 }
 
-
+/* Refresh the h1c task timeout if necessary */
+static void h1_refresh_timeout(struct h1c *h1c)
+{
+	if (h1c->task) {
+		h1c->task->expire = TICK_ETERNITY;
+		if ((!h1c->h1s && !conn_is_back(h1c->conn)) || b_data(&h1c->obuf)) {
+			/* front connections waiting for a stream, as well as any connection with
+			 * pending data, need a timeout.
+			 */
+			h1c->task->expire = tick_add(now_ms, ((h1c->flags & (H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN))
+							      ? h1c->shut_timeout
+							      : h1c->timeout));
+			task_queue(h1c->task);
+			TRACE_DEVEL("refreshing connection's timeout", H1_EV_H1C_SEND, h1c->conn);
+		}
+	}
+}
 /*****************************************************************/
 /* functions below are dedicated to the mux setup and management */
 /*****************************************************************/
@@ -2206,15 +2222,7 @@
 		h1s->cs->data_cb->wake(h1s->cs);
 	}
   end:
-	if (h1c->task) {
-		h1c->task->expire = TICK_ETERNITY;
-		if (b_data(&h1c->obuf)) {
-			h1c->task->expire = tick_add(now_ms, ((h1c->flags & (H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN))
-							      ? h1c->shut_timeout
-							      : h1c->timeout));
-			task_queue(h1c->task);
-		}
-	}
+	h1_refresh_timeout(h1c);
 	TRACE_LEAVE(H1_EV_H1C_WAKE, conn);
 	return 0;
 
@@ -2507,19 +2515,7 @@
 			h1_process(h1c);
 		else
 			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
-		if (h1c->task) {
-			h1c->task->expire = TICK_ETERNITY;
-			if ((!h1c->h1s && !conn_is_back(h1c->conn)) || b_data(&h1c->obuf)) {
-				/* front connections waiting for a stream, as well as any connection with
-				 * pending data, need a timeout.
-				 */
-				h1c->task->expire = tick_add(now_ms, ((h1c->flags & (H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN))
-								      ? h1c->shut_timeout
-								      : h1c->timeout));
-				task_queue(h1c->task);
-				TRACE_DEVEL("refreshing connection's timeout", H1_EV_STRM_END, h1c->conn);
-			}
-		}
+		h1_refresh_timeout(h1c);
 	}
   end:
 	TRACE_LEAVE(H1_EV_STRM_END);
@@ -2757,7 +2753,7 @@
 		if ((h1c->wait_event.events & SUB_RETRY_SEND) || !h1_send(h1c))
 			break;
 	}
-
+	h1_refresh_timeout(h1c);
 	TRACE_LEAVE(H1_EV_STRM_SEND, h1c->conn, h1s,, (size_t[]){total});
 	return total;
 }