REGTESTS: http_request_buffer: Increase client timeout to wait for "slow" clients

The default client timeout is too short to reliably wait for the end of the
slow clients' requests (the last two clients delay before sending the rest of
their request). But it cannot simply be increased, because that would slow
down the regtest execution. So a dedicated frontend with a higher client
timeout has been added. This frontend is used by the "slow" clients, while the
original one keeps handling normal requests.

(cherry picked from commit 33a2745c8738f2b163196d88f23188166a579643)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
(cherry picked from commit e0df0fd07e866af6e80d33743199a92ab401de53)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
(cherry picked from commit 202e79852e87f97a8485435b36ecf2c232842f18)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/reg-tests/http-messaging/http_request_buffer.vtc b/reg-tests/http-messaging/http_request_buffer.vtc
index 4fd7bb2..f1cec1a 100644
--- a/reg-tests/http-messaging/http_request_buffer.vtc
+++ b/reg-tests/http-messaging/http_request_buffer.vtc
@@ -27,9 +27,9 @@
 	recv
 	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
 } -start
 
 haproxy h1 -conf {
@@ -49,6 +49,14 @@
 		log ${S_addr}:${S_port} local0 debug err
 		bind "fd@${fe1}"
 		use_backend be1
+
+	frontend fe2
+		timeout client 10s
+		option httplog
+		option http-buffer-request
+		log ${S_addr}:${S_port} local0 debug err
+		bind "fd@${fe2}"
+		use_backend be1
 } -start
 
 # 1 byte of the payload is missing.
@@ -91,7 +99,7 @@
 # Payload is fully sent in 2 steps (with a small delay, smaller than the client
 # timeout) and splitted on a chunk size.
 #   ==> Request must be sent to the server. A 200 must be received
-client c3 -connect ${h1_fe1_sock} {
+client c3 -connect ${h1_fe2_sock} {
 	send "POST /1  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n\r\n"
@@ -103,7 +111,7 @@
 # (with a small delay, smaller than the client timeout) and splitted on a chunk
 # size. The client aborts before sending the last CRLF.
 #   ==> Request must be handled as an error with 'CR--' termination state.
-client c4 -connect ${h1_fe1_sock} {
+client c4 -connect ${h1_fe2_sock} {
 	send "POST /2  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n"