REGTESTS: http_request_buffer: Increase client timeout to wait for "slow" clients

The default client timeout is too small to reliably wait for the end of
slow clients (the last two clients use a delay when sending their
request). But it cannot simply be increased, because that would slow
down the regtest execution. So a dedicated frontend with a higher
client timeout has been added. This frontend is used by the "slow"
clients, while the other one keeps handling normal requests.
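
A minimal sketch of the intent (the client name and request below are
illustrative, not taken from the test): a delayed client simply connects
to the socket of the new frontend, whose 10s "timeout client" easily
covers the small delay between its two sends:

	client c_slow -connect ${h1_fe2_sock} {
		send "POST /1 HTTP/1.1\r\nContent-Length: 2\r\n\r\na"
		# the delay stays well below fe2's 10s client timeout
		delay 0.01
		send "b"
		rxresp
		expect resp.status == 200
	} -run
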
diff --git a/reg-tests/http-messaging/http_request_buffer.vtc b/reg-tests/http-messaging/http_request_buffer.vtc
index 35cca28..a45d155 100644
--- a/reg-tests/http-messaging/http_request_buffer.vtc
+++ b/reg-tests/http-messaging/http_request_buffer.vtc
@@ -26,9 +26,9 @@
 	recv
 	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
 } -start
 
 haproxy h1 -conf {
@@ -47,6 +47,14 @@
 		log ${S_addr}:${S_port} local0 debug err
 		bind "fd@${fe1}"
 		use_backend be1
+
+	frontend fe2
+		timeout client 10s
+		option httplog
+		option http-buffer-request
+		log ${S_addr}:${S_port} local0 debug err
+		bind "fd@${fe2}"
+		use_backend be1
 } -start
 
 # 1 byte of the payload is missing.
@@ -89,7 +97,7 @@
 # Payload is fully sent in 2 steps (with a small delay, smaller than the client
 # timeout) and split on a chunk size.
 #   ==> Request must be sent to the server. A 200 must be received
-client c3 -connect ${h1_fe1_sock} {
+client c3 -connect ${h1_fe2_sock} {
 	send "POST /1  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n\r\n"
@@ -101,7 +109,7 @@
 # (with a small delay, smaller than the client timeout) and split on a chunk
 # size. The client aborts before sending the last CRLF.
 #   ==> Request must be handled as an error with 'CR--' termination state.
-client c4 -connect ${h1_fe1_sock} {
+client c4 -connect ${h1_fe2_sock} {
 	send "POST /2  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n"