Merge tag 'v2.4.24'

HAProxy 2.4.24
diff --git a/.cirrus.yml b/.cirrus.yml
index e754f83..f762e0a 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -1,7 +1,7 @@
 FreeBSD_task:
   freebsd_instance:
     matrix:
-      image_family: freebsd-13-0
+      image_family: freebsd-13-2
   only_if: $CIRRUS_BRANCH =~ 'master|next'
   install_script:
     - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua53 socat pcre
diff --git a/.github/matrix.py b/.github/matrix.py
index 8e7130f..e03358b 100644
--- a/.github/matrix.py
+++ b/.github/matrix.py
@@ -8,6 +8,9 @@
 
 import json
 import sys
+import urllib.request
+import re
+from os import environ
 
 if len(sys.argv) == 2:
     build_type = sys.argv[1]
@@ -29,28 +32,45 @@
 def clean_ssl(ssl):
     return ssl.replace("_VERSION", "").lower()
 
+def determine_latest_openssl(ssl):
+    openssl_tags = urllib.request.urlopen("https://api.github.com/repos/openssl/openssl/tags")
+    tags = json.loads(openssl_tags.read().decode('utf-8'))
+    latest_tag = ''
+    for tag in tags:
+        name = tag['name']
+        if "openssl-" in name:
+            if name > latest_tag:
+               latest_tag = name
+    return "OPENSSL_VERSION={}".format(latest_tag[8:])
+
+def determine_latest_libressl(ssl):
+    libressl_download_list = urllib.request.urlopen(
+        "https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/"
+    )
+    for line in libressl_download_list.readlines():
+        decoded_line = line.decode("utf-8")
+        if "libressl-" in decoded_line and ".tar.gz.asc" in decoded_line:
+             l = re.split("libressl-|.tar.gz.asc", decoded_line)[1]
+    return "LIBRESSL_VERSION={}".format(l)
 
 def clean_compression(compression):
     return compression.replace("USE_", "").lower()
 
 
 def get_asan_flags(cc):
-    if cc == "clang":
-        return [
-            "USE_OBSOLETE_LINKER=1",
-            'DEBUG_CFLAGS="-g -fsanitize=address"',
-            'LDFLAGS="-fsanitize=address"',
-            'CPU_CFLAGS.generic="-O1"',
-        ]
+    return [
+        "USE_OBSOLETE_LINKER=1",
+        'DEBUG_CFLAGS="-g -fsanitize=address"',
+        'LDFLAGS="-fsanitize=address"',
+        'CPU_CFLAGS.generic="-O1"',
+    ]
 
-    raise ValueError("ASAN is only supported for clang")
-
 
 matrix = []
 
 # Ubuntu
 
-os = "ubuntu-latest"
+os = "ubuntu-20.04"
 TARGET = "linux-glibc"
 for CC in ["gcc", "clang"]:
     matrix.append(
@@ -109,8 +129,7 @@
         "stock",
         "OPENSSL_VERSION=1.0.2u",
         "OPENSSL_VERSION=3.0.2",
-        "LIBRESSL_VERSION=2.9.2",
-        "LIBRESSL_VERSION=3.5.2",
+        "LIBRESSL_VERSION=3.5.3",
 #        "BORINGSSL=yes",
     ]:
         flags = ["USE_OPENSSL=1"]
@@ -121,6 +140,11 @@
         if ssl != "stock":
             flags.append("SSL_LIB=${HOME}/opt/lib")
             flags.append("SSL_INC=${HOME}/opt/include")
+        if "LIBRESSL" in ssl and "latest" in ssl:
+            ssl = determine_latest_libressl(ssl)
+        if "OPENSSL" in ssl and "latest" in ssl:
+            ssl = determine_latest_openssl(ssl)
+
         matrix.append(
             {
                 "name": "{}, {}, ssl={}".format(clean_os(os), CC, clean_ssl(ssl)),
@@ -134,38 +158,38 @@
 
 # ASAN
 
-os = "ubuntu-latest"
-CC = "clang"
+os = "ubuntu-20.04"
 TARGET = "linux-glibc"
-matrix.append(
-    {
-        "name": "{}, {}, ASAN, all features".format(clean_os(os), CC),
-        "os": os,
-        "TARGET": TARGET,
-        "CC": CC,
-        "FLAGS": get_asan_flags(CC)
-        + [
-            "USE_ZLIB=1",
-            "USE_OT=1",
-            "OT_INC=${HOME}/opt-ot/include",
-            "OT_LIB=${HOME}/opt-ot/lib",
-            "OT_RUNPATH=1",
-            "USE_PCRE=1",
-            "USE_PCRE_JIT=1",
-            "USE_LUA=1",
-            "USE_OPENSSL=1",
-            "USE_SYSTEMD=1",
-            "USE_WURFL=1",
-            "WURFL_INC=addons/wurfl/dummy",
-            "WURFL_LIB=addons/wurfl/dummy",
-            "USE_DEVICEATLAS=1",
-            "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
-            "USE_PROMEX=1",
-            "USE_51DEGREES=1",
-            "51DEGREES_SRC=addons/51degrees/dummy/pattern",
-        ],
-    }
-)
+for CC in ["gcc","clang"]:
+    matrix.append(
+        {
+            "name": "{}, {}, ASAN, all features".format(clean_os(os), CC),
+            "os": os,
+            "TARGET": TARGET,
+            "CC": CC,
+            "FLAGS": get_asan_flags(CC)
+            + [
+                "USE_ZLIB=1",
+                "USE_OT=1",
+                "OT_INC=${HOME}/opt-ot/include",
+                "OT_LIB=${HOME}/opt-ot/lib",
+                "OT_RUNPATH=1",
+                "USE_PCRE=1",
+                "USE_PCRE_JIT=1",
+                "USE_LUA=1",
+                "USE_OPENSSL=1",
+                "USE_SYSTEMD=1",
+                "USE_WURFL=1",
+                "WURFL_INC=addons/wurfl/dummy",
+                "WURFL_LIB=addons/wurfl/dummy",
+                "USE_DEVICEATLAS=1",
+                "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+                "USE_PROMEX=1",
+                "USE_51DEGREES=1",
+                "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+            ],
+        }
+    )
 
 # macOS
 
@@ -186,4 +210,6 @@
 
 print(json.dumps(matrix, indent=4, sort_keys=True))
 
-print("::set-output name=matrix::{}".format(json.dumps({"include": matrix})))
+if environ.get('GITHUB_OUTPUT') is not None:
+    with open(environ.get('GITHUB_OUTPUT'), 'a') as f:
+        print("matrix={}".format(json.dumps({"include": matrix})), file=f)
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
index 66bc154..a11f0fa 100644
--- a/.github/workflows/compliance.yml
+++ b/.github/workflows/compliance.yml
@@ -24,7 +24,7 @@
         curl -fsSL https://github.com/summerwind/h2spec/releases/download/${H2SPEC_VERSION}/h2spec_linux_amd64.tar.gz -o h2spec.tar.gz
         tar xvf h2spec.tar.gz
         sudo install -m755 h2spec /usr/local/bin/h2spec
-        echo "::set-output name=version::${H2SPEC_VERSION}"
+        echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
     - name: Compile HAProxy with ${{ matrix.CC }}
       run: |
         make -j$(nproc) all \
@@ -47,7 +47,7 @@
         fi
         echo "::endgroup::"
         haproxy -vv
-        echo "::set-output name=version::$(haproxy -v |awk 'NR==1{print $3}')"
+        echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
     - name: Launch HAProxy ${{ steps.show-version.outputs.version }}
       run: haproxy -f .github/h2spec.config -D
     - name: Run h2spec ${{ steps.install-h2spec.outputs.version }}
diff --git a/.github/workflows/cross-zoo.yml b/.github/workflows/cross-zoo.yml
new file mode 100644
index 0000000..f2c8d7a
--- /dev/null
+++ b/.github/workflows/cross-zoo.yml
@@ -0,0 +1,110 @@
+#
+# this is named "zoo" after OpenSSL "cross zoo pipeline"
+#
+name: Cross Compile
+
+on:
+  schedule:
+    - cron: "0 0 21 * *"
+
+permissions:
+  contents: read
+
+jobs:
+  cross-compilation:
+    strategy:
+      matrix:
+        platform: [
+          {
+            arch: aarch64-linux-gnu,
+            libs: libc6-dev-arm64-cross,
+            target: linux-aarch64
+          }, {
+            arch: alpha-linux-gnu,
+            libs: libc6.1-dev-alpha-cross,
+            target: linux-alpha-gcc
+          }, {
+            arch: arm-linux-gnueabi,
+            libs: libc6-dev-armel-cross,
+            target: linux-armv4
+          }, {
+            arch: arm-linux-gnueabihf,
+            libs: libc6-dev-armhf-cross,
+            target: linux-armv4
+          }, {
+            arch: hppa-linux-gnu,
+            libs: libc6-dev-hppa-cross,
+            target: -static linux-generic32
+          }, {
+            arch: m68k-linux-gnu,
+            libs: libc6-dev-m68k-cross,
+            target: -static -m68040 linux-latomic
+          }, {
+            arch: mips-linux-gnu,
+            libs: libc6-dev-mips-cross,
+            target: -static linux-mips32
+          }, {
+            arch: mips64-linux-gnuabi64,
+            libs: libc6-dev-mips64-cross,
+            target: -static linux64-mips64
+          }, {
+            arch: mipsel-linux-gnu,
+            libs: libc6-dev-mipsel-cross,
+            target: linux-mips32
+          }, {
+            arch: powerpc64le-linux-gnu,
+            libs: libc6-dev-ppc64el-cross,
+            target: linux-ppc64le
+          }, {
+            arch: riscv64-linux-gnu,
+            libs: libc6-dev-riscv64-cross,
+            target: linux64-riscv64
+          }, {
+            arch: s390x-linux-gnu,
+            libs: libc6-dev-s390x-cross,
+            target: linux64-s390x
+          }, {
+            arch: sh4-linux-gnu,
+            libs: libc6-dev-sh4-cross,
+            target: no-async linux-latomic
+          }, {
+            arch: hppa-linux-gnu,
+            libs: libc6-dev-hppa-cross,
+            target: linux-generic32,
+          }, {
+            arch: m68k-linux-gnu,
+            libs: libc6-dev-m68k-cross,
+            target: -mcfv4e linux-latomic
+          }, {
+            arch: mips-linux-gnu,
+            libs: libc6-dev-mips-cross,
+            target: linux-mips32
+          }, {
+            arch: mips64-linux-gnuabi64,
+            libs: libc6-dev-mips64-cross,
+            target: linux64-mips64
+          }, {
+            arch: sparc64-linux-gnu,
+            libs: libc6-dev-sparc64-cross,
+            target: linux64-sparcv9
+          }
+        ]
+    runs-on: ubuntu-latest
+    steps:
+    - name: install packages
+      run: |
+        sudo apt-get update
+        sudo apt-get -yq --force-yes install \
+            gcc-${{ matrix.platform.arch }} \
+            ${{ matrix.platform.libs }}
+    - uses: actions/checkout@v3
+
+
+    - name: install quictls
+      run: |
+        QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
+
+    - name: Build
+      run: |
+        make ERR=1 CC=${{ matrix.platform.arch }}-gcc TARGET=linux-glibc USE_LIBCRYPT= USE_OPENSSL=1 USE_QUIC=1 USE_PROMEX=1 SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ADDLIB="-Wl,-rpath,${HOME}/opt/lib"
+
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
index ea39662..0fdaca8 100644
--- a/.github/workflows/vtest.yml
+++ b/.github/workflows/vtest.yml
@@ -50,7 +50,7 @@
     - name: Generate cache key
       id: generate-cache-key
       run: |
-        echo "::set-output name=key::$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')"
+        echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
 
     - name: Cache SSL libs
       if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
@@ -114,6 +114,9 @@
       run: make -C addons/wurfl/dummy
     - name: Compile HAProxy with ${{ matrix.CC }}
       run: |
+        echo "::group::Show compiler's version"
+        echo | ${{ matrix.CC }} -v
+        echo "::endgroup::"
         echo "::group::Show platform specific defines"
         echo | ${{ matrix.CC }} -dM -xc -E -
         echo "::endgroup::"
@@ -138,7 +141,7 @@
         fi
         echo "::endgroup::"
         haproxy -vv
-        echo "::set-output name=version::$(haproxy -v |awk 'NR==1{print $3}')"
+        echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
     - name: Install problem matcher for VTest
       # This allows one to more easily see which tests fail.
       run: echo "::add-matcher::.github/vtest.json"
@@ -163,3 +166,4 @@
           sudo cat $asan
           echo "::endgroup::"
         done
+        exit 1
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 58283ff..022c7f6 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -56,9 +56,10 @@
           TARGET=${{ matrix.TARGET }} \
           CC=${{ matrix.CC }} \
           DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+          DEBUG_CFLAGS="-g -Wno-deprecated-declarations" \
           ${{ join(matrix.FLAGS, ' ') }}
     - name: Show HAProxy version
       id: show-version
       run: |
         ./haproxy -vv
-        echo "::set-output name=version::$(./haproxy -v |awk 'NR==1{print $3}')"
+        echo "version=$(./haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
diff --git a/.gitignore b/.gitignore
index ec9dd62..62a92d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,6 +38,7 @@
 *.rej
 *.orig
 *.bak
+*.sw[op]
 # And reject some specific files
 /admin/halog/halog
 /admin/dyncookie/dyncookie
diff --git a/CHANGELOG b/CHANGELOG
index 4bf6378..29e701e 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,424 @@
 ChangeLog :
 ===========
 
+2023/08/19 : 2.4.24
+    - MINOR: proto_uxst: add resume method
+    - CLEANUP: listener: function comment typo in stop_listener()
+    - BUG/MINOR: listener: null pointer dereference suspected by coverity
+    - MINOR: listener/api: add lli hint to listener functions
+    - MINOR: listener: add relax_listener() function
+    - MINOR: listener: workaround for closing a tiny race between resume_listener() and stopping
+    - MINOR: listener: make sure we don't pause/resume bypassed listeners
+    - BUG/MEDIUM: listener: fix pause_listener() suspend return value handling
+    - BUG/MINOR: listener: fix resume_listener() resume return value handling
+    - BUG/MEDIUM: resume from LI_ASSIGNED in default_resume_listener()
+    - MINOR: listener: pause_listener() becomes suspend_listener()
+    - BUG/MEDIUM: listener/proxy: fix listeners notify for proxy resume
+    - MEDIUM: proto_ux: properly suspend named UNIX listeners
+    - MINOR: proto_ux: ability to dump ABNS names in error messages
+    - MINOR: lua: Add a function to get a reference on a table in the stack
+    - CLEANUP: Remove unused function hlua_get_top_error_string
+    - MINOR: hlua: add simple hlua reference handling API
+    - BUG/MINOR: hlua: fix reference leak in core.register_task()
+    - BUG/MINOR: hlua: fix reference leak in hlua_post_init_state()
+    - MINOR: hlua: simplify lua locking
+    - BUG/MEDIUM: hlua: prevent deadlocks with main lua lock
+    - BUG/MINOR: server: inherit from netns in srv_settings_cpy()
+    - BUG/MINOR: namespace: missing free in netns_sig_stop()
+    - BUG/MEDIUM: mworker: increase maxsock with each new worker
+    - DOC: Add tune.h2.max-frame-size option to table of contents
+    - BUILD: debug: avoid a build warning related to epoll_wait() in debug code
+    - BUG/MINOR: tcp_sample: bc_{dst,src} return IP not INT
+    - BUG/MINOR: cache: A 'max-age=0' cache-control directive can be overriden by a s-maxage
+    - BUG/MEDIUM: sink: invalid server list in sink_new_from_logsrv()
+    - BUG/MINOR: sink: missing sft free in sink_deinit()
+    - BUG/MINOR: ring: size warning incorrectly reported as fatal error
+    - BUG/MINOR: ring: maxlen warning reported as alert
+    - BUG/MINOR: log: LF upsets maxlen for UDP targets
+    - MINOR: sink/api: pass explicit maxlen parameter to sink_write()
+    - BUG/MEDIUM: log: improper use of logsrv->maxlen for buffer targets
+    - BUG/MINOR: log: fix missing name error message in cfg_parse_log_forward()
+    - BUG/MINOR: log: fix multiple error paths in cfg_parse_log_forward()
+    - BUG/MINOR: log: free errmsg on error in cfg_parse_log_forward()
+    - BUG/MINOR: sink: invalid sft free in sink_deinit()
+    - BUG/MINOR: sink: fix errors handling in cfg_post_parse_ring()
+    - BUG/MINOR: sink/log: properly deinit srv in sink_new_from_logsrv()
+    - BUG/MINOR: config: Remove final '\n' in error messages
+    - BUG/MINOR: hlua: hlua_yieldk ctx argument should support pointers
+    - BUG/MINOR: sample: Fix wrong overflow detection in add/sub conveters
+    - BUG/MINOR: http: Return the right reason for 302
+    - CI: explicitely highlight VTest result section if there's something
+    - BUG/MINOR: hlua: add check for lua_newstate
+    - BUG/MINOR: h1-htx: Return the right reason for 302 FCGI responses
+    - BUG/MEDIUM: listener: Acquire proxy's lock in relax_listener() if necessary
+    - DOC: configuration: describe Td in Timing events
+    - BUG/MINOR: chunk: fix chunk_appendf() to not write a zero if buffer is full
+    - BUG/MAJOR: http-ana: Get a fresh trash buffer for each header value replacement
+    - BUG/MAJOR: http: reject any empty content-length header value
+    - MINOR: ist: add new function ist_find_range() to find a character range
+    - MINOR: http: add new function http_path_has_forbidden_char()
+    - MINOR: h2: pass accept-invalid-http-request down the request parser
+    - REGTESTS: http-rules: add accept-invalid-http-request for normalize-uri tests
+    - BUG/MINOR: h1: do not accept '#' as part of the URI component
+    - BUG/MINOR: h2: reject more chars from the :path pseudo header
+    - REGTESTS: http-rules: verify that we block '#' by default for normalize-uri
+    - DOC: clarify the handling of URL fragments in requests
+    - BUG/MINOR: http: skip leading zeroes in content-length values
+
+2023/06/09 : 2.4.23
+    - DEV: hpack: fix `trash` build regression
+    - BUG/MINOR: ssl: ssl-(min|max)-ver parameter not duplicated for bundles in crt-list
+    - BUG/MINOR: mworker: stop doing strtok directly from the env
+    - BUG/MEDIUM: mworker: don't register mworker_accept_wrapper() when master FD is wrong
+    - MINOR: startup: HAPROXY_STARTUP_VERSION contains the version used to start
+    - BUG/MINOR: sched: properly report long_rq when tasks remain in the queue
+    - BUG/MEDIUM: sched: allow a bit more TASK_HEAVY to be processed when needed
+    - BUG/MINOR: mworker: prevent incorrect values in uptime
+    - BUG/MINOR: cache: Cache response even if request has "no-cache" directive
+    - BUG/MINOR: cache: Check cache entry is complete in case of Vary
+    - BUG/MINOR: ring: do not realign ring contents on resize
+    - DOC: config: Fix description of options about HTTP connection modes
+    - DOC: config: Add the missing tune.fail-alloc option from global listing
+    - DOC: config: Clarify the meaning of 'hold' in the 'resolvers' section
+    - BUG/MINOR: http-check: Don't set HTX_SL_F_BODYLESS flag with a log-format body
+    - BUG/MINOR: http-check: Skip C-L header for empty body when it's not mandatory
+    - BUG/MINOR: http-ana: Do a L7 retry on read error if there is no response
+    - BUG/MINOR: ssl: Use 'date' instead of 'now' in ocsp stapling callback
+    - BUG/MINOR: init: properly detect NUMA bindings on large systems
+    - BUG/MINOR: init: make sure to always limit the total number of threads
+    - DOC/CLEANUP: fix typos
+    - BUG/MINOR: mux-h2: make sure the h2c task exists before refreshing it
+    - BUG/MEDIUM: listener: duplicate inherited FDs if needed
+    - BUG/MEDIUM: spoe: Don't set the default traget for the SPOE agent frontend
+    - BUG/MINOR: proto_ux: report correct error when bind_listener fails
+    - BUG/MINOR: protocol: fix minor memory leak in protocol_bind_all()
+    - BUG/MINOR: sock_unix: match finalname with tempname in sock_unix_addrcmp()
+    - BUG/MEDIUM: connection: Clear flags when a conn is removed from an idle list
+    - BUG/MEDIUM: connection: Preserve flags when a conn is removed from an idle list
+    - BUG/MEDIUM: mux-h2: erase h2c->wait_event.tasklet on error path
+    - BUG/MEDIUM: mux-h1: Wakeup H1C on shutw if there is no I/O subscription
+    - BUILD: da: extends CFLAGS to support API v3 from 3.1.7 and onwards.
+    - MINOR: proxy/pool: prevent unnecessary calls to pool_gc()
+    - DOC: config: strict-sni allows to start without certificate
+    - BUG/MEDIUM: channel: Improve reports for shut in co_getblk()
+    - BUG/MEDIUM: dns: Properly handle error when a response consumed
+    - MINOR: proxy: check if p is NULL in free_proxy()
+    - BUG/MINOR: sink: free forward_px on deinit()
+    - BUG/MINOR: log: free log forward proxies on deinit()
+    - BUG/MINOR: hlua: enforce proper running context for register_x functions
+    - CLEANUP: hlua: fix conflicting comment in hlua_ctx_destroy()
+    - BUG/MEDIUM: resolvers: Force the connect timeout for DNS resolutions
+    - BUG/MINOR: stick_table: alert when type len has incorrect characters
+    - CI: bump "actions/checkout" to v3 for cross zoo matrix
+    - REGTESTS: fix the race conditions in log_uri.vtc
+    - BUG/MEDIUM: log: Properly handle client aborts in syslog applet
+    - CLEANUP: backend: Remove useless debug message in assign_server()
+    - BUG/MINOR: cfgparse: make sure to include openssl-compat
+    - BUG/MEDIUM: proxy/sktable: prevent watchdog trigger on soft-stop
+    - BUG/MEDIUM: Update read expiration date on synchronous send
+    - BUG/MINOR: mux-h2: make sure to produce a log on invalid requests
+    - MINOR: checks: make sure spread-checks is used also at boot time
+    - MINOR: clock: measure the total boot time
+    - BUG/MINOR: checks: postpone the startup of health checks by the boot time
+    - BUG/MINOR: clock: fix the boot time measurement method for 2.6 and older
+    - BUG/MINOR: tcp-rules: Don't shortened the inspect-delay when EOI is set
+    - DOC: config: Clarify conditions to shorten the inspect-delay for TCP rules
+    - DOC: add size format section to manual
+    - DOC/MINOR: config: Fix typo in description for `ssl_bc` in configuration.txt
+    - BUG/MINOR: hlua: unsafe hlua_lua2smp() usage
+    - SCRIPTS: publish-release: update the umask to keep group write access
+    - BUG/MINOR: log: fix memory error handling in parse_logsrv()
+    - BUG/MINOR: proxy: missing free in free_proxy for redirect rules
+    - MINOR: spoe: Don't stop disabled proxies
+    - BUILD: mjson: Fix warning about unused variables
+    - BUG/MINOR: debug: do not emit empty lines in thread dumps
+    - BUG/MEDIUM: spoe: Don't start new applet if there are enough idle ones
+    - CI: switch to Fastly CDN to download LibreSSL
+    - BUILD: ssl: switch LibreSSL to Fastly CDN
+    - BUG/MINOR: server: incorrect report for tracking servers leaving drain
+    - MINOR: server: explicitly commit state change in srv_update_status()
+    - BUG/MINOR: server: don't miss proxy stats update on server state transitions
+    - BUG/MINOR: server: don't miss server stats update on server state transitions
+    - BUG/MINOR: server: don't use date when restoring last_change from state file
+    - CI: cirrus-ci: bump FreeBSD image to 13-1
+    - BUG/MEDIUM: filters: Don't deinit filters for disabled proxies during startup
+    - MINOR: proxy: add http_free_redirect_rule() function
+    - BUG/MINOR: http_rules: fix errors paths in http_parse_redirect_rule()
+    - DOC: config: Fix bind/server/peer documentation in the peers section
+    - CONTRIB: Add vi file extensions to .gitignore
+    - BUG/MINOR: spoe: Only skip sending new frame after a receive attempt
+    - BUG/MINOR: cfgparse-tcp: leak when re-declaring interface from bind line
+    - BUG/MINOR: proxy: add missing interface bind free in free_proxy
+
+2023/02/14 : 2.4.22
+    - BUG/MINOR: fcgi-app: prevent 'use-fcgi-app' in default section
+    - BUG/MEDIUM: ssl: wrong eviction from the session cache tree
+    - BUG/MINOR: ssl/crt-list: warn when a line is malformated
+    - BUG/MEDIUM: stick-table: do not leave entries in end of window during purge
+    - BUG/MEDIUM: cache: use the correct time reference when comparing dates
+    - DOC: config: fix option spop-check proxy compatibility
+    - DOC: config: 'http-send-name-header' option may be used in default section
+    - DOC: proxy-protocol: fix wrong byte in provided example
+    - BUG/MEDIUM: stconn: Schedule a shutw on shutr if data must be sent first
+    - CI: github: don't warn on deprecated openssl functions on windows
+    - BUG/CRITICAL: http: properly reject empty http header field names
+
+2023/01/27 : 2.4.21
+    - BUG/MINOR: http-htx: Don't consider an URI as normalized after a set-uri action
+    - BUG/MEDIIM: stconn: Flush output data before forwarding close to write side
+    - CI: github: change "ubuntu-latest" to "ubuntu-20.04"
+    - BUILD: peers: peers-t.h depends on stick-table-t.h
+    - BUG/MINOR: resolvers: Don't wait periodic resolution on healthcheck failure
+    - BUG/MEDIUM: ssl: Verify error codes can exceed 63
+    - BUG/MINOR: ssl: Fix potential overflow
+    - BUG/MEDIUM: mworker: fix segv in early failure of mworker mode with peers
+    - BUG/MINOR: promex: create haproxy_backend_agg_server_status
+    - MINOR: promex: introduce haproxy_backend_agg_check_status
+    - DOC: promex: Add missing backend metrics
+    - BUG/MAJOR: fcgi: Fix uninitialized reserved bytes
+    - REGTESTS: fix the race conditions in iff.vtc
+    - REGTESTS: startup: check maxconn computation
+    - BUG/MEDIUM: resolvers: Use tick_first() to update the resolvers task timeout
+    - LICENSE: wurfl: clarify the dummy library license.
+    - BUG/MINOR: ssl: Fix memory leak of find_chain in ssl_sock_load_cert_chain
+    - BUG/MEDIUM: mux-h2: Refuse interim responses with end-stream flag set
+    - BUG/MINOR: pool/stats: Use ullong to report total pool usage in bytes in stats
+    - BUILD: makefile: build the features list dynamically
+    - BUILD: makefile: sort the features list
+    - BUG/MINOR: http-fetch: Only fill txn status during prefetch if not already set
+    - BUG/MAJOR: buf: Fix copy of wrapping output data when a buffer is realigned
+    - REGTEST: fix the race conditions in json_query.vtc
+    - REGTEST: fix the race conditions in digest.vtc
+    - REGTEST: fix the race conditions in hmac.vtc
+    - BUG/MINOR: http: Memory leak of http redirect rules' format string
+    - CLEANUP: htx: fix a typo in an error message of http_str_to_htx
+    - BUG/MINOR: h1-htx: Remove flags about protocol upgrade on non-101 responses
+    - BUG/MINOR: resolvers: Wait the resolution execution for a do_resolv action
+    - BUG/MINOR: promex: Don't forget to consume the request on error
+    - BUG/MINOR: http-ana: Report SF_FINST_R flag on error waiting the request body
+    - BUG/MINOR: http-fetch: Don't block HTTP sample fetch eval in HTTP_MSG_ERROR state
+    - BUG/MINOR: http-ana: make set-status also update txn->status
+    - BUG/MINOR: listeners: fix suspend/resume of inherited FDs
+    - DOC: config: fix wrong section number for "protocol prefixes"
+    - DOC: config: fix aliases for protocol prefixes "udp4@" and "udp6@"
+    - BUG/MINOR: mux-fcgi: Correctly set pathinfo
+    - DOC: config: fix "Address formats" chapter syntax
+    - BUG/MINOR: listener: close tiny race between resume_listener() and stopping
+    - BUG/MINOR: mux-h2: add missing traces on failed headers decoding
+    - BUILD: hpack: include global.h for the trash that is needed in debug mode
+    - BUG/MINOR: sink: free the forwarding task on exit
+
+2022/12/09 : 2.4.20
+    - BUG/MINOR: checks: update pgsql regex on auth packet
+    - DOC: config: Fix pgsql-check documentation to make user param mandatory
+    - BUG/MEDIUM: lua: Don't crash in hlua_lua2arg_check on failure
+    - BUG/MEDIUM: lua: handle stick table implicit arguments right.
+    - BUILD: h1: silence an initiialized warning with gcc-4.7 and -Os
+    - BUG/MINOR: http-fetch: Update method after a prefetch in smp_fetch_meth()
+    - BUILD: http_fetch: silence an uninitiialized warning with gcc-4/5/6 at -Os
+    - BUG/MINOR: mux-h1: Account consumed output data on synchronous connection error
+    - MINOR: smtpchk: Update expect rule to fully match replies to EHLO commands
+    - BUG/MINOR: smtpchk: SMTP Service check should gracefully close SMTP transaction
+    - BUG/MINOR: backend: only enforce turn-around state when not redispatching
+    - DOC: configuration: missing 'if' in tcp-request content example
+    - BUG/MAJOR: stick-tables: do not try to index a server name for applets
+    - BUG/MINOR: server: make sure "show servers state" hides private bits
+    - CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in matrix.py
+    - CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in workflow definition
+    - BUG/MINOR: log: Preserve message facility when the log target is a ring buffer
+    - BUG/MINOR: ring: Properly parse connect timeout
+    - BUG/MEDIUM: compression: handle rewrite errors when updating response headers
+    - BUG/MINOR: sink: Only use backend capability for the sink proxies
+    - BUG/MINOR: sink: Set default connect/server timeout for implicit ring buffers
+    - CI: SSL: use proper version generating when "latest" semantic is used
+    - CI: SSL: temporarily stick to LibreSSL=3.5.3
+    - BUG/MINOR: stick-table: Use server_id instead of std_t_sint in process_store_rules()
+    - DOC: management: add forgotten "show startup-logs"
+    - BUG/MAJOR: stick-table: don't process store-response rules for applets
+    - BUG/MEDIUM: stick-table: fix a race condition when updating the expiration task
+    - BUG/MINOR: log: fixing bug in tcp syslog_io_handler Octet-Counting
+    - CI: add monthly gcc cross compile jobs
+    - BUG/MINOR: ssl: Memory leak of AUTHORITY_KEYID struct when loading issuer
+    - BUG/MINOR: ssl: ocsp structure not freed properly in case of error
+    - CI: switch to the "latest" LibreSSL
+    - CI: emit the compiler's version in the build reports
+    - BUG/MEDIUM: wdt/clock: properly handle early task hangs
+    - BUG/MINOR: http-htx: Fix error handling during parsing http replies
+    - BUG/MINOR: resolvers: Set port before IP address when processing SRV records
+    - BUG/MINOR: mux-fcgi: Be sure to send empty STDING record in case of zero-copy
+    - BUG/MEDIUM: mux-fcgi: Avoid value length overflow when it doesn't fit at once
+    - BUG/MINOR: mux-h1: Do not send a last null chunk on body-less answers
+    - REG-TESTS: cache: Remove T-E header for 304-Not-Modified responses
+    - DOC: config: fix alphabetical ordering of global section
+    - BUG/MEDIUM: ring: fix creation of server in uninitialized ring
+    - BUG/MINOR: pool/cli: use ullong to report total pool usage in bytes
+    - BUG/MEDIUM: listener: Fix race condition when updating the global mngmt task
+    - BUG/MINOR: http_ana/txn: don't re-initialize txn and req var lists
+    - BUG/MINOR: ssl: don't initialize the keylog callback when not required
+    - BUG/MEDIUM: peers: messages about unkown tables not correctly ignored
+    - BUILD: peers: Remove unused variables
+    - BUG/MINOR: server/idle: at least use atomic stores when updating max_used_conns
+    - BUILD: listener: fix build warning on global_listener_rwlock without threads
+    - BUG/MINOR: cfgparse-listen: fix ebpt_next_dup pointer dereference on proxy "from" inheritance
+    - BUG/MINOR: log: fix parse_log_message rfc5424 size check
+    - BUG/MINOR: http-htx: Don't consider an URI as normalized after a set-uri action
+    - BUILD: http-htx: Silent build error about a possible NULL start-line
+    - BUG/MINOR: mux-h1: Fix handling of 408-Request-Time-Out
+    - Revert "BUG/MINOR: http-htx: Don't consider an URI as normalized after a set-uri action"
+    - DOC: config: provide some configuration hints for "http-reuse"
+    - DOC: config: clarify the fact that SNI should not be used in HTTP scenarios
+    - DOC: config: mention that a single monitor-uri rule is supported
+    - DOC: config: explain how default matching method for ACL works
+    - DOC: config: clarify the fact that "retries" is not just for connections
+    - DOC: config: clarify the -m dir and -m dom pattern matching methods
+    - SCRIPTS: announce-release: add a link to the data plane API
+    - Revert "CI: switch to the "latest" LibreSSL"
+    - Revert "CI: determine actual OpenSSL version dynamically"
+
+2022/09/28 : 2.4.19
+    - BUG/MEDIUM: mworker: use default maxconn in wait mode
+    - MINOR: http: Add function to get port part of a host
+    - MINOR: http: Add function to detect default port
+    - BUG/MEDIUM: h1: Improve authority validation for CONNCET request
+    - MINOR: http-htx: Use new HTTP functions for the scheme based normalization
+    - MINOR: ebtree: add ebmb_lookup_shorter() to pursue lookups
+    - BUG/MEDIUM: pattern: only visit equivalent nodes when skipping versions
+    - MINOR: peers: Use a dedicated reconnect timeout when stopping the local peer
+    - BUG/MEDIUM: peers: limit reconnect attempts of the old process on reload
+    - BUG/MINOR: peers: Use right channel flag to consider the peer as connected
+    - BUG/MEDIUM: dns: Properly initialize new DNS session
+    - MINOR: server: Constify source server to copy its settings
+    - REORG: server: Export srv_settings_cpy() function
+    - BUG/MEDIUM: proxy: Perform a custom copy for default server settings
+    - BUG/MINOR: ring/cli: fix a race condition between the writer and the reader
+    - BUG/MINOR: sink: fix a race condition between the writer and the reader
+    - BUILD: cfgparse: always defined _GNU_SOURCE for sched.h and crypt.h
+    - BUG/MEDIUM: poller: use fd_delete() to release the poller pipes
+    - BUG/MEDIUM: task: relax one thread consistency check in task_unlink_wq()
+    - BUILD: debug: silence warning on gcc-5
+    - BUG/MEDIUM: ring: fix too lax 'size' parser
+    - BUILD: http: silence an uninitialized warning affecting gcc-5
+    - BUG/MEDIUM: http-ana: fix crash or wrong header deletion by http-restrict-req-hdr-names
+    - BUG/MEDIUM: mux-h2: do not fiddle with ->dsi to indicate demux is idle
+    - BUG/MAJOR: log-forward: Fix log-forward proxies not fully initialized
+    - BUG/MAJOR: mworker: fix infinite loop on master with no proxies.
+    - BUG/MINOR: resolvers: return the correct value in resolvers_finalize_config()
+    - BUG/MINOR: tcpcheck: Disable QUICKACK only if data should be sent after connect
+    - REGTESTS: Fix prometheus script to perform HTTP health-checks
+    - DOC: configuration: do-resolve doesn't work with a port in the string
+    - BUG/MEDIUM: spoe: Properly update streams waiting for a ACK in async mode
+    - BUG/MEDIUM: peers: Add connect and server timeout to peers proxy
+    - BUG/MEDIUM: peers: Don't use resync timer when local resync is in progress
+    - BUG/MEDIUM: peers: Don't start resync on reload if local peer is not up-to-date
+    - BUG/MINOR: hlua: Rely on CF_EOI to detect end of message in HTTP applets
+    - BUG/MINOR: tcpcheck: Disable QUICKACK for default tcp-check (with no rule)
+    - BUG/MEDIUM: mux-h1: do not refrain from signaling errors after end of input
+    - REGTESTS: http_request_buffer: Add a barrier to not mix up log messages
+    - BUG/MEDIUM: mux-h1: always use RST to kill idle connections in pools
+    - BUG/MINOR: mux-h2: fix the "show fd" dest buffer for the subscriber
+    - BUG/MINOR: mux-h1: fix the "show fd" dest buffer for the subscriber
+    - BUG/MINOR: mux-fcgi: fix the "show fd" dest buffer for the subscriber
+    - BUG/MINOR: regex: Properly handle PCRE2 lib compiled without JIT support
+    - BUILD: makefile: enable crypt(3) for NetBSD
+    - BUG/MINOR: h1: Support headers case adjustment for TCP proxies
+    - BUG/MINOR: task: always reset a new tasklet's call date
+    - BUG/MINOR: signals/poller: set the poller timeout to 0 when there are signals
+    - BUG/MINOR: signals/poller: ensure wakeup from signals
+    - CI: cirrus-ci: bump FreeBSD image to 13-1
+    - BUG/MEDIUM: proxy: ensure pause_proxy() and resume_proxy() own PROXY_LOCK
+    - MINOR: listener: small API change
+    - BUG/MINOR: stats: fixing stat shows disabled frontend status as 'OPEN'
+    - REGTESTS: healthcheckmail: Relax matching on the healthcheck log message
+    - REGTESTS: log: test the log-forward feature
+    - BUG/MEDIUM: sink: bad init sequence on tcp sink from a ring.
+    - REGTESTS: ssl/log: test the log-forward with SSL
+    - DOC: fix TOC in starter guide for subsection 3.3.8. Statistics
+    - BUG/MEDIUM: captures: free() an error capture out of the proxy lock
+    - BUILD: fd: fix a build warning on the DWCAS
+    - SCRIPTS: announce-release: update some URLs to https
+    - BUG/MINOR: log: improper behavior when escaping log data
+    - REGTESTS: 4be_1srv_smtpchk_httpchk_layer47errors: Return valid SMTP replies
+    - BUG/MEDIUM: resolvers: Remove aborted resolutions from query_ids tree
+
+2022/07/27 : 2.4.18
+    - CI: determine actual LibreSSL version dynamically
+    - MEDIUM: http-ana: Add a proxy option to restrict chars in request header names
+    - BUILD: fix build warning on solaris based systems with __maybe_unused.
+    - MINOR: tools: add get_exec_path implementation for solaris based systems.
+    - BUG/MINOR: ssl: Fix crash when no private key is found in pem
+    - REGTESTS: abortonclose: Fix some race conditions
+    - BUG/MEDIUM: config: Reset outline buffer size on realloc error in readcfgfile()
+    - BUG/MINOR: check: Reinit the buffer wait list at the end of a check
+    - BUG/MINOR: cfgparse: abort earlier in case of allocation error
+    - BUG/MINOR: peers: fix error reporting of "bind" lines
+    - BUILD/MINOR: cpuset fix build for FreeBSD 13.1
+    - CI: determine actual OpenSSL version dynamically
+    - BUG/MEDIUM: tools: Fix `inet_ntop` usage in sa2str
+    - BUG/MEDIUM: http: Properly reject non-HTTP/1.x protocols
+    - BUG/MEDIUM: resolvers: Don't defer resolutions release in deinit function
+    - BUG/MEDIUM: peers: fix segfault using multiple bind on peers sections
+    - BUG/MEDIUM: peers: prevent uninitialized multiple listeners on peers section
+    - BUG/MEDIUM: sample: Fix adjusting size in word converter
+    - REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (2)
+    - SCRIPTS: add make-releases-json to recreate a releases.json file in download dirs
+    - SCRIPTS: make publish-release try to launch make-releases-json
+    - DOC: peers: indicate that some server settings are not usable
+    - DOC: peers: clarify when entry expiration date is renewed.
+    - DOC: peers: fix port number and addresses on new peers section format
+    - DOC: intro: adjust the numbering of paragraphs to keep the output ordered
+    - BUG/MINOR: ssl_ckch: Free error msg if commit changes on a cert entry fails
+    - BUG/MEDIUM: ssl_ckch: Don't delete a cert entry if it is being modified
+    - BUG/MINOR: ssl_ckch: Don't duplicate path when replacing a cert entry
+    - BUG/MEDIUM: ssl_ckch: Rework 'commit ssl cert' to handle full buffer cases
+    - BUG/MEDIUM: ssl/crt-list: Rework 'add ssl crt-list' to handle full buffer cases
+    - MEDIUM: http-ana: Always report rewrite failures as PRXCOND in logs
+    - REGTESTS: abortonclose: Add a barrier to not mix up log messages
+    - REGTESTS: http_request_buffer: Increase client timeout to wait "slow" clients
+    - BUG/MINOR: ssl_ckch: Dump cert transaction only once if show command yield
+    - BUG/MINOR: ssl_ckch: Fix possible uninitialized value in show_cert I/O handler
+    - REGTESTS: http_abortonclose: Extend supported versions
+    - REGTESTS: restrict_req_hdr_names: Extend supported versions
+    - BUILD: compiler: implement unreachable for older compilers too
+    - BUG/MEDIUM: mailers: Set the object type for check attached to an email alert
+    - BUG/MINOR: trace: Test server existence for health-checks to get proxy
+    - BUG/MINOR: checks: Properly handle email alerts in trace messages
+    - REGTESTS: healthcheckmail: Update the test to be functional again
+    - REGTESTS: healthcheckmail: Relax health-check failure condition
+    - BUG/MINOR: cli/stats: add missing trailing LF after JSON outputs
+    - BUG/MINOR: server: do not enable DNS resolution on disabled proxies
+    - BUG/MINOR: cli/stats: add missing trailing LF after "show info json"
+    - BUG/MINOR: tcp-rules: Make action call final on read error and delay expiration
+    - BUG/MEDIUM: ssl/cli: crash when crt inserted into a crt-list
+    - MEDIUM: mux-h2: try to coalesce outgoing WINDOW_UPDATE frames
+    - BUG/MINOR: ssl: Do not look for key in extra files if already in pem
+    - BUG/MINOR: http-ana: Set method to HTTP_METH_OTHER when an HTTP txn is created
+    - BUG/MINOR: http-fetch: Use integer value when possible in "method" sample fetch
+    - MINOR: fd: add a new FD_DISOWN flag to prevent from closing a deleted FD
+    - BUG/MEDIUM: ssl/fd: unexpected fd close using async engine
+    - BUILD: Makefile: Add Lua 5.4 autodetect
+    - CI: re-enable gcc asan builds
+    - MINOR: fd: Add BUG_ON checks on fd_insert()
+    - BUG/MINOR: peers/config: always fill the bind_conf's argument
+    - BUG/MINOR: http-check: Preserve headers if not redefined by an implicit rule
+    - BUG/MINOR: http-act: Properly generate 103 responses when several rules are used
+    - BUG/MINOR: peers: fix possible NULL dereferences at config parsing
+    - BUG/MINOR: http-htx: Fix scheme based normalization for URIs with userinfo
+    - BUG/MEDIUM: http-fetch: Don't fetch the method if there is no stream
+    - REGTESTS: filters: Fix CONNECT request in random-forwarding script
+    - BUG/MINOR: mux-h1: Be sure to commit htx changes in the demux buffer
+    - BUG/MEDIUM: http-ana: Don't wait to have an empty buf to switch in TUNNEL state
+    - BUG/MEDIUM: mux-h1: Handle connection error after a synchronous send
+    - MEDIUM: mworker: set the iocb of the socketpair without using fd_insert()
+    - BUG/MEDIUM: tools: avoid calling dlsym() in static builds
+    - BUILD: makefile: Fix install(1) handling for OpenBSD/NetBSD/Solaris/AIX
+    - BUG/MEDIUM: tools: avoid calling dlsym() in static builds (try 2)
+    - BUG/MINOR: tools: fix statistical_prng_range()'s output range
+    - REGTESTS: Fix some scripts to be compatible with 2.4 and prior
+    - BUG/MEDIUM: mworker: proc_self incorrectly set crashes upon reload
+    - BUILD: add detection for unsupported compiler models
+    - BUG/MINOR: backend: Fallback on RR algo if balance on source is impossible
+    - BUG/MINOR: sockpair: wrong return value for fd_send_uxst()
+
 2022/05/13 : 2.4.17
     - CI: github actions: update LibreSSL to 3.5.2
     - SCRIPTS: announce-release: add URL of dev packages
diff --git a/INSTALL b/INSTALL
index 526bf08..b07d6a6 100644
--- a/INSTALL
+++ b/INSTALL
@@ -318,9 +318,9 @@
 advanced scripting capabilities. Only versions 5.3 and above are supported.
 In order to enable Lua support, please specify "USE_LUA=1" on the command line.
 Some systems provide this library under various names to avoid conflicts with
-previous versions. By default, HAProxy looks for "lua5.3", "lua53", "lua". If
-your system uses a different naming, you may need to set the library name in
-the "LUA_LIB_NAME" variable.
+previous versions. By default, HAProxy looks for "lua5.4", "lua54", "lua5.3",
+"lua53", "lua". If your system uses a different naming, you may need to set the
+library name in the "LUA_LIB_NAME" variable.
 
 If Lua is not provided on your system, it can be very simply built locally. It
 can be downloaded from https://www.lua.org/, extracted and built, for example :
diff --git a/Makefile b/Makefile
index ccf6b01..05067bc 100644
--- a/Makefile
+++ b/Makefile
@@ -83,6 +83,9 @@
 #   DESTDIR is not set by default and is used for installation only.
 #           It might be useful to set DESTDIR if you want to install haproxy
 #           in a sandbox.
+#   INSTALL is set to "install" by default and is used to provide the name of
+#           the install binary used by the install targets and any additional
+#           flags.
 #   PREFIX  is set to "/usr/local" by default and is used for installation only.
 #   SBINDIR is set to "$(PREFIX)/sbin" by default and is used for installation
 #           only.
@@ -106,7 +109,7 @@
 #   LUA_LIB        : force the lib path to lua
 #   LUA_INC        : force the include path to lua
 #   LUA_LIB_NAME   : force the lib name (or automatically evaluated, by order of
-#                                        priority : lua5.3, lua53, lua).
+#                                        priority : lua5.4, lua54, lua5.3, lua53, lua).
 #   OT_DEBUG       : compile the OpenTracing filter in debug mode
 #   OT_INC         : force the include path to libopentracing-c-wrapper
 #   OT_LIB         : force the lib path to libopentracing-c-wrapper
@@ -148,6 +151,7 @@
 
 #### Installation options.
 DESTDIR =
+INSTALL = install
 PREFIX = /usr/local
 SBINDIR = $(PREFIX)/sbin
 MANDIR = $(PREFIX)/share/man
@@ -356,6 +360,7 @@
     USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY          \
     USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO     \
     USE_GETADDRINFO USE_BACKTRACE)
+  INSTALL = install -v
 endif
 
 # For linux >= 2.6.28, glibc without new features
@@ -364,6 +369,7 @@
     USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER  \
     USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY          \
     USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_GETADDRINFO)
+  INSTALL = install -v
 endif
 
 # For linux >= 2.6.28 and musl
@@ -373,6 +379,7 @@
     USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY          \
     USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO     \
     USE_GETADDRINFO)
+  INSTALL = install -v
 endif
 
 # Solaris 10 and above
@@ -416,8 +423,8 @@
 # NetBSD 8 and above
 ifeq ($(TARGET),netbsd)
   set_target_defaults = $(call default_opts, \
-    USE_POLL USE_TPROXY USE_THREAD USE_KQUEUE USE_ACCEPT4 USE_CLOSEFROM   \
-    USE_GETADDRINFO)
+    USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_KQUEUE USE_ACCEPT4        \
+    USE_CLOSEFROM USE_GETADDRINFO)
 endif
 
 # AIX 5.1 only
@@ -517,7 +524,11 @@
 # is used to report a list of all flags which were used to build this version.
 # Do not assign anything to it.
 BUILD_OPTIONS  := $(foreach opt,$(use_opts),$(call ignore_implicit,$(opt)))
-BUILD_FEATURES := $(foreach opt,$(patsubst USE_%,%,$(use_opts)),$(if $(USE_$(opt)),+$(opt),-$(opt)))
+
+# Make a list of all known features with +/- prepended depending on their
+# activation status. Must be a macro so that dynamically enabled ones are
+# evaluated with their current status.
+BUILD_FEATURES  = $(foreach opt,$(patsubst USE_%,%,$(sort $(use_opts))),$(if $(USE_$(opt)),+$(opt),-$(opt)))
 
 # All USE_* options have their equivalent macro defined in the code (some might
 # possibly be unused though)
@@ -614,11 +625,11 @@
 LUA_LD_FLAGS := -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic) $(if $(LUA_LIB),-L$(LUA_LIB))
 ifeq ($(LUA_LIB_NAME),)
 # Try to automatically detect the Lua library
-LUA_LIB_NAME := $(firstword $(foreach lib,lua5.3 lua53 lua,$(call check_lua_lib,$(lib),$(LUA_LD_FLAGS))))
+LUA_LIB_NAME := $(firstword $(foreach lib,lua5.4 lua54 lua5.3 lua53 lua,$(call check_lua_lib,$(lib),$(LUA_LD_FLAGS))))
 ifeq ($(LUA_LIB_NAME),)
-$(error unable to automatically detect the Lua library name, you can enforce its name with LUA_LIB_NAME=<name> (where <name> can be lua5.3, lua53, lua, ...))
+$(error unable to automatically detect the Lua library name, you can enforce its name with LUA_LIB_NAME=<name> (where <name> can be lua5.4, lua54, lua, ...))
 endif
-LUA_INC := $(firstword $(foreach lib,lua5.3 lua53 lua,$(call check_lua_inc,$(lib),"/usr/include/")))
+LUA_INC := $(firstword $(foreach lib,lua5.4 lua54 lua5.3 lua53 lua,$(call check_lua_inc,$(lib),"/usr/include/")))
 ifneq ($(LUA_INC),)
 OPTIONS_CFLAGS  += -I$(LUA_INC)
 endif
@@ -664,7 +675,7 @@
 OPTIONS_OBJS	+= $(DEVICEATLAS_LIB)/dac.o
 endif
 OPTIONS_OBJS	+= addons/deviceatlas/da.o
-OPTIONS_CFLAGS += $(if $(DEVICEATLAS_INC),-I$(DEVICEATLAS_INC))
+OPTIONS_CFLAGS += $(if $(DEVICEATLAS_INC),-I$(DEVICEATLAS_INC)) $(if $(DEVICEATLAS_SRC),-DDATLAS_DA_NOCACHE)
 endif
 
 ifneq ($(USE_51DEGREES),)
@@ -991,16 +1002,16 @@
 	       -c -o $@ $<
 
 install-man:
-	$(Q)install -v -d "$(DESTDIR)$(MANDIR)"/man1
-	$(Q)install -v -m 644 doc/haproxy.1 "$(DESTDIR)$(MANDIR)"/man1
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(MANDIR)"/man1
+	$(Q)$(INSTALL) -m 644 doc/haproxy.1 "$(DESTDIR)$(MANDIR)"/man1
 
 EXCLUDE_DOCUMENTATION = lgpl gpl coding-style
 DOCUMENTATION = $(filter-out $(EXCLUDE_DOCUMENTATION),$(patsubst doc/%.txt,%,$(wildcard doc/*.txt)))
 
 install-doc:
-	$(Q)install -v -d "$(DESTDIR)$(DOCDIR)"
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(DOCDIR)"
 	$(Q)for x in $(DOCUMENTATION); do \
-		install -v -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
+		$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
 	done
 
 install-bin:
@@ -1010,8 +1021,8 @@
 			exit 1; \
 		fi; \
 	done
-	$(Q)install -v -d "$(DESTDIR)$(SBINDIR)"
-	$(Q)install -v haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
+	$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
 
 install: install-bin install-man install-doc
 
diff --git a/VERDATE b/VERDATE
index cce1228..22d534d 100644
--- a/VERDATE
+++ b/VERDATE
@@ -1,2 +1,2 @@
 $Format:%ci$
-2022/05/13
+2023/08/19
diff --git a/VERSION b/VERSION
index 1324c03..0cb980f 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.4.17
+2.4.24
diff --git a/addons/promex/README b/addons/promex/README
index e41ebdc..4e29e23 100644
--- a/addons/promex/README
+++ b/addons/promex/README
@@ -286,6 +286,8 @@
 | haproxy_backend_max_total_time_seconds              |
 | haproxy_backend_internal_errors_total               |
 | haproxy_backend_uweight                             |
+| haproxy_backend_agg_server_status                   |
+| haproxy_backend_agg_check_status                    |
 +-----------------------------------------------------+
 
 * Server metrics
diff --git a/addons/promex/service-prometheus.c b/addons/promex/service-prometheus.c
index b267f98..d31b666 100644
--- a/addons/promex/service-prometheus.c
+++ b/addons/promex/service-prometheus.c
@@ -289,6 +289,8 @@
 	[ST_F_NEED_CONN_EST]        = { .n = IST("need_connections_current"),         .type = PROMEX_MT_GAUGE,    .flags = (                                                                       PROMEX_FL_SRV_METRIC) },
 	[ST_F_UWEIGHT]              = { .n = IST("uweight"),                          .type = PROMEX_MT_GAUGE,    .flags = (                                               PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
 	[ST_F_AGG_SRV_CHECK_STATUS] = { .n = IST("agg_server_check_status"),	      .type = PROMEX_MT_GAUGE,    .flags = (                                               PROMEX_FL_BACK_METRIC                       ) },
+	[ST_F_AGG_SRV_STATUS ]      = { .n = IST("agg_server_status"),	              .type = PROMEX_MT_GAUGE,    .flags = (                                               PROMEX_FL_BACK_METRIC                       ) },
+	[ST_F_AGG_CHECK_STATUS]     = { .n = IST("agg_check_status"),	              .type = PROMEX_MT_GAUGE,    .flags = (                                               PROMEX_FL_BACK_METRIC                       ) },
 };
 
 /* Description of overridden stats fields */
@@ -792,6 +794,7 @@
 	double secs;
 	enum promex_back_state bkd_state;
 	enum promex_srv_state srv_state;
+	enum healthcheck_status srv_check_status;
 
 	for (;appctx->st2 < ST_F_TOTAL_FIELDS; appctx->st2++) {
 		if (!(promex_st_metrics[appctx->st2].flags & appctx->ctx.stats.flags))
@@ -800,6 +803,8 @@
 		while (appctx->ctx.stats.obj1) {
 			struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
 			unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
+			unsigned int srv_check_count[HCHK_STATUS_SIZE] = { 0 };
+			const char *check_state;
 
 			px = appctx->ctx.stats.obj1;
 
@@ -814,7 +819,8 @@
 				return -1;
 
 			switch (appctx->st2) {
-				case ST_F_AGG_SRV_CHECK_STATUS:
+				case ST_F_AGG_SRV_CHECK_STATUS: // DEPRECATED
+				case ST_F_AGG_SRV_STATUS:
 					if (!px->srv)
 						goto next_px;
 					sv = px->srv;
@@ -833,6 +839,28 @@
 					}
 					appctx->ctx.stats.st_code = 0;
 					goto next_px;
+				case ST_F_AGG_CHECK_STATUS:
+					if (!px->srv)
+						goto next_px;
+					sv = px->srv;
+					while (sv) {
+						srv_check_status = sv->check.status;
+						srv_check_count[srv_check_status] += 1;
+						sv = sv->next;
+					}
+					for (; appctx->ctx.stats.st_code < HCHK_STATUS_SIZE; appctx->ctx.stats.st_code++) {
+						if (get_check_status_result(appctx->ctx.stats.st_code) < CHK_RES_FAILED)
+								continue;
+						val = mkf_u32(FO_STATUS, srv_check_count[appctx->ctx.stats.st_code]);
+						check_state = get_check_status_info(appctx->ctx.stats.st_code);
+						labels[1].name = ist("state");
+						labels[1].value = ist(check_state);
+						if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[appctx->st2],
+									&val, labels, &out, max))
+							goto full;
+					}
+					appctx->ctx.stats.st_code = 0;
+					goto next_px;
 				case ST_F_STATUS:
 					bkd_state = ((px->lbprm.tot_weight > 0 || !px->srv) ? 1 : 0);
 					for (; appctx->ctx.stats.st_code < PROMEX_BACK_STATE_COUNT; appctx->ctx.stats.st_code++) {
@@ -1547,6 +1575,7 @@
 	res->flags |= CF_READ_NULL;
 	si_shutr(si);
 	si_shutw(si);
+	goto out;
 }
 
 struct applet promex_applet = {
diff --git a/addons/wurfl/dummy/wurfl/wurfl.h b/addons/wurfl/dummy/wurfl/wurfl.h
index 3b450fc..7659561 100644
--- a/addons/wurfl/dummy/wurfl/wurfl.h
+++ b/addons/wurfl/dummy/wurfl/wurfl.h
@@ -4,11 +4,16 @@
  * Copyright (c) ScientiaMobile, Inc.
  * http://www.scientiamobile.com
  *
- * This software package is the property of ScientiaMobile Inc. and is licensed
- * commercially according to a contract between the Licensee and ScientiaMobile Inc. (Licensor).
- * If you represent the Licensee, please refer to the licensing agreement which has been signed
- * between the two parties. If you do not represent the Licensee, you are not authorized to use
- * this software in any way.
+ * This software package is the property of ScientiaMobile Inc. and is distributed under
+ * a dual licensing scheme:
+ *
+ * 1) commercially according to a contract between the Licensee and ScientiaMobile Inc. (Licensor).
+ *    If you represent the Licensee, please refer to the licensing agreement which has been signed
+ *    between the two parties. If you do not represent the Licensee, you are not authorized to use
+ *    this software in any way.
+ *
+ * 2) LGPL when used in the context of the HAProxy project with the purpose of testing compatibility
+ *    of HAProxy with ScientiaMobile software.
  *
  */
 
diff --git a/dev/hpack/decode.c b/dev/hpack/decode.c
index ae82512..13c95c7 100644
--- a/dev/hpack/decode.c
+++ b/dev/hpack/decode.c
@@ -30,7 +30,7 @@
 char trash_buf[MAX_RQ_SIZE];
 char tmp_buf[MAX_RQ_SIZE];
 
-struct buffer trash = { .area = trash_buf, .data = 0, .size = sizeof(trash_buf) };
+THREAD_LOCAL struct buffer trash = { .area = trash_buf, .data = 0, .size = sizeof(trash_buf) };
 struct buffer tmp   = { .area = tmp_buf,   .data = 0, .size = sizeof(tmp_buf)   };
 
 /* displays a <len> long memory block at <buf>, assuming first byte of <buf>
diff --git a/doc/configuration.txt b/doc/configuration.txt
index af41588..7b5ee65 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -3,7 +3,7 @@
                           Configuration Manual
                          ----------------------
                               version 2.4
-                              2022/05/13
+                              2023/08/19
 
 
 This document covers the configuration language as implemented in the version
@@ -42,7 +42,8 @@
 2.3.      Environment variables
 2.4.      Conditional blocks
 2.5.      Time format
-2.6.      Examples
+2.6.      Size format
+2.7.      Examples
 
 3.    Global parameters
 3.1.      Process management and security
@@ -753,6 +754,10 @@
 * HAPROXY_MASTER_CLI: In master-worker mode, listeners addresses of the master
   CLI, separated by semicolons.
 
+* HAPROXY_STARTUP_VERSION: contains the version used to start, in master-worker
+  mode this is the version which was used to start the master, even after
+  updating the binary and reloading.
+
 In addition, some pseudo-variables are internally resolved and may be used as
 regular variables. Pseudo-variables always start with a dot ('.'), and are the
 only ones where the dot is permitted. The current list of pseudo-variables is:
@@ -908,8 +913,24 @@
   - h  : hours.   1h = 60m = 3600s = 3600000ms
   - d  : days.    1d = 24h = 1440m = 86400s = 86400000ms
 
+
+2.6. Size format
+----------------
+
+Some parameters involve values representing size, such as bandwidth limits.
+These values are generally expressed in bytes (unless explicitly stated
+otherwise) but may be expressed in any other unit by suffixing the unit to the
+numeric value. It is important to consider this because it will not be repeated
+for every keyword. Supported units are case insensitive :
+
+  - k : kilobytes. 1 kilobyte = 1024 bytes
+  - m : megabytes. 1 megabyte = 1048576 bytes
+  - g : gigabytes. 1 gigabyte = 1073741824 bytes
 
-2.6. Examples
+Both time and size formats require integers; decimal notation is not allowed.
+
+
+2.7. Examples
 -------------
 
     # Simple configuration for an HTTP proxy listening on port 80 on all
@@ -965,32 +986,36 @@
 The following keywords are supported in the "global" section :
 
  * Process management and security
+   - 51degrees-cache-size
+   - 51degrees-data-file
+   - 51degrees-property-name-list
+   - 51degrees-property-separator
    - ca-base
    - chroot
-   - crt-base
    - cpu-map
+   - crt-base
    - daemon
    - default-path
    - description
    - deviceatlas-json-file
    - deviceatlas-log-level
-   - deviceatlas-separator
    - deviceatlas-properties-cookie
+   - deviceatlas-separator
    - expose-experimental-directives
    - external-check
    - gid
    - group
-   - hard-stop-after
    - h1-case-adjust
    - h1-case-adjust-file
+   - h2-workaround-bogus-websocket-clients
+   - hard-stop-after
    - insecure-fork-wanted
    - insecure-setuid-wanted
    - issuers-chain-path
-   - h2-workaround-bogus-websocket-clients
    - localpeer
    - log
-   - log-tag
    - log-send-hostname
+   - log-tag
    - lua-load
    - lua-load-per-thread
    - lua-prepend-path
@@ -1003,13 +1028,9 @@
    - pp2-never-send-local
    - presetenv
    - resetenv
-   - uid
-   - ulimit-n
-   - user
    - set-dumpable
    - set-var
    - setenv
-   - stats
    - ssl-default-bind-ciphers
    - ssl-default-bind-ciphersuites
    - ssl-default-bind-curves
@@ -1020,25 +1041,25 @@
    - ssl-dh-param-file
    - ssl-server-verify
    - ssl-skip-self-issued-ca
+   - stats
+   - strict-limits
+   - uid
+   - ulimit-n
    - unix-bind
    - unsetenv
-   - 51degrees-data-file
-   - 51degrees-property-name-list
-   - 51degrees-property-separator
-   - 51degrees-cache-size
+   - user
+   - wurfl-cache-size
    - wurfl-data-file
    - wurfl-information-list
    - wurfl-information-list-separator
-   - wurfl-cache-size
-   - strict-limits
 
  * Performance tuning
    - busy-polling
    - max-spread-checks
+   - maxcompcpuusage
+   - maxcomprate
    - maxconn
    - maxconnrate
-   - maxcomprate
-   - maxcompcpuusage
    - maxpipes
    - maxsessrate
    - maxsslconn
@@ -1046,16 +1067,16 @@
    - maxzlibmem
    - no-memory-trimming
    - noepoll
-   - nokqueue
    - noevports
-   - nopoll
-   - nosplice
    - nogetaddrinfo
+   - nokqueue
+   - nopoll
    - noreuseport
+   - nosplice
    - profiling.tasks
-   - spread-checks
    - server-state-base
    - server-state-file
+   - spread-checks
    - ssl-engine
    - ssl-mode-async
    - tune.buffers.limit
@@ -1063,10 +1084,12 @@
    - tune.bufsize
    - tune.chksize
    - tune.comp.maxlevel
+   - tune.fail-alloc
    - tune.fd.edge-triggered
    - tune.h2.header-table-size
    - tune.h2.initial-window-size
    - tune.h2.max-concurrent-streams
+   - tune.h2.max-frame-size
    - tune.http.cookielen
    - tune.http.logurilen
    - tune.http.maxhdr
@@ -1074,9 +1097,9 @@
    - tune.idletimer
    - tune.lua.forced-yield
    - tune.lua.maxmem
+   - tune.lua.service-timeout
    - tune.lua.session-timeout
    - tune.lua.task-timeout
-   - tune.lua.service-timeout
    - tune.maxaccept
    - tune.maxpollevents
    - tune.maxrewrite
@@ -1092,13 +1115,13 @@
    - tune.sndbuf.client
    - tune.sndbuf.server
    - tune.ssl.cachesize
+   - tune.ssl.capture-cipherlist-size
+   - tune.ssl.default-dh-param
+   - tune.ssl.force-private-cache
    - tune.ssl.keylog
    - tune.ssl.lifetime
-   - tune.ssl.force-private-cache
    - tune.ssl.maxrecord
-   - tune.ssl.default-dh-param
    - tune.ssl.ssl-ctx-cache-size
-   - tune.ssl.capture-cipherlist-size
    - tune.vars.global-max-size
    - tune.vars.proc-max-size
    - tune.vars.reqres-max-size
@@ -1115,6 +1138,36 @@
 3.1. Process management and security
 ------------------------------------
 
+51degrees-data-file <file path>
+  The path of the 51Degrees data file to provide device detection services. The
+  file should be unzipped and accessible by HAProxy with relevant permissions.
+
+  Please note that this option is only available when HAProxy has been
+  compiled with USE_51DEGREES.
+
+51degrees-property-name-list [<string> ...]
+  A list of 51Degrees property names to be loaded from the dataset. A full list
+  of names is available on the 51Degrees website:
+  https://51degrees.com/resources/property-dictionary
+
+  Please note that this option is only available when HAProxy has been
+  compiled with USE_51DEGREES.
+
+51degrees-property-separator <char>
+  A char that will be appended to every property value in a response header
+  containing 51Degrees results. If not set, it defaults to ','.
+
+  Please note that this option is only available when HAProxy has been
+  compiled with USE_51DEGREES.
+
+51degrees-cache-size <number>
+  Sets the size of the 51Degrees converter cache to <number> entries. This
+  is an LRU cache which remembers previous device detections and their results.
+  By default, this cache is disabled.
+
+  Please note that this option is only available when HAProxy has been
+  compiled with USE_51DEGREES.
+
 ca-base <dir>
   Assigns a default directory to fetch SSL CA certificates and CRLs from when a
   relative path is used with "ca-file", "ca-verify-file" or "crl-file"
@@ -1267,6 +1320,13 @@
   paths. A robust approach could consist in prefixing all files names with
   their respective site name, or in doing so at the directory level.
 
+description <text>
+  Add a text that describes the instance.
+
+  Please note that it is required to escape certain characters (# for example)
+  and this text is inserted into an HTML page so you should avoid using
+  "<" and ">" characters.
+
 deviceatlas-json-file <path>
   Sets the path of the DeviceAtlas JSON data file to be loaded by the API.
   The path must be a valid JSON data file and accessible by HAProxy process.
@@ -1275,15 +1335,15 @@
   Sets the level of information returned by the API. This directive is
   optional and set to 0 by default if not set.
 
-deviceatlas-separator <char>
-  Sets the character separator for the API properties results. This directive
-  is optional and set to | by default if not set.
-
 deviceatlas-properties-cookie <name>
   Sets the client cookie's name used for the detection if the DeviceAtlas
   Client-side component was used during the request. This directive is optional
   and set to DAPROPS by default if not set.
 
+deviceatlas-separator <char>
+  Sets the character separator for the API properties results. This directive
+  is optional and set to | by default if not set.
+
 expose-experimental-directives
   This statement must appear before using directives tagged as experimental or
   the config file will be rejected.
@@ -1309,22 +1369,6 @@
   Similar to "gid" but uses the GID of group name <group name> from /etc/group.
   See also "gid" and "user".
 
-hard-stop-after <time>
-  Defines the maximum time allowed to perform a clean soft-stop.
-
-  Arguments :
-    <time>  is the maximum time (by default in milliseconds) for which the
-            instance will remain alive when a soft-stop is received via the
-            SIGUSR1 signal.
-
-  This may be used to ensure that the instance will quit even if connections
-  remain opened during a soft-stop (for example with long timeouts for a proxy
-  in tcp mode). It applies both in TCP and HTTP mode.
-
-  Example:
-    global
-      hard-stop-after 30s
-
 h1-case-adjust <from> <to>
   Defines the case adjustment to apply, when enabled, to the header name
   <from>, to change it to <to> before sending it to HTTP/1 clients or
@@ -1374,6 +1418,33 @@
   See "h1-case-adjust", "option h1-case-adjust-bogus-client" and
   "option h1-case-adjust-bogus-server".
 
+h2-workaround-bogus-websocket-clients
+  This disables the announcement of the support for h2 websockets to clients.
+  This can be used to overcome clients which have issues when implementing the
+  relatively fresh RFC8441, such as Firefox 88. To allow clients to
+  automatically downgrade to http/1.1 for the websocket tunnel, specify h2
+  support on the bind line using "alpn" without an explicit "proto" keyword. If
+  this statement was previously activated, this can be disabled by prefixing
+  the keyword with "no".
+
+hard-stop-after <time>
+  Defines the maximum time allowed to perform a clean soft-stop.
+
+  Arguments :
+    <time>  is the maximum time (by default in milliseconds) for which the
+            instance will remain alive when a soft-stop is received via the
+            SIGUSR1 signal.
+
+  This may be used to ensure that the instance will quit even if connections
+  remain opened during a soft-stop (for example with long timeouts for a proxy
+  in tcp mode). It applies both in TCP and HTTP mode.
+
+  Example:
+    global
+      hard-stop-after 30s
+
+  See also: grace
+
 insecure-fork-wanted
   By default HAProxy tries hard to prevent any thread and process creation
   after it starts. Doing so is particularly important when using Lua files of
@@ -1421,15 +1492,6 @@
   "issuers-chain-path" directory. All other certificates with the same issuer
   will share the chain in memory.
 
-h2-workaround-bogus-websocket-clients
-  This disables the announcement of the support for h2 websockets to clients.
-  This can be use to overcome clients which have issues when implementing the
-  relatively fresh RFC8441, such as Firefox 88. To allow clients to
-  automatically downgrade to http/1.1 for the websocket tunnel, specify h2
-  support on the bind line using "alpn" without an explicit "proto" keyword. If
-  this statement was previously activated, this can be disabled by prefixing
-  the keyword with "no'.
-
 localpeer <name>
   Sets the local instance's peer name. It will be ignored if the "-L"
   command line argument is specified or if used after "peers" section
@@ -1762,6 +1824,26 @@
   configuration. See also "server-state-base" and "show servers state",
   "load-server-state-from-file" and "server-state-file-name"
 
+set-dumpable
+  This option is better left disabled by default and enabled only upon a
+  developer's request. If it has been enabled, it may still be forcibly
+  disabled by prefixing it with the "no" keyword. It has no impact on
+  performance nor stability but will try hard to re-enable core dumps that were
+  possibly disabled by file size limitations (ulimit -f), core size limitations
+  (ulimit -c), or "dumpability" of a process after changing its UID/GID (such
+  as /proc/sys/fs/suid_dumpable on Linux). Core dumps might still be limited by
+  the current directory's permissions (check what directory the file is started
+  from), the chroot directory's permission (it may be needed to temporarily
+  disable the chroot directive or to move it to a dedicated writable location),
+  or any other system-specific constraint. For example, some Linux flavours are
+  notorious for replacing the default core file with a path to an executable
+  not even installed on the system (check /proc/sys/kernel/core_pattern). Often,
+  simply writing "core", "core.%p" or "/var/log/core/core.%p" addresses the
+  issue. When trying to enable this option waiting for a rare issue to
+  re-appear, it's often a good idea to first try to obtain such a dump by
+  issuing, for example, "kill -11" to the "haproxy" process and verify that it
+  leaves a core where expected when dying.
+
 set-var <var-name> <expr>
   Sets the process-wide variable '<var-name>' to the result of the evaluation
   of the sample expression <expr>. The variable '<var-name>' may only be a
@@ -1785,26 +1867,6 @@
   the configuration file sees the new value. See also "presetenv", "resetenv",
   and "unsetenv".
 
-set-dumpable
-  This option is better left disabled by default and enabled only upon a
-  developer's request. If it has been enabled, it may still be forcibly
-  disabled by prefixing it with the "no" keyword. It has no impact on
-  performance nor stability but will try hard to re-enable core dumps that were
-  possibly disabled by file size limitations (ulimit -f), core size limitations
-  (ulimit -c), or "dumpability" of a process after changing its UID/GID (such
-  as /proc/sys/fs/suid_dumpable on Linux). Core dumps might still be limited by
-  the current directory's permissions (check what directory the file is started
-  from), the chroot directory's permission (it may be needed to temporarily
-  disable the chroot directive or to move it to a dedicated writable location),
-  or any other system-specific constraint. For example, some Linux flavours are
-  notorious for replacing the default core file with a path to an executable
-  not even installed on the system (check /proc/sys/kernel/core_pattern). Often,
-  simply writing "core", "core.%p" or "/var/log/core/core.%p" addresses the
-  issue. When trying to enable this option waiting for a rare issue to
-  re-appear, it's often a good idea to first try to obtain such a dump by
-  issuing, for example, "kill -11" to the "haproxy" process and verify that it
-  leaves a core where expected when dying.
-
 ssl-default-bind-ciphers <ciphers>
   This setting is only available when support for OpenSSL was built in. It sets
   the default string describing the list of cipher algorithms ("cipher suite")
@@ -1995,6 +2057,10 @@
   certificates. It's useless for BoringSSL, .issuer is ignored because ocsp
   bits does not need it. Requires at least OpenSSL 1.0.2.
 
+stats maxconn <connections>
+  By default, the stats socket is limited to 10 concurrent connections. It is
+  possible to change this value with "stats maxconn".
+
 stats socket [<address:port>|<path>] [param*]
   Binds a UNIX socket to <path> or a TCPv4/v6 address to <address:port>.
   Connections to this socket will return various statistics outputs and even
@@ -2011,9 +2077,12 @@
   to change this value with "stats timeout". The value must be passed in
   milliseconds, or be suffixed by a time unit among { us, ms, s, m, h, d }.
 
-stats maxconn <connections>
-  By default, the stats socket is limited to 10 concurrent connections. It is
-  possible to change this value with "stats maxconn".
+strict-limits
+  Makes process fail at startup when a setrlimit fails. HAProxy tries to set the
+  best setrlimit according to what has been calculated. If it fails, it will
+  emit a warning. This option is here to guarantee an explicit failure of
+  HAProxy when those limits fail. It is enabled by default. It may still be
+  forcibly disabled by prefixing it with the "no" keyword.
 
 uid <number>
   Changes the process's user ID to <number>. It is recommended that the user ID
@@ -2061,42 +2130,14 @@
   nodes, it becomes easy to immediately spot what server is handling the
   traffic.
 
-description <text>
-  Add a text that describes the instance.
-
-  Please note that it is required to escape certain characters (# for example)
-  and this text is inserted into a html page so you should avoid using
-  "<" and ">" characters.
-
-51degrees-data-file <file path>
-  The path of the 51Degrees data file to provide device detection services. The
-  file should be unzipped and accessible by HAProxy with relevant permissions.
-
-  Please note that this option is only available when HAProxy has been
-  compiled with USE_51DEGREES.
-
-51degrees-property-name-list [<string> ...]
-  A list of 51Degrees property names to be load from the dataset. A full list
-  of names is available on the 51Degrees website:
-  https://51degrees.com/resources/property-dictionary
-
-  Please note that this option is only available when HAProxy has been
-  compiled with USE_51DEGREES.
-
-51degrees-property-separator <char>
-  A char that will be appended to every property value in a response header
-  containing 51Degrees results. If not set that will be set as ','.
-
-  Please note that this option is only available when HAProxy has been
-  compiled with USE_51DEGREES.
-
-51degrees-cache-size <number>
-  Sets the size of the 51Degrees converter cache to <number> entries. This
-  is an LRU cache which reminds previous device detections and their results.
-  By default, this cache is disabled.
+wurfl-cache-size <size>
+  Sets the WURFL Useragent cache size. For faster lookups, already processed user
+  agents are kept in a LRU cache :
+  - "0"     : no cache is used.
+  - <size>  : size of lru cache in elements.
 
-  Please note that this option is only available when HAProxy has been
-  compiled with USE_51DEGREES.
+  Please note that this option is only available when HAProxy has been compiled
+  with USE_WURFL=1.
 
 wurfl-data-file <file path>
   The path of the WURFL data file to provide device detection services. The
@@ -2152,22 +2193,6 @@
   Please note that this option is only available when HAProxy has been compiled
   with USE_WURFL=1.
 
-wurfl-cache-size <size>
-  Sets the WURFL Useragent cache size. For faster lookups, already processed user
-  agents are kept in a LRU cache :
-  - "0"     : no cache is used.
-  - <size>  : size of lru cache in elements.
-
-  Please note that this option is only available when HAProxy has been compiled
-  with USE_WURFL=1.
-
-strict-limits
-  Makes process fail at startup when a setrlimit fails. HAProxy tries to set the
-  best setrlimit according to what has been calculated. If it fails, it will
-  emit a warning. This option is here to guarantee an explicit failure of
-  HAProxy when those limits fail. It is enabled by default. It may still be
-  forcibly disabled by prefixing it with the "no" keyword.
-
 3.2. Performance tuning
 -----------------------
 
@@ -2204,6 +2229,24 @@
   even if the servers' check intervals are larger. When servers run with
   shorter intervals, their intervals will be respected though.
 
+maxcompcpuusage <number>
+  Sets the maximum CPU usage HAProxy can reach before stopping the compression
+  for new requests or decreasing the compression level of current requests.
+  It works like 'maxcomprate' but measures CPU usage instead of incoming data
+  bandwidth. The value is expressed in percent of the CPU used by HAProxy. A
+  value of 100 disables the limit. The default value is 100. Setting a lower
+  value will prevent the compression work from slowing the whole process down
+  and from introducing high latencies.
+
+maxcomprate <number>
+  Sets the maximum per-process input compression rate to <number> kilobytes
+  per second. For each session, if the maximum is reached, the compression
+  level will be decreased during the session. If the maximum is reached at the
+  beginning of a session, the session will not compress at all. If the maximum
+  is not reached, the compression level will be increased up to
+  tune.comp.maxlevel. A value of zero means there is no limit, this is the
+  default value.
+
 maxconn <number>
   Sets the maximum per-process number of concurrent connections to <number>. It
   is equivalent to the command-line argument "-n". Proxies will stop accepting
@@ -2229,25 +2272,6 @@
   value close to its expected share. Also, lowering tune.maxaccept can improve
   fairness.
 
-maxcomprate <number>
-  Sets the maximum per-process input compression rate to <number> kilobytes
-  per second. For each session, if the maximum is reached, the compression
-  level will be decreased during the session. If the maximum is reached at the
-  beginning of a session, the session will not compress at all. If the maximum
-  is not reached, the compression level will be increased up to
-  tune.comp.maxlevel. A value of zero means there is no limit, this is the
-  default value.
-
-maxcompcpuusage <number>
-  Sets the maximum CPU usage HAProxy can reach before stopping the compression
-  for new requests or decreasing the compression level of current requests.
-  It works like 'maxcomprate' but measures CPU usage instead of incoming data
-  bandwidth. The value is expressed in percent of the CPU used by HAProxy. In
-  case of multiple processes (nbproc > 1), each process manages its individual
-  usage. A value of 100 disable the limit. The default value is 100. Setting
-  a lower value will prevent the compression work from slowing the whole
-  process down and from introducing high latencies.
-
 maxpipes <number>
   Sets the maximum per-process number of pipes to <number>. Currently, pipes
   are only used by kernel-based tcp splicing. Since a pipe contains two file
@@ -2323,17 +2347,21 @@
   equivalent to the command-line argument "-de". The next polling system
   used will generally be "poll". See also "nopoll".
 
-nokqueue
-  Disables the use of the "kqueue" event polling system on BSD. It is
-  equivalent to the command-line argument "-dk". The next polling system
-  used will generally be "poll". See also "nopoll".
-
 noevports
   Disables the use of the event ports event polling system on SunOS systems
   derived from Solaris 10 and later. It is equivalent to the command-line
   argument "-dv". The next polling system used will generally be "poll". See
   also "nopoll".
 
+nogetaddrinfo
+  Disables the use of getaddrinfo(3) for name resolving. It is equivalent to
+  the command line argument "-dG". Deprecated gethostbyname(3) will be used.
+
+nokqueue
+  Disables the use of the "kqueue" event polling system on BSD. It is
+  equivalent to the command-line argument "-dk". The next polling system
+  used will generally be "poll". See also "nopoll".
+
 nopoll
   Disables the use of the "poll" event polling system. It is equivalent to the
   command-line argument "-dp". The next polling system used will be "select".
@@ -2341,6 +2369,10 @@
   platforms supported by HAProxy. See also "nokqueue", "noepoll" and
   "noevports".
 
+noreuseport
+  Disables the use of SO_REUSEPORT - see socket(7). It is equivalent to the
+  command line argument "-dR".
+
 nosplice
   Disables the use of kernel tcp splicing between sockets on Linux. It is
   equivalent to the command line argument "-dS". Data will then be copied
@@ -2351,14 +2383,6 @@
   case of doubt. See also "option splice-auto", "option splice-request" and
   "option splice-response".
 
-nogetaddrinfo
-  Disables the use of getaddrinfo(3) for name resolving. It is equivalent to
-  the command line argument "-dG". Deprecated gethostbyname(3) will be used.
-
-noreuseport
-  Disables the use of SO_REUSEPORT - see socket(7). It is equivalent to the
-  command line argument "-dR".
-
 profiling.memory { on | off }
   Enables ('on') or disables ('off') per-function memory profiling. This will
   keep usage statistics of malloc/calloc/realloc/free calls anywhere in the
@@ -2604,18 +2628,18 @@
   counts only the pure Lua runtime. If the Lua does a sleep, the sleep is
   not taken in account. The default timeout is 4s.
 
-tune.lua.task-timeout <timeout>
-  Purpose is the same as "tune.lua.session-timeout", but this timeout is
-  dedicated to the tasks. By default, this timeout isn't set because a task may
-  remain alive during of the lifetime of HAProxy. For example, a task used to
-  check servers.
-
 tune.lua.service-timeout <timeout>
   This is the execution timeout for the Lua services. This is useful for
   preventing infinite loops or spending too much time in Lua. This timeout
   counts only the pure Lua runtime. If the Lua does a sleep, the sleep is
   not taken in account. The default timeout is 4s.
 
+tune.lua.task-timeout <timeout>
+  Purpose is the same as "tune.lua.session-timeout", but this timeout is
+  dedicated to the tasks. By default, this timeout isn't set because a task may
+  remain alive during the lifetime of HAProxy. For example, a task used to
+  check servers.
+
 tune.maxaccept <number>
   Sets the maximum number of consecutive connections a process may accept in a
   row before switching to other work. In single process mode, higher numbers
@@ -2756,6 +2780,26 @@
   pre-allocated upon startup and are shared between all processes if "nbproc"
   is greater than 1. Setting this value to 0 disables the SSL session cache.
 
+tune.ssl.capture-cipherlist-size <number>
+  Sets the maximum size of the buffer used for capturing client hello cipher
+  list, extensions list, elliptic curves list and elliptic curve point
+  formats. If the value is 0 (default value) the capture is disabled,
+  otherwise a buffer is allocated for each SSL/TLS connection.
+
+tune.ssl.default-dh-param <number>
+  Sets the maximum size of the Diffie-Hellman parameters used for generating
+  the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
+  final size will try to match the size of the server's RSA (or DSA) key (e.g,
+  a 2048 bits temporary DH key for a 2048 bits RSA key), but will not exceed
+  this maximum value. Only 1024 or higher values are allowed. Higher values
+  will increase the CPU load, and values greater than 1024 bits are not
+  supported by Java 7 and earlier clients. This value is not used if static
+  Diffie-Hellman parameters are supplied either directly in the certificate
+  file or by using the ssl-dh-param-file parameter.
+  If there is neither a default-dh-param nor a ssl-dh-param-file defined, and
+  if the server's PEM file of a given frontend does not specify its own DH
+  parameters, then DHE ciphers will be unavailable for this frontend.
+
 tune.ssl.force-private-cache
   This option disables SSL session cache sharing between all processes. It
   should normally not be used since it will force many renegotiations due to
@@ -2824,28 +2868,12 @@
   best value. HAProxy will automatically switch to this setting after an idle
   stream has been detected (see tune.idletimer above).
 
-tune.ssl.default-dh-param <number>
-  Sets the maximum size of the Diffie-Hellman parameters used for generating
-  the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
-  final size will try to match the size of the server's RSA (or DSA) key (e.g,
-  a 2048 bits temporary DH key for a 2048 bits RSA key), but will not exceed
-  this maximum value. Default value if 2048. Only 1024 or higher values are
-  allowed. Higher values will increase the CPU load, and values greater than
-  1024 bits are not supported by Java 7 and earlier clients. This value is not
-  used if static Diffie-Hellman parameters are supplied either directly
-  in the certificate file or by using the ssl-dh-param-file parameter.
-
 tune.ssl.ssl-ctx-cache-size <number>
   Sets the size of the cache used to store generated certificates to <number>
   entries. This is a LRU cache. Because generating a SSL certificate
   dynamically is expensive, they are cached. The default cache size is set to
   1000 entries.
 
-tune.ssl.capture-cipherlist-size <number>
-  Sets the maximum size of the buffer used for capturing client-hello cipher
-  list. If the value is 0 (default value) the capture is disabled, otherwise
-  a buffer is allocated for each SSL/TLS connection.
-
 tune.vars.global-max-size <size>
 tune.vars.proc-max-size <size>
 tune.vars.reqres-max-size <size>
@@ -2970,7 +2998,8 @@
   Creates a new peer list with name <peersect>. It is an independent section,
   which is referenced by one or more stick-tables.
 
-bind [<address>]:<port_range> [, ...] [param*]
+bind [<address>]:port [param*]
+bind /<path> [param*]
   Defines the binding parameters of the local peer of this "peers" section.
   Such lines are not supported with "peer" line in the same "peers" section.
 
@@ -2988,9 +3017,10 @@
   Arguments:
     <param*>  is a list of parameters for this server. The "default-server"
               keyword accepts an important number of options and has a complete
-              section dedicated to it. Please refer to section 5 for more
-              details.
-
+              section dedicated to it. In a peers section, the transport
+              parameters of a "default-server" line are supported. Please refer
+              to section 5 for more details, and the "server" keyword below in
+              this section for some of the restrictions.
 
   See also: "server" and section 5 about server options
 
@@ -3004,16 +3034,17 @@
   log information about the "peers" listener. See "log" option for proxies for
   more details.
 
-peer <peername> <ip>:<port> [param*]
+peer <peername> [<address>]:port [param*]
+peer <peername> /<path> [param*]
   Defines a peer inside a peers section.
   If <peername> is set to the local peer name (by default hostname, or forced
   using "-L" command line option or "localpeer" global configuration setting),
-  HAProxy will listen for incoming remote peer connection on <ip>:<port>.
-  Otherwise, <ip>:<port> defines where to connect to in order to join the
-  remote peer, and <peername> is used at the protocol level to identify and
+  HAProxy will listen for incoming remote peer connection on the provided
+  address.  Otherwise, the address defines where to connect to in order to join
+  the remote peer, and <peername> is used at the protocol level to identify and
   validate the remote peer on the server side.
 
-  During a soft restart, local peer <ip>:<port> is used by the old instance to
+  During a soft restart, local peer address is used by the old instance to
   connect the new one and initiate a complete replication (teaching process).
 
   It is strongly recommended to have the exact same peers declaration on all
@@ -3027,14 +3058,19 @@
   Note: "peer" keyword may transparently be replaced by "server" keyword (see
   "server" keyword explanation below).
 
-server <peername> [<ip>:<port>] [param*]
+server <peername> [<address>:<port>] [param*]
+server <peername> [/<path>] [param*]
   As previously mentioned, "peer" keyword may be replaced by "server" keyword
-  with a support for all "server" parameters found in 5.2 paragraph.
-  If the underlying peer is local, <ip>:<port> parameters must not be present.
-  These parameters must  be provided on a "bind" line (see "bind" keyword
-  of this "peers" section).
-  Some of these parameters are irrelevant for "peers" sections.
+  with a support for all "server" parameters found in 5.2 paragraph that are
+  related to transport settings. If the underlying peer is local, the address
+  parameter must not be present; it must be provided on a "bind" line (see
+  "bind" keyword of this "peers" section).
 
+  A number of "server" parameters are irrelevant for "peers" sections. Peers by
+  nature do not support dynamic host name resolution nor health checks, hence
+  parameters like "init_addr", "resolvers", "check", "agent-check", or "track"
+  are not supported. Similarly, there is no load balancing nor stickiness, thus
+  parameters such as "weight" or "cookie" have no effect.
 
   Example:
     # The old way.
@@ -3054,10 +3090,11 @@
 
    Example:
      peers mypeers
-         bind 127.0.0.11:10001 ssl crt mycerts/pem
-         default-server ssl verify none
-         server hostA  127.0.0.10:10000
-         server hostB  #local peer
+        bind 192.168.0.1:1024 ssl crt mycerts/pem
+        default-server ssl verify none
+        server haproxy1 #local peer
+        server haproxy2 192.168.0.2:1024
+        server haproxy3 10.2.0.1:1024
 
 
 table <tablename> type {ip | integer | string [len <length>] | binary [len <length>]}
@@ -3613,7 +3650,7 @@
 http-request                              -          X         X         X
 http-response                             -          X         X         X
 http-reuse                                X          -         X         X
-http-send-name-header                     -          -         X         X
+http-send-name-header                     X          -         X         X
 id                                        -          X         X         X
 ignore-persist                            -          -         X         X
 load-server-state-from-file               X          -         X         X
@@ -3645,6 +3682,7 @@
 option http-keep-alive               (*)  X          X         X         X
 option http-no-delay                 (*)  X          X         X         X
 option http-pretend-keepalive        (*)  X          -         X         X
+option http-restrict-req-hdr-names        X          X         X         X
 option http-server-close             (*)  X          X         X         X
 option http-use-proxy-header         (*)  X          X         X         -
 option httpchk                            X          -         X         X
@@ -3670,7 +3708,7 @@
 option splice-auto                   (*)  X          X         X         X
 option splice-request                (*)  X          X         X         X
 option splice-response               (*)  X          X         X         X
-option spop-check                         -          -         -         X
+option spop-check                         X          -         X         X
 option srvtcpka                      (*)  X          -         X         X
 option ssl-hello-chk                      X          -         X         X
 -- keyword -------------------------- defaults - frontend - listen -- backend -
@@ -6155,7 +6193,8 @@
   based on information found in the request (IE a Host header).
   If this action is used to find the server's IP address (using the
   "set-dst" action), then the server IP address in the backend must be set
-  to 0.0.0.0.
+  to 0.0.0.0. The do-resolve action takes a host-only parameter, any port must
+  be removed from the string.
 
   Example:
     resolvers mydns
@@ -6170,7 +6209,7 @@
 
     frontend fe
       bind 10.42.0.1:80
-      http-request do-resolve(txn.myip,mydns,ipv4) hdr(Host),lower
+      http-request do-resolve(txn.myip,mydns,ipv4) hdr(Host),lower,regsub(:[0-9]*$,)
       http-request capture var(txn.myip) len 40
 
       # return 503 when the variable is not set,
@@ -6807,9 +6846,11 @@
 
   This rewrites the request URI with the result of the evaluation of format
   string <fmt>. The scheme, authority, path and query string are all replaced
-  at once. This can be used to rewrite hosts in front of proxies, or to
-  perform complex modifications to the URI such as moving parts between the
-  path and the query string.
+  at once. This can be used to rewrite hosts in front of proxies, or to perform
+  complex modifications to the URI such as moving parts between the path and
+  the query string. If an absolute URI is set, it will be sent as is to
+  HTTP/1.1 servers. If it is not the desired behavior, the host, the path
+  and/or the query string should be set separately.
   See also "http-request set-path" and "http-request set-query".
 
 http-request set-var(<var-name>) <expr> [ { if | unless } <condition> ]
@@ -7534,7 +7575,26 @@
   because almost no new connection will be established while idle connections
   remain available. This is particularly true with the "always" strategy.
 
+  The rules to decide to keep an idle connection opened or to close it after
+  processing are also governed by the "tune.pool-low-fd-ratio" (default: 20%)
+  and "tune.pool-high-fd-ratio" (default: 25%). These correspond to the
+  percentage of total file descriptors spent in idle connections above which
+  haproxy will respectively refrain from keeping a connection opened after a
+  response, and actively kill idle connections. Some setups using a very high
+  ratio of idle connections, either because of too low a global "maxconn", or
+  due to a lot of HTTP/2 or HTTP/3 traffic on the frontend (few connections)
+  but HTTP/1 connections on the backend, may observe a lower reuse rate because
+  too few connections are kept open. It may be desirable in this case to adjust
+  such thresholds or simply to increase the global "maxconn" value.
+
+  Similarly, when thread groups are explicitly enabled, it is important to
+  understand that idle connections are only usable between threads from a same
+  group. As such it may happen that unfair load between groups leads to more
+  idle connections being needed, causing a lower reuse rate. The same solution
+  may then be applied (increase global "maxconn" or increase pool ratios).
+
-  See also : "option http-keep-alive", "server maxconn"
+  See also : "option http-keep-alive", "server maxconn", "thread-groups",
+             "tune.pool-high-fd-ratio", "tune.pool-low-fd-ratio"
 
 
 http-send-name-header [<header>]
@@ -8066,7 +8126,9 @@
   Monitor requests are processed very early, just after the request is parsed
   and even before any "http-request". The only rulesets applied before are the
   tcp-request ones. They cannot be logged either, and it is the intended
-  purpose. They are only used to report HAProxy's health to an upper component,
+  purpose. Only one URI may be configured for monitoring; when multiple
+  "monitor-uri" statements are present, the last one will define the URI to
+  be used. They are only used to report HAProxy's health to an upper component,
   nothing more. However, it is possible to add any number of conditions using
   "monitor fail" and ACLs so that the result can be adjusted to whatever check
   can be imagined (most often the number of available servers in a backend).
@@ -8153,7 +8215,8 @@
   remaining ones are blocked by default unless this option is enabled. This
   option also relaxes the test on the HTTP version, it allows HTTP/0.9 requests
   to pass through (no version specified) and multiple digits for both the major
-  and the minor version.
+  and the minor version. Finally, this option also allows incoming URLs to
+  contain fragment references ('#' after the path).
 
   This option should never be enabled by default as it hides application bugs
   and open security breaches. It should only be deployed after a problem has
@@ -8596,18 +8659,18 @@
 
 option http-keep-alive
 no option http-keep-alive
-  Enable or disable HTTP keep-alive from client to server
+  Enable or disable HTTP keep-alive from client to server for HTTP/1.x
+  connections
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    yes   |   yes  |   yes
   Arguments : none
 
   By default HAProxy operates in keep-alive mode with regards to persistent
-  connections: for each connection it processes each request and response, and
-  leaves the connection idle on both sides between the end of a response and
-  the start of a new request. This mode may be changed by several options such
-  as "option http-server-close" or "option httpclose". This option allows to
-  set back the keep-alive mode, which can be useful when another mode was used
-  in a defaults section.
+  HTTP/1.x connections: for each connection it processes each request and
+  response, and leaves the connection idle on both sides. This mode may be
+  changed by several options such as "option http-server-close" or "option
+  httpclose". This option allows to set back the keep-alive mode, which can be
+  useful when another mode was used in a defaults section.
 
   Setting "option http-keep-alive" enables HTTP keep-alive mode on the client-
   and server- sides. This provides the lowest latency on the client side (slow
@@ -8624,15 +8687,6 @@
       compared to the cost of retrieving the associated object from the server.
 
   This last case can happen when the server is a fast static server of cache.
-  In this case, the server will need to be properly tuned to support high enough
-  connection counts because connections will last until the client sends another
-  request.
-
-  If the client request has to go to another backend or another server due to
-  content switching or the load balancing algorithm, the idle connection will
-  immediately be closed and a new one re-opened. Option "prefer-last-server" is
-  available to try optimize server selection so that if the server currently
-  attached to an idle connection is usable, it will be used.
 
   At the moment, logs will not indicate whether requests came from the same
   session or not. The accept date reported in the logs corresponds to the end
@@ -8642,12 +8696,10 @@
   not set.
 
   This option disables and replaces any previous "option httpclose" or "option
-  http-server-close". When backend and frontend options differ, all of these 4
-  options have precedence over "option http-keep-alive".
+  http-server-close".
 
  See also : "option httpclose", "option http-server-close",
-             "option prefer-last-server", "option http-pretend-keepalive",
-             and "1.1. The HTTP transaction model".
+             "option prefer-last-server" and "option http-pretend-keepalive".
 
 
 option http-no-delay
@@ -8686,19 +8738,19 @@
 
 option http-pretend-keepalive
 no option http-pretend-keepalive
-  Define whether HAProxy will announce keepalive to the server or not
+  Define whether HAProxy will announce keepalive for HTTP/1.x connection to the
+  server or not
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    no   |   yes  |   yes
   Arguments : none
 
   When running with "option http-server-close" or "option httpclose", HAProxy
-  adds a "Connection: close" header to the request forwarded to the server.
-  Unfortunately, when some servers see this header, they automatically refrain
-  from using the chunked encoding for responses of unknown length, while this
-  is totally unrelated. The immediate effect is that this prevents HAProxy from
-  maintaining the client connection alive. A second effect is that a client or
-  a cache could receive an incomplete response without being aware of it, and
-  consider the response complete.
+  adds a "Connection: close" header to the HTTP/1.x request forwarded to the
+  server. Unfortunately, when some servers see this header, they automatically
+  refrain from using the chunked encoding for responses of unknown length,
+  while this is totally unrelated. The effect is that a client or a cache could
+  receive an incomplete response without being aware of it, and consider the
+  response complete.
 
   By setting "option http-pretend-keepalive", HAProxy will make the server
   believe it will keep the connection alive. The server will then not fall back
@@ -8718,9 +8770,7 @@
   This option may be set in backend and listen sections. Using it in a frontend
   section will be ignored and a warning will be reported during startup. It is
   a backend related option, so there is no real reason to set it on a
-  frontend. This option may be combined with "option httpclose", which will
-  cause keepalive to be announced to the server and close to be announced to
-  the client. This practice is discouraged though.
+  frontend.
 
   If this option has been enabled in a "defaults" section, it can be disabled
   in a specific instance by prepending the "no" keyword before it.
@@ -8728,29 +8778,55 @@
   See also : "option httpclose", "option http-server-close", and
              "option http-keep-alive"
 
+option http-restrict-req-hdr-names { preserve | delete | reject }
+  Set HAProxy policy about HTTP request header names containing characters
+  outside the "[a-zA-Z0-9-]" charset
+  May be used in sections :   defaults | frontend | listen | backend
+                                 yes   |    yes   |   yes  |   yes
+  Arguments :
+      preserve  disable the filtering. It is the default mode for HTTP proxies
+                with no FastCGI application configured.
+
+      delete    remove request headers with a name containing a character
+                outside the "[a-zA-Z0-9-]" charset. It is the default mode for
+                HTTP backends with a configured FastCGI application.
+
+      reject    reject the request with a 403-Forbidden response if it contains a
+                header name with a character outside the "[a-zA-Z0-9-]" charset.
+
+  This option may be used to restrict the request header names to alphanumeric
+  and hyphen characters ([A-Za-z0-9-]). This may be mandatory to interoperate
+  with non-HTTP compliant servers that fail to handle some characters in header
+  names. It may also be mandatory for FastCGI applications because all
+  non-alphanumeric characters in header names are replaced by an underscore
+  ('_'). Thus, it is easily possible to mix up header names and bypass some
+  rules. For instance, "X-Forwarded-For" and "X_Forwarded-For" headers are both
+  converted to "HTTP_X_FORWARDED_FOR" in FastCGI.
+
+  Note this option is evaluated per proxy and after the http-request rules
+  evaluation.
 
 option http-server-close
 no option http-server-close
-  Enable or disable HTTP connection closing on the server side
+  Enable or disable HTTP/1.x connection closing on the server side
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    yes   |   yes  |   yes
   Arguments : none
 
   By default HAProxy operates in keep-alive mode with regards to persistent
-  connections: for each connection it processes each request and response, and
-  leaves the connection idle on both sides between the end of a response and
-  the start of a new request. This mode may be changed by several options such
-  as "option http-server-close" or "option httpclose". Setting "option
-  http-server-close" enables HTTP connection-close mode on the server side
-  while keeping the ability to support HTTP keep-alive and pipelining on the
-  client side. This provides the lowest latency on the client side (slow
-  network) and the fastest session reuse on the server side to save server
-  resources, similarly to "option httpclose".  It also permits non-keepalive
-  capable servers to be served in keep-alive mode to the clients if they
-  conform to the requirements of RFC7230. Please note that some servers do not
-  always conform to those requirements when they see "Connection: close" in the
-  request. The effect will be that keep-alive will never be used. A workaround
-  consists in enabling "option http-pretend-keepalive".
+  HTTP/1.x connections: for each connection it processes each request and
+  response, and leaves the connection idle on both sides. This mode may be
+  changed by several options such as "option http-server-close" or "option
+  httpclose". Setting "option http-server-close" enables HTTP connection-close
+  mode on the server side while keeping the ability to support HTTP keep-alive
+  and pipelining on the client side. This provides the lowest latency on the
+  client side (slow network) and the fastest session reuse on the server side
+  to save server resources, similarly to "option httpclose".  It also permits
+  non-keepalive capable servers to be served in keep-alive mode to the clients
+  if they conform to the requirements of RFC7230. Please note that some servers
+  do not always conform to those requirements when they see "Connection: close"
+  in the request. The effect will be that keep-alive will never be used. A
+  workaround consists in enabling "option http-pretend-keepalive".
 
   At the moment, logs will not indicate whether requests came from the same
   session or not. The accept date reported in the logs corresponds to the end
@@ -8768,8 +8844,8 @@
   If this option has been enabled in a "defaults" section, it can be disabled
   in a specific instance by prepending the "no" keyword before it.
 
-  See also : "option httpclose", "option http-pretend-keepalive",
-             "option http-keep-alive", and "1.1. The HTTP transaction model".
+  See also : "option httpclose", "option http-pretend-keepalive" and
+             "option http-keep-alive".
 
 option http-use-proxy-header
 no option http-use-proxy-header
@@ -8866,37 +8942,37 @@
 
 option httpclose
 no option httpclose
-  Enable or disable HTTP connection closing
+  Enable or disable HTTP/1.x connection closing
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    yes   |   yes  |   yes
   Arguments : none
 
   By default HAProxy operates in keep-alive mode with regards to persistent
-  connections: for each connection it processes each request and response, and
-  leaves the connection idle on both sides between the end of a response and
-  the start of a new request. This mode may be changed by several options such
-  as "option http-server-close" or "option httpclose".
+  HTTP/1.x connections: for each connection it processes each request and
+  response, and leaves the connection idle on both sides. This mode may be
+  changed by several options such as "option http-server-close" or "option
+  httpclose".
 
-  If "option httpclose" is set, HAProxy will close connections with the server
-  and the client as soon as the request and the response are received. It will
-  also check if a "Connection: close" header is already set in each direction,
-  and will add one if missing. Any "Connection" header different from "close"
-  will also be removed.
+  If "option httpclose" is set, HAProxy will close the client or the server
+  connection, depending where the option is set. Only the frontend is
+  considered for client connections while the frontend and the backend are
+  considered for server ones. In this case the option is enabled if at least
+  one of the frontend or backend holding the connection has it enabled. If the
+  option is set on a listener, it is applied both on client and server
+  connections. It will check if a "Connection: close" header is already set in
+  each direction, and will add one if missing.
 
   This option may also be combined with "option http-pretend-keepalive", which
-  will disable sending of the "Connection: close" header, but will still cause
-  the connection to be closed once the whole response is received.
+  will disable sending of the "Connection: close" request header, but will
+  still cause the connection to be closed once the whole response is received.
 
-  This option may be set both in a frontend and in a backend. It is enabled if
-  at least one of the frontend or backend holding a connection has it enabled.
   It disables and replaces any previous "option http-server-close" or "option
-  http-keep-alive". Please check section 4 ("Proxies") to see how this option
-  combines with others when frontend and backend options differ.
+  http-keep-alive".
 
   If this option has been enabled in a "defaults" section, it can be disabled
   in a specific instance by prepending the "no" keyword before it.
 
-  See also : "option http-server-close" and "1.1. The HTTP transaction model".
+  See also : "option http-server-close".
 
 
 option httplog [ clf ]
@@ -9316,7 +9392,7 @@
   See also : "option redispatch", "retries", "force-persist"
 
 
-option pgsql-check [ user <username> ]
+option pgsql-check user <username>
   Use PostgreSQL health checks for server testing
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    no    |   yes  |   yes
@@ -9576,7 +9652,7 @@
 option spop-check
   Use SPOP health checks for server testing
   May be used in sections :   defaults | frontend | listen | backend
-                                 no    |    no    |   no   |   yes
+                                 yes   |    no    |   yes  |   yes
   Arguments : none
 
   It is possible to test that the server correctly talks SPOP protocol instead
@@ -10141,24 +10217,26 @@
 
 
 retries <value>
-  Set the number of retries to perform on a server after a connection failure
+  Set the number of retries to perform on a server after a failure
   May be used in sections:    defaults | frontend | listen | backend
                                  yes   |    no    |   yes  |   yes
   Arguments :
-    <value>   is the number of times a connection attempt should be retried on
-              a server when a connection either is refused or times out. The
-              default value is 3.
+    <value>   is the number of times a request or connection attempt should be
+              retried on a server after a failure.
 
-  It is important to understand that this value applies to the number of
-  connection attempts, not full requests. When a connection has effectively
-  been established to a server, there will be no more retry.
+  By default, retries apply only to new connection attempts. However, when
+  the "retry-on" directive is used, other conditions might trigger a retry
+  (e.g. empty response, undesired status code), and each of them will count
+  one attempt, and when the total number of attempts reaches the value here, an
+  error will be returned.
 
   In order to avoid immediate reconnections to a server which is restarting,
   a turn-around timer of min("timeout connect", one second) is applied before
-  a retry occurs.
+  a retry occurs on the same server.
 
-  When "option redispatch" is set, the last retry may be performed on another
-  server even if a cookie references a different server.
+  When "option redispatch" is set, some retries may be performed on another
+  server even if a cookie references a different server. By default this will
+  only be the last retry unless an argument is passed to "option redispatch".
 
   See also : "option redispatch"
 
@@ -11316,13 +11394,16 @@
                       belonging to the same unique process.
 
     <expire>   defines the maximum duration of an entry in the table since it
-               was last created, refreshed or matched. The expiration delay is
+               was last created, refreshed using 'track-sc' or matched using
+               'stick match' or 'stick on' rule. The expiration delay is
                defined using the standard time format, similarly as the various
                timeouts. The maximum duration is slightly above 24 days. See
                section 2.5 for more information. If this delay is not specified,
                the session won't automatically expire, but older entries will
                be removed once full. Be sure not to use the "nopurge" parameter
                if not expiration delay is specified.
+               Note: 'table_*' converters perform lookups but won't update the
+               expire delay since they don't require 'track-sc'.
 
     <srvkey>   specifies how each server is identified for the purposes of the
                stick table. The valid values are "name" and "addr". If "name" is
@@ -12386,7 +12467,7 @@
   evaluated.
 
   Example:
-    tcp-request content use-service lua.deny { src -f /etc/haproxy/blacklist.lst }
+    tcp-request content use-service lua.deny if { src -f /etc/haproxy/blacklist.lst }
 
   Example:
 
@@ -12490,6 +12571,9 @@
   Obviously this is unlikely to be very useful and might even be racy, so such
   setups are not recommended.
 
+  Note the inspection delay is shortened if a connection error or shutdown is
+  experienced or if the request buffer appears as full.
+
   As soon as a rule matches, the request is released and continues as usual. If
   the timeout is reached and no rule matches, the default policy will be to let
   it pass through unaffected.
@@ -13483,7 +13567,8 @@
   who provide a valid TLS Server Name Indication field matching one of their
   CN or alt subjects. Wildcards are supported, where a wildcard character '*'
   is used instead of the first hostname component (e.g. *.example.org matches
-  www.example.org but not www.sub.example.org).
+  www.example.org but not www.sub.example.org). If an empty directory is used,
+  HAProxy will not start unless the "strict-sni" keyword is used.
 
   If no SNI is provided by the client or if the SSL library does not support
   TLS extensions, or if the client provides an SNI hostname which does not
@@ -13904,8 +13989,11 @@
 strict-sni
   This setting is only available when support for OpenSSL was built in. The
   SSL/TLS negotiation is allow only if the client provided an SNI which match
-  a certificate. The default certificate is not used.
-  See the "crt" option for more information.
+  a certificate. The default certificate is not used. This option also allows
+  to start without any certificate on a bind line, so an empty directory could
+  be used and filled later from the stats socket.
+  See the "crt" option for more information. See "add ssl crt-list" command in
+  the management guide.
 
 tcp-ut <delay>
   Sets the TCP User Timeout for all incoming connections instantiated from this
@@ -14949,8 +15037,10 @@
   The "sni" parameter evaluates the sample fetch expression, converts it to a
   string and uses the result as the host name sent in the SNI TLS extension to
   the server. A typical use case is to send the SNI received from the client in
-  a bridged HTTPS scenario, using the "ssl_fc_sni" sample fetch for the
-  expression, though alternatives such as req.hdr(host) can also make sense. If
+  a bridged TCP/SSL scenario, using the "ssl_fc_sni" sample fetch for the
+  expression. THIS MUST NOT BE USED FOR HTTPS, where req.hdr(host) should be
+  used instead, since SNI in HTTPS must always match the Host field and clients
+  are allowed to use different host names over the same connection. If
   "verify required" is set (which is the recommended setting), the resulting
   name will also be matched against the server certificate's names. See the
   "verify" directive for more details. If you want to set a SNI for health
@@ -15245,15 +15335,53 @@
   placed in the resolvers section in place of this directive.
 
 hold <status> <period>
-  Defines <period> during which the last name resolution should be kept based
-  on last resolution <status>
-    <status> : last name resolution status. Acceptable values are "nx",
-               "other", "refused", "timeout", "valid", "obsolete".
-    <period> : interval between two successive name resolution when the last
-               answer was in <status>. It follows the HAProxy time format.
-               <period> is in milliseconds by default.
+  Upon receiving the DNS response <status>, determines whether a server's state
+  should change from UP to DOWN. To make that determination, it checks whether
+  any valid status has been received during the past <period> in order to
+  counteract the just received invalid status.
+
+    <status> : last name resolution status.
+           nx        After receiving an NXDOMAIN status, check for any valid
+                     status during the concluding period.
+
+           refused   After receiving a REFUSED status, check for any valid
+                     status during the concluding period.
+
+           timeout   After the "timeout retry" has struck, check for any
+                     valid status during the concluding period.
+
+           other     After receiving any other invalid status, check for any
+                     valid status during the concluding period.
+
+           valid     Applies only to "http-request do-resolve" and
+                     "tcp-request content do-resolve" actions. It defines the
+                     period for which the server will maintain a valid response
+                     before triggering another resolution. It does not affect
+                     dynamic resolution of servers.
+
+           obsolete  Defines how long to wait before removing obsolete DNS
+                     records after an updated answer record is received. It
+                     applies to SRV records.
+
+    <period> : Amount of time into the past during which a valid response must
+               have been received. It follows the HAProxy time format and is in
+               milliseconds by default.
+
+  For a server that relies on dynamic DNS resolution to determine its IP
+  address, receiving an invalid DNS response, such as NXDOMAIN, will lead to
+  changing the server's state from UP to DOWN. The hold directives define how
+  far into the past to look for a valid response. If a valid response has been
+  received within <period>, the just received invalid status will be ignored.
+
+  Unless a valid response has been received during the concluding period, the
+  server will be marked as DOWN. For example, if "hold nx 30s" is set and the
+  last received DNS response was NXDOMAIN, the server will be marked DOWN
+  unless a valid response has been received during the last 30 seconds.
 
-  Default value is 10s for "valid", 0s for "obsolete" and 30s for others.
+  A server in the DOWN state will be marked UP immediately upon receiving a
+  valid status from the DNS server.
+
+  A separate behavior exists for "hold valid" and "hold obsolete".
 
 resolve_retries <nb>
   Defines the number <nb> of queries to send to resolve a server name before
@@ -15642,7 +15770,11 @@
 All ACL-specific criteria imply a default matching method. Most often, these
 criteria are composed by concatenating the name of the original sample fetch
 method and the matching method. For example, "hdr_beg" applies the "beg" match
-to samples retrieved using the "hdr" fetch method. Since all ACL-specific
+to samples retrieved using the "hdr" fetch method. This matching method is only
+usable when the keyword is used alone, without any converter. In case any such
+converter were to be applied after such an ACL keyword, the default matching
+method from the ACL keyword is simply ignored since what will matter for the
+matching is the output type of the last converter. Since all ACL-specific
 criteria rely on a sample fetch method, it is always possible instead to use
 the original sample fetch method and the explicit matching method using "-m".
 
@@ -15768,13 +15900,24 @@
   - suffix match    (-m end) : the patterns are compared with the end of the
     extracted string, and the ACL matches if any of them matches.
 
-  - subdir match    (-m dir) : the patterns are looked up inside the extracted
-    string, delimited with slashes ("/"), and the ACL matches if any of them
-    matches.
+  - subdir match    (-m dir) : the patterns are looked up anywhere inside the
+    extracted string, delimited with slashes ("/"), the beginning or the end
+    of the string. The ACL matches if any of them matches. As such, the string
+    "/images/png/logo/32x32.png", would match "/images", "/images/png",
+    "images/png", "/png/logo", "logo/32x32.png" or "32x32.png" but not "png"
+    nor "32x32".
 
-  - domain match    (-m dom) : the patterns are looked up inside the extracted
-    string, delimited with dots ("."), and the ACL matches if any of them
-    matches.
+  - domain match    (-m dom) : the patterns are looked up anywhere inside the
+    extracted string, delimited with dots ("."), colons (":"), slashes ("/"),
+    question marks ("?"), the beginning or the end of the string. This is made
+    to be used with URLs. Leading and trailing delimiters in the pattern are
+    ignored. The ACL matches if any of them matches. As such, in the example
+    string "http://www1.dc-eu.example.com:80/blah", the patterns "http",
+    "www1", ".www1", "dc-eu", "example", "com", "80", "dc-eu.example",
+    "blah", ":www1:", "dc-eu.example:80" would match, but not "eu" nor "dc".
+    Using it to match domain suffixes for filtering or routing is generally
+    not a good idea, as the routing could easily be fooled by prepending the
+    matching prefix in front of another domain for example.
 
 String matching applies to verbatim strings as they are passed, with the
 exception of the backslash ("\") which makes it possible to escape some
@@ -18365,7 +18508,7 @@
 ssl_bc : boolean
   Returns true when the back connection was made via an SSL/TLS transport
   layer and is locally deciphered. This means the outgoing connection was made
-  other a server with the "ssl" option. It can be used in a tcp-check or an
+  to a server with the "ssl" option. It can be used in a tcp-check or an
   http-check ruleset.
 
 ssl_bc_alg_keysize : integer
@@ -18823,6 +18966,16 @@
   requires that the SSL library is built with support for TLS extensions
   enabled (check haproxy -vv).
 
+  CAUTION! Except under very specific conditions, it is normally not correct to
+  use this field as a substitute for the HTTP "Host" header field. For example,
+  when forwarding an HTTPS connection to a server, the SNI field must be set
+  from the HTTP Host header field using "req.hdr(host)" and not from the front
+  SNI value. The reason is that SNI is solely used to select the certificate
+  the server side will present, and that clients are then allowed to send
+  requests with different Host values as long as they match the names in the
+  certificate. As such, "ssl_fc_sni" should normally not be used as an argument
+  to the "sni" server keyword, unless the backend works in TCP mode.
+
   ACL derivatives :
     ssl_fc_sni_end : suffix match
     ssl_fc_sni_reg : regex match
@@ -19528,7 +19681,11 @@
   information from databases and keep them in caches. Note that with outgoing
   caches, it would be wiser to use "url" instead. With ACLs, it's typically
   used to match exact file names (e.g. "/login.php"), or directory parts using
-  the derivative forms. See also the "url" and "base" fetch methods.
+  the derivative forms. See also the "url" and "base" fetch methods. Please
+  note that any fragment reference in the URI ('#' after the path) is strictly
+  forbidden by the HTTP standard and will be rejected. However, if the frontend
+  receiving the request has "option accept-invalid-http-request", then this
+  fragment part will be accepted and will also appear in the path.
 
   ACL derivatives :
     path     : exact string match
@@ -19546,7 +19703,11 @@
   relative URI, excluding the scheme and the authority part, if any. Indeed,
   while it is the common representation for an HTTP/1.1 request target, in
   HTTP/2, an absolute URI is often used. This sample fetch will return the same
-  result in both cases.
+  result in both cases. Please note that any fragment reference in the URI ('#'
+  after the path) is strictly forbidden by the HTTP standard and will be
+  rejected. However, if the frontend receiving the request has "option
+  accept-invalid-http-request", then this fragment part will be accepted and
+  will also appear in the path.
 
 query : string
   This extracts the request's query string, which starts after the first
@@ -19779,7 +19940,11 @@
   "path" is preferred over using "url", because clients may send a full URL as
   is normally done with proxies. The only real use is to match "*" which does
   not match in "path", and for which there is already a predefined ACL. See
-  also "path" and "base".
+  also "path" and "base". Please note that any fragment reference in the URI
+  ('#' after the path) is strictly forbidden by the HTTP standard and will be
+  rejected. However, if the frontend receiving the request has "option
+  accept-invalid-http-request", then this fragment part will be accepted and
+  will also appear in the url.
 
   ACL derivatives :
     url     : exact string match
@@ -20876,6 +21041,13 @@
     header (empty line) was never seen, most likely because the server timeout
     stroke before the server managed to process the request.
 
+  - Td: this is the total transfer time of the response payload till the last
+    byte sent to the client. In HTTP it starts after the last response header
+    (after Tr).
+
+    The data sent are not guaranteed to be received by the client, they can be
+    stuck in either the kernel or the network.
+
   - Ta: total active time for the HTTP request, between the moment the proxy
     received the first byte of the request header and the emission of the last
     byte of the response body. The exception is when the "logasap" option is
@@ -22056,8 +22228,8 @@
 socket type and the transport method.
 
 
-11.1 Address family prefixes
-----------------------------
+11.1. Address family prefixes
+-----------------------------
 
 'abns@<name>' following <name> is an abstract namespace (Linux only).
 
@@ -22092,8 +22264,8 @@
                start by slash '/'.
 
 
-11.2 Socket type prefixes
--------------------------
+11.2. Socket type prefixes
+--------------------------
 
 Previous "Address family prefixes" can also be prefixed to force the socket
 type and the transport method. The default depends of the statement using
@@ -22102,7 +22274,7 @@
 but we could force to use syslog over TCP.
 
 Those prefixes were designed for internal purpose and users should
-instead use aliases of the next section "11.5.3 Protocol prefixes".
+instead use aliases of the next section "11.3 Protocol prefixes".
 
 If users need one those prefixes to perform what they expect because
 they can not configure the same using the protocol prefixes, they should
@@ -22115,8 +22287,8 @@
                             to "datagram".
 
 
-11.3 Protocol prefixes
-----------------------
+11.3. Protocol prefixes
+-----------------------
 
 'tcp@<address>[:port1[-port2]]' following <address> is considered as an IPv4
                                 or IPv6 address depending of the syntax but
@@ -22153,14 +22325,14 @@
                                  method is forced to "datagram". Depending on
                                  the statement using this address, a port or
                                  port range can or must be specified.
-                                 It is considered as an alias of 'stream+ipv4@'.
+                                 It is considered as an alias of 'dgram+ipv4@'.
 
 'udp6@<address>[:port1[-port2]]' following <address> is always considered as
                                  an IPv6 address but socket type and transport
                                  method is forced to "datagram". Depending on
                                  the statement using this address, a port or
                                  port range can or must be specified.
-                                 It is considered as an alias of 'stream+ipv4@'.
+                                 It is considered as an alias of 'dgram+ipv4@'.
 
 'uxdg@<path>'    following string is considered as a unix socket <path> but
                  transport method is forced to "datagram". It is considered as
diff --git a/doc/design-thoughts/config-language.txt b/doc/design-thoughts/config-language.txt
index 510ada6..20c4fbd 100644
--- a/doc/design-thoughts/config-language.txt
+++ b/doc/design-thoughts/config-language.txt
@@ -24,9 +24,9 @@
     <operator>  = [ == | =~ | =* | =^ | =/ | != | !~ | !* | !^ | !/ ]
     <pattern>   = "<string>"
     <action>    = [ allow | permit | deny | delete | replace | switch | add | set | redir ]
-    <args>      = optionnal action args
+    <args>      = optional action args
 
-    exemples:
+    examples:
 
         req in URI     =^ "/images" switch images
         req in h(host) =* ".mydomain.com" switch mydomain
diff --git a/doc/internals/http-parsing.txt b/doc/internals/http-parsing.txt
index 494558b..8b3f239 100644
--- a/doc/internals/http-parsing.txt
+++ b/doc/internals/http-parsing.txt
@@ -325,11 +325,11 @@
 
 - each http_txn has 1 request message (http_req), and 0 or 1 response message
   (http_rtr). Each of them has 1 and only one http_txn. An http_txn holds
-  informations such as the HTTP method, the URI, the HTTP version, the
+  information such as the HTTP method, the URI, the HTTP version, the
   transfer-encoding, the HTTP status, the authorization, the req and rtr
   content-length, the timers, logs, etc... The backend and server which process
   the request are also known from the http_txn.
 
-- both request and response messages hold header and parsing informations, such
+- both request and response messages hold header and parsing information, such
   as the parsing state, start of headers, start of message, captures, etc...
 
diff --git a/doc/intro.txt b/doc/intro.txt
index 5289fa7..d6e8648 100644
--- a/doc/intro.txt
+++ b/doc/intro.txt
@@ -40,22 +40,23 @@
 3.3.4.        High availability
 3.3.5.        Load balancing
 3.3.6.        Stickiness
-3.3.7.        Sampling and converting information
-3.3.8.        Maps
-3.3.9.        ACLs and conditions
-3.3.10.       Content switching
-3.3.11.       Stick-tables
-3.3.12.       Formatted strings
-3.3.13.       HTTP rewriting and redirection
-3.3.14.       Server protection
-3.3.15.       Logging
-3.3.16.       Statistics
-3.4.      Advanced features
-3.4.1.        Management
-3.4.2.        System-specific capabilities
-3.4.3.        Scripting
-3.5.      Sizing
-3.6.      How to get HAProxy
+3.3.7.        Logging
+3.3.8.        Statistics
+3.4.      Standard features
+3.4.1.        Sampling and converting information
+3.4.2.        Maps
+3.4.3.        ACLs and conditions
+3.4.4.        Content switching
+3.4.5.        Stick-tables
+3.4.6.        Formatted strings
+3.4.7.        HTTP rewriting and redirection
+3.4.8.        Server protection
+3.5.      Advanced features
+3.5.1.        Management
+3.5.2.        System-specific capabilities
+3.5.3.        Scripting
+3.6.      Sizing
+3.7.      How to get HAProxy
 
 4.    Companion products and alternatives
 4.1.      Apache HTTP server
@@ -776,8 +777,71 @@
     to reach the server they've been assigned to but no new users will go there.
 
 
-3.3.7. Basic features : Sampling and converting information
------------------------------------------------------------
+3.3.7. Basic features : Logging
+-------------------------------
+
+Logging is an extremely important feature for a load balancer, first because a
+load balancer is often wrongly accused of causing the problems it reveals, and
+second because it is placed at a critical point in an infrastructure where all
+normal and abnormal activity needs to be analyzed and correlated with other
+components.
+
+HAProxy provides very detailed logs, with millisecond accuracy and the exact
+connection accept time that can be searched in firewalls logs (e.g. for NAT
+correlation). By default, TCP and HTTP logs are quite detailed and contain
+everything needed for troubleshooting, such as source IP address and port,
+frontend, backend, server, timers (request receipt duration, queue duration,
+connection setup time, response headers time, data transfer time), global
+process state, connection counts, queue status, retries count, detailed
+stickiness actions and disconnect reasons, header captures with a safe output
+encoding. It is then possible to extend or replace this format to include any
+sampled data, variables, captures, resulting in very detailed information. For
+example it is possible to log the number of cumulative requests or number of
+different URLs visited by a client.
+
+The log level may be adjusted per request using standard ACLs, so it is possible
+to automatically silence some logs considered as pollution and instead raise
+warnings when some abnormal behavior happens for a small part of the traffic
+(e.g. too many URLs or HTTP errors for a source address). Administrative logs
+are also emitted with their own levels to inform about the loss or recovery of a
+server for example.
+
+Each frontend and backend may use multiple independent log outputs, which eases
+multi-tenancy. Logs are preferably sent over UDP, maybe JSON-encoded, and are
+truncated after a configurable line length in order to guarantee delivery. But
+it is also possible to send them to stdout/stderr or any file descriptor, as
+well as to a ring buffer that a client can subscribe to in order to retrieve
+them.
+
+
+3.3.8. Basic features : Statistics
+----------------------------------
+
+HAProxy provides a web-based statistics reporting interface with authentication,
+security levels and scopes. It is thus possible to provide each hosted customer
+with his own page showing only his own instances. This page can be located in a
+hidden URL part of the regular web site so that no new port needs to be opened.
+This page may also report the availability of other HAProxy nodes so that it is
+easy to spot if everything works as expected at a glance. The view is synthetic
+with a lot of details accessible (such as error causes, last access and last
+change duration, etc), which are also accessible as a CSV table that other tools
+may import to draw graphs. The page may self-refresh to be used as a monitoring
+page on a large display. In administration mode, the page also allows to change
+server state to ease maintenance operations.
+
+A Prometheus exporter is also provided so that the statistics can be consumed
+in a different format depending on the deployment.
+
+
+3.4. Standard features
+----------------------
+
+In this section, some features that are very commonly used in HAProxy but are
+not necessarily present on other load balancers are enumerated.
+
+
+3.4.1. Standard features : Sampling and converting information
+--------------------------------------------------------------
 
 HAProxy supports information sampling using a wide set of "sample fetch
 functions". The principle is to extract pieces of information known as samples,
@@ -836,8 +900,8 @@
   - map-based key-to-value conversion from a file (mostly used for geolocation).
 
 
-3.3.8. Basic features : Maps
-----------------------------
+3.4.2. Standard features : Maps
+-------------------------------
 
 Maps are a powerful type of converter consisting in loading a two-columns file
 into memory at boot time, then looking up each input sample from the first
@@ -856,8 +920,8 @@
 to set up.
 
 
-3.3.9. Basic features : ACLs and conditions
--------------------------------------------
+3.4.3. Standard features : ACLs and conditions
+----------------------------------------------
 
 Most operations in HAProxy can be made conditional. Conditions are built by
 combining multiple ACLs using logic operators (AND, OR, NOT). Each ACL is a
@@ -897,8 +961,8 @@
 being analyzed.
 
 
-3.3.10. Basic features : Content switching
-------------------------------------------
+3.4.4. Standard features : Content switching
+--------------------------------------------
 
 HAProxy implements a mechanism known as content-based switching. The principle
 is that a connection or request arrives on a frontend, then the information
@@ -928,8 +992,8 @@
 been reported to work fine at least with 300000 backends in production.
 
 
-3.3.11. Basic features : Stick-tables
--------------------------------------
+3.4.5. Standard features : Stick-tables
+---------------------------------------
 
 Stick-tables are commonly used to store stickiness information, that is, to keep
 a reference to the server a certain visitor was directed to. The key is then the
@@ -967,8 +1031,8 @@
 speed.
 
 
-3.3.12. Basic features : Formatted strings
------------------------------------------
+3.4.6. Standard features : Formatted strings
+--------------------------------------------
 
 There are many places where HAProxy needs to manipulate character strings, such
 as logs, redirects, header additions, and so on. In order to provide the
@@ -983,8 +1047,8 @@
 special tags are provided as shortcuts for information commonly used in logs.
 
 
-3.3.13. Basic features : HTTP rewriting and redirection
--------------------------------------------------------
+3.4.7. Standard features : HTTP rewriting and redirection
+---------------------------------------------------------
 
 Installing a load balancer in front of an application that was never designed
 for this can be a challenging task without the proper tools. One of the most
@@ -1030,8 +1094,8 @@
   - all operations support ACL-based conditions;
 
 
-3.3.14. Basic features : Server protection
-------------------------------------------
+3.4.8. Standard features : Server protection
+--------------------------------------------
 
 HAProxy does a lot to maximize service availability, and for this it takes
 large efforts to protect servers against overloading and attacks. The first
@@ -1089,67 +1153,11 @@
 cacheable response and which may result in an intermediary cache to deliver it
 to another visitor, causing an accidental session sharing.
 
-
-3.3.15. Basic features : Logging
---------------------------------
-
-Logging is an extremely important feature for a load balancer, first because a
-load balancer is often wrongly accused of causing the problems it reveals, and
-second because it is placed at a critical point in an infrastructure where all
-normal and abnormal activity needs to be analyzed and correlated with other
-components.
-
-HAProxy provides very detailed logs, with millisecond accuracy and the exact
-connection accept time that can be searched in firewalls logs (e.g. for NAT
-correlation). By default, TCP and HTTP logs are quite detailed and contain
-everything needed for troubleshooting, such as source IP address and port,
-frontend, backend, server, timers (request receipt duration, queue duration,
-connection setup time, response headers time, data transfer time), global
-process state, connection counts, queue status, retries count, detailed
-stickiness actions and disconnect reasons, header captures with a safe output
-encoding. It is then possible to extend or replace this format to include any
-sampled data, variables, captures, resulting in very detailed information. For
-example it is possible to log the number of cumulative requests or number of
-different URLs visited by a client.
-
-The log level may be adjusted per request using standard ACLs, so it is possible
-to automatically silent some logs considered as pollution and instead raise
-warnings when some abnormal behavior happen for a small part of the traffic
-(e.g. too many URLs or HTTP errors for a source address). Administrative logs
-are also emitted with their own levels to inform about the loss or recovery of a
-server for example.
-
-Each frontend and backend may use multiple independent log outputs, which eases
-multi-tenancy. Logs are preferably sent over UDP, maybe JSON-encoded, and are
-truncated after a configurable line length in order to guarantee delivery. But
-it is also possible to send them to stdout/stderr or any file descriptor, as
-well as to a ring buffer that a client can subscribe to in order to retrieve
-them.
 
-
-3.3.16. Basic features : Statistics
------------------------------------
-
-HAProxy provides a web-based statistics reporting interface with authentication,
-security levels and scopes. It is thus possible to provide each hosted customer
-with his own page showing only his own instances. This page can be located in a
-hidden URL part of the regular web site so that no new port needs to be opened.
-This page may also report the availability of other HAProxy nodes so that it is
-easy to spot if everything works as expected at a glance. The view is synthetic
-with a lot of details accessible (such as error causes, last access and last
-change duration, etc), which are also accessible as a CSV table that other tools
-may import to draw graphs. The page may self-refresh to be used as a monitoring
-page on a large display. In administration mode, the page also allows to change
-server state to ease maintenance operations.
-
-A Prometheus exporter is also provided so that the statistics can be consumed
-in a different format depending on the deployment.
-
-
-3.4. Advanced features
+3.5. Advanced features
 ----------------------
 
-3.4.1. Advanced features : Management
+3.5.1. Advanced features : Management
 -------------------------------------
 
 HAProxy is designed to remain extremely stable and safe to manage in a regular
@@ -1230,7 +1238,7 @@
     bug in HAProxy is suspected;
 
 
-3.4.2. Advanced features : System-specific capabilities
+3.5.2. Advanced features : System-specific capabilities
 -------------------------------------------------------
 
 Depending on the operating system HAProxy is deployed on, certain extra features
@@ -1279,7 +1287,7 @@
 interrupted during the process's replacement.
 
 
-3.4.3. Advanced features : Scripting
+3.5.3. Advanced features : Scripting
 ------------------------------------
 
 HAProxy can be built with support for the Lua embedded language, which opens a
@@ -1291,7 +1299,7 @@
 "doc/lua-api/index.rst" for more information on how to use Lua.
 
 
-3.4.4. Advanced features: Tracing
+3.5.4. Advanced features: Tracing
 ---------------------------------
 
 At any moment an administrator may connect over the CLI and enable tracing in
@@ -1303,7 +1311,7 @@
 violations from faulty servers and clients, or denial of service attacks.
 
 
-3.5. Sizing
+3.6. Sizing
 -----------
 
 Typical CPU usage figures show 15% of the processing time spent in HAProxy
@@ -1397,15 +1405,17 @@
   - about 5000 concurrent end-to-end TLS connections (both sides) per GB of
     RAM including the memory required for system buffers;
 
+A more recent benchmark featuring the multi-thread enabled HAProxy 2.4 on a
+64-core ARM Graviton2 processor in AWS reached 2 million HTTPS requests per
+second at sub-millisecond response time, and 100 Gbps of traffic:
+
+  https://www.haproxy.com/blog/haproxy-forwards-over-2-million-http-requests-per-second-on-a-single-aws-arm-instance/
+
 Thus a good rule of thumb to keep in mind is that the request rate is divided
 by 10 between TLS keep-alive and TLS resume, and between TLS resume and TLS
 renegotiation, while it's only divided by 3 between HTTP keep-alive and HTTP
 close. Another good rule of thumb is to remember that a high frequency core
-with AES instructions can do around 5 Gbps of AES-GCM per core.
-
-Having more cores rarely helps (except for TLS) and is even counter-productive
-due to the lower frequency. In general a small number of high frequency cores
-is better.
+with AES instructions can do around 20 Gbps of AES-GCM per core.
 
 Another good rule of thumb is to consider that on the same server, HAProxy will
 be able to saturate :
@@ -1417,7 +1427,7 @@
   - and about 100-1000 application servers depending on the technology in use.
 
 
-3.6. How to get HAProxy
+3.7. How to get HAProxy
 -----------------------
 
 HAProxy is an open source project covered by the GPLv2 license, meaning that
diff --git a/doc/management.txt b/doc/management.txt
index 56e4f66..1177aa2 100644
--- a/doc/management.txt
+++ b/doc/management.txt
@@ -2563,7 +2563,7 @@
     other: any other DNS errors
     invalid: invalid DNS response (from a protocol point of view)
     too_big: too big response
-    outdated: number of response arrived too late (after an other name server)
+    outdated: number of responses that arrived too late (after another name server)
 
 show servers conn [<backend>]
   Dump the current and idle connections state of the servers belonging to the
@@ -2919,6 +2919,10 @@
     ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
     ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
 
+show startup-logs
+  Dump all messages emitted during the startup of the current haproxy process,
+  each startup-logs buffer is unique to its haproxy worker.
+
 show table
   Dump general information on all known stick-tables. Their name is returned
   (the name of the proxy which holds them), their type (currently zero, always
diff --git a/doc/proxy-protocol.txt b/doc/proxy-protocol.txt
index 4d49d5c..fac0331 100644
--- a/doc/proxy-protocol.txt
+++ b/doc/proxy-protocol.txt
@@ -500,7 +500,7 @@
     - if the incoming byte count is 16 or above and the 13 first bytes match
       the protocol signature block followed by the protocol version 2 :
 
-           \x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A\x02
+           \x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A\x20
 
     - otherwise, if the incoming byte count is 8 or above, and the 5 first
       characters match the US-ASCII representation of "PROXY" then the protocol
diff --git a/include/haproxy/applet-t.h b/include/haproxy/applet-t.h
index 49c8ab4..014e01e 100644
--- a/include/haproxy/applet-t.h
+++ b/include/haproxy/applet-t.h
@@ -175,7 +175,7 @@
 			int flags;           /* non-zero if "dict" dump requested */
 		} cfgpeers;
 		struct {
-			char *path;
+			char *err;
 			struct ckch_store *old_ckchs;
 			struct ckch_store *new_ckchs;
 			struct ckch_inst *next_ckchi;
diff --git a/include/haproxy/buf.h b/include/haproxy/buf.h
index 6cfd9d3..fc7b6fb 100644
--- a/include/haproxy/buf.h
+++ b/include/haproxy/buf.h
@@ -445,7 +445,7 @@
 
 	/* process output data in two steps to cover wrapping */
 	if (block1 > b_size(b) - b_head_ofs(b)) {
-		block2 = b_size(b) - b_head_ofs(b);
+		block2 = b_peek_ofs(b, block1);
 		block1 -= block2;
 	}
 	memcpy(swap + b_size(b) - output, b_head(b), block1);
diff --git a/include/haproxy/bug.h b/include/haproxy/bug.h
index a2a9f6d..445dd7c 100644
--- a/include/haproxy/bug.h
+++ b/include/haproxy/bug.h
@@ -42,8 +42,10 @@
 #if __GNUC_PREREQ__(5, 0)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Warray-bounds"
+#if __GNUC_PREREQ__(6, 0)
 #pragma GCC diagnostic ignored "-Wnull-dereference"
 #endif
+#endif
 	*(volatile char *)1 = 0;
 #if __GNUC_PREREQ__(5, 0)
 #pragma GCC diagnostic pop
diff --git a/include/haproxy/compiler.h b/include/haproxy/compiler.h
index 39bb996..345e608 100644
--- a/include/haproxy/compiler.h
+++ b/include/haproxy/compiler.h
@@ -58,10 +58,12 @@
 #endif
 #endif
 
+#ifndef __maybe_unused
 /* silence the "unused" warnings without having to place painful #ifdefs.
  * For use with variables or functions.
  */
 #define __maybe_unused __attribute__((unused))
+#endif
 
 /* These macros are used to declare a section name for a variable.
  * WARNING: keep section names short, as MacOS limits them to 16 characters.
@@ -124,7 +126,7 @@
 #if defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
 #define my_unreachable() __builtin_unreachable()
 #else
-#define my_unreachable()
+#define my_unreachable() do { } while (1)
 #endif
 #endif
 
diff --git a/include/haproxy/connection.h b/include/haproxy/connection.h
index 4b8b098..157387b 100644
--- a/include/haproxy/connection.h
+++ b/include/haproxy/connection.h
@@ -218,6 +218,16 @@
 		c->xprt->shutw(c, c->xprt_ctx, 0);
 }
 
+/* Used to know if a connection is in an idle list. It returns connection flag
+ * corresponding to the idle list if the connection is idle (CO_FL_SAFE_LIST or
+ * CO_FL_IDLE_LIST) or 0 otherwise. Note that if the connection is scheduled to
+ * be removed, 0 is returned, regardless of the connection flags.
+ */
+static inline unsigned int conn_get_idle_flag(const struct connection *conn)
+{
+	return (!MT_LIST_INLIST(&conn->toremove_list) ? conn->flags & CO_FL_LIST_MASK : 0);
+}
+
 /* This is used at the end of the socket IOCB to possibly create the mux if it
  * was not done yet, or wake it up if flags changed compared to old_flags or if
  * need_wake insists on this. It returns <0 if the connection was destroyed and
@@ -266,7 +276,7 @@
 	     ((conn->flags ^ old_flags) & CO_FL_NOTIFY_DONE) ||
 	     ((old_flags & CO_FL_WAIT_XPRT) && !(conn->flags & CO_FL_WAIT_XPRT))) &&
 	    conn->mux && conn->mux->wake) {
-		uint conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		uint conn_in_list = conn_get_idle_flag(conn);
 		struct server *srv = objt_server(conn->target);
 
 		if (conn_in_list) {
diff --git a/include/haproxy/cpuset-t.h b/include/haproxy/cpuset-t.h
index 7731464..8c7607c 100644
--- a/include/haproxy/cpuset-t.h
+++ b/include/haproxy/cpuset-t.h
@@ -24,7 +24,7 @@
 
 # define CPUSET_REPR cpuset_t
 
-# if defined(__FreeBSD__) && __FreeBSD_version >= 1400046
+# if defined(__FreeBSD__) && __FreeBSD_version >= 1301000
 #  define CPUSET_USE_CPUSET
 # else
 #  define CPUSET_USE_FREEBSD_CPUSET
diff --git a/include/haproxy/fd-t.h b/include/haproxy/fd-t.h
index 4759ef2..77d6eed 100644
--- a/include/haproxy/fd-t.h
+++ b/include/haproxy/fd-t.h
@@ -69,6 +69,7 @@
 #define FD_ET_POSSIBLE_BIT 19  /* edge-triggered is possible on this FD */
 #define FD_EXPORTED_BIT    20  /* FD is exported and must not be closed */
 #define FD_EXCL_SYSCALL_BIT 21 /* a syscall claims exclusivity on this FD */
+#define FD_DISOWN_BIT      22  /* this fd will be closed by some external code */
 
 
 /* and flag values */
@@ -109,6 +110,7 @@
 #define FD_ET_POSSIBLE      (1U << FD_ET_POSSIBLE_BIT)
 #define FD_EXPORTED         (1U << FD_EXPORTED_BIT)
 #define FD_EXCL_SYSCALL     (1U << FD_EXCL_SYSCALL_BIT)
+#define FD_DISOWN           (1U << FD_DISOWN_BIT)
 
 /* This is the value used to mark a file descriptor as dead. This value is
  * negative, this is important so that tests on fd < 0 properly match. It
diff --git a/include/haproxy/fd.h b/include/haproxy/fd.h
index 66a9aea..88a387c 100644
--- a/include/haproxy/fd.h
+++ b/include/haproxy/fd.h
@@ -445,6 +445,12 @@
 {
 	extern void sock_conn_iocb(int);
 
+	/* This must never happen and would definitely indicate a bug, in
+	 * addition to overwriting some unexpected memory areas.
+	 */
+	BUG_ON(fdtab[fd].owner != NULL);
+	BUG_ON(fdtab[fd].state != 0);
+
 	fdtab[fd].owner = owner;
 	fdtab[fd].iocb = iocb;
 	fdtab[fd].state = 0;
diff --git a/include/haproxy/h2.h b/include/haproxy/h2.h
index 8d2aa95..4f872b9 100644
--- a/include/haproxy/h2.h
+++ b/include/haproxy/h2.h
@@ -207,7 +207,7 @@
 /* various protocol processing functions */
 
 int h2_parse_cont_len_header(unsigned int *msgf, struct ist *value, unsigned long long *body_len);
-int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len);
+int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, int relaxed);
 int h2_make_htx_response(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, char *upgrade_protocol);
 int h2_make_htx_trailers(struct http_hdr *list, struct htx *htx);
 
diff --git a/include/haproxy/hlua.h b/include/haproxy/hlua.h
index 21e4534..d77f92e 100644
--- a/include/haproxy/hlua.h
+++ b/include/haproxy/hlua.h
@@ -54,6 +54,7 @@
 void hlua_applet_tcp_fct(struct appctx *ctx);
 void hlua_applet_http_fct(struct appctx *ctx);
 struct task *hlua_process_task(struct task *task, void *context, unsigned int state);
+void hlua_yieldk(lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k, int timeout, unsigned int flags);
 
 #else /* USE_LUA */
 
diff --git a/include/haproxy/http.h b/include/haproxy/http.h
index d0f3fd2..e8c5b85 100644
--- a/include/haproxy/http.h
+++ b/include/haproxy/http.h
@@ -36,6 +36,8 @@
 enum http_meth_t find_http_meth(const char *str, const int len);
 int http_get_status_idx(unsigned int status);
 const char *http_get_reason(unsigned int status);
+struct ist http_get_host_port(const struct ist host);
+int http_is_default_port(const struct ist schm, const struct ist port);
 int http_validate_scheme(const struct ist schm);
 struct ist http_get_scheme(const struct ist uri);
 struct ist http_get_authority(const struct ist uri, int no_userinfo);
@@ -132,6 +134,25 @@
 	return ETAG_INVALID;
 }
 
+/* Looks into <ist> for forbidden characters for :path values (0x00..0x1F,
+ * 0x20, 0x23), starting at pointer <start> which must be within <ist>.
+ * Returns non-zero if such a character is found, 0 otherwise. When run on
+ * unlikely header match, it's recommended to first check for the presence
+ * of control chars using ist_find_ctl().
+ */
+static inline int http_path_has_forbidden_char(const struct ist ist, const char *start)
+{
+	do {
+		if ((uint8_t)*start <= 0x23) {
+			if ((uint8_t)*start < 0x20)
+				return 1;
+			if ((1U << ((uint8_t)*start & 0x1F)) & ((1<<3) | (1<<0)))
+				return 1;
+		}
+		start++;
+	} while (start < istend(ist));
+	return 0;
+}
 
 #endif /* _HAPROXY_HTTP_H */
 
diff --git a/include/haproxy/http_ana-t.h b/include/haproxy/http_ana-t.h
index 89d41dd..508721d 100644
--- a/include/haproxy/http_ana-t.h
+++ b/include/haproxy/http_ana-t.h
@@ -59,7 +59,7 @@
 /* cacheability management, bits values 0x1000 to 0x3000 (0-3 shift 12) */
 #define TX_CACHEABLE	0x00001000	/* at least part of the response is cacheable */
 #define TX_CACHE_COOK	0x00002000	/* a cookie in the response is cacheable */
-#define TX_CACHE_IGNORE 0x00004000	/* do not retrieve object from cache, or avoid caching response */
+#define TX_CACHE_IGNORE 0x00004000	/* do not retrieve object from cache */
 #define TX_CACHE_SHIFT	12		/* bit shift */
 
 #define TX_CON_WANT_TUN 0x00008000	/* Will be a tunnel (CONNECT or 101-Switching-Protocol) */
diff --git a/include/haproxy/http_rules.h b/include/haproxy/http_rules.h
index 060e3e8..e048064 100644
--- a/include/haproxy/http_rules.h
+++ b/include/haproxy/http_rules.h
@@ -34,6 +34,7 @@
 struct act_rule *parse_http_req_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
 struct act_rule *parse_http_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
 struct act_rule *parse_http_after_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
+void  http_free_redirect_rule(struct redirect_rule *rdr);
 struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, struct proxy *curproxy,
                                                const char **args, char **errmsg, int use_fmt, int dir);
 
diff --git a/include/haproxy/listener-t.h b/include/haproxy/listener-t.h
index c350290..dac71ec 100644
--- a/include/haproxy/listener-t.h
+++ b/include/haproxy/listener-t.h
@@ -156,12 +156,21 @@
 #endif
 };
 
+/*
+ * In OpenSSL 3.0.0, the biggest verify error code's value is 94 and on the
+ * latest 1.1.1 it already reaches 79 so we need to size the ca/crt-ignore-err
+ * arrays accordingly. If the max error code increases, the arrays might need to
+ * be resized.
+ */
+#define SSL_MAX_VFY_ERROR_CODE 94
+#define IGNERR_BF_SIZE ((SSL_MAX_VFY_ERROR_CODE >> 6) + 1)
+
 /* "bind" line settings */
 struct bind_conf {
 #ifdef USE_OPENSSL
 	struct ssl_bind_conf ssl_conf; /* ssl conf for ctx setting */
-	unsigned long long ca_ignerr;  /* ignored verify errors in handshake if depth > 0 */
-	unsigned long long crt_ignerr; /* ignored verify errors in handshake if depth == 0 */
+	unsigned long long ca_ignerr_bitfield[IGNERR_BF_SIZE];   /* ignored verify errors in handshake if depth > 0 */
+	unsigned long long crt_ignerr_bitfield[IGNERR_BF_SIZE];  /* ignored verify errors in handshake if depth == 0 */
 	SSL_CTX *initial_ctx;      /* SSL context for initial negotiation */
 	SSL_CTX *default_ctx;      /* SSL context of first/default certificate */
 	struct ssl_bind_conf *default_ssl_conf; /* custom SSL conf of default_ctx */
@@ -196,6 +205,9 @@
 	struct rx_settings settings; /* all the settings needed for the listening socket */
 };
 
+#define LI_F_FINALIZED           0x00000002  /* listener made it to the READY||LIMITED||FULL state at least once, may be suspended/resumed safely */
+#define LI_F_SUSPENDED           0x00000004  /* listener has been suspended using suspend_listener(), it is either in LI_PAUSED or LI_ASSIGNED state */
+
 /* The listener will be directly referenced by the fdtab[] which holds its
  * socket. The listener provides the protocol-specific accept() function to
  * the fdtab.
@@ -206,6 +218,8 @@
 	short int nice;                 /* nice value to assign to the instantiated tasks */
 	int luid;			/* listener universally unique ID, used for SNMP */
 	int options;			/* socket options : LI_O_* */
+	uint16_t flags;                 /* listener flags: LI_F_* */
+	/* 2-bytes hole here */
 	__decl_thread(HA_RWLOCK_T lock);
 
 	struct fe_counters *counters;	/* statistics counters */
diff --git a/include/haproxy/listener.h b/include/haproxy/listener.h
index 3fb078f..44af4bc 100644
--- a/include/haproxy/listener.h
+++ b/include/haproxy/listener.h
@@ -39,18 +39,40 @@
  * closes upon SHUT_WR and refuses to rebind. So a common validation path
  * involves SHUT_WR && listen && SHUT_RD. In case of success, the FD's polling
  * is disabled. It normally returns non-zero, unless an error is reported.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * suspend() may totally stop a listener if it doesn't support the PAUSED
+ * state, in which case state will be set to ASSIGNED.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function pick
+ * the missing ones, in this order.
  */
-int pause_listener(struct listener *l);
+int suspend_listener(struct listener *l, int lpx, int lli);
 
 /* This function tries to resume a temporarily disabled listener.
  * The resulting state will either be LI_READY or LI_FULL. 0 is returned
  * in case of failure to resume (eg: dead socket).
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
  */
-int resume_listener(struct listener *l);
+int resume_listener(struct listener *l, int lpx, int lli);
+
+/* Same as resume_listener(), but will only work to resume from
+ * LI_FULL or LI_LIMITED states because we try to relax listeners that
+ * were temporarily restricted and not to resume inactive listeners that
+ * may have been paused or completely stopped in the meantime.
+ * Returns positive value for success and 0 for failure.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
+ */
+int relax_listener(struct listener *l, int lpx, int lli);
 
 /*
  * This function completely stops a listener. It will need to operate under the
- * proxy's lock, the protocol's lock, and the listener's lock. The caller is
+ * proxy's lock, the protocol's and the listener's lock. The caller is
  * responsible for indicating in lpx, lpr, lli whether the respective locks are
  * already held (non-zero) or not (zero) so that the function picks the missing
  * ones, in this order.
diff --git a/include/haproxy/peers-t.h b/include/haproxy/peers-t.h
index ee9d905..93920fd 100644
--- a/include/haproxy/peers-t.h
+++ b/include/haproxy/peers-t.h
@@ -31,6 +31,7 @@
 
 #include <haproxy/api-t.h>
 #include <haproxy/dict-t.h>
+#include <haproxy/stick_table-t.h>
 #include <haproxy/thread-t.h>
 
 
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 9264998..0547dcb 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -55,8 +55,8 @@
 void dump_pools_to_trash();
 void dump_pools(void);
 int pool_total_failures();
-unsigned long pool_total_allocated();
-unsigned long pool_total_used();
+unsigned long long pool_total_allocated();
+unsigned long long pool_total_used();
 void pool_flush(struct pool_head *pool);
 void pool_gc(struct pool_head *pool_ctx);
 struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index 5d7f598..5aa1ad3 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -143,7 +143,12 @@
 #define PR_O2_SRC_ADDR	0x00100000	/* get the source ip and port for logs */
 
 #define PR_O2_FAKE_KA   0x00200000      /* pretend we do keep-alive with server even though we close */
-/* unused : 0x00400000..0x80000000 */
+
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_BLK  0x00400000 /* reject request with header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_DEL  0x00800000 /* remove request header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP 0x01000000 /* preserve request header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_MASK 0x01c00000 /* mask for restrict-http-header-names option */
+/* unused : 0x02000000..0x80000000 */
 
 /* server health checks */
 #define PR_O2_CHK_NONE  0x00000000      /* no L7 health checks configured (TCP by default) */
@@ -397,6 +402,7 @@
 	unsigned int li_paused;                 /* total number of listeners paused (LI_PAUSED) */
 	unsigned int li_bound;                  /* total number of listeners ready (LI_LISTEN)  */
 	unsigned int li_ready;                  /* total number of listeners ready (>=LI_READY) */
+	unsigned int li_suspended;		/* total number of listeners suspended (could be paused or unbound) */
 
 	/* warning: these structs are huge, keep them at the bottom */
 	struct sockaddr_storage dispatch_addr;	/* the default address to connect to */
diff --git a/include/haproxy/server.h b/include/haproxy/server.h
index c927de3..66dcaf9 100644
--- a/include/haproxy/server.h
+++ b/include/haproxy/server.h
@@ -45,6 +45,7 @@
 int srv_downtime(const struct server *s);
 int srv_lastsession(const struct server *s);
 int srv_getinter(const struct check *check);
+void srv_settings_cpy(struct server *srv, const struct server *src, int srv_tmpl);
 int parse_server(const char *file, int linenum, char **args, struct proxy *curproxy, const struct proxy *defproxy, int parse_flags);
 int srv_update_addr(struct server *s, void *ip, int ip_sin_family, const char *updater);
 const char *srv_update_addr_port(struct server *s, const char *addr, const char *port, char *updater);
@@ -254,18 +255,21 @@
 
 static inline void srv_use_conn(struct server *srv, struct connection *conn)
 {
-	unsigned int curr;
+	unsigned int curr, prev;
 
 	curr = _HA_ATOMIC_ADD_FETCH(&srv->curr_used_conns, 1);
 
+
 	/* It's ok not to do that atomically, we don't need an
 	 * exact max.
 	 */
-	if (srv->max_used_conns < curr)
-		srv->max_used_conns = curr;
+	prev = HA_ATOMIC_LOAD(&srv->max_used_conns);
+	if (prev < curr)
+		HA_ATOMIC_STORE(&srv->max_used_conns, curr);
 
-	if (srv->est_need_conns < curr)
-		srv->est_need_conns = curr;
+	prev = HA_ATOMIC_LOAD(&srv->est_need_conns);
+	if (prev < curr)
+		HA_ATOMIC_STORE(&srv->est_need_conns, curr);
 }
 
 static inline void conn_delete_from_tree(struct ebmb_node *node)
@@ -295,6 +299,7 @@
 	/* Remove the connection from any tree (safe, idle or available) */
 	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 	conn_delete_from_tree(&conn->hash_node->node);
+	conn->flags &= ~CO_FL_LIST_MASK;
 	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
 
diff --git a/include/haproxy/sink.h b/include/haproxy/sink.h
index 51d507c..71cad0f 100644
--- a/include/haproxy/sink.h
+++ b/include/haproxy/sink.h
@@ -28,9 +28,12 @@
 
 extern struct list sink_list;
 
+extern struct proxy *sink_proxies_list;
+
 struct sink *sink_find(const char *name);
 struct sink *sink_new_fd(const char *name, const char *desc, enum log_fmt, int fd);
-ssize_t __sink_write(struct sink *sink, const struct ist msg[], size_t nmsg,
+ssize_t __sink_write(struct sink *sink, size_t maxlen,
+                     const struct ist msg[], size_t nmsg,
                      int level, int facility, struct ist * metadata);
 int sink_announce_dropped(struct sink *sink, int facility);
 
@@ -42,7 +45,8 @@
  * The function returns the number of Bytes effectively sent or announced.
  * or <= 0 in other cases.
  */
-static inline ssize_t sink_write(struct sink *sink, const struct ist msg[], size_t nmsg,
+static inline ssize_t sink_write(struct sink *sink, size_t maxlen,
+                                 const struct ist msg[], size_t nmsg,
                                  int level, int facility, struct ist *metadata)
 {
 	ssize_t sent;
@@ -67,7 +71,7 @@
 	}
 
 	HA_RWLOCK_RDLOCK(LOGSRV_LOCK, &sink->ctx.lock);
-	sent = __sink_write(sink, msg, nmsg, level, facility, metadata);
+	sent = __sink_write(sink, maxlen, msg, nmsg, level, facility, metadata);
 	HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &sink->ctx.lock);
 
  fail:
diff --git a/include/haproxy/spoe-t.h b/include/haproxy/spoe-t.h
index 197f47b..38e3272 100644
--- a/include/haproxy/spoe-t.h
+++ b/include/haproxy/spoe-t.h
@@ -308,6 +308,7 @@
 		struct freq_ctr conn_per_sec;   /* connections per second */
 		struct freq_ctr err_per_sec;    /* connection errors per second */
 
+		unsigned int    idles;          /* # of idle applets */
 		struct eb_root  idle_applets;   /* idle SPOE applets available to process data */
 		struct list     applets;        /* all SPOE applets for this agent */
 		struct list     sending_queue;  /* Queue of streams waiting to send data */
diff --git a/include/haproxy/ssl_sock-t.h b/include/haproxy/ssl_sock-t.h
index 9839011..35270ca 100644
--- a/include/haproxy/ssl_sock-t.h
+++ b/include/haproxy/ssl_sock-t.h
@@ -52,16 +52,20 @@
 #define SSL_SOCK_SEND_UNLIMITED     0x00000004
 #define SSL_SOCK_RECV_HEARTBEAT     0x00000008
 
-/* bits 0xFFFF0000 are reserved to store verify errors */
+/* bits 0xFFFFFF00 are reserved to store verify errors.
+ * The CA and CRT error codes will be stored on 7 bits each
+ * (since the max verify error code does not exceed 127)
+ * and the CA error depth will be stored on 4 bits.
+ */
 
 /* Verify errors macros */
-#define SSL_SOCK_CA_ERROR_TO_ST(e) (((e > 63) ? 63 : e) << (16))
-#define SSL_SOCK_CAEDEPTH_TO_ST(d) (((d > 15) ? 15 : d) << (6+16))
-#define SSL_SOCK_CRTERROR_TO_ST(e) (((e > 63) ? 63 : e) << (4+6+16))
+#define SSL_SOCK_CA_ERROR_TO_ST(e) (((e > 127) ? 127 : e) << (8))
+#define SSL_SOCK_CAEDEPTH_TO_ST(d) (((d > 15) ? 15 : d) << (7+8))
+#define SSL_SOCK_CRTERROR_TO_ST(e) (((e > 127) ? 127 : e) << (4+7+8))
 
-#define SSL_SOCK_ST_TO_CA_ERROR(s) ((s >> (16)) & 63)
-#define SSL_SOCK_ST_TO_CAEDEPTH(s) ((s >> (6+16)) & 15)
-#define SSL_SOCK_ST_TO_CRTERROR(s) ((s >> (4+6+16)) & 63)
+#define SSL_SOCK_ST_TO_CA_ERROR(s) ((s >> (8)) & 127)
+#define SSL_SOCK_ST_TO_CAEDEPTH(s) ((s >> (7+8)) & 15)
+#define SSL_SOCK_ST_TO_CRTERROR(s) ((s >> (4+7+8)) & 127)
 
 /* ssl_methods flags for ssl options */
 #define MC_SSL_O_ALL            0x0000
@@ -106,7 +110,9 @@
 	SETCERT_ST_INIT = 0,
 	SETCERT_ST_GEN,
 	SETCERT_ST_INSERT,
+	SETCERT_ST_SUCCESS,
 	SETCERT_ST_FIN,
+	SETCERT_ST_ERROR,
 };
 
 #if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
diff --git a/include/haproxy/ssl_sock.h b/include/haproxy/ssl_sock.h
index c68425a..9db4ec4 100644
--- a/include/haproxy/ssl_sock.h
+++ b/include/haproxy/ssl_sock.h
@@ -149,6 +149,29 @@
 		return 1;
 }
 
+static inline int cert_ignerr_bitfield_get(const unsigned long long *bitfield, int bit_index)
+{
+	int byte_index = bit_index >> 6;
+	int val = 0;
+
+	if (byte_index < IGNERR_BF_SIZE)
+		val = bitfield[byte_index] & (1ULL << (bit_index & 0x3F));
+
+	return val != 0;
+}
+
+static inline void cert_ignerr_bitfield_set(unsigned long long *bitfield, int bit_index)
+{
+	int byte_index = bit_index >> 6;
+
+	if (byte_index < IGNERR_BF_SIZE)
+		bitfield[byte_index] |= (1ULL << (bit_index & 0x3F));
+}
+
+static inline void cert_ignerr_bitfield_set_all(unsigned long long *bitfield)
+{
+	memset(bitfield, -1, IGNERR_BF_SIZE*sizeof(*bitfield));
+}
 
 #endif /* USE_OPENSSL */
 #endif /* _HAPROXY_SSL_SOCK_H */
diff --git a/include/haproxy/stats-t.h b/include/haproxy/stats-t.h
index 17641f5..920a311 100644
--- a/include/haproxy/stats-t.h
+++ b/include/haproxy/stats-t.h
@@ -443,7 +443,9 @@
 	ST_F_USED_CONN_CUR,
 	ST_F_NEED_CONN_EST,
 	ST_F_UWEIGHT,
+	ST_F_AGG_SRV_STATUS,
 	ST_F_AGG_SRV_CHECK_STATUS,
+	ST_F_AGG_CHECK_STATUS,
 
 	/* must always be the last one */
 	ST_F_TOTAL_FIELDS
diff --git a/include/haproxy/stick_table.h b/include/haproxy/stick_table.h
index c9fb85e..5fd8d1e 100644
--- a/include/haproxy/stick_table.h
+++ b/include/haproxy/stick_table.h
@@ -46,7 +46,7 @@
 int stksess_kill(struct stktable *t, struct stksess *ts, int decrefcount);
 
 int stktable_init(struct stktable *t);
-int stktable_parse_type(char **args, int *idx, unsigned long *type, size_t *key_size);
+int stktable_parse_type(char **args, int *idx, unsigned long *type, size_t *key_size, const char *file, int linenum);
 int parse_stick_table(const char *file, int linenum, char **args,
                       struct stktable *t, char *id, char *nid, struct peers *peers);
 struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *key);
diff --git a/include/haproxy/stream.h b/include/haproxy/stream.h
index 1ec87b1..5a61c20 100644
--- a/include/haproxy/stream.h
+++ b/include/haproxy/stream.h
@@ -337,7 +337,7 @@
 	       ((s->be->conn_retries - si->conn_retries) %
 	        (s->be->conn_retries + 1 + s->be->redispatch_after) == 0))) ||
 	     (!(s->flags & SF_DIRECT) && s->be->srv_act > 1 &&
-	      ((s->be->lbprm.algo & BE_LB_KIND) == BE_LB_KIND_RR)))) {
+	      ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI)))) {
 		sess_change_server(s, NULL);
 		if (may_dequeue_tasks(objt_server(s->target), s->be))
 			process_srv_queue(objt_server(s->target), 0);
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index d31c893..b3ffdf2 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -281,7 +281,7 @@
 
 	if (likely(task_in_wq(t))) {
 		locked = t->state & TASK_SHARED_WQ;
-		BUG_ON(!locked && t->thread_mask != tid_bit);
+		BUG_ON(!locked && t->thread_mask != tid_bit && !(global.mode & MODE_STOPPING));
 		if (locked)
 			HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
 		__task_unlink_wq(t);
@@ -526,6 +526,7 @@
 	t->process = NULL;
 	t->tid = -1;
 #ifdef DEBUG_TASK
+	t->call_date = 0;
 	t->debug.caller_idx = 0;
 #endif
 	LIST_INIT(&t->list);
diff --git a/include/haproxy/tcpcheck-t.h b/include/haproxy/tcpcheck-t.h
index 29cb4cc..439fe18 100644
--- a/include/haproxy/tcpcheck-t.h
+++ b/include/haproxy/tcpcheck-t.h
@@ -35,6 +35,7 @@
 #define TCPCHK_OPT_DEFAULT_CONNECT 0x0008  /* Do a connect using server params */
 #define TCPCHK_OPT_IMPLICIT        0x0010  /* Implicit connect */
 #define TCPCHK_OPT_SOCKS4          0x0020  /* check the connection via socks4 proxy */
+#define TCPCHK_OPT_HAS_DATA        0x0040  /* data should be sent after connection */
 
 enum tcpcheck_send_type {
 	TCPCHK_SEND_UNDEF = 0,  /* Send is not parsed. */
diff --git a/include/haproxy/time.h b/include/haproxy/time.h
index c8358e0..e3bb74e 100644
--- a/include/haproxy/time.h
+++ b/include/haproxy/time.h
@@ -54,7 +54,8 @@
 extern THREAD_LOCAL unsigned int   idle_time;        /* total idle time over current sample */
 extern THREAD_LOCAL struct timeval now;              /* internal date is a monotonic function of real clock */
 extern THREAD_LOCAL struct timeval date;             /* the real current date */
-extern struct timeval start_date;       /* the process's start date */
+extern struct timeval start_date;                    /* the process's start date */
+extern struct timeval ready_date;                    /* date when the process was considered ready */
 extern THREAD_LOCAL struct timeval before_poll;      /* system date before calling poll() */
 extern THREAD_LOCAL struct timeval after_poll;       /* system date after leaving poll() */
 extern volatile unsigned long long global_now;
diff --git a/include/haproxy/tools.h b/include/haproxy/tools.h
index a564f63..30b6bfa 100644
--- a/include/haproxy/tools.h
+++ b/include/haproxy/tools.h
@@ -70,6 +70,7 @@
  */
 extern THREAD_LOCAL int itoa_idx; /* index of next itoa_str to use */
 extern THREAD_LOCAL char itoa_str[][171];
+extern int build_is_static;
 extern char *ultoa_r(unsigned long n, char *buffer, int size);
 extern char *lltoa_r(long long int n, char *buffer, int size);
 extern char *sltoa_r(long n, char *buffer, int size);
@@ -405,7 +406,8 @@
 
 /*
  * Tries to prefix characters tagged in the <map> with the <escape>
- * character. The input <string> must be zero-terminated. The result will
+ * character. The input <string> is processed until string_stop
+ * is reached or NULL-byte is encountered. The result will
  * be stored between <start> (included) and <stop> (excluded). This
  * function will always try to terminate the resulting string with a '\0'
  * before <stop>, and will return its position if the conversion
@@ -413,7 +415,7 @@
  */
 char *escape_string(char *start, char *stop,
 		    const char escape, const long *map,
-		    const char *string);
+		    const char *string, const char *string_stop);
 
 /*
  * Tries to prefix characters tagged in the <map> with the <escape>
@@ -1060,7 +1062,7 @@
  */
 static inline uint statistical_prng_range(uint range)
 {
-	return mul32hi(statistical_prng(), range);
+	return mul32hi(statistical_prng(), range ? range - 1 : 0);
 }
 
 /* Update array <fp> with the character transition <prev> to <curr>. If <prev>
diff --git a/include/import/ebmbtree.h b/include/import/ebmbtree.h
index f99c16b..2d63ed1 100644
--- a/include/import/ebmbtree.h
+++ b/include/import/ebmbtree.h
@@ -118,6 +118,59 @@
 struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
 struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
 
+/* start from a valid leaf and find the next matching prefix that's either a
+ * duplicate, or immediately shorter than the node's current one and still
+ * matches it. The purpose is to permit a caller that is not satisfied with a
+ * result provided by ebmb_lookup_longest() to evaluate the next matching
+ * entry. Given that shorter keys are necessarily attached to nodes located
+ * above the current one, it's sufficient to restart from the current leaf and
+ * go up until we find a shorter prefix, or a non-matching one.
+ */
+static inline struct ebmb_node *ebmb_lookup_shorter(struct ebmb_node *start)
+{
+	eb_troot_t *t = start->node.leaf_p;
+	struct ebmb_node *node;
+
+	/* first, check for duplicates */
+	node = ebmb_next_dup(start);
+	if (node)
+		return node;
+
+	while (1) {
+		if (eb_gettag(t) == EB_LEFT) {
+			/* Walking up from left branch. We must ensure that we never
+			 * walk beyond root.
+			 */
+			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+				return NULL;
+			node = container_of(eb_root_to_node(eb_untag(t, EB_LEFT)), struct ebmb_node, node);
+		} else {
+			/* Walking up from right branch, so we cannot be below
+			 * root. However, if we end up on a node with an even
+			 * and positive bit, this is a cover node, which mandates
+			 * that the left branch only contains cover values, so we
+			 * must descend it.
+			 */
+			node = container_of(eb_root_to_node(eb_untag(t, EB_RGHT)), struct ebmb_node, node);
+			if (node->node.bit > 0 && !(node->node.bit & 1))
+				return ebmb_entry(eb_walk_down(t, EB_LEFT), struct ebmb_node, node);
+		}
+
+		/* Note that <t> cannot be NULL at this stage */
+		t = node->node.node_p;
+
+		/* this is a node attached to a deeper (and possibly different)
+		 * leaf, not interesting for us.
+		 */
+		if (node->node.pfx >= start->node.pfx)
+			continue;
+
+		if (check_bits(start->key, node->key, 0, node->node.pfx) == 0)
+			break;
+	}
+	return node;
+}
+
 /* The following functions are less likely to be used directly, because their
  * code is larger. The non-inlined version is preferred.
  */
diff --git a/include/import/ist.h b/include/import/ist.h
index 539a27d..31566b1 100644
--- a/include/import/ist.h
+++ b/include/import/ist.h
@@ -746,6 +746,53 @@
 	return NULL;
 }
 
+/* Returns a pointer to the first character found <ist> that belongs to the
+ * range [min:max] inclusive, or NULL if none is present. The function is
+ * optimized for strings having no such chars by processing up to sizeof(long)
+ * bytes at once on architectures supporting efficient unaligned accesses.
+ * Despite this it is not very fast (~0.43 byte/cycle) and should mostly be
+ * used on low match probability when it can save a call to a much slower
+ * function. Will not work for characters 0x80 and above. It's optimized for
+ * min and max to be known at build time.
+ */
+static inline const char *ist_find_range(const struct ist ist, unsigned char min, unsigned char max)
+{
+	const union { unsigned long v; } __attribute__((packed)) *u;
+	const char *curr = (void *)ist.ptr - sizeof(long);
+	const char *last = curr + ist.len;
+	unsigned long l1, l2;
+
+	/* easier with an exclusive boundary */
+	max++;
+
+	do {
+		curr += sizeof(long);
+		if (curr > last)
+			break;
+		u = (void *)curr;
+		/* add 0x<min><min><min><min>..<min> then subtract
+		 * 0x<max><max><max><max>..<max> to the value to generate a
+		 * carry in the lower byte if the byte contains a lower value.
+		 * If we generate a bit 7 that was not there, it means the byte
+		 * was min..max.
+		 */
+		l2  = u->v;
+		l1  = ~l2 & ((~0UL / 255) * 0x80); /* 0x808080...80 */
+		l2 += (~0UL / 255) * min;          /* 0x<min><min>..<min> */
+		l2 -= (~0UL / 255) * max;          /* 0x<max><max>..<max> */
+	} while ((l1 & l2) == 0);
+
+	last += sizeof(long);
+	if (__builtin_expect(curr < last, 0)) {
+		do {
+			if ((unsigned char)(*curr - min) < (unsigned char)(max - min))
+				return curr;
+			curr++;
+		} while (curr < last);
+	}
+	return NULL;
+}
+
 /* looks for first occurrence of character <chr> in string <ist> and returns
  * the tail of the string starting with this character, or (ist.end,0) if not
  * found.
diff --git a/reg-tests/cache/caching_rules.vtc b/reg-tests/cache/caching_rules.vtc
index 114b2fd..10840e1 100644
--- a/reg-tests/cache/caching_rules.vtc
+++ b/reg-tests/cache/caching_rules.vtc
@@ -67,6 +67,42 @@
     txresp -hdr "Cache-Control: max-age=500" \
         -hdr "Age: 100" -bodylen 140
 
+
+    # "Cache-Control: no-cache" on client request but still stored in cache
+    rxreq
+    expect req.url == "/nocache"
+    txresp -hdr "Cache-Control: max-age=500" \
+        -hdr "Age: 100" -bodylen 140
+
+    rxreq
+    expect req.url == "/nocache"
+    txresp -hdr "Cache-Control: max-age=500" \
+        -hdr "Age: 100" -bodylen 140
+
+
+    # max-age=0
+    rxreq
+    expect req.url == "/maxage_zero"
+    txresp -hdr "Cache-Control: max-age=0" \
+        -bodylen 150
+
+    rxreq
+    expect req.url == "/maxage_zero"
+    txresp -hdr "Cache-Control: max-age=0" \
+        -bodylen 150
+
+    # Overridden null max-age
+    rxreq
+    expect req.url == "/overridden"
+    txresp -hdr "Cache-Control: max-age=1, s-maxage=5" \
+        -bodylen 160
+
+    rxreq
+    expect req.url == "/overridden_null_maxage"
+    txresp -hdr "Cache-Control: max-age=0, s-maxage=5" \
+        -bodylen 190
+
+
 } -start
 
 server s2 {
@@ -222,4 +258,64 @@
         expect resp.bodylen == 140
         expect resp.http.X-Cache-Hit == 1
 
+        # Cache-Control: no-cache
+        txreq -url "/nocache" -hdr "Cache-Control: no-cache"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 140
+        expect resp.http.X-Cache-Hit == 0
+
+        txreq -url "/nocache" -hdr "Cache-Control: no-cache"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 140
+        expect resp.http.X-Cache-Hit == 0
+
+        txreq -url "/nocache"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 140
+        expect resp.http.X-Cache-Hit == 1
+
+        # max-age=0 (control test for the overridden null max-age test below)
+        txreq -url "/maxage_zero"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 150
+        expect resp.http.X-Cache-Hit == 0
+
+        txreq -url "/maxage_zero"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 150
+        expect resp.http.X-Cache-Hit == 0
+
+        # Overridden max-age directive
+        txreq -url "/overridden"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 160
+        expect resp.http.X-Cache-Hit == 0
+
+        txreq -url "/overridden"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 160
+        expect resp.http.X-Cache-Hit == 1
+
+        txreq -url "/overridden_null_maxage"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 190
+        expect resp.http.X-Cache-Hit == 0
+
+        # The previous response should have been cached even if it had
+        # a max-age=0 since it also had a positive s-maxage
+        txreq -url "/overridden_null_maxage"
+        rxresp
+        expect resp.status == 200
+        expect resp.bodylen == 190
+        expect resp.http.X-Cache-Hit == 1
+
+
 } -run
diff --git a/reg-tests/cache/if-modified-since.vtc b/reg-tests/cache/if-modified-since.vtc
index 8ae1cce..387fc7e 100644
--- a/reg-tests/cache/if-modified-since.vtc
+++ b/reg-tests/cache/if-modified-since.vtc
@@ -62,6 +62,10 @@
                server www ${s1_addr}:${s1_port}
                http-response cache-store my_cache
 
+               # Remove Transfer-Encoding header because of a vtest issue with
+               # 304-Not-Modified responses
+               http-after-response del-header transfer-encoding if { status eq 304 }
+
        cache my_cache
                total-max-size 3
                max-age 20
@@ -149,4 +153,3 @@
        expect resp.bodylen == 0
 
 } -run
-
diff --git a/reg-tests/cache/if-none-match.vtc b/reg-tests/cache/if-none-match.vtc
index ba3336a..bc7d67b 100644
--- a/reg-tests/cache/if-none-match.vtc
+++ b/reg-tests/cache/if-none-match.vtc
@@ -47,6 +47,10 @@
                server www ${s1_addr}:${s1_port}
                http-response cache-store my_cache
 
+               # Remove Transfer-Encoding header because of a vtest issue with
+               # 304-Not-Modified responses
+               http-after-response del-header transfer-encoding if { status eq 304 }
+
        cache my_cache
                total-max-size 3
                max-age 20
diff --git a/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc b/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc
index f9f37a1..ea72701 100644
--- a/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc
+++ b/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc
@@ -8,7 +8,7 @@
 
 syslog S1 -level notice {
     recv
-    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded.+reason: Layer7 check passed.+code: 2(20|48).+check duration: [[:digit:]]+ms.+status: 1/1 UP."
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded.+reason: Layer7 check passed.+code: 221.+check duration: [[:digit:]]+ms.+status: 1/1 UP."
     barrier b sync
     recv
     expect ~ "Health check for server be1/srv1 failed.+reason: Layer7 timeout.+check duration: [[:digit:]]+ms.+status: 0/1 DOWN"
@@ -36,12 +36,17 @@
     send "2"
     send "2"
     send "0"
-    send "\r\n\r\n"
+    send "\r\n"
     recv 16
     send "2"
     send "4"
     send "8"
-    send "\r\n\r\n"
+    send "\r\n"
+    recv 6
+    send "2"
+    send "2"
+    send "1"
+    send " ok\r\n"
 } -start
 
 server s2 {
diff --git a/reg-tests/checks/pgsql-check.vtc b/reg-tests/checks/pgsql-check.vtc
index 968a18c..05c5a71 100644
--- a/reg-tests/checks/pgsql-check.vtc
+++ b/reg-tests/checks/pgsql-check.vtc
@@ -23,6 +23,11 @@
   send "Not a PostgreSQL response"
 } -start
 
+server s4 {
+  recv 23
+  sendhex "52000000170000000A534352414D2D5348412D3235360000"
+} -start
+
 syslog S1 -level notice {
     recv
     expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"PostgreSQL server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
@@ -38,6 +43,10 @@
     expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer7 wrong status.+info: \"PostgreSQL unknown error\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
 } -start
 
+syslog S4 -level notice {
+    recv
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv succeeded, reason: Layer7 check passed.+info: \"PostgreSQL server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
 
 haproxy h1 -conf {
     defaults
@@ -64,6 +73,12 @@
         option pgsql-check user postgres
         server srv ${s3_addr}:${s3_port} check inter 1s rise 1 fall 1
 
+    backend be4
+        log ${S4_addr}:${S4_port} daemon
+        option log-health-checks
+        option pgsql-check user postgres
+        server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
     listen pgsql1
         bind "fd@${pgsql}"
         tcp-request inspect-delay 100ms
@@ -75,3 +90,4 @@
 syslog S1 -wait
 syslog S2 -wait
 syslog S3 -wait
+syslog S4 -wait
diff --git a/reg-tests/checks/smtp-check.vtc b/reg-tests/checks/smtp-check.vtc
index 29d0ddb..9d8bb8a 100644
--- a/reg-tests/checks/smtp-check.vtc
+++ b/reg-tests/checks/smtp-check.vtc
@@ -10,6 +10,8 @@
   send "220 smtp-check.vtc SMTP Server\r\n"
   recv 16
   send "250 smtp-check.vtc\r\n"
+  recv 6
+  send "221 smtp-check.vtc closing\r\n"
 } -start
 
 server s2 {
@@ -18,6 +20,8 @@
   send "250-smtp-check.vtc\r\n"
   send "250-KEYWORD\r\n"
   send "250 LAST KEYWORD\r\n"
+  recv 6
+  send "221 smtp-check.vtc closing\r\n"
 } -start
 
 server s3 {
@@ -36,12 +40,12 @@
 
 syslog S1 -level notice {
     recv
-    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+code: 250.+info: \"smtp-check.vtc\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+code: 221.+info: \"smtp-check.vtc closing\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
 } -start
 
 syslog S2 -level notice {
     recv
-    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv succeeded, reason: Layer7 check passed.+code: 250.+info: \"smtp-check.vtc\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv succeeded, reason: Layer7 check passed.+code: 221.+info: \"smtp-check.vtc closing\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
 } -start
 
 syslog S3 -level notice {
diff --git a/reg-tests/contrib/prometheus.vtc b/reg-tests/contrib/prometheus.vtc
index ebe0b87..766db47 100644
--- a/reg-tests/contrib/prometheus.vtc
+++ b/reg-tests/contrib/prometheus.vtc
@@ -8,12 +8,12 @@
 server s1 {
 	rxreq
 	txresp
-} -repeat 2 -start
+} -start
 
 server s2 {
 	rxreq
 	txresp
-} -repeat 2 -start
+}  -start
 
 haproxy h1 -conf {
     defaults
@@ -33,8 +33,9 @@
 
     backend be
 	stick-table type ip size 1m expire 10s store http_req_rate(10s)
+	option httpchk
 	server s1 ${s1_addr}:${s1_port}
-	server s2 ${s2_addr}:${s2_port} check maxqueue 10 maxconn 12 pool-max-conn 42
+	server s2 ${s2_addr}:${s2_port} check inter 5s maxqueue 10 maxconn 12 pool-max-conn 42
 } -start
 
 client c1 -connect ${h1_stats_sock} {
diff --git a/reg-tests/converter/digest.vtc b/reg-tests/converter/digest.vtc
index a14f1cc..511daca 100644
--- a/reg-tests/converter/digest.vtc
+++ b/reg-tests/converter/digest.vtc
@@ -7,7 +7,7 @@
 
 server s1 {
 	rxreq
-	txresp
+	txresp -hdr "Connection: close"
 } -repeat 2 -start
 
 haproxy h1 -conf {
diff --git a/reg-tests/converter/hmac.vtc b/reg-tests/converter/hmac.vtc
index f9d9d35..391e04a 100644
--- a/reg-tests/converter/hmac.vtc
+++ b/reg-tests/converter/hmac.vtc
@@ -7,7 +7,7 @@
 
 server s1 {
 	rxreq
-	txresp
+	txresp -hdr "Connection: close"
 } -repeat 2 -start
 
 haproxy h1 -conf {
diff --git a/reg-tests/converter/iif.vtc b/reg-tests/converter/iif.vtc
index 22414e0..3a6d6a6 100644
--- a/reg-tests/converter/iif.vtc
+++ b/reg-tests/converter/iif.vtc
@@ -5,7 +5,7 @@
 
 server s1 {
 	rxreq
-	txresp
+	txresp -hdr "Connection: close"
 } -repeat 3 -start
 
 haproxy h1 -conf {
diff --git a/reg-tests/converter/json_query.vtc b/reg-tests/converter/json_query.vtc
index b420942..9db77eb 100644
--- a/reg-tests/converter/json_query.vtc
+++ b/reg-tests/converter/json_query.vtc
@@ -5,7 +5,7 @@
 
 server s1 {
 	rxreq
-	txresp
+	txresp -hdr "Connection: close"
 } -repeat 8 -start
 
 haproxy h1 -conf {
diff --git a/reg-tests/filters/random-forwarding.vtc b/reg-tests/filters/random-forwarding.vtc
index 6ea7092..6e3df88 100644
--- a/reg-tests/filters/random-forwarding.vtc
+++ b/reg-tests/filters/random-forwarding.vtc
@@ -18,7 +18,7 @@
           -bodylen 1048576
 
         rxreq
-        expect req.url == "/"
+        expect req.url == "127.0.0.1:80"
         txresp \
           -hdr "Content-Length: 0"
         recv 36000
@@ -93,7 +93,7 @@
 	gunzip
         expect resp.bodylen == 1048576
 
-        txreq -method "CONNECT" -url "/" -nolen
+        txreq -method "CONNECT" -url "127.0.0.1:80" -hdr "host: 127.0.0.1:80" -nolen
         rxresp
         expect resp.status == 200
         send_n 1000 "0123456789abcdefghijklmnopqrstuvwxyz"
diff --git a/reg-tests/http-messaging/h1_host_normalization.vtc b/reg-tests/http-messaging/h1_host_normalization.vtc
new file mode 100644
index 0000000..6ea32d2
--- /dev/null
+++ b/reg-tests/http-messaging/h1_host_normalization.vtc
@@ -0,0 +1,276 @@
+varnishtest "H1 authority validation and host normalization based on the scheme (rfc3986 6.2.3) or the method (connect)"
+
+#REQUIRE_VERSION=2.4
+feature ignore_unknown_macro
+
+syslog S1 -level info {
+    # C1
+    recv
+    expect ~ "^.* uri: GET http://toto:poue@hostname/c1 HTTP/1.1; host: {hostname}$"
+
+    # C2
+    recv
+    expect ~ "^.* uri: GET http://hostname:8080/c2 HTTP/1.1; host: {hostname:8080}$"
+
+    # C3
+    recv
+    expect ~ "^.* uri: GET https://hostname/c3 HTTP/1.1; host: {hostname}$"
+
+    # C4
+    recv
+    expect ~ "^.* uri: GET https://hostname:80/c4 HTTP/1.1; host: {hostname:80}$"
+
+    # C5
+    recv
+    expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+    recv
+    expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+
+    # C6
+    recv
+    expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+    recv
+    expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+
+    recv
+    expect ~ "^.* uri: CONNECT hostname:8443 HTTP/1.1; host: {hostname:8443}$"
+} -start
+
+haproxy h1 -conf {
+    defaults
+        mode http
+        timeout connect 1s
+        timeout client  1s
+        timeout server  1s
+
+    frontend fe
+        bind "fd@${fe}"
+
+        http-request capture req.hdr(host) len 512
+        log-format "uri: %r; host: %hr"
+        log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+        http-request return status 200
+} -start
+
+# default port 80 with http scheme => should be normalized
+# Be sure userinfo are skipped
+client c1 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://toto:poue@hostname:80/c1" \
+      -hdr "host: hostname:80"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# port 8080 with http scheme => no normalization
+client c2 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname:8080/c2" \
+      -hdr "host: hostname:8080"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# default port 443 with https scheme => should be normalized
+client c3 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "https://hostname:443/c3" \
+      -hdr "host: hostname:443"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# port 80 with https scheme => no normalization
+client c4 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "https://hostname:80/c4" \
+      -hdr "host: hostname:80"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# CONNECT on port 80 => should be normalized
+client c5 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:80" \
+      -hdr "host: hostname:80"
+
+    rxresp
+    expect resp.status == 200
+} -run
+client c5 -connect ${h1_fe_sock} {
+
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:80" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# CONNECT on port 443 => should be normalized
+client c6 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:443" \
+      -hdr "host: hostname:443"
+
+    rxresp
+    expect resp.status == 200
+} -run
+client c6 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:443" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# CONNECT on a non-default port => no normalization
+client c7 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:8443" \
+      -hdr "host: hostname:8443"
+
+    rxresp
+    expect resp.status == 200
+} -run
+
+# host mismatch => error
+client c8 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname1/" \
+      -hdr "host: hostname2"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# port mismatch => error
+client c9 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname:80/" \
+      -hdr "host: hostname:81"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# no host port with a non-default port in abs-uri  => error
+client c10 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname:8080/" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# non-default host port with a default port in abs-uri => error
+client c11 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname/" \
+      -hdr "host: hostname:81"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# mismatch between host headers => error
+client c12 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname1/" \
+      -hdr "host: hostname1" \
+      -hdr "host: hostname2"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# mismatch between host headers but with a normalization => error
+client c13 -connect ${h1_fe_sock} {
+    txreq \
+      -req "GET" \
+      -url "http://hostname1/" \
+      -hdr "host: hostname1:80" \
+      -hdr "host: hostname1"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# CONNECT authority without port => error
+client c14 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# host mismatch with CONNECT => error
+client c15 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname1:80" \
+      -hdr "host: hostname2:80"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# port mismatch with CONNECT => error
+client c16 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:80" \
+      -hdr "host: hostname:443"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# no host port with non-default port in CONNECT authority => error
+client c17 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "hostname:8080" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+# no authority => error
+client c18 -connect ${h1_fe_sock} {
+    txreq \
+      -req "CONNECT" \
+      -url "/" \
+      -hdr "host: hostname"
+
+    rxresp
+    expect resp.status == 400
+} -run
+
+syslog S1 -wait
diff --git a/reg-tests/http-messaging/h1_to_h1.vtc b/reg-tests/http-messaging/h1_to_h1.vtc
index c7d0085..603c032 100644
--- a/reg-tests/http-messaging/h1_to_h1.vtc
+++ b/reg-tests/http-messaging/h1_to_h1.vtc
@@ -275,3 +275,29 @@
 	# arrive here.
 	expect_close
 } -run
+
+client c4h1 -connect ${h1_feh1_sock} {
+	# this request is invalid and advertises an invalid C-L ending with an
+        # empty value, which results in a stream error.
+	txreq \
+	  -req "GET" \
+	  -url "/test31.html" \
+          -hdr "content-length: 0," \
+          -hdr "connection: close"
+	rxresp
+	expect resp.status == 400
+	expect_close
+} -run
+
+client c5h1 -connect ${h1_feh1_sock} {
+	# this request is invalid and advertises an empty C-L, which results
+	# in a stream error.
+	txreq \
+	  -req "GET" \
+	  -url "/test41.html" \
+          -hdr "content-length:" \
+          -hdr "connection: close"
+	rxresp
+	expect resp.status == 400
+	expect_close
+} -run
diff --git a/reg-tests/http-messaging/h2_to_h1.vtc b/reg-tests/http-messaging/h2_to_h1.vtc
index 0d2b1e5..ec7a7c1 100644
--- a/reg-tests/http-messaging/h2_to_h1.vtc
+++ b/reg-tests/http-messaging/h2_to_h1.vtc
@@ -10,6 +10,8 @@
 barrier b2 cond 2 -cyclic
 barrier b3 cond 2 -cyclic
 barrier b4 cond 2 -cyclic
+barrier b5 cond 2 -cyclic
+barrier b6 cond 2 -cyclic
 
 server s1 {
 	rxreq
@@ -31,6 +33,12 @@
 
 	barrier b4 sync
 	# the next request is never received
+
+	barrier b5 sync
+	# the next request is never received
+
+	barrier b6 sync
+	# the next request is never received
 } -repeat 2 -start
 
 haproxy h1 -conf {
@@ -121,6 +129,32 @@
 		txdata -data "this is sent and ignored"
 		rxrst
 	} -run
+
+	# fifth request is invalid and advertises an invalid C-L ending with an
+        # empty value, which results in a stream error.
+	stream 9 {
+		barrier b5 sync
+		txreq \
+		  -req "GET" \
+		  -scheme "https" \
+		  -url "/test5.html" \
+		  -hdr "content-length" "0," \
+		  -nostrend
+		rxrst
+	} -run
+
+	# sixth request is invalid and advertises an empty C-L, which results
+	# in a stream error.
+	stream 11 {
+		barrier b6 sync
+		txreq \
+		  -req "GET" \
+		  -scheme "https" \
+		  -url "/test6.html" \
+		  -hdr "content-length" "" \
+		  -nostrend
+		rxrst
+	} -run
 } -run
 
 # HEAD requests : don't work well yet
@@ -263,4 +297,30 @@
 		txdata -data "this is sent and ignored"
 		rxrst
 	} -run
+
+	# fifth request is invalid and advertises invalid C-L ending with an
+        # empty value, which results in a stream error.
+	stream 9 {
+		barrier b5 sync
+		txreq \
+		  -req "POST" \
+		  -scheme "https" \
+		  -url "/test25.html" \
+		  -hdr "content-length" "0," \
+		  -nostrend
+		rxrst
+	} -run
+
+	# sixth request is invalid and advertises an empty C-L, which results
+	# in a stream error.
+	stream 11 {
+		barrier b6 sync
+		txreq \
+		  -req "POST" \
+		  -scheme "https" \
+		  -url "/test26.html" \
+		  -hdr "content-length" "" \
+		  -nostrend
+		rxrst
+	} -run
 } -run
diff --git a/reg-tests/http-messaging/http_abortonclose.vtc b/reg-tests/http-messaging/http_abortonclose.vtc
index aba4654..1c0facd 100644
--- a/reg-tests/http-messaging/http_abortonclose.vtc
+++ b/reg-tests/http-messaging/http_abortonclose.vtc
@@ -4,13 +4,15 @@
 # NOTE : This test may fail if too many vtest are running in parallel because
 #        the port reserved for closed s1 server may be reused by another vtest
 
-#REQUIRE_VERSION=2.3
+#REQUIRE_VERSION=2.0
 #REGTEST_TYPE=slow
 
+# b0 : Wait until s1 is detected as DOWN to be sure it is stopped
 # b1 : Don't send /c4 before /c3 was received by s2 server
 # b2 : Don't finish c2 before c1 and c3 before c4 (from syslog POV)
 # b3 : finish c3 before s2
 
+barrier b0 cond 2 -cyclic
 barrier b1 cond 2 -cyclic
 barrier b2 cond 2 -cyclic
 barrier b3 cond 2 -cyclic
@@ -32,12 +34,16 @@
 } -start
 
 syslog S -level info {
+    recv alert
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+    barrier b0 sync
+
     recv
-    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c1 HTTP/1\\.1\""
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c1 HTTP/1\\.1\""
     barrier b2 sync
     recv
-    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - CC-- .* .* \"GET /c2 HTTP/1\\.1\""
-
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - CC-- .* .* \"GET /c2 HTTP/1\\.1\""
+    barrier b2 sync
     recv
     expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/<NOSRV> [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - CQ-- .* .* \"GET /c4 HTTP/1\\.1\""
     barrier b2 sync
@@ -66,7 +72,8 @@
         option httplog
         log ${S_addr}:${S_port} local0 debug err
         bind "fd@${fe1}"
-        use_backend be1
+        use_backend be1_1 if { path /c1 }
+        use_backend be1_2 if { path /c2 }
 
     frontend fe2
         option httplog
@@ -74,13 +81,25 @@
         bind "fd@${fe2}"
         use_backend be2
 
-    backend be1
+    backend be1_1
         server srv1 ${s1_addr}:${s1_port}
 
+    backend be1_2
+        timeout connect 1s
+        retries 10
+        server srv1 ${s1_addr}:${s1_port}
+
     backend be2
         server srv1 ${s2_addr}:${s2_port} maxconn 1
+
+    backend check
+        server srv1 ${s1_addr}:${s1_port} check
+        log ${S_addr}:${S_port} local0 debug alert
 } -start
 
+# Wait until s1 is detected as DOWN
+barrier b0 sync
+
 # No server, wait all connection retries : SC--
 client  c1 -connect ${h1_fe1_sock} {
     txreq -url /c1
@@ -96,6 +115,9 @@
     txreq -url /c2
 } -run
 
+# Wait for the c2 log entry
+barrier b2 sync
+
 # server with maxconn=1, abort waiting the server reply : CH--
 client  c3 -connect ${h1_fe2_sock} {
     txreq -url /c3
diff --git a/reg-tests/http-messaging/http_request_buffer.vtc b/reg-tests/http-messaging/http_request_buffer.vtc
index 4fd7bb2..e712e32 100644
--- a/reg-tests/http-messaging/http_request_buffer.vtc
+++ b/reg-tests/http-messaging/http_request_buffer.vtc
@@ -9,6 +9,8 @@
 # thanks to "http-buffer-request". If this was the case, c2 client
 # could not connect to s1 server and this would lead to make this test fail.
 
+barrier b1 cond 2 -cyclic
+
 server s1 {
 	rxreq
 	expect req.bodylen == 257
@@ -23,13 +25,19 @@
 
 syslog S -level info {
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 fe1/<NOSRV> .* 408 .* - - cD-- .* .* \"GET /this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 fe1/<NOSRV> .* 408 .* - - cR-- .* .* \"GET /this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url HTTP/1\\.1\""
+	barrier b1 sync
+
 	recv
 	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
+	barrier b1 sync
+
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+	barrier b1 sync
+
 	recv
-	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
+	expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* -1 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
 } -start
 
 haproxy h1 -conf {
@@ -49,6 +57,14 @@
 		log ${S_addr}:${S_port} local0 debug err
 		bind "fd@${fe1}"
 		use_backend be1
+
+	frontend fe2
+	        timeout client 10s
+		option httplog
+		option http-buffer-request
+		log ${S_addr}:${S_port} local0 debug err
+		bind "fd@${fe2}"
+		use_backend be1
 } -start
 
 # 1 byte of the payload is missing.
@@ -80,6 +96,9 @@
 	expect resp.status == 408
 } -run
 
+# Wait for the log message to be matched
+barrier b1 sync
+
 # Payload is fully sent
 #   ==> Request must be sent to the server. A 200 must be received
 client c2 -connect ${h1_fe1_sock} {
@@ -88,10 +107,13 @@
 	expect resp.status == 200
 } -run
 
+# Wait for the log message to be matched
+barrier b1 sync
+
 # Payload is fully sent in 2 steps (with a small delay, smaller than the client
 # timeout) and splitted on a chunk size.
 #   ==> Request must be sent to the server. A 200 must be received
-client c3 -connect ${h1_fe1_sock} {
+client c3 -connect ${h1_fe2_sock} {
 	send "POST /1  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n\r\n"
@@ -99,11 +121,14 @@
 	expect resp.status == 200
 } -run
 
+# Wait for the log message to be matched
+barrier b1 sync
+
 # Last CRLF of the request payload is missing but payload is sent in 2 steps
 # (with a small delay, smaller than the client timeout) and splitted on a chunk
 # size. The client aborts before sending the last CRLF.
 #   ==> Request must be handled as an error with 'CR--' termination state.
-client c4 -connect ${h1_fe1_sock} {
+client c4 -connect ${h1_fe2_sock} {
 	send "POST /2  HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
 	delay 0.01
 	send "\r\n1\r\n0\r\n"
diff --git a/reg-tests/http-rules/h1or2_to_h1c.vtc b/reg-tests/http-rules/h1or2_to_h1c.vtc
index 81b53e7..7490172 100644
--- a/reg-tests/http-rules/h1or2_to_h1c.vtc
+++ b/reg-tests/http-rules/h1or2_to_h1c.vtc
@@ -27,11 +27,11 @@
 	  -body "This is a body"
 
 	expect req.method == "GET"
-	expect req.http.fe-sl1-crc == 992395575
-	expect req.http.fe-sl2-crc == 1270056220
+	expect req.http.fe-sl1-crc == 1874847043
+	expect req.http.fe-sl2-crc == 1142278307
 	expect req.http.fe-hdr-crc == 1719311923
-	expect req.http.be-sl1-crc == 2604236007
-	expect req.http.be-sl2-crc == 4181358964
+	expect req.http.be-sl1-crc == 3455320059
+	expect req.http.be-sl2-crc == 2509326257
 	expect req.http.be-hdr-crc == 3634102538
 } -repeat 2 -start
 
@@ -53,6 +53,7 @@
 	http-request set-var(req.path)       path
 	http-request set-var(req.query)      query
 	http-request set-var(req.param)      url_param(qs_arg)
+	http-request set-var(req.cl)         req.fhdr(content-length)
 
 	http-request set-header     sl1      "sl1: "
 
@@ -65,8 +66,10 @@
 
 	http-request set-header     sl1      "%[req.fhdr(sl1)] method=<%[var(req.method)]>; uri=<%[var(req.uri)]>; path=<%[var(req.path)]>;"
 	http-request set-header     sl1      "%[req.fhdr(sl1)] query=<%[var(req.query)]>; param=<%[var(req.param)]>"
+	http-request set-header     sl1      "%[req.fhdr(sl1)] cl=<%[var(req.cl)]>"
 	http-request set-header     sl2      "%[req.fhdr(sl2)] method=<%[method]>; uri=<%[url]>; path=<%[path]>; "
 	http-request set-header     sl2      "%[req.fhdr(sl2)] query=<%[query]>; param=<%[url_param(qs_arg)]>"
+	http-request set-header     sl2      "%[req.fhdr(sl2)] cl=<%[req.fhdr(content-length)]>"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr1=<%[req.hdr(hdr1)]>; fhdr1=<%[req.fhdr(hdr1)]>;"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr2=<%[req.hdr(hdr2)]>; fhdr2=<%[req.fhdr(hdr2)]>;"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr3=<%[req.hdr(hdr3)]>; fhdr3=<%[req.fhdr(hdr3)]>;"
@@ -120,6 +123,7 @@
 	http-request set-var(req.path)       path
 	http-request set-var(req.query)      query
 	http-request set-var(req.param)      url_param(qs_arg)
+	http-request set-var(req.cl)         req.fhdr(content-length)
 
 	http-request set-header     sl1      "sl1: "
 
@@ -132,8 +136,10 @@
 
 	http-request set-header     sl1      "%[req.fhdr(sl1)] method=<%[var(req.method)]>; uri=<%[var(req.uri)]>; path=<%[var(req.path)]>;"
 	http-request set-header     sl1      "%[req.fhdr(sl1)] query=<%[var(req.query)]>; param=<%[var(req.param)]>"
+	http-request set-header     sl1      "%[req.fhdr(sl1)] cl=<%[var(req.cl)]>"
 	http-request set-header     sl2      "%[req.fhdr(sl2)] method=<%[method]>; uri=<%[url]>; path=<%[path]>; "
 	http-request set-header     sl2      "%[req.fhdr(sl2)] query=<%[query]>; param=<%[url_param(qs_arg)]>"
+	http-request set-header     sl2      "%[req.fhdr(sl2)] cl=<%[req.fhdr(content-length)]>"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr1=<%[req.hdr(hdr1)]>; fhdr1=<%[req.fhdr(hdr1)]>;"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr2=<%[req.hdr(hdr2)]>; fhdr2=<%[req.fhdr(hdr2)]>;"
 	http-request set-header     hdr      "%[req.fhdr(hdr)] hdr3=<%[req.hdr(hdr3)]>; fhdr3=<%[req.fhdr(hdr3)]>;"
@@ -171,6 +177,7 @@
 	txreq \
 	  -req GET \
 	  -url /path/to/file.extension?qs_arg=qs_value \
+	  -hdr "content-length: 000, 00" \
 	  -hdr "hdr1: val1" \
 	  -hdr "hdr2:  val2a" \
 	  -hdr "hdr2:    val2b" \
@@ -205,6 +212,7 @@
 		  -req GET \
 		  -scheme "https" \
 		  -url /path/to/file.extension?qs_arg=qs_value \
+		  -hdr "content-length" "000, 00" \
 		  -hdr "hdr1" "val1" \
 		  -hdr "hdr2" " val2a" \
 		  -hdr "hdr2" "   val2b" \
diff --git a/reg-tests/http-rules/normalize_uri.vtc b/reg-tests/http-rules/normalize_uri.vtc
index 6a1dc31..a144075 100644
--- a/reg-tests/http-rules/normalize_uri.vtc
+++ b/reg-tests/http-rules/normalize_uri.vtc
@@ -127,6 +127,7 @@
 
     frontend fe_fragment_strip
         bind "fd@${fe_fragment_strip}"
+        option accept-invalid-http-request
 
         http-request set-var(txn.before) url
         http-request normalize-uri fragment-strip
@@ -139,6 +140,7 @@
 
     frontend fe_fragment_encode
         bind "fd@${fe_fragment_encode}"
+        option accept-invalid-http-request
 
         http-request set-var(txn.before) url
         http-request normalize-uri fragment-encode
@@ -149,6 +151,11 @@
 
         default_backend be
 
+    frontend fe_fragment_block
+        bind "fd@${fe_fragment_block}"
+        http-request normalize-uri fragment-strip
+        default_backend be
+
     backend be
         server s1 ${s1_addr}:${s1_port}
 
@@ -534,3 +541,9 @@
     expect resp.http.before == "*"
     expect resp.http.after == "*"
 } -run
+
+client c11 -connect ${h1_fe_fragment_block_sock} {
+    txreq -url "/#foo"
+    rxresp
+    expect resp.status == 400
+} -run
diff --git a/reg-tests/http-rules/restrict_req_hdr_names.vtc b/reg-tests/http-rules/restrict_req_hdr_names.vtc
new file mode 100644
index 0000000..b493c84
--- /dev/null
+++ b/reg-tests/http-rules/restrict_req_hdr_names.vtc
@@ -0,0 +1,185 @@
+varnishtest "http-restrict-req-hdr-names option tests"
+#REQUIRE_VERSION=2.0
+
+# This config tests "http-restrict-req-hdr-names" option
+
+feature ignore_unknown_macro
+
+server s1 {
+    rxreq
+    expect req.http.x-my_hdr  == on
+    txresp
+} -start
+
+server s2 {
+    rxreq
+    expect req.http.x-my_hdr  == <undef>
+    txresp
+} -start
+
+server s3 {
+    rxreq
+    expect req.http.x-my_hdr  == on
+    txresp
+} -start
+
+server s4 {
+    rxreq
+    expect req.http.x-my_hdr  == <undef>
+    txresp
+} -start
+
+server s5 {
+    rxreq
+    expect req.http.x-my_hdr  == on
+    txresp
+} -start
+
+server s6 {
+    rxreq
+    expect req.http.x_my_hdr_with_lots_of_underscores  == <undef>
+    txresp
+} -start
+
+server s7 {
+    rxreq
+    expect req.http.x_my_hdr-1  == <undef>
+    expect req.http.x-my-hdr-2  == on
+    txresp
+} -start
+
+server s8 {
+    rxreq
+    expect req.http.x-my_hdr-1  == <undef>
+    expect req.http.x-my_hdr-2  == <undef>
+    txresp
+} -start
+
+server s9 {
+    rxreq
+    expect req.http.x-my-hdr-with-trailing-underscore_  == <undef>
+    txresp
+} -start
+
+haproxy h1 -conf {
+    defaults
+        mode http
+        timeout connect 1s
+        timeout client  1s
+        timeout server  1s
+
+    frontend fe1
+        bind "fd@${fe1}"
+        use_backend be-http1 if { path /req1 }
+        use_backend be-http2 if { path /req2 }
+        use_backend be-http3 if { path /req3 }
+        use_backend be-fcgi1 if { path /req4 }
+        use_backend be-fcgi2 if { path /req5 }
+        use_backend be-fcgi3 if { path /req6 }
+        use_backend be-http4 if { path /req7 }
+        use_backend be-http5 if { path /req8 }
+        use_backend be-http6 if { path /req9 }
+        use_backend be-http7 if { path /req10 }
+
+    backend be-http1
+        server s1 ${s1_addr}:${s1_port}
+
+    backend be-http2
+        option http-restrict-req-hdr-names delete
+        server s2 ${s2_addr}:${s2_port}
+
+    backend be-http3
+        option http-restrict-req-hdr-names reject
+
+    backend be-fcgi1
+        option http-restrict-req-hdr-names preserve
+        server s3 ${s3_addr}:${s3_port}
+
+    backend be-fcgi2
+        option http-restrict-req-hdr-names delete
+        server s4 ${s4_addr}:${s4_port}
+
+    backend be-fcgi3
+        option http-restrict-req-hdr-names reject
+
+    backend be-http4
+        option http-restrict-req-hdr-names delete
+        server s6 ${s6_addr}:${s6_port}
+
+    backend be-http5
+        option http-restrict-req-hdr-names delete
+        server s7 ${s7_addr}:${s7_port}
+
+    backend be-http6
+        option http-restrict-req-hdr-names delete
+        server s8 ${s8_addr}:${s8_port}
+
+    backend be-http7
+        option http-restrict-req-hdr-names delete
+        server s9 ${s9_addr}:${s9_port}
+
+    defaults
+        mode http
+        timeout connect 1s
+        timeout client  1s
+        timeout server  1s
+        option http-restrict-req-hdr-names preserve
+
+    frontend fe2
+        bind "fd@${fe2}"
+        default_backend be-fcgi4
+
+    backend be-fcgi4
+        server s5 ${s5_addr}:${s5_port}
+
+    fcgi-app my-fcgi-app
+        docroot ${testdir}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+    txreq -req GET -url /req1 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req2 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req3 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 403
+
+    txreq -req GET -url /req4 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req5 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req6 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 403
+
+    txreq -req GET -url /req7 -hdr "X_my_hdr_with_lots_of_underscores: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req8 -hdr "X_my_hdr-1: on" -hdr "X-my-hdr-2: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req9 -hdr "X-my_hdr-1: on" -hdr "X-my_hdr-2: on"
+    rxresp
+    expect resp.status == 200
+
+    txreq -req GET -url /req10 -hdr "X-my-hdr-with-trailing-underscore_: on"
+    rxresp
+    expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+    txreq -req GET -url /req1 -hdr "X-my_hdr: on"
+    rxresp
+    expect resp.status == 200
+} -run
diff --git a/reg-tests/log/log_forward.vtc b/reg-tests/log/log_forward.vtc
new file mode 100644
index 0000000..3977f4c
--- /dev/null
+++ b/reg-tests/log/log_forward.vtc
@@ -0,0 +1,57 @@
+varnishtest "Test the TCP load-forward"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.3-dev1)'"
+feature ignore_unknown_macro
+
+server s1 {
+    rxreq
+	txresp
+} -repeat 500 -start
+
+syslog Slg1 -level info {
+    recv
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c1 HTTP/1.1\""
+} -repeat 50 -start
+
+haproxy h1 -conf {
+	defaults
+		mode http
+		option httplog
+		timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+		timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
+		timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
+
+	frontend fe1
+		bind "fd@${fe_1}"
+		log 127.0.0.1:1514 local0
+#		log ${Slg1_addr}:${Slg1_port} local0
+		default_backend be
+
+	backend be
+		server app1 ${s1_addr}:${s1_port}
+
+	ring myring
+		description "My local buffer"
+		format rfc5424
+		maxlen 1200
+		size 32764
+		timeout connect 5s
+		timeout server 10s
+		# syslog tcp server
+		server mysyslogsrv 127.0.0.1:2514
+
+	log-forward syslog2tcp
+		dgram-bind 127.0.0.1:1514
+		log ring@myring local0 # To TCP log
+
+	log-forward syslog2local
+		bind 127.0.0.1:2514
+		log ${Slg1_addr}:${Slg1_port} local0 # To VTest syslog
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+    txreq -url "/client_c1"
+    rxresp
+    expect resp.status == 200
+} -repeat 50 -start
+
+syslog Slg1 -wait
diff --git a/reg-tests/log/log_uri.vtc b/reg-tests/log/log_uri.vtc
index b5a5753..60b0bdb 100644
--- a/reg-tests/log/log_uri.vtc
+++ b/reg-tests/log/log_uri.vtc
@@ -5,7 +5,7 @@
 
 server s1 {
     rxreq
-    txresp
+    txresp  -hdr "Connection: close"
 } -repeat 4 -start
 
 syslog Slg_1 -level info {
diff --git a/reg-tests/mailers/healthcheckmail.lua b/reg-tests/mailers/healthcheckmail.lua
index 50b561c..4cb0e9d 100644
--- a/reg-tests/mailers/healthcheckmail.lua
+++ b/reg-tests/mailers/healthcheckmail.lua
@@ -4,46 +4,6 @@
 local mailconnectionsmade = 0
 local healthcheckcounter = 0
 
-core.register_action("bug", { "http-res" }, function(txn)
-	data = txn:get_priv()
-	if not data then
-		data = 0
-	end
-	data = data + 1
-	print(string.format("set to %d", data))
-	txn.http:res_set_status(200 + data)
-	txn:set_priv(data)
-end)
-
-core.register_service("luahttpservice", "http", function(applet)
-	local response = "?"
-	local responsestatus = 200
-       if applet.path == "/setport" then
-		vtc_port1 = applet.headers["vtcport1"][0]
-		response = "OK"
-	end
-	if applet.path == "/svr_healthcheck" then
-		healthcheckcounter = healthcheckcounter + 1
-		if healthcheckcounter < 2 or healthcheckcounter > 6 then
-			responsestatus = 403
-		end
-	end
-
-	applet:set_status(responsestatus)
-       if applet.path == "/checkMailCounters" then
-		response = "MailCounters"
-		applet:add_header("mailsreceived", mailsreceived)
-		applet:add_header("mailconnectionsmade", mailconnectionsmade)
-	end
-	applet:start_response()
-	applet:send(response)
-end)
-
-core.register_service("fakeserv", "http", function(applet)
-	applet:set_status(200)
-	applet:start_response()
-end)
-
 function RecieveAndCheck(applet, expect)
 	data = applet:getline()
 	if data:sub(1,expect:len()) ~= expect then
@@ -60,20 +20,24 @@
 	applet:send("220 Welcome\r\n")
 	local data
 
-	if RecieveAndCheck(applet, "EHLO") == false then
-		return
+	if RecieveAndCheck(applet, "HELO") == false then
+	   applet:set_var("txn.result", "ERROR (step: HELO)")
+	   return
 	end
 	applet:send("250 OK\r\n")
 	if RecieveAndCheck(applet, "MAIL FROM:") == false then
-		return
+	   applet:set_var("txn.result", "ERROR (step: MAIL FROM)")
+	   return
 	end
 	applet:send("250 OK\r\n")
 	if RecieveAndCheck(applet, "RCPT TO:") == false then
-		return
+	   applet:set_var("txn.result", "ERROR (step: RCPT TO)")
+	   return
 	end
 	applet:send("250 OK\r\n")
 	if RecieveAndCheck(applet, "DATA") == false then
-		return
+	   applet:set_var("txn.result", "ERROR (step: DATA)")
+	   return
 	end
 	applet:send("354 OK\r\n")
 	core.Info("#### Send your mailbody")
@@ -83,7 +47,7 @@
 		data = applet:getline() -- BODY CONTENT
 		--core.Info(data)
 		if data:sub(1, 9) == "Subject: " then
-			subject = data
+		   subject = data
 		end
 		if (data == "\r\n") then
 			data = applet:getline() -- BODY CONTENT
@@ -97,9 +61,10 @@
 	applet:send("250 OK\r\n")
 
 	if RecieveAndCheck(applet, "QUIT") == false then
-		return
+	   applet:set_var("txn.result", "ERROR (step: QUIT)")
+	   return
 	end
 	applet:send("221 Mail queued for delivery to /dev/null \r\n")
 	core.Info("Mail queued for delivery to /dev/null subject: "..subject)
-	mailsreceived = mailsreceived + 1
+	applet:set_var("txn.result", "SUCCESS")
 end)
diff --git a/reg-tests/mailers/healthcheckmail.vtc b/reg-tests/mailers/healthcheckmail.vtc
index ce3335f..d36a0d3 100644
--- a/reg-tests/mailers/healthcheckmail.vtc
+++ b/reg-tests/mailers/healthcheckmail.vtc
@@ -1,76 +1,59 @@
-varnishtest "Lua: txn:get_priv() scope"
+varnishtest "Check health-check email alerts"
 #REQUIRE_OPTIONS=LUA
-#REQUIRE_VERSION=1.6
-#REGTEST_TYPE=broken
 
 feature ignore_unknown_macro
 
-server s1 {
-    rxreq
-    txresp
+syslog S1 -level notice {
+    recv
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 failed.+check duration: [[:digit:]]+ms.+status: 0/1 DOWN."
+    recv info
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Result=SUCCESS Bytes=[[:digit:]]+"
 } -start
 
 haproxy h1 -conf {
     global
         lua-load ${testdir}/healthcheckmail.lua
-defaults
-    frontend femail
+
+    defaults
+        timeout client 1s
+        timeout server 1s
+        timeout connect 1s
+
+    listen lisrv
         mode tcp
-        bind "fd@${femail}"
-        tcp-request content use-service lua.mailservice
+        bind "fd@${lisrv}"
+        tcp-request connection reject
 
-    frontend luahttpservice
-        mode http
-        bind "fd@${luahttpservice}"
-        http-request use-service lua.luahttpservice
+    listen lismtp
+        mode tcp
+        bind "fd@${lismtp}"
+        log ${S1_addr}:${S1_port} daemon
+        log-format "Result=%[var(txn.result)] Bytes=%B"
+        tcp-request content use-service lua.mailservice
 
     frontend fe1
         mode http
         bind "fd@${fe1}"
-        default_backend b1
+        default_backend be1
 
-        http-response lua.bug
-
-    backend b1
+    backend be1
         mode http
-        option httpchk /svr_healthcheck
+        log ${S1_addr}:${S1_port} daemon
+        option httpchk
         option log-health-checks
 
+        default-server inter 200ms downinter 100ms rise 1 fall 1
+
         email-alert mailers mymailers
         email-alert level info
         email-alert from from@domain.tld
         email-alert to to@domain.tld
 
-        server broken 127.0.0.1:65535 check
-        server srv_lua ${h1_luahttpservice_addr}:${h1_luahttpservice_port} check inter 500
-        server srv1 ${s1_addr}:${s1_port} check inter 500
+        server srv1 ${h1_lisrv_addr}:${h1_lisrv_port} check
 
     mailers mymailers
-#      timeout mail 20s
-#      timeout mail 200ms
-      mailer smtp1 ${h1_femail_addr}:${h1_femail_port}
+        mailer smtp1 ${h1_lismtp_addr}:${h1_lismtp_port}
 
 } -start
 
-# configure port for lua to call feluaservice
-client c1 -connect ${h1_luahttpservice_sock} {
-    timeout 2
-    txreq -url "/setport" -hdr "vtcport1: ${h1_femail_port}"
-    rxresp
-    expect resp.status == 200
-    expect resp.body == "OK"
-} -run
-
-delay 2
-server s2 -repeat 5 -start
-delay 5
-
-client c2 -connect ${h1_luahttpservice_sock} {
-    timeout 2
-    txreq -url "/checkMailCounters"
-    rxresp
-    expect resp.status == 200
-    expect resp.body == "MailCounters"
-    expect resp.http.mailsreceived == 16
-    expect resp.http.mailconnectionsmade == 16
-} -run
+syslog S1 -wait
diff --git a/reg-tests/ssl/add_ssl_crt-list.vtc b/reg-tests/ssl/add_ssl_crt-list.vtc
index f42e3af..d35779b 100644
--- a/reg-tests/ssl/add_ssl_crt-list.vtc
+++ b/reg-tests/ssl/add_ssl_crt-list.vtc
@@ -52,6 +52,7 @@
         bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/localhost.crt-list
 
         server s1 ${s1_addr}:${s1_port}
+        server s2 ${s1_addr}:${s1_port} ssl crt "${testdir}/common.pem" weight 0 verify none
 } -start
 
 
@@ -70,6 +71,7 @@
     echo "new ssl cert ${testdir}/ecdsa.pem" | socat "${tmpdir}/h1/stats" -
     printf "set ssl cert ${testdir}/ecdsa.pem <<\n$(cat ${testdir}/ecdsa.pem)\n\n" | socat "${tmpdir}/h1/stats" -
     echo "commit ssl cert ${testdir}/ecdsa.pem" | socat "${tmpdir}/h1/stats" -
+    printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/common.pem [ssl-min-ver SSLv3 verify none allow-0rtt] !*\n\n" | socat "${tmpdir}/h1/stats" -
     printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/ecdsa.pem [ssl-min-ver SSLv3 verify none allow-0rtt] localhost !www.test1.com\n\n" | socat "${tmpdir}/h1/stats" -
     printf "add ssl crt-list ${testdir}/localhost.crt-list <<\n${testdir}/ecdsa.pem [verify none allow-0rtt]\n\n" | socat "${tmpdir}/h1/stats" -
     printf "add ssl crt-list ${testdir}/localhost.crt-list/// <<\n${testdir}/ecdsa.pem localhost !www.test1.com\n\n" | socat "${tmpdir}/h1/stats" -
diff --git a/reg-tests/ssl/log_forward_ssl.vtc b/reg-tests/ssl/log_forward_ssl.vtc
new file mode 100644
index 0000000..6b7515b
--- /dev/null
+++ b/reg-tests/ssl/log_forward_ssl.vtc
@@ -0,0 +1,60 @@
+varnishtest "Test the TCP+SSL load-forward"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.3-dev1)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+server s1 {
+    rxreq
+	txresp
+} -repeat 500 -start
+
+syslog Slg1 -level info {
+    recv
+    expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c1 HTTP/1.1\""
+} -repeat 50 -start
+
+haproxy h1 -conf {
+	global
+		insecure-fork-wanted
+	defaults
+		mode http
+		option httplog
+		timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+		timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
+		timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
+
+	frontend fe1
+		bind "fd@${fe_1}"
+		log 127.0.0.1:1514 local0
+#		log ${Slg1_addr}:${Slg1_port} local0
+		default_backend be
+
+	backend be
+		server app1 ${s1_addr}:${s1_port}
+
+	ring myring
+		description "My local buffer"
+		format rfc5424
+		maxlen 1200
+		size 32764
+		timeout connect 5s
+		timeout server 10s
+		# syslog tcp server
+		server mysyslogsrv 127.0.0.1:2514 ssl verify none
+
+	log-forward syslog2tcp
+		dgram-bind 127.0.0.1:1514
+		log ring@myring local0 # To TCP log
+
+	log-forward syslog2local
+		bind 127.0.0.1:2514 ssl crt ${testdir}/common.pem
+		log ${Slg1_addr}:${Slg1_port} local0 # To VTest syslog
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+    txreq -url "/client_c1"
+    rxresp
+    expect resp.status == 200
+} -repeat 50 -start
+
+syslog Slg1 -wait
diff --git a/reg-tests/ssl/ssl_default_server.vtc b/reg-tests/ssl/ssl_default_server.vtc
index 607225d..4f97346 100644
--- a/reg-tests/ssl/ssl_default_server.vtc
+++ b/reg-tests/ssl/ssl_default_server.vtc
@@ -11,8 +11,8 @@
 #
 
 varnishtest "Test the 'set ssl cert' feature of the CLI"
-feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
-feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+#REQUIRE_VERSION=2.4
+#REQUIRE_OPTIONS=OPENSSL
 feature ignore_unknown_macro
 
 server s1 -repeat 7 {
diff --git a/reg-tests/startup/automatic_maxconn.vtc b/reg-tests/startup/automatic_maxconn.vtc
new file mode 100644
index 0000000..686a5c5
--- /dev/null
+++ b/reg-tests/startup/automatic_maxconn.vtc
@@ -0,0 +1,102 @@
+#REGTEST_TYPE=broken
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTION=OPENSSL
+
+# Check the maxconn computation with the -m parameter
+# Broken because it can't work with ASAN.
+
+varnishtest "Automatic maxconn computation"
+
+
+feature ignore_unknown_macro
+
+server s1 {
+    rxreq
+    txresp
+} -start
+
+
+haproxy h1 -arg "-m 1024" -conf {
+} -start
+
+haproxy h1 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 29000\n.*"
+}
+
+haproxy h2 -arg "-m 384" -conf {
+} -start
+
+haproxy h2 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 10000\n.*"
+}
+
+haproxy h3 -arg "-m 256" -conf {
+} -start
+
+haproxy h3 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 7300\n.*"
+}
+
+# 1 SSL front but no back
+
+haproxy h4 -arg "-m 256" -conf {
+	defaults
+		mode http
+		timeout connect 1s
+		timeout client  1s
+		timeout server  1s
+
+	frontend fe1
+		bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+
+} -start
+
+haproxy h4 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 1900\n.*"
+}
+
+# 1 SSL back but not front
+
+haproxy h5 -arg "-m 256" -conf {
+	defaults
+		mode http
+		timeout connect 1s
+		timeout client  1s
+		timeout server  1s
+
+	listen li2
+		bind "fd@${li2}"
+		server ssl "${s1_addr}:${s1_port}" ssl verify none
+
+} -start
+
+haproxy h5 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 1900\n.*"
+}
+
+
+# 1 SSL front and 1 back
+
+haproxy h6 -arg "-m 256" -conf {
+	defaults
+		mode http
+		timeout connect 1s
+		timeout client  1s
+		timeout server  1s
+
+	listen li3
+		bind "fd@${li3}" ssl crt ${testdir}/common.pem
+		server ssl "${s1_addr}:${s1_port}" ssl verify none
+
+} -start
+
+haproxy h6 -cli {
+	send "show info"
+	expect ~ ".*Maxconn: 1700\n.*"
+}
+
diff --git a/reg-tests/startup/common.pem b/reg-tests/startup/common.pem
new file mode 100644
index 0000000..206e417
--- /dev/null
+++ b/reg-tests/startup/common.pem
@@ -0,0 +1,117 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAnb0BDF7FsqzslakNg7u/n/JQkq6nheuKwvyTqECfpc9y7uSB
+e/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97N1/LZa6vecjjgGSP0Aag/gS/ocnM
+RIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKttJP8xME7j3bTwIDElx/hNI0n7L+yS
+kAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6VkIzdOEtH6TcghXmuGcuqvLNH9Buo
+syngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+JHm0pkDzAZ2WluNsuXlrJToPirWyj
+6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd3wIDAQABAoIBABojc8UE/2W4WgwC
+04Z82ig7Ezb7Ui9S9M+S4zUCYHItijIkE4DkIfO3y7Hk4x6iJdyb191HK9UdC5p9
+32upS9XFPgM/izx3GZvxDhO+xXbSep7ovbyuQ3pPkHTx3TTavpm3GyvmcTKKoy4R
+jP4dWhzDXPdQW1ol3ZS4EDau4rlyClY6oi1mq9aBEX3MqVjB/nO7s2AbdgclAgP2
+OZMhTzWYR1k5tYySHCXh3ggGMCikyvHU0+SsGyrstYzP1VYi/n3f0VgqW/5ZjG8x
+6SHpe04unErPF3HuSun2ZMCFdBxaTFZ8FENb8evrSXe3nQOc9W21RQdRRrNNUbjl
+JYI4veECgYEA0ATYKMS1VCUYRZoQ49b5GTg7avUYqfW4bEo4fSfBue8NrnKR3Wu8
+PPBiCTuIYq1vSF+60B7Vu+hW0A8OuQ2UuMxLpYcQ7lKfNad/+yAfoWWafIqCqNU9
+at0QMdbW6A69d6jZt7OrXtleBsphCnN58jTz4ch4PIa2Oyq46NUXCvUCgYEAwh8t
+G6BOHOs3yRNI2s9Y9EEfwoil2uIKrZhqiL3AwdIpu5uNIMuPnbaEpXvRX6jv/qtL
+321i8vZLc31aM7zfxQ6B4ReQFJfYC80FJsWvcLwT9hB9mTJpLS4sIu5tzQc87O6w
+RtjFMom+5ns5hfPB4Eccy0EtbQWVY4nCzUeO6QMCgYBSvqqRRPXwG7VU8lznlHqP
+upuABzChYrnScY+Y0TixUlL54l79Wb6N6vzEOWceAWkzu8iewrU4QspNhr/PgoR3
+IeSxWlG0yy7Dc/ZnmTabx8O06I/iwrfkizzG5nOj6UEamRLJjPGNEB/jyZriQl7u
+pnugg1K4mMliLbNSAnlhBQKBgQCmYepbv260Qrex1KGhSg9Ia3k5V74weYYFfJnz
+UhChD+1NK+ourcsOtp3C6PlwMHBjq5aAjlU9QfUxq8NgjQaO8/xGXdfUjsFSfAtq
+TA4vZkUFpuTAJgEYBHc4CXx7OzTxLzRPxQRgaMgC7KNFOMR34vu/CsJQq3R7uFwL
+bsYC2QKBgQCtEmg1uDZVdByX9zyUMuRxz5Tq/vDcp+A5lJj2mha1+bUMaKX2+lxQ
+vPxY55Vaw/ukWkJirRrpGv6IytBn0dLAFSlKZworZGBaxsm8OGTFJ5Oe9+kZTjI9
+hvjpClOA1otbmj2F2uZAbuIjxQGDNUkLoifN5yDYCC8JPujHuHmULw==
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIGeTCCBGGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJGUjEW
+MBQGA1UECBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoT
+B296b24uaW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYP
+c3VwcG9ydEBvem9uLmlvMB4XDTE2MDExNzIzMDIzOFoXDTE4MDExNjIzMDIzOFow
+gb4xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMRowGAYDVQQH
+ExFOZXVpbGx5LXN1ci1TZWluZTEYMBYGA1UEChMPVE9BRCBDb25zdWx0aW5nMRcw
+FQYDVQQLEw5lUGFyYXBoZXIgVGVhbTEWMBQGA1UEAxMNd3d3LnRlc3QxLmNvbTEw
+MC4GCSqGSIb3DQEJARYhYXJuYXVsdC5taWNoZWxAdG9hZC1jb25zdWx0aW5nLmZy
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnb0BDF7FsqzslakNg7u/
+n/JQkq6nheuKwvyTqECfpc9y7uSBe/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97
+N1/LZa6vecjjgGSP0Aag/gS/ocnMRIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKtt
+JP8xME7j3bTwIDElx/hNI0n7L+ySkAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6V
+kIzdOEtH6TcghXmuGcuqvLNH9BuosyngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+J
+Hm0pkDzAZ2WluNsuXlrJToPirWyj6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd
+3wIDAQABo4IBvzCCAbswCwYDVR0PBAQDAgOoMBMGA1UdJQQMMAoGCCsGAQUFBwMB
+MB0GA1UdDgQWBBTIihFNVNgOseQnsWEcAQxAbIKE4TCBsgYDVR0jBIGqMIGngBRv
+G9At9gzk2MW5Z7JVey1LtPIZ8KGBg6SBgDB+MQswCQYDVQQGEwJGUjEWMBQGA1UE
+CBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoTB296b24u
+aW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYPc3VwcG9y
+dEBvem9uLmlvggkA15FtIaGcrk8wDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9j
+b21tb25OYW1lOmNvcHkwCQYDVR0SBAIwADBIBgNVHR8EQTA/MD2gO6A5hjdodHRw
+Oi8vb3BlbnNzbGNhLnRvYWQtY29uc3VsdGluZy5jb20vb3BlbnZwbi9MYXRlc3Qu
+Y3JsMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiVE9BRC1Db25z
+dWx0aW5nIHNlcnZlciBjZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAgEAewDa
+9BukGNJMex8gsXmmdaczTr8yh9Uvw4NJcZS38I+26o//2g+d6i7wxcQg8hIm62Hj
+0TblGU3+RsJo4uzcWxxA5YUYlVszbHNBRpQengEE5pjwHvoXVMNES6Bt8xP04+Vj
+0qVnA8gUaDMk9lN5anK7tF/mbHOIJwHJZYCa2t3y95dIOVEXFwOIzzbSbaprjkLN
+w0BgR5paJz7NZWNqo4sZHUUz94uH2bPEd01SqHO0dJwEVxadgxuPnD05I9gqGpGX
+Zf3Rn7EQylvUtX9mpPaulQPXc3emefewLUSSAdnZrVikZK2J/B4lSi9FpUwl4iQH
+pZoE0QLQHtB1SBKacnOAddGSTLSdFvpzjErjjWSpMukF0vutmrP86GG3xtshWVhI
+u+yLfDJVm/pXfaeDtWMXpxIT/U1i0avpk5MZtFMRC0MTaxEWBTnnJm+/yiaAXQYg
+E1ZIP0mkZkiUojIawTR7JTjHGhIraP9UVPNceVy0DLfETHEou3vhwBn7PFOz7piJ
+wjp3A47DStJD4fapaX6B1fqM+n34CMD9ZAiJFgQEIQfObAWC9hyr4m+pqkp1Qfuw
+vsAP/ZoS1CBirJfm3i+Gshh+VeH+TAmO/NBBYCfzBdgkNz4tJCkOc7CUT/NQTR/L
+N2OskR/Fkge149RJi7hHvE3gk/mtGtNmHJPuQ+s=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJazCCBVOgAwIBAgIUWHoc5e2FUECgyCvyVf8wCtt8gTYwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA4MDQxODU4MTZaFw0yMDA5
+MDMxODU4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggQiMA0GCSqGSIb3DQEB
+AQUAA4IEDwAwggQKAoIEAQDARiuHkhrnf38Md1nxGDSneJfwv/QksdNNMNTJBdjg
+OVmaRCIAyz43oefTWDQ/TebbSwB+Lg9pud1zadGWhlZRhCgBPP8JDMhIKH4eXIRk
+5IIa8WD08EwvSlqJL0r4gsMtVsxy7BZHAkka/2Ket9pyGt4kG5n75RFdc6BI80/8
+RwJt/MDxPrcVBAT7LnCluxQpyya9mZCabj7l+9a2yU2hgWS6QqfZJ133krkP/MMh
+AEQkSoA4mmBwWk9yPqXmUqiOi7v6iLkIUEh5SgYVPRk9BtU/kDaUdSwuqRrpCZo4
+SsWZWFLxBmLHkSh+G+BWjCVYMQr2ye7e+VMT/20+5xAfq4fj9n5BsPcx3QcVuTof
+RAc/Oygnt4MYnIcUb7zRFvCAvgpUHL7BnEn6nhyXjHJGqGDchsg8m9t3v/Y3ohq+
+qmrSzdeuylE1n3W5aWJlbFmyXegNP45MJ0xicesVrXEWF7YD/ir9mGJ8bQYr4blf
+77PrbF02komC6AzVPKOJa0jR+eW1wErzYlkYgez6ylBWCiHJd1dhEHlK3h2rXdYa
+Gnb45ILCLpEDjNEUrHifLLNXwqJpgZQsJU6BgMgk7ZgBfAKrCfTeg0rkCqCAPeVb
+8eSLf7FBF7YBRJ5P6u8qXc4RtgEu607GaWV0gIMfyVBY52oV+OaNsEdFetrJnp3c
+friG8vJ+7jdq6zjUCGgnfUIHoViJPh3JuFfhA3jT0gQDKW5PeI7dxhrNvlqdYfHI
+fxX7Y1/J6cTQkqJ1cai2f0bwJIJiTAThNbG+zrtjJ7fZ3wJ4udyU/IKrwShqtmTb
+1Ofj0tJDdwOH8i84vIySLUvR9aAb7ClFlnsx6rzwOxG90W7C0LA2M0EHm4FezJm/
+FfujnZwEWr1T9Wki6qE0MHCbdN/TTDws//EKkkE44FC+amL96w0IQl70vpE37j2A
+zlDWvFFID95SIxfmpkwWDvXDKv6gr1GMLeysCl2fgpY05Xidw5cEo9/tEkuWn/dG
+x/D9hnLBGeroA0251ES12jemqDjI2U0tfaeHakjwSsoWElf94Qmuh2iPZ+1zIxQs
+7o6nAWN8X9hfsmrDTTHlww0TEfrjlbzG5Yh+0ZRxmejgiUyOCXck+eh/ZXMXvfWh
+y3CorIIuWgkRjm80PYkdaRDJdZuyP6R7tXfTXNVzAiSQf0Qx9ru2KB2Fs/XZPamH
+KjItAU5Q6msIVvaRMS0muQgV+b6hqSEBzqXqJfAlpVLHXr5FqK+U7EB9y02B6piB
+tAmxqXP8OOCoQql6/vgIcrDFUOo6KtGBW36ef74XE3KCUVaIzVJZSIt6i/Vi0bZj
+bAjsJUQ3qDlHdorv9TRVOhnC1GUz7SuYnpEOyiXmyx3LAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ62csZcH/meQcENHhNbqz9LMzwjjAfBgNVHSMEGDAWgBQ62csZcH/meQcE
+NHhNbqz9LMzwjjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IEAQBA
+wLsGf3R1+/I2zQE+lsj7RasZtA/Cos92iEGDAPvFbx9e+roG8Gg8KBsEJu/HN0JH
+lMMiQ8dDRHSBMvRBENL5/57oOOhmqc+1u5sazLuANhzAYPZG17Klib7YpEwWoXar
+FDDiJYtCyLW0oNLpCswYopWK9GC0RJNucB0NFvOxehJ2sP2/fxGBQMB09L6mjKjd
+4KsOzyd3dNf0VYS6jB+/1pcKSHKQUo9HRHB5FK04PsYHoh4AtmEHvmYQKcWWidgU
+v26ftlH00ERzuW2juqBbz9mghlNRqXi0IyZ9b4tSj29dxW+WWFzo7j2zEPaD6z2W
+DEHq7zvON+g+q6qLgWeszqMgJzjvWjMj00E/t06PoHPiz/cAnDKEqp+ZzxCIFrxj
+/qneChpogDWyLbawhyyzbZvbirx5znOSbWjPZgydqaNEFViqbxwinBx4Xxabo6XN
+TU020FuMWmgfbIcvtgjKgyKqc97l7JMNNm7LQV9+9W0U5zdIqQKLZ9MMrd2w3xh4
+MAB8NKnwzHReK0TWwUU9HSgFAGdEX6HnyZ3bQ13ijg+sNBRMEi0gBHaqZKDdyoft
+B2u2uasSwioV48dbSIcHl+rTBKxiMh5XQ7ENnaGOJkjsIqTVzizqnPHU8eMBnSbb
+dsXlamROYII44+j3Ku6OGt51w86eGk4VxI3tmaECcJKqTkwUFD8AcNDrkjtmLuxK
+12yjnoM+u1cclfqQ5NOtRc6MJZ27jCobfBBhVdKVDp4X1WNyqGlbsU5adDAzknuI
+GT7MJO7lGjkZX2n54BNPSfrSknYMOVYcZqL0Dbcrhx5IyEmg+iOlOu1HO1tdnZop
+ej4vT+1V2w9Sa4Wo3UCo84jcm5v/4z7jCYh4BRQ60CFb7GLxZoqXIslcGSPool3n
+jl8JWoaLXrJUPfZGXo1iAlayJ5EiMyZl4eB/TBUf6TMm8vLvsPiUT+CEsjLppOdS
+eYppZAZ6H1JrJGs5kKBdOJHGn6Pkp5QsHIswOBd1HqHrBbYbZmDaDLRHduILWLrM
+e0/IfDdeXB/bKfmZoEpT8xRiauw15p0AHLumiK7KISAehfgBqUnxx+YmgGoZ7EWX
+KnMYAfCuC6oJ1DL0gp4Z9yMK1eu+GV1sLxPq9ZruEHW1R+H+4sGyiA5Gso2tgB6/
+XW//wxKclNp5LZR7hqfs/kGuh5asrJrnEbMwWn2+tr/LqfYtYh1D6nHfIXpT0o1d
+rNy/HrsKnRDMWxjm03r4hCViuNVD3Zb9anAF/NSPDVu8ATM5JbJNrCYX4eipz6ZE
+aQBkwIBkTPgtgP4r8v2G+uMYDw8nq7xh72FK107aeTTwc6MgU5jfeFNMr2XJisJd
+lSem1ngKYQSEzjVsTE4c
+-----END CERTIFICATE-----
diff --git a/scripts/announce-release b/scripts/announce-release
index 37e2ac4..c990821 100755
--- a/scripts/announce-release
+++ b/scripts/announce-release
@@ -210,20 +210,21 @@
 fi
 
 (echo "Please find the usual URLs below :"
- echo "   Site index       : http://www.haproxy.org/"
- echo "   Documentation    : http://docs.haproxy.org/"
+ echo "   Site index       : https://www.haproxy.org/"
+ echo "   Documentation    : https://docs.haproxy.org/"
  echo "   Wiki             : https://github.com/haproxy/wiki/wiki"
- echo "   Discourse        : http://discourse.haproxy.org/"
+ echo "   Discourse        : https://discourse.haproxy.org/"
  echo "   Slack channel    : https://slack.haproxy.org/"
  echo "   Issue tracker    : https://github.com/haproxy/haproxy/issues"
- echo "   Sources          : http://www.haproxy.org/download/${BRANCH}/src/"
- echo "   Git repository   : http://git.haproxy.org/git/${gitdir}/"
- echo "   Git Web browsing : http://git.haproxy.org/?p=${gitdir}"
- echo "   Changelog        : http://www.haproxy.org/download/${BRANCH}/src/CHANGELOG"
- echo "   Pending bugs     : http://www.haproxy.org/l/pending-bugs"
- echo "   Reviewed bugs    : http://www.haproxy.org/l/reviewed-bugs"
- echo "   Code reports     : http://www.haproxy.org/l/code-reports"
- echo "   Latest builds    : http://www.haproxy.org/l/dev-packages"
+ echo "   Sources          : https://www.haproxy.org/download/${BRANCH}/src/"
+ echo "   Git repository   : https://git.haproxy.org/git/${gitdir}/"
+ echo "   Git Web browsing : https://git.haproxy.org/?p=${gitdir}"
+ echo "   Changelog        : https://www.haproxy.org/download/${BRANCH}/src/CHANGELOG"
+ echo "   Dataplane API    : https://github.com/haproxytech/dataplaneapi/releases/latest"
+ echo "   Pending bugs     : https://www.haproxy.org/l/pending-bugs"
+ echo "   Reviewed bugs    : https://www.haproxy.org/l/reviewed-bugs"
+ echo "   Code reports     : https://www.haproxy.org/l/code-reports"
+ echo "   Latest builds    : https://www.haproxy.org/l/dev-packages"
 ) >> "$OUTPUT"
 
 # sign
diff --git a/scripts/build-ssl.sh b/scripts/build-ssl.sh
index e1d89a0..4934a4e 100755
--- a/scripts/build-ssl.sh
+++ b/scripts/build-ssl.sh
@@ -59,7 +59,7 @@
 download_libressl () {
     if [ ! -f "download-cache/libressl-${LIBRESSL_VERSION}.tar.gz" ]; then
         wget -P download-cache/ \
-	    "https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-${LIBRESSL_VERSION}.tar.gz"
+	    "https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/libressl-${LIBRESSL_VERSION}.tar.gz"
     fi
 }
 
diff --git a/scripts/make-releases-json b/scripts/make-releases-json
new file mode 100755
index 0000000..38bb2b6
--- /dev/null
+++ b/scripts/make-releases-json
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+#
+# Scan a branch directory for source tarballs and rebuild the releases.json
+# file for that branch. md5 and sha256 are added if present. The highest
+# numberred version is referenced as the latest release.
+#
+# Usage: $0 [-b branch] [-o outfile] /path/to/download/branch
+#
+
+USAGE="Usage: ${0##*/} [-b branch] [-o outfile] DIR"
+OUTPUT=
+BRANCH=
+DIR=
+
+die() {
+	[ "$#" -eq 0 ] || echo "$*" >&2
+	exit 1
+}
+
+err() {
+	echo "$*" >&2
+}
+
+quit() {
+	[ "$#" -eq 0 -o -n "$QUIET" ] || echo "$*"
+	exit 0
+}
+
+emit_json() {
+	printf '{\n  "branch": "%s",\n' ${BRANCH}
+	latest=""
+	for file in $(find "$DIR/src" -name 'haproxy-[0-9]*.gz' -printf "%P\n" |grep -v '[0-9]-patches*' | sort -rV ); do
+		rel="${file##*haproxy-}"
+		rel="${rel%%.tar.*}"
+		if [ -z "$latest" ]; then
+			latest="$rel";
+			printf '  "latest_release": "%s",\n' ${latest}
+			printf '  "releases": {\n'
+		else
+			printf ",\n"
+		fi
+		printf '    "%s": {\n' ${rel}
+		printf '      "file": "%s"' ${file}
+		if [ -s "$DIR/src/$file.md5" ]; then
+			printf ',\n      "md5": "%s"' $(awk '{print $1}' "$DIR/src/$file.md5")
+		fi
+		if [ -s "$DIR/src/$file.sha256" ]; then
+			printf ',\n      "sha256": "%s"' $(awk '{print $1}' "$DIR/src/$file.sha256")
+		fi
+		printf '\n    }'
+	done
+
+	if [ -n "$latest" ]; then
+		printf "\n  }"  ## "releases"
+	fi
+
+	printf '\n}\n'
+}
+
+
+### main
+
+while [ -n "$1" -a -z "${1##-*}" ]; do
+	case "$1" in
+		-b)        BRANCH="$2"    ; shift 2 ;;
+		-o)        OUTPUT="$2"    ; shift 2 ;;
+		-h|--help) quit "$USAGE" ;;
+		*)         die  "$USAGE" ;;
+	esac
+done
+
+if [ $# -ne 1 ]; then
+	die "$USAGE"
+fi
+
+DIR="$1" ; shift
+if [ -z "$DIR" ]; then
+	die "Missing download directory name."
+fi
+
+if [ ! -d "$DIR/." ]; then
+	die "Download directory doesn't exist : $DIR"
+fi
+
+if [ ! -d "$DIR/src" ]; then
+	die "Download directory must contain 'src' : $DIR"
+fi
+
+if [ -z "$BRANCH" ]; then
+	BRANCH=${DIR##*/}
+	if [ -n "${BRANCH//[0-9.]}" ]; then
+		die "Couldn't determine branch number from dir name: $BRANCH"
+	fi
+fi
+
+# echo "debug: DIR=$DIR BRANCH=$BRANCH"
+if [ -n "$OUTPUT" ]; then
+	emit_json > "$OUTPUT.tmp"
+	mv -f "$OUTPUT.tmp" "$OUTPUT"
+	rm -f "$OUTPUT.tmp"
+else
+	emit_json
+fi
diff --git a/scripts/publish-release b/scripts/publish-release
index 4938049..9066d4a 100755
--- a/scripts/publish-release
+++ b/scripts/publish-release
@@ -17,10 +17,14 @@
 DEVEL=
 QUIET=
 AUTO=
+ARG0="$0"
 NEW=
 DIR=
 DOC=( )
 
+# need to have group write on emitted files for others to update
+umask 002
+
 die() {
 	[ "$#" -eq 0 ] || echo "$*" >&2
 	exit 1
@@ -178,6 +182,11 @@
 	$CMD_GZIP < "$TARGET_DIR/doc/${i#doc/}" > "$TARGET_DIR/doc/${i#doc/}.gz"
 done
 
+if [ -x "${ARG0%/*}/make-releases-json" ]; then
+        # regenerate versions
+        "${ARG0%/*}/make-releases-json" -o "$TARGET_DIR/src/releases.json" "$TARGET_DIR"
+fi
+
 echo "Done : ls -l ${TARGET_DIR}"
 ( cd "$TARGET_DIR" ;
   ls -l src/CHANGELOG "src${DEVEL}/haproxy-${NEW}".tar.gz{,.md5,.sha256} $(for i in "${DOC[@]}"; do echo "doc/${i#doc/}"{,.gz}; done)
diff --git a/src/backend.c b/src/backend.c
index a864fc1..3f58064 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -696,8 +696,6 @@
 	struct server *srv = NULL, *prev_srv;
 	int err;
 
-	DPRINTF(stderr,"assign_server : s=%p\n",s);
-
 	err = SRV_STATUS_INTERNAL;
 	if (unlikely(s->pend_pos || s->flags & SF_ASSIGNED))
 		goto out_err;
@@ -812,11 +810,6 @@
 							    (void *)&((struct sockaddr_in6 *)conn->src)->sin6_addr,
 							    16, prev_srv);
 				}
-				else {
-					/* unknown IP family */
-					err = SRV_STATUS_INTERNAL;
-					goto out;
-				}
 				break;
 
 			case BE_LB_HASH_URI:
@@ -2437,7 +2430,6 @@
 
 		/* only wait when we're retrying on the same server */
 		if ((si->state == SI_ST_ASS ||
-		     (s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_RR ||
 		     (s->be->srv_act <= 1)) && !reused) {
 			si->state = SI_ST_TAR;
 			si->exp = tick_add(now_ms, MS_TO_TICKS(delay));
diff --git a/src/cache.c b/src/cache.c
index ad4e715..30e0b7d 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -136,7 +136,7 @@
 struct cache_entry {
 	unsigned int complete;    /* An entry won't be valid until complete is not null. */
 	unsigned int latest_validation;     /* latest validation date */
-	unsigned int expire;      /* expiration date */
+	unsigned int expire;      /* expiration date (wall clock time) */
 	unsigned int age;         /* Origin server "Age" header value */
 
 	struct eb32_node eb;     /* ebtree node used to hold the cache object */
@@ -188,7 +188,7 @@
 	if (memcmp(entry->hash, hash, sizeof(entry->hash)))
 		return NULL;
 
-	if (entry->expire > now.tv_sec) {
+	if (entry->expire > date.tv_sec) {
 		return entry;
 	} else {
 		delete_entry(entry);
@@ -249,7 +249,7 @@
 		 * when we find them. Calling delete_entry would be too costly
 		 * so we simply call eb32_delete. The secondary_entry count will
 		 * be updated when we try to insert a new entry to this list. */
-		if (entry->expire <= now.tv_sec) {
+		if (entry->expire <= date.tv_sec) {
 			eb32_delete(&entry->eb);
 			entry->eb.key = 0;
 		}
@@ -258,7 +258,7 @@
 	}
 
 	/* Expired entry */
-	if (entry && entry->expire <= now.tv_sec) {
+	if (entry && entry->expire <= date.tv_sec) {
 		eb32_delete(&entry->eb);
 		entry->eb.key = 0;
 		entry = NULL;
@@ -283,7 +283,7 @@
 	while (prev) {
 		entry = container_of(prev, struct cache_entry, eb);
 		prev = eb32_prev_dup(prev);
-		if (entry->expire <= now.tv_sec) {
+		if (entry->expire <= date.tv_sec) {
 			eb32_delete(&entry->eb);
 			entry->eb.key = 0;
 		}
@@ -315,7 +315,7 @@
 	struct eb32_node *prev = NULL;
 	struct cache_entry *entry = NULL;
 	unsigned int entry_count = 0;
-	unsigned int last_clear_ts = now.tv_sec;
+	unsigned int last_clear_ts = date.tv_sec;
 
 	struct eb32_node *node = eb32_insert(&cache->entries, &new_entry->eb);
 
@@ -338,7 +338,7 @@
 			 * space. In order to avoid going over the same list too
 			 * often, we first check the timestamp of the last check
 			 * performed. */
-			if (last_clear_ts == now.tv_sec) {
+			if (last_clear_ts == date.tv_sec) {
 				/* Too many entries for this primary key, clear the
 				 * one that was inserted. */
 				eb32_delete(node);
@@ -351,7 +351,7 @@
 				/* Still too many entries for this primary key, delete
 				 * the newly inserted one. */
 				entry = container_of(prev, struct cache_entry, eb);
-				entry->last_clear_ts = now.tv_sec;
+				entry->last_clear_ts = date.tv_sec;
 				eb32_delete(node);
 				node->key = 0;
 				return NULL;
@@ -811,8 +811,8 @@
 				/* A request having an expiring date earlier
 				 * than the current date should be considered as
 				 * stale. */
-				expires = (expires_val >= now.tv_sec) ?
-					(expires_val - now.tv_sec) : 0;
+				expires = (expires_val >= date.tv_sec) ?
+					(expires_val - date.tv_sec) : 0;
 			}
 			else {
 				/* Following RFC 7234#5.3, an invalid date
@@ -886,7 +886,7 @@
 	/* Fallback on the current time if no "Last-Modified" or "Date" header
 	 * was found. */
 	if (!last_modified)
-		last_modified = now.tv_sec;
+		last_modified = date.tv_sec;
 
 	return last_modified;
 }
@@ -1083,7 +1083,7 @@
 
 	http_check_response_for_cacheability(s, &s->res);
 
-	if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK) || (txn->flags & TX_CACHE_IGNORE))
+	if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
 		goto out;
 
 	shctx_lock(shctx);
@@ -1120,7 +1120,7 @@
 	 * is set by the end of this function (in case of concurrent accesses to
 	 * the same resource). This way the second access will find an existing
 	 * but not yet usable entry in the tree and will avoid storing its data. */
-	object->expire = now.tv_sec + 2;
+	object->expire = date.tv_sec + 2;
 
 	memcpy(object->hash, txn->cache_hash, sizeof(object->hash));
 	if (vary_signature)
@@ -1224,8 +1224,8 @@
 	if (cache_ctx) {
 		cache_ctx->first_block = first;
 		/* store latest value and expiration time */
-		object->latest_validation = now.tv_sec;
-		object->expire = now.tv_sec + effective_maxage;
+		object->latest_validation = date.tv_sec;
+		object->expire = date.tv_sec + effective_maxage;
 		return ACT_RET_CONT;
 	}
 
@@ -1416,7 +1416,7 @@
 	char *end;
 
 	chunk_reset(&trash);
-	age = MAX(0, (int)(now.tv_sec - cache_ptr->latest_validation)) + cache_ptr->age;
+	age = MAX(0, (int)(date.tv_sec - cache_ptr->latest_validation)) + cache_ptr->age;
 	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
 		age = CACHE_ENTRY_MAX_AGE;
 	end = ultoa_o(age, b_head(&trash), b_size(&trash));
@@ -1776,8 +1776,10 @@
 
 	shctx_lock(shctx_ptr(cache));
 	res = entry_exist(cache, s->txn->cache_hash);
-	/* We must not use an entry that is not complete. */
-	if (res && res->complete) {
+	/* We must not use an entry that is not complete but the check will be
+	 * performed after we look for a potential secondary entry (in case of
+	 * Vary). */
+	if (res) {
 		struct appctx *appctx;
 		entry_block = block_ptr(res);
 		shctx_row_inc_hot(shctx_ptr(cache), entry_block);
@@ -1804,9 +1806,11 @@
 				res = NULL;
 		}
 
-		/* We looked for a valid secondary entry and could not find one,
-		 * the request must be forwarded to the server. */
-		if (!res) {
+		/* We either looked for a valid secondary entry and could not
+		 * find one, or the entry we want to use is not complete. We
+		 * can't use the cache's entry and must forward the request to
+		 * the server. */
+		if (!res || !res->complete) {
 			shctx_lock(shctx_ptr(cache));
 			shctx_row_dec_hot(shctx_ptr(cache), entry_block);
 			shctx_unlock(shctx_ptr(cache));
@@ -2602,13 +2606,13 @@
 			entry = container_of(node, struct cache_entry, eb);
 			next_key = node->key + 1;
 
-			if (entry->expire > now.tv_sec) {
+			if (entry->expire > date.tv_sec) {
 				chunk_printf(&trash, "%p hash:%u vary:0x", entry, read_u32(entry->hash));
 				for (i = 0; i < HTTP_CACHE_SEC_KEY_LEN; ++i)
 					chunk_appendf(&trash, "%02x", (unsigned char)entry->secondary_key[i]);
 				chunk_appendf(&trash, " size:%u (%u blocks), refcount:%u, expire:%d\n",
 					      block_ptr(entry)->len, block_ptr(entry)->block_count,
-					      block_ptr(entry)->refcount, entry->expire - (int)now.tv_sec);
+					      block_ptr(entry)->refcount, entry->expire - (int)date.tv_sec);
 			} else {
 				/* time to remove that one */
 				delete_entry(entry);
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index 8fd067a..35bd498 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -292,6 +292,8 @@
 			curr_defproxy = last_defproxy;
 
 		if (strcmp(args[arg], "from") == 0) {
+			struct ebpt_node *next_by_name;
+
 			curr_defproxy = proxy_find_by_name(args[arg+1], PR_CAP_DEF, 0);
 
 			if (!curr_defproxy) {
@@ -300,8 +302,8 @@
 				goto out;
 			}
 
-			if (ebpt_next_dup(&curr_defproxy->conf.by_name)) {
-				struct proxy *px2 = container_of(ebpt_next_dup(&curr_defproxy->conf.by_name), struct proxy, conf.by_name);
+			if ((next_by_name = ebpt_next_dup(&curr_defproxy->conf.by_name))) {
+				struct proxy *px2 = container_of(next_by_name, struct proxy, conf.by_name);
 
 				ha_alert("parsing [%s:%d] : ambiguous defaults section name '%s' referenced by %s '%s' exists at least at %s:%d and %s:%d.\n",
 					 file, linenum, args[arg+1], proxy_cap_str(rc), name,
@@ -2340,6 +2342,38 @@
 				}
 			} /* end while loop */
 		}
+		else if (strcmp(args[1], "http-restrict-req-hdr-names") == 0) {
+			if (kwm != KWM_STD) {
+				ha_alert("parsing [%s:%d]: negation/default is not supported for option '%s'.\n",
+					 file, linenum, args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+
+			if (alertif_too_many_args(2, file, linenum, args, &err_code))
+				goto out;
+
+			if (*(args[2]) == 0) {
+				ha_alert("parsing [%s:%d] : missing parameter. option '%s' expects 'preserve', 'reject' or 'delete' option.\n",
+					 file, linenum, args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+
+			curproxy->options2 &= ~PR_O2_RSTRICT_REQ_HDR_NAMES_MASK;
+			if (strcmp(args[2], "preserve") == 0)
+				curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP;
+			else if (strcmp(args[2], "reject") == 0)
+				curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_BLK;
+			else if (strcmp(args[2], "delete") == 0)
+				curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_DEL;
+			else {
+				ha_alert("parsing [%s:%d] : invalid parameter '%s'. option '%s' expects 'preserve', 'reject' or 'delete' option.\n",
+					 file, linenum, args[2], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+		}
 		else {
 			const char *best = proxy_find_best_option(args[1], common_options);
 
diff --git a/src/cfgparse-ssl.c b/src/cfgparse-ssl.c
index 654b020..fcd0416 100644
--- a/src/cfgparse-ssl.c
+++ b/src/cfgparse-ssl.c
@@ -758,7 +758,7 @@
 {
 	int code;
 	char *p = args[cur_arg + 1];
-	unsigned long long *ignerr = &conf->crt_ignerr;
+	unsigned long long *ignerr = conf->crt_ignerr_bitfield;
 
 	if (!*p) {
 		memprintf(err, "'%s' : missing error IDs list", args[cur_arg]);
@@ -766,21 +766,21 @@
 	}
 
 	if (strcmp(args[cur_arg], "ca-ignore-err") == 0)
-		ignerr = &conf->ca_ignerr;
+		ignerr = conf->ca_ignerr_bitfield;
 
 	if (strcmp(p, "all") == 0) {
-		*ignerr = ~0ULL;
+		cert_ignerr_bitfield_set_all(ignerr);
 		return 0;
 	}
 
 	while (p) {
 		code = atoi(p);
-		if ((code <= 0) || (code > 63)) {
-			memprintf(err, "'%s' : ID '%d' out of range (1..63) in error IDs list '%s'",
-			          args[cur_arg], code, args[cur_arg + 1]);
+		if ((code <= 0) || (code > SSL_MAX_VFY_ERROR_CODE)) {
+			memprintf(err, "'%s' : ID '%d' out of range (1..%d) in error IDs list '%s'",
+			          args[cur_arg], code, SSL_MAX_VFY_ERROR_CODE, args[cur_arg + 1]);
 			return ERR_ALERT | ERR_FATAL;
 		}
-		*ignerr |= 1ULL << code;
+		cert_ignerr_bitfield_set(ignerr, code);
 		p = strchr(p, ',');
 		if (p)
 			p++;
diff --git a/src/cfgparse-tcp.c b/src/cfgparse-tcp.c
index a15a110..e91e7a3 100644
--- a/src/cfgparse-tcp.c
+++ b/src/cfgparse-tcp.c
@@ -165,6 +165,7 @@
 		return ERR_ALERT | ERR_FATAL;
 	}
 
+	ha_free(&conf->settings.interface);
 	conf->settings.interface = strdup(args[cur_arg + 1]);
 	return 0;
 }
diff --git a/src/cfgparse.c b/src/cfgparse.c
index be0eaa7..01014c0 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -10,10 +10,10 @@
  *
  */
 
-#ifdef USE_LIBCRYPT
-/* This is to have crypt() defined on Linux */
+/* This is to have crypt() and sched_setaffinity() defined on Linux */
 #define _GNU_SOURCE
 
+#ifdef USE_LIBCRYPT
 #ifdef USE_CRYPT_H
 /* some platforms such as Solaris need this */
 #include <crypt.h>
@@ -29,6 +29,9 @@
 #include <pwd.h>
 #include <grp.h>
 #include <errno.h>
+#ifdef USE_CPU_AFFINITY
+#include <sched.h>
+#endif
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
@@ -62,9 +65,11 @@
 #include <haproxy/lb_map.h>
 #include <haproxy/listener.h>
 #include <haproxy/log.h>
+#include <haproxy/sink.h>
 #include <haproxy/mailers.h>
 #include <haproxy/namespace.h>
 #include <haproxy/obj_type-t.h>
+#include <haproxy/openssl-compat.h>
 #include <haproxy/peers-t.h>
 #include <haproxy/peers.h>
 #include <haproxy/pool.h>
@@ -659,9 +664,10 @@
 
 	if (!LIST_ISEMPTY(&p->conf.bind)) {
 		bind_conf = LIST_ELEM((&p->conf.bind)->n, typeof(bind_conf), by_fe);
-		free(bind_conf->file);
-		bind_conf->file = strdup(file);
-		bind_conf->line = line;
+		/*
+		 * We keep bind_conf->file and bind_conf->line unchanged
+		 * to make them available for error messages
+		 */
 		if (arg) {
 			free(bind_conf->arg);
 			bind_conf->arg = strdup(arg);
@@ -745,7 +751,12 @@
 		}
 
 		bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum,
-		                                 NULL, xprt_get(XPRT_RAW));
+		                                 args[1], xprt_get(XPRT_RAW));
+		if (!bind_conf) {
+			ha_alert("parsing [%s:%d] : '%s %s' : cannot allocate memory.\n", file, linenum, args[0], args[1]);
+			err_code |= ERR_FATAL;
+			goto out;
+		}
 		if (*args[0] == 'b') {
 			struct listener *l;
 
@@ -755,6 +766,11 @@
 				goto out;
 			}
 
+			if (!LIST_ISEMPTY(&bind_conf->listeners)) {
+				ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
+				err_code |= ERR_FATAL;
+			}
+
 			if (!str2listener(args[1], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
 				if (errmsg && *errmsg) {
 					indent_msg(&errmsg, 2);
@@ -762,11 +778,14 @@
 				}
 				else
 					ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
-							 file, linenum, args[0], args[1], args[2]);
+							 file, linenum, args[0], args[1], args[1]);
 				err_code |= ERR_FATAL;
 				goto out;
 			}
-			l = LIST_ELEM(bind_conf->listeners.n, typeof(l), by_bind);
+			/*
+			 * Newly allocated listener is at the end of the list
+			 */
+			l = LIST_ELEM(bind_conf->listeners.p, typeof(l), by_bind);
 			l->maxaccept = 1;
 			l->accept = session_accept_fd;
 			l->analysers |=  curpeers->peers_fe->fe_req_ana;
@@ -977,6 +996,16 @@
 			goto out;
 
 		bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum, args[2], xprt_get(XPRT_RAW));
+		if (!bind_conf) {
+			ha_alert("parsing [%s:%d] : '%s %s' : Cannot allocate memory.\n", file, linenum, args[0], args[1]);
+			err_code |= ERR_FATAL;
+			goto out;
+		}
+
+		if (!LIST_ISEMPTY(&bind_conf->listeners)) {
+			ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
+			err_code |= ERR_FATAL;
+		}
 
 		if (!str2listener(args[2], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
 			if (errmsg && *errmsg) {
@@ -990,7 +1019,10 @@
 			goto out;
 		}
 
-		l = LIST_ELEM(bind_conf->listeners.n, typeof(l), by_bind);
+		/*
+		 * Newly allocated listener is at the end of the list
+		 */
+		l = LIST_ELEM(bind_conf->listeners.p, typeof(l), by_bind);
 		l->maxaccept = 1;
 		l->accept = session_accept_fd;
 		l->analysers |=  curpeers->peers_fe->fe_req_ana;
@@ -2024,9 +2056,10 @@
 				if (outline == NULL) {
 					ha_alert("parsing [%s:%d]: line too long, cannot allocate memory.\n",
 						 file, linenum);
-					err_code |= ERR_ALERT | ERR_FATAL;
+					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
 					fatal++;
-					goto next_line;
+					outlinesize = 0;
+					goto err;
 				}
 				/* try again */
 				continue;
@@ -2602,6 +2635,7 @@
 {
 	int cfgerr = 0;
 	struct proxy *curproxy = NULL;
+	struct proxy *init_proxies_list = NULL;
 	struct stktable *t;
 	struct server *newsrv = NULL;
 	int err_code = 0;
@@ -2617,9 +2651,6 @@
 	 * Now, check for the integrity of all that we have collected.
 	 */
 
-	/* will be needed further to delay some tasks */
-	tv_update_date(0,1);
-
 	if (!global.tune.max_http_hdr)
 		global.tune.max_http_hdr = MAX_HTTP_HDR;
 
@@ -2646,6 +2677,12 @@
 #endif
 			global.nbthread = numa_cores ? numa_cores :
 			                               thread_cpus_enabled_at_boot;
+
+			if (global.nbthread > MAX_THREADS) {
+				ha_diag_warning("nbthread not set, found %d CPUs, limiting to %d threads. Please set nbthreads in the global section to silence this warning.\n",
+					   global.nbthread, MAX_THREADS);
+				global.nbthread = MAX_THREADS;
+			}
 		}
 		all_threads_mask = nbits(global.nbthread);
 #endif
@@ -2679,7 +2716,11 @@
 		proxies_list = next;
 	}
 
-	for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
+	/* starting to initialize the main proxies list */
+	init_proxies_list = proxies_list;
+
+init_proxies_list_stage1:
+	for (curproxy = init_proxies_list; curproxy; curproxy = curproxy->next) {
 		struct switching_rule *rule;
 		struct server_rule *srule;
 		struct sticking_rule *mrule;
@@ -2809,11 +2850,16 @@
 		case PR_MODE_CLI:
 			cfgerr += proxy_cfg_ensure_no_http(curproxy);
 			break;
+
 		case PR_MODE_SYSLOG:
+			/* this mode is initialized as the classic tcp proxy */
+			cfgerr += proxy_cfg_ensure_no_http(curproxy);
+			break;
+
 		case PR_MODE_PEERS:
 		case PR_MODES:
 			/* should not happen, bug gcc warn missing switch statement */
-			ha_alert("config : %s '%s' cannot use peers or syslog mode for this proxy. NOTE: PLEASE REPORT THIS TO DEVELOPERS AS YOU'RE NOT SUPPOSED TO BE ABLE TO CREATE A CONFIGURATION TRIGGERING THIS!\n",
+			ha_alert("config: %s '%s' cannot initialize this proxy mode (peers) in this way. NOTE: PLEASE REPORT THIS TO DEVELOPERS AS YOU'RE NOT SUPPOSED TO BE ABLE TO CREATE A CONFIGURATION TRIGGERING THIS!\n",
 				 proxy_type_str(curproxy), curproxy->id);
 			cfgerr++;
 			break;
@@ -3938,6 +3984,24 @@
 		}
 	}
 
+	/*
+	 * We have just initialized the main proxies list
+	 * we must also configure the log-forward proxies list
+	 */
+	if (init_proxies_list == proxies_list) {
+		init_proxies_list = cfg_log_forward;
+		/* check if list is not null to avoid infinite loop */
+		if (init_proxies_list)
+			goto init_proxies_list_stage1;
+	}
+
+	if (init_proxies_list == cfg_log_forward) {
+		init_proxies_list = sink_proxies_list;
+		/* check if list is not null to avoid infinite loop */
+		if (init_proxies_list)
+			goto init_proxies_list_stage1;
+	}
+
 	/***********************************************************/
 	/* At this point, target names have already been resolved. */
 	/***********************************************************/
@@ -4059,7 +4123,11 @@
 
 	/* perform the final checks before creating tasks */
 
-	for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
+	/* starting to initialize the main proxies list */
+	init_proxies_list = proxies_list;
+
+init_proxies_list_stage2:
+	for (curproxy = init_proxies_list; curproxy; curproxy = curproxy->next) {
 		struct listener *listener;
 		unsigned int next_id;
 
@@ -4181,6 +4249,17 @@
 	}
 
 	/*
+	 * We have just initialized the main proxies list
+	 * we must also configure the log-forward proxies list
+	 */
+	if (init_proxies_list == proxies_list) {
+		init_proxies_list = cfg_log_forward;
+		/* check if list is not null to avoid infinite loop */
+		if (init_proxies_list)
+			goto init_proxies_list_stage2;
+	}
+
+	/*
 	 * Recount currently required checks.
 	 */
 
diff --git a/src/channel.c b/src/channel.c
index 524d104..94dfbcf 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -398,7 +398,7 @@
 	if (chn->flags & CF_SHUTW)
 		return -1;
 
-	if (len + offset > co_data(chn)) {
+	if (len + offset > co_data(chn) || co_data(chn) == 0) {
 		if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
 			return -1;
 		return 0;
diff --git a/src/check.c b/src/check.c
index e8167a7..54704bd 100644
--- a/src/check.c
+++ b/src/check.c
@@ -198,14 +198,18 @@
 	if (!check || src->verbosity < CHK_VERB_CLEAN)
 		return;
 
-	chunk_appendf(&trace_buf, " : [%c] SRV=%s",
-		      ((check->type == PR_O2_EXT_CHK) ? 'E' : (check->state & CHK_ST_AGENT ? 'A' : 'H')),
-		      srv->id);
+	if (srv) {
+		chunk_appendf(&trace_buf, " : [%c] SRV=%s",
+			      ((check->type == PR_O2_EXT_CHK) ? 'E' : (check->state & CHK_ST_AGENT ? 'A' : 'H')),
+			      srv->id);
 
-	chunk_appendf(&trace_buf, " status=%d/%d %s",
-		      (check->health >= check->rise) ? check->health - check->rise + 1 : check->health,
-		      (check->health >= check->rise) ? check->fall : check->rise,
-		      (check->health >= check->rise) ? (srv->uweight ? "UP" : "DRAIN") : "DOWN");
+		chunk_appendf(&trace_buf, " status=%d/%d %s",
+			      (check->health >= check->rise) ? check->health - check->rise + 1 : check->health,
+			      (check->health >= check->rise) ? check->fall : check->rise,
+			      (check->health >= check->rise) ? (srv->uweight ? "UP" : "DRAIN") : "DOWN");
+	}
+	else
+		chunk_appendf(&trace_buf, " : [EMAIL]");
 
 	switch (check->result) {
 	case CHK_RES_NEUTRAL: res = "-";     break;
@@ -1214,6 +1218,10 @@
 			check_notify_success(check);
 		}
 	}
+
+        if (LIST_INLIST(&check->buf_wait.list))
+                LIST_DEL_INIT(&check->buf_wait.list);
+
 	task_set_affinity(t, MAX_THREADS_MASK);
 	check_release_buf(check, &check->bi);
 	check_release_buf(check, &check->bo);
@@ -1348,6 +1356,7 @@
 {
 	struct task *t;
 	unsigned long thread_mask = MAX_THREADS_MASK;
+	ulong boottime = tv_ms_remain(&start_date, &ready_date);
 
 	if (check->type == PR_O2_EXT_CHK)
 		thread_mask = 1;
@@ -1366,11 +1375,19 @@
 	if (mininter < srv_getinter(check))
 		mininter = srv_getinter(check);
 
+	if (global.spread_checks > 0) {
+		int rnd;
+
+		rnd  = srv_getinter(check) * global.spread_checks / 100;
+		rnd -= (int) (2 * rnd * (ha_random32() / 4294967295.0));
+		mininter += rnd;
+	}
+
 	if (global.max_spread_checks && mininter > global.max_spread_checks)
 		mininter = global.max_spread_checks;
 
 	/* check this every ms */
-	t->expire = tick_add(now_ms, MS_TO_TICKS(mininter * srvpos / nbcheck));
+	t->expire = tick_add(now_ms, MS_TO_TICKS(boottime + mininter * srvpos / nbcheck));
 	check->start = now;
 	task_queue(t);
 
@@ -1707,6 +1724,13 @@
 		LIST_INSERT(srv->agent.tcpcheck_rules->list, &chk->list);
 	}
 
+	/* <chk> is always defined here and it is a CONNECT action. If there is
+	 * a preset variable, it means there is an agent string defined and data
+	 * will be sent after the connect.
+	 */
+	if (!LIST_ISEMPTY(&srv->agent.tcpcheck_rules->preset_vars))
+		chk->connect.options |= TCPCHK_OPT_HAS_DATA;
+
 
 	err = init_check(&srv->agent, PR_O2_TCPCHK_CHK);
 	if (err) {
diff --git a/src/chunk.c b/src/chunk.c
index 5c720c1..abc039d 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -152,15 +152,19 @@
 int chunk_appendf(struct buffer *chk, const char *fmt, ...)
 {
 	va_list argp;
+	size_t room;
 	int ret;
 
 	if (!chk->area || !chk->size)
 		return 0;
 
+	room = chk->size - chk->data;
+	if (!room)
+		return chk->data;
+
 	va_start(argp, fmt);
-	ret = vsnprintf(chk->area + chk->data, chk->size - chk->data, fmt,
-			argp);
-	if (ret >= chk->size - chk->data)
+	ret = vsnprintf(chk->area + chk->data, room, fmt, argp);
+	if (ret >= room)
 		/* do not copy anything in case of truncation */
 		chk->area[chk->data] = 0;
 	else
diff --git a/src/debug.c b/src/debug.c
index f9eccad..6582381 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -272,9 +272,10 @@
 	if (hlua && hlua->T) {
 		chunk_appendf(buf, "stack traceback:\n    ");
 		append_prefixed_str(buf, hlua_traceback(hlua->T, "\n    "), pfx, '\n', 0);
-		b_putchr(buf, '\n');
 	}
-	else
+
+	/* we may need to terminate the current line */
+	if (*b_peek(buf, b_data(buf)-1) != '\n')
 		b_putchr(buf, '\n');
 #endif
 }
@@ -1037,7 +1038,27 @@
 				      S_ISLNK(statbuf.st_mode)  ? "link":
 				      S_ISSOCK(statbuf.st_mode) ? "sock":
 #ifdef USE_EPOLL
-				      epoll_wait(fd, NULL, 0, 0) != -1 || errno != EBADF ? "epol":
+				      /* trick: epoll_ctl() will return -ENOENT when trying
+				       * to remove from a valid epoll FD an FD that was not
+				       * registered against it. But we don't want to risk
+				       * disabling a random FD. Instead we'll create a new
+				       * one by duplicating 0 (it should be valid since
+				       * pointing to a terminal or /dev/null), and try to
+				       * remove it.
+				       */
+				      ({
+					      int fd2 = dup(0);
+					      int ret = fd2;
+					      if (ret >= 0) {
+						      ret = epoll_ctl(fd, EPOLL_CTL_DEL, fd2, NULL);
+						      if (ret == -1 && errno == ENOENT)
+							      ret = 0; // that's a real epoll
+						      else
+							      ret = -1; // it's something else
+						      close(fd2);
+					      }
+					      ret;
+				      }) == 0 ? "epol" :
 #endif
 				      "????",
 				      (uint)statbuf.st_mode & 07777,
diff --git a/src/dns.c b/src/dns.c
index 1ef5e87..3277f40 100644
--- a/src/dns.c
+++ b/src/dns.c
@@ -656,30 +656,35 @@
 			struct dns_query *query;
 
 			if (!ds->rx_msg.len) {
-				/* next message len is not fully available into the channel */
-				if (co_data(si_oc(si)) < 2)
-					break;
-
 				/* retrieve message len */
-				co_getblk(si_oc(si), (char *)&msg_len, 2, 0);
+				ret = co_getblk(si_oc(si), (char *)&msg_len, 2, 0);
+				if (ret <= 0) {
+					if (ret == -1)
+						goto close;
+					si_cant_get(si);
+					break;
+				}
 
 				/* mark as consumed */
 				co_skip(si_oc(si), 2);
 
 				/* store message len */
 				ds->rx_msg.len = ntohs(msg_len);
+				if (!ds->rx_msg.len)
+					continue;
 			}
 
-			if (!co_data(si_oc(si))) {
-				/* we need more data but nothing is available */
-				break;
-			}
-
 			if (co_data(si_oc(si)) + ds->rx_msg.offset < ds->rx_msg.len) {
 				/* message only partially available */
 
 				/* read available data */
-				co_getblk(si_oc(si), ds->rx_msg.area + ds->rx_msg.offset, co_data(si_oc(si)), 0);
+				ret = co_getblk(si_oc(si), ds->rx_msg.area + ds->rx_msg.offset, co_data(si_oc(si)), 0);
+				if (ret <= 0) {
+					if (ret == -1)
+						goto close;
+					si_cant_get(si);
+					break;
+				}
 
 				/* update message offset */
 				ds->rx_msg.offset += co_data(si_oc(si));
@@ -688,13 +693,20 @@
 				co_skip(si_oc(si), co_data(si_oc(si)));
 
 				/* we need to wait for more data */
+				si_cant_get(si);
 				break;
 			}
 
 			/* enough data is available into the channel to read the message until the end */
 
 			/* read from the channel until the end of the message */
-			co_getblk(si_oc(si), ds->rx_msg.area + ds->rx_msg.offset, ds->rx_msg.len - ds->rx_msg.offset, 0);
+			ret = co_getblk(si_oc(si), ds->rx_msg.area + ds->rx_msg.offset, ds->rx_msg.len - ds->rx_msg.offset, 0);
+			if (ret <= 0) {
+				if (ret == -1)
+					goto close;
+				si_cant_get(si);
+				break;
+			}
 
 			/* consume all data until the end of the message from the channel */
 			co_skip(si_oc(si), ds->rx_msg.len - ds->rx_msg.offset);
@@ -1023,7 +1035,7 @@
 	if (dss->maxconn && (dss->maxconn <= dss->cur_conns))
 		return NULL;
 
-	ds = pool_alloc(dns_session_pool);
+	ds = pool_zalloc(dns_session_pool);
 	if (!ds)
 		return NULL;
 
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index 330c38c..041d6d8 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -185,7 +185,7 @@
 
 	thread_harmless_now();
 
-	/* now let's wait for polled events */
+	/* Now let's wait for polled events. */
 	wait_time = wake ? 0 : compute_poll_timeout(exp);
 	tv_entering_poll();
 	activity_count_runtime();
@@ -201,7 +201,7 @@
 		}
 		if (timeout || !wait_time)
 			break;
-		if (signal_queue_len || wake)
+		if (wake)
 			break;
 		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
 			break;
diff --git a/src/ev_evports.c b/src/ev_evports.c
index 109e59c..e012e00 100644
--- a/src/ev_evports.c
+++ b/src/ev_evports.c
@@ -153,9 +153,7 @@
 
 	thread_harmless_now();
 
-	/*
-	 * Determine how long to wait for events to materialise on the port.
-	 */
+	/* Now let's wait for polled events. */
 	wait_time = wake ? 0 : compute_poll_timeout(exp);
 	tv_entering_poll();
 	activity_count_runtime();
@@ -195,7 +193,7 @@
 			break;
 		if (timeout || !wait_time)
 			break;
-		if (signal_queue_len || wake)
+		if (wake)
 			break;
 		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
 			break;
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
index d51a833..0d81d67 100644
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -141,7 +141,7 @@
 	}
 	fd_nbupdt = 0;
 
-	/* now let's wait for events */
+	/* Now let's wait for polled events. */
 	wait_time = wake ? 0 : compute_poll_timeout(exp);
 	fd = global.tune.maxpollevents;
 	tv_entering_poll();
@@ -167,7 +167,7 @@
 		}
 		if (timeout || !wait_time)
 			break;
-		if (signal_queue_len || wake)
+		if (wake)
 			break;
 		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
 			break;
diff --git a/src/ev_poll.c b/src/ev_poll.c
index c30aadb..4d136a0 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -21,6 +21,7 @@
 #include <haproxy/api.h>
 #include <haproxy/fd.h>
 #include <haproxy/global.h>
+#include <haproxy/signal.h>
 #include <haproxy/ticks.h>
 #include <haproxy/time.h>
 
@@ -198,7 +199,7 @@
 		}
 	}
 
-	/* now let's wait for events */
+	/* Now let's wait for polled events. */
 	wait_time = wake ? 0 : compute_poll_timeout(exp);
 	tv_entering_poll();
 	activity_count_runtime();
diff --git a/src/fcgi-app.c b/src/fcgi-app.c
index 52b82b9..8fca1e9 100644
--- a/src/fcgi-app.c
+++ b/src/fcgi-app.c
@@ -589,7 +589,7 @@
 	struct fcgi_flt_conf *fcgi_conf = NULL;
 	int retval = 0;
 
-	if (!(curpx->cap & PR_CAP_BE)) {
+	if ((curpx->cap & PR_CAP_DEF) || !(curpx->cap & PR_CAP_BE)) {
 		memprintf(err, "'%s' only available in backend or listen section", args[0]);
 		retval = -1;
 		goto end;
@@ -663,6 +663,12 @@
 			goto end;
 		}
 
+		/* By default, for FCGI-ready backend, HTTP request header names
+		 * are restricted and the "delete" policy is set
+		 */
+		if (fcgi_conf && !(px->options2 & PR_O2_RSTRICT_REQ_HDR_NAMES_MASK))
+			px->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_DEL;
+
 		for (srv = px->srv; srv; srv = srv->next) {
 			if (srv->mux_proto && isteq(srv->mux_proto->token, ist("fcgi"))) {
 				nb_fcgi_srv++;
diff --git a/src/fcgi.c b/src/fcgi.c
index 1c2543d..778ce9e 100644
--- a/src/fcgi.c
+++ b/src/fcgi.c
@@ -47,7 +47,7 @@
 	out->area[len++] = ((h->len >> 8) & 0xff);
 	out->area[len++] = (h->len & 0xff);
 	out->area[len++] = h->padding;
-	len++; /* rsv */
+	out->area[len++] = 0; /* rsv */
 
 	out->data = len;
 	return 1;
@@ -94,7 +94,11 @@
 	out->area[len++] = ((r->role >> 8) & 0xff);
 	out->area[len++] = (r->role & 0xff);
 	out->area[len++] = r->flags;
-	len += 5; /* rsv */
+	out->area[len++] = 0; /* rsv */
+	out->area[len++] = 0;
+	out->area[len++] = 0;
+	out->area[len++] = 0;
+	out->area[len++] = 0;
 
 	out->data = len;
 	return 1;
diff --git a/src/fd.c b/src/fd.c
index fe712a9..3c9629f 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -206,7 +206,7 @@
 #ifdef HA_CAS_IS_8B
 		 unlikely(!_HA_ATOMIC_CAS(((uint64_t *)&_GET_NEXT(fd, off)), (uint64_t *)&cur_list.u64, next_list.u64))
 #else
-		 unlikely(!_HA_ATOMIC_DWCAS(((long *)&_GET_NEXT(fd, off)), (uint32_t *)&cur_list.u32, &next_list.u32))
+		 unlikely(!_HA_ATOMIC_DWCAS(((long *)&_GET_NEXT(fd, off)), (uint32_t *)&cur_list.u32, (const uint32_t *)&next_list.u32))
 #endif
 	    );
 	next = cur_list.ent.next;
@@ -304,6 +304,9 @@
  */
 void _fd_delete_orphan(int fd)
 {
+	uint fd_disown;
+
+	fd_disown = fdtab[fd].state & FD_DISOWN;
 	if (fdtab[fd].state & FD_LINGER_RISK) {
 		/* this is generally set when connecting to servers */
 		DISGUISE(setsockopt(fd, SOL_SOCKET, SO_LINGER,
@@ -325,7 +328,8 @@
 	/* perform the close() call last as it's what unlocks the instant reuse
 	 * of this FD by any other thread.
 	 */
-	close(fd);
+	if (!fd_disown)
+		close(fd);
 	_HA_ATOMIC_DEC(&ha_used_fds);
 }
 
@@ -662,9 +666,9 @@
 	/* rd and wr are init at the same place, but only rd is init to -1, so
 	  we rely to rd to close.   */
 	if (poller_rd_pipe > -1) {
-		close(poller_rd_pipe);
+		fd_delete(poller_rd_pipe);
 		poller_rd_pipe = -1;
-		close(poller_wr_pipe[tid]);
+		fd_delete(poller_wr_pipe[tid]);
 		poller_wr_pipe[tid] = -1;
 	}
 }
diff --git a/src/filters.c b/src/filters.c
index f64c192..be2b380 100644
--- a/src/filters.c
+++ b/src/filters.c
@@ -292,10 +292,9 @@
 	int err_code = ERR_NONE;
 
 	for (px = proxies_list; px; px = px->next) {
-		if (px->disabled) {
-			flt_deinit(px);
+		if (px->disabled)
 			continue;
-		}
+
 		err_code |= flt_init(px);
 		if (err_code & (ERR_ABORT|ERR_FATAL)) {
 			ha_alert("Failed to initialize filters for proxy '%s'.\n",
diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c
index a9c49f5..66eb601 100644
--- a/src/flt_http_comp.c
+++ b/src/flt_http_comp.c
@@ -304,23 +304,18 @@
 	struct htx_sl *sl;
 	struct http_hdr_ctx ctx;
 
-	/*
-	 * Add Content-Encoding header when it's not identity encoding.
-	 * RFC 2616 : Identity encoding: This content-coding is used only in the
-	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
-	 * header.
-	 */
-	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
-		struct ist v = ist2(st->comp_algo->ua_name, st->comp_algo->ua_name_len);
-
-		if (!http_add_header(htx, ist("Content-Encoding"), v))
-			goto error;
-	}
-
 	sl = http_get_stline(htx);
 	if (!sl)
 		goto error;
 
+	/* add "Transfer-Encoding: chunked" header */
+	if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
+		if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
+			goto error;
+		msg->flags |= HTTP_MSGF_TE_CHNK;
+		sl->flags |= (HTX_SL_F_XFER_ENC|HTX_SL_F_CHNK);
+	}
+
 	/* remove Content-Length header */
 	if (msg->flags & HTTP_MSGF_CNT_LEN) {
 		ctx.blk = NULL;
@@ -330,14 +325,6 @@
 		sl->flags &= ~HTX_SL_F_CLEN;
 	}
 
-	/* add "Transfer-Encoding: chunked" header */
-	if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
-		if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
-			goto error;
-		msg->flags |= HTTP_MSGF_TE_CHNK;
-		sl->flags |= (HTX_SL_F_XFER_ENC|HTX_SL_F_CHNK);
-	}
-
 	/* convert "ETag" header to a weak ETag */
 	ctx.blk = NULL;
 	if (http_find_header(htx, ist("ETag"), &ctx, 1)) {
@@ -355,6 +342,19 @@
 	if (!http_add_header(htx, ist("Vary"), ist("Accept-Encoding")))
 		goto error;
 
+	/*
+	 * Add Content-Encoding header when it's not identity encoding.
+	 * RFC 2616 : Identity encoding: This content-coding is used only in the
+	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
+	 * header.
+	 */
+	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
+		struct ist v = ist2(st->comp_algo->ua_name, st->comp_algo->ua_name_len);
+
+		if (!http_add_header(htx, ist("Content-Encoding"), v))
+			goto error;
+	}
+
 	return 1;
 
   error:
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index cb7eed4..ff90043 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -1245,6 +1245,7 @@
 		if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
 			eb32_delete(&spoe_appctx->node);
 			_HA_ATOMIC_DEC(&agent->counters.idles);
+			agent->rt[tid].idles--;
 		}
 
 		appctx->st0 = SPOE_APPCTX_ST_END;
@@ -1259,7 +1260,7 @@
 	/* Destroy the task attached to this applet */
 	task_destroy(spoe_appctx->task);
 
-	/* Notify all waiting streams */
+	/* Report an error to all streams in the appctx waiting queue */
 	list_for_each_entry_safe(ctx, back, &spoe_appctx->waiting_queue, list) {
 		LIST_DELETE(&ctx->list);
 		LIST_INIT(&ctx->list);
@@ -1271,8 +1272,8 @@
 		task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
 	}
 
-	/* If the applet was processing a fragmented frame, notify the
-	 * corresponding stream. */
+	/* If the applet was processing a fragmented frame, report an error to
+	 * the corresponding stream. */
 	if (spoe_appctx->frag_ctx.ctx) {
 		ctx = spoe_appctx->frag_ctx.ctx;
 		ctx->spoe_appctx = NULL;
@@ -1281,7 +1282,11 @@
 		task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
 	}
 
-	if (!LIST_ISEMPTY(&agent->rt[tid].waiting_queue)) {
+	if (!LIST_ISEMPTY(&agent->rt[tid].applets)) {
+		/* If there are still some running applets, remove reference on
+		 * the current one from streams in the async waiting queue. In
+		 * async mode, the ACK may be received from another appctx.
+		 */
 		list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
 			if (ctx->spoe_appctx == spoe_appctx)
 				ctx->spoe_appctx = NULL;
@@ -1289,16 +1294,25 @@
 		goto end;
 	}
 	else {
-		/* It is the last running applet and the sending and waiting
-		 * queues are not empty. Try to start a new one if HAproxy is
-		 * not stopping.
+		/* It is the last running applet and the sending and async
+		 * waiting queues are not empty. So try to start a new applet if
+		 * HAproxy is not stopping. On success, we remove reference on
+		 * the current appctx from streams in the async waiting queue.
+		 * In async mode, the ACK may be received from another appctx.
 		 */
 		if (!stopping &&
 		    (!LIST_ISEMPTY(&agent->rt[tid].sending_queue) || !LIST_ISEMPTY(&agent->rt[tid].waiting_queue)) &&
-		    spoe_create_appctx(agent->spoe_conf))
+		    spoe_create_appctx(agent->spoe_conf)) {
+			list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
+				if (ctx->spoe_appctx == spoe_appctx)
+					ctx->spoe_appctx = NULL;
+			}
 			goto end;
+		}
 
-		/* otherwise, notify all waiting streams */
+		/* Otherwise, report an error to all streams in the sending and
+		 * async waiting queues.
+		 */
 		list_for_each_entry_safe(ctx, back, &agent->rt[tid].sending_queue, list) {
 			LIST_DELETE(&ctx->list);
 			LIST_INIT(&ctx->list);
@@ -1445,6 +1459,7 @@
 
 		default:
 			_HA_ATOMIC_INC(&agent->counters.idles);
+			agent->rt[tid].idles++;
 			appctx->st0 = SPOE_APPCTX_ST_IDLE;
 			SPOE_APPCTX(appctx)->node.key = 0;
 			eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
@@ -1687,12 +1702,6 @@
 		      (agent->b.be->nbpend ||
 		       (srv && (srv->nbpend || (srv->maxconn && srv->served >= srv_dynamic_maxconn(srv))))));
 
-	/* Don"t try to send new frame we are waiting for at lease a ack, in
-	 * sync mode or if applet must be closed ASAP
-	 */
-	if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK || (close_asap && SPOE_APPCTX(appctx)->cur_fpa))
-		skip_sending = 1;
-
 	/* receiving_frame loop */
 	while (!skip_receiving) {
 		ret = spoe_handle_receiving_frame_appctx(appctx, &skip_receiving);
@@ -1713,6 +1722,12 @@
 		}
 	}
 
+	/* Don't try to send a new frame when we are waiting for at least one
+	 * ack, in sync mode, or if the applet must be closed ASAP
+	 */
+	if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK || (close_asap && SPOE_APPCTX(appctx)->cur_fpa))
+		skip_sending = 1;
+
 	/* send_frame loop */
 	while (!skip_sending && SPOE_APPCTX(appctx)->cur_fpa < agent->max_fpa) {
 		ret = spoe_handle_sending_frame_appctx(appctx, &skip_sending);
@@ -1759,6 +1774,7 @@
 			goto next;
 		}
 		_HA_ATOMIC_INC(&agent->counters.idles);
+		agent->rt[tid].idles++;
 		appctx->st0 = SPOE_APPCTX_ST_IDLE;
 		eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
 	}
@@ -1924,6 +1940,7 @@
 
 		case SPOE_APPCTX_ST_IDLE:
 			_HA_ATOMIC_DEC(&agent->counters.idles);
+			agent->rt[tid].idles--;
 			eb32_delete(&SPOE_APPCTX(appctx)->node);
 			if (stopping &&
 			    LIST_ISEMPTY(&agent->rt[tid].sending_queue) &&
@@ -2065,8 +2082,8 @@
 	struct spoe_appctx *spoe_appctx;
 
 	/* Check if we need to create a new SPOE applet or not. */
-	if (!eb_is_empty(&agent->rt[tid].idle_applets) &&
-	    (agent->rt[tid].processing == 1 || agent->rt[tid].processing < read_freq_ctr(&agent->rt[tid].processing_per_sec)))
+	if (agent->rt[tid].processing < agent->rt[tid].idles  ||
+	    agent->rt[tid].processing < read_freq_ctr(&agent->rt[tid].processing_per_sec))
 		goto end;
 
 	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
@@ -2982,6 +2999,14 @@
 	while (p) {
 		struct flt_conf *fconf;
 
+		/* SPOE filters are not initialized for disabled proxies. Move to
+		 * the next one.
+		 */
+		if (p->disabled) {
+			p = p->next;
+			continue;
+		}
+
 		list_for_each_entry(fconf, &p->filter_configs, list) {
 			struct spoe_config *conf;
 			struct spoe_agent  *agent;
@@ -3023,7 +3048,6 @@
         conf->agent_fe.accept = frontend_accept;
         conf->agent_fe.srv = NULL;
         conf->agent_fe.timeout.client = TICK_ETERNITY;
-	conf->agent_fe.default_target = &spoe_applet.obj_type;
 	conf->agent_fe.fe_req_ana = AN_REQ_SWITCHING_RULES;
 
 	if (!sighandler_registered) {
@@ -3115,6 +3139,7 @@
 		conf->agent->rt[i].engine_id    = NULL;
 		conf->agent->rt[i].frame_size   = conf->agent->max_frame_size;
 		conf->agent->rt[i].processing   = 0;
+		conf->agent->rt[i].idles        = 0;
 		LIST_INIT(&conf->agent->rt[i].applets);
 		LIST_INIT(&conf->agent->rt[i].sending_queue);
 		LIST_INIT(&conf->agent->rt[i].waiting_queue);
diff --git a/src/h1.c b/src/h1.c
index 3a6c1c3..42fe670 100644
--- a/src/h1.c
+++ b/src/h1.c
@@ -34,13 +34,20 @@
 	int not_first = !!(h1m->flags & H1_MF_CLEN);
 	struct ist word;
 
-	word.ptr = value->ptr - 1; // -1 for next loop's pre-increment
+	word.ptr = value->ptr;
 	e = value->ptr + value->len;
 
-	while (++word.ptr < e) {
+	while (1) {
+		if (word.ptr >= e) {
+			/* empty header or empty value */
+			goto fail;
+		}
+
 		/* skip leading delimiter and blanks */
-		if (unlikely(HTTP_IS_LWS(*word.ptr)))
+		if (unlikely(HTTP_IS_LWS(*word.ptr))) {
+			word.ptr++;
 			continue;
+		}
 
 		/* digits only now */
 		for (cl = 0, n = word.ptr; n < e; n++) {
@@ -51,6 +58,14 @@
 					goto fail;
 				break;
 			}
+
+			if (unlikely(!cl && n > word.ptr)) {
+				/* There was a leading zero before this digit,
+				 * let's trim it.
+				 */
+				word.ptr = n;
+			}
+
 			if (unlikely(cl > ULLONG_MAX / 10ULL))
 				goto fail; /* multiply overflow */
 			cl = cl * 10ULL;
@@ -79,6 +94,13 @@
 		h1m->flags |= H1_MF_CLEN;
 		h1m->curr_len = h1m->body_len = cl;
 		*value = word;
+
+		/* Now either n==e and we're done, or n points to the comma,
+		 * and we skip it and continue.
+		 */
+		if (n++ == e)
+			break;
+
 		word.ptr = n;
 	}
 	/* here we've reached the end with a single value or a series of
@@ -130,6 +152,50 @@
 	}
 }
 
+/* Validate the authority and the host header value for the CONNECT method. If
+ * there is a host header, its value is normalized. 0 is returned on success,
+ * -1 if the authority is invalid and -2 if the host is invalid.
+ */
+static int h1_validate_connect_authority(struct ist authority, struct ist *host_hdr)
+{
+	struct ist uri_host, uri_port, host, host_port;
+
+	if (!isttest(authority))
+		goto invalid_authority;
+	uri_host = authority;
+	uri_port = http_get_host_port(authority);
+	if (!isttest(uri_port))
+		goto invalid_authority;
+	uri_host.len -= (istlen(uri_port) + 1);
+
+	if (!host_hdr || !isttest(*host_hdr))
+		goto end;
+
+	/* Get the port of the host header value, if any */
+	host = *host_hdr;
+	host_port = http_get_host_port(*host_hdr);
+	if (isttest(host_port)) {
+		host.len -= (istlen(host_port) + 1);
+		if (!isteqi(host, uri_host) || !isteq(host_port, uri_port))
+			goto invalid_host;
+		if (http_is_default_port(IST_NULL, uri_port))
+			*host_hdr = host; /* normalize */
+	}
+	else {
+		if (!http_is_default_port(IST_NULL, uri_port) || !isteqi(host, uri_host))
+			goto invalid_host;
+	}
+
+  end:
+	return 0;
+
+  invalid_authority:
+	return -1;
+
+  invalid_host:
+	return -2;
+}
+
 /* Parse the Connection: header of an HTTP/1 request, looking for "close",
  * "keep-alive", and "upgrade" values, and updating h1m->flags according to
  * what was found there. Note that flags are only added, not removed, so the
@@ -422,13 +488,13 @@
 	case H1_MSG_RQURI:
 	http_msg_rquri:
 #ifdef HA_UNALIGNED_LE
-		/* speedup: skip bytes not between 0x21 and 0x7e inclusive */
+		/* speedup: skip bytes not between 0x24 and 0x7e inclusive */
 		while (ptr <= end - sizeof(int)) {
-			int x = *(int *)ptr - 0x21212121;
+			int x = *(int *)ptr - 0x24242424;
 			if (x & 0x80808080)
 				break;
 
-			x -= 0x5e5e5e5e;
+			x -= 0x5b5b5b5b;
 			if (!(x & 0x80808080))
 				break;
 
@@ -440,8 +506,15 @@
 			goto http_msg_ood;
 		}
 	http_msg_rquri2:
-		if (likely((unsigned char)(*ptr - 33) <= 93)) /* 33 to 126 included */
+		if (likely((unsigned char)(*ptr - 33) <= 93)) { /* 33 to 126 included */
+			if (*ptr == '#') {
+				if (h1m->err_pos < -1) /* PR_O2_REQBUG_OK not set */
+					goto invalid_char;
+				if (h1m->err_pos == -1) /* PR_O2_REQBUG_OK set: just log */
+					h1m->err_pos = ptr - start + skip;
+			}
 			EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rquri2, http_msg_ood, state, H1_MSG_RQURI);
+		}
 
 		if (likely(HTTP_IS_SPHT(*ptr))) {
 			sl.rq.u.len = ptr - sl.rq.u.ptr;
@@ -706,6 +779,10 @@
 
 		if (likely(*ptr == ':')) {
 			col = ptr - start;
+			if (col <= sol) {
+				state = H1_MSG_HDR_NAME;
+				goto http_msg_invalid;
+			}
 			EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l1_sp, http_msg_ood, state, H1_MSG_HDR_L1_SP);
 		}
 
@@ -868,22 +945,9 @@
 				else if (isteqi(n, ist("upgrade"))) {
 					h1_parse_upgrade_header(h1m, v);
 				}
-				else if (!(h1m->flags & (H1_MF_HDRS_ONLY|H1_MF_RESP)) && isteqi(n, ist("host"))) {
-					if (host_idx == -1) {
-						struct ist authority;
-
-						authority = http_get_authority(sl.rq.u, 1);
-						if (authority.len && !isteqi(v, authority)) {
-							if (h1m->err_pos < -1) {
-								state = H1_MSG_HDR_L2_LWS;
-								ptr = v.ptr; /* Set ptr on the error */
-								goto http_msg_invalid;
-							}
-							if (h1m->err_pos == -1) /* capture the error pointer */
-								h1m->err_pos = v.ptr - start + skip; /* >= 0 now */
-						}
+				else if (!(h1m->flags & H1_MF_RESP) && isteqi(n, ist("host"))) {
+					if (host_idx == -1)
 						host_idx = hdr_count;
-					}
 					else {
 						if (!isteqi(v, hdr[host_idx].v)) {
 							state = H1_MSG_HDR_L2_LWS;
@@ -934,6 +998,48 @@
 		if (restarting)
 			goto restart;
 
+
+		if (!(h1m->flags & (H1_MF_HDRS_ONLY|H1_MF_RESP))) {
+			struct ist authority;
+
+			authority = http_get_authority(sl.rq.u, 1);
+			if (sl.rq.meth == HTTP_METH_CONNECT) {
+				struct ist *host = ((host_idx != -1) ? &hdr[host_idx].v : NULL);
+				int ret;
+
+				ret = h1_validate_connect_authority(authority, host);
+				if (ret < 0) {
+					if (h1m->err_pos < -1) {
+						state = H1_MSG_LAST_LF;
+						/* WT: gcc seems to see a path where sl.rq.u.ptr was used
+						 * uninitialized, but it doesn't know that the function is
+						 * called with initial states making this impossible.
+						 */
+						ALREADY_CHECKED(sl.rq.u.ptr);
+						ptr = ((ret == -1) ? sl.rq.u.ptr : host->ptr); /* Set ptr on the error */
+						goto http_msg_invalid;
+					}
+					if (h1m->err_pos == -1) /* capture the error pointer */
+						h1m->err_pos = ((ret == -1) ? sl.rq.u.ptr : host->ptr) - start + skip; /* >= 0 now */
+				}
+			}
+			else if (host_idx != -1 && istlen(authority)) {
+				struct ist host = hdr[host_idx].v;
+
+				/* For non-CONNECT method, the authority must match the host header value */
+				if (!isteqi(authority, host)) {
+					if (h1m->err_pos < -1) {
+						state = H1_MSG_LAST_LF;
+						ptr = host.ptr; /* Set ptr on the error */
+						goto http_msg_invalid;
+					}
+					if (h1m->err_pos == -1) /* capture the error pointer */
+						h1m->err_pos = v.ptr - start + skip; /* >= 0 now */
+				}
+
+			}
+		}
+
 		state = H1_MSG_DATA;
 		if (h1m->flags & H1_MF_XFER_ENC) {
 			if (h1m->flags & H1_MF_CLEN) {
diff --git a/src/h1_htx.c b/src/h1_htx.c
index 6aa389e..24769f0 100644
--- a/src/h1_htx.c
+++ b/src/h1_htx.c
@@ -57,7 +57,7 @@
 		if (sl->rq.v.len != 8)
 			return 0;
 
-		if (*(sl->rq.v.ptr + 4) != '/' ||
+		if (!istnmatch(sl->rq.v, ist("HTTP/"), 5) ||
 		    !isdigit((unsigned char)*(sl->rq.v.ptr + 5)) ||
 		    *(sl->rq.v.ptr + 6) != '.' ||
 		    !isdigit((unsigned char)*(sl->rq.v.ptr + 7)))
@@ -259,7 +259,7 @@
 			else if (isteqi(hdrs[hdr].n, ist("location"))) {
 				code = 302;
 				status = ist("302");
-				reason = ist("Moved Temporarily");
+				reason = ist("Found");
 			}
 		}
 		if (!code) {
@@ -279,6 +279,9 @@
 		goto output_full;
 	}
 
+	if ((h1m->flags & (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) && code != 101)
+		h1m->flags &= ~(H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET);
+
 	if (((h1m->flags & H1_MF_METH_CONNECT) && code >= 200 && code < 300) || code == 101) {
 		h1m->flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
 		h1m->flags |= H1_MF_XFER_LEN;
diff --git a/src/h2.c b/src/h2.c
index dd1f7d9..4da78c8 100644
--- a/src/h2.c
+++ b/src/h2.c
@@ -80,13 +80,20 @@
 	int not_first = !!(*msgf & H2_MSGF_BODY_CL);
 	struct ist word;
 
-	word.ptr = value->ptr - 1; // -1 for next loop's pre-increment
+	word.ptr = value->ptr;
 	e = value->ptr + value->len;
 
-	while (++word.ptr < e) {
+	while (1) {
+		if (word.ptr >= e) {
+			/* empty header or empty value */
+			goto fail;
+		}
+
 		/* skip leading delimiter and blanks */
-		if (unlikely(HTTP_IS_LWS(*word.ptr)))
+		if (unlikely(HTTP_IS_LWS(*word.ptr))) {
+			word.ptr++;
 			continue;
+		}
 
 		/* digits only now */
 		for (cl = 0, n = word.ptr; n < e; n++) {
@@ -97,6 +104,14 @@
 					goto fail;
 				break;
 			}
+
+			if (unlikely(!cl && n > word.ptr)) {
+				/* There was a leading zero before this digit,
+				 * let's trim it.
+				 */
+				word.ptr = n;
+			}
+
 			if (unlikely(cl > ULLONG_MAX / 10ULL))
 				goto fail; /* multiply overflow */
 			cl = cl * 10ULL;
@@ -125,6 +140,13 @@
 		*msgf |= H2_MSGF_BODY_CL;
 		*body_len = cl;
 		*value = word;
+
+		/* Now either n==e and we're done, or n points to the comma,
+		 * and we skip it and continue.
+		 */
+		if (n++ == e)
+			break;
+
 		word.ptr = n;
 	}
 	/* here we've reached the end with a single value or a series of
@@ -385,8 +407,12 @@
  *
  * The Cookie header will be reassembled at the end, and for this, the <list>
  * will be used to create a linked list, so its contents may be destroyed.
+ *
+ * When <relaxed> is non-null, some non-dangerous checks will be ignored. This
+ * is in order to satisfy "option accept-invalid-http-request" for
+ * interoperability purposes.
  */
-int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len)
+int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, int relaxed)
 {
 	struct ist phdr_val[H2_PHDR_NUM_ENTRIES];
 	uint32_t fields; /* bit mask of H2_PHDR_FND_* */
@@ -422,11 +448,18 @@
 		}
 
 		/* RFC7540#10.3: intermediaries forwarding to HTTP/1 must take care of
-		 * rejecting NUL, CR and LF characters.
+		 * rejecting NUL, CR and LF characters. For :path we reject all CTL
+		 * chars, spaces, and '#'.
 		 */
-		ctl = ist_find_ctl(list[idx].v);
-		if (unlikely(ctl) && has_forbidden_char(list[idx].v, ctl))
-			goto fail;
+		if (phdr == H2_PHDR_IDX_PATH && !relaxed) {
+			ctl = ist_find_range(list[idx].v, 0, '#');
+			if (unlikely(ctl) && http_path_has_forbidden_char(list[idx].v, ctl))
+				goto fail;
+		} else {
+			ctl = ist_find_ctl(list[idx].v);
+			if (unlikely(ctl) && has_forbidden_char(list[idx].v, ctl))
+				goto fail;
+		}
 
 		if (phdr > 0 && phdr < H2_PHDR_NUM_ENTRIES) {
 			/* insert a pseudo header by its index (in phdr) and value (in value) */
diff --git a/src/haproxy.c b/src/haproxy.c
index d64c0e4..dd1130d 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1,6 +1,6 @@
 /*
  * HAProxy : High Availability-enabled HTTP/TCP proxy
- * Copyright 2000-2022 Willy Tarreau <willy@haproxy.org>.
+ * Copyright 2000-2023 Willy Tarreau <willy@haproxy.org>.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -1479,6 +1479,7 @@
 	struct post_check_fct *pcf;
 	int ideal_maxconn;
 
+	setenv("HAPROXY_STARTUP_VERSION", HAPROXY_VERSION, 0);
 	global.mode = MODE_STARTING;
 	old_argv = copy_argv(argc, argv);
 	if (!old_argv) {
@@ -1958,7 +1959,19 @@
 	/* defaults sections are not needed anymore */
 	proxy_destroy_all_defaults();
 
+	/* update the ready date that will be used to count the startup time
+	 * during config checks (e.g. to schedule certain tasks if needed)
+	 */
+	gettimeofday(&date, NULL);
+	ready_date = date;
+
+	/* Note: global.nbthread will be initialized as part of this call */
 	err_code |= check_config_validity();
+
+	/* update the ready date to also account for the check time */
+	gettimeofday(&date, NULL);
+	ready_date = date;
+
 	for (px = proxies_list; px; px = px->next) {
 		struct server *srv;
 		struct post_proxy_check_fct *ppcf;
@@ -2080,6 +2093,10 @@
 			exit(1);
 	}
 
+	/* set the default maxconn in the master, but let it be rewritable with -n */
+	if (global.mode & MODE_MWORKER_WAIT)
+		global.maxconn = DEFAULT_MAXCONN;
+
 	if (cfg_maxconn > 0)
 		global.maxconn = cfg_maxconn;
 
@@ -2477,6 +2494,18 @@
 		free_proxy(p0);
 	}/* end while(p) */
 
+
+	/* we don't need to free sink_proxies_list proxies since it is
+	 * already handled in sink_deinit()
+	 */
+	p = cfg_log_forward;
+	/* we need to manually clean cfg_log_forward proxy list */
+	while (p) {
+		p0 = p;
+		p = p->next;
+		free_proxy(p0);
+	}
+
 	while (ua) {
 		struct stat_scope *scope, *scopep;
 
@@ -2629,7 +2658,7 @@
 		if (killed > 1)
 			break;
 
-		/* expire immediately if events are pending */
+		/* expire immediately if events or signals are pending */
 		wake = 1;
 		if (thread_has_tasks())
 			activity[tid].wake_tasks++;
@@ -2639,6 +2668,10 @@
 			if (thread_has_tasks()) {
 				activity[tid].wake_tasks++;
 				_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+			} else if (signal_queue_len) {
+				/* this check is required to avoid
+				 * a race with wakeup on signals using wake_threads() */
+				_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
 			} else
 				wake = 0;
 		}
@@ -2839,34 +2872,49 @@
 	int pidfd = -1;
 	int intovf = (unsigned char)argc + 1; /* let the compiler know it's strictly positive */
 
-	/* Catch forced CFLAGS that miss 2-complement integer overflow */
-	if (intovf + 0x7FFFFFFF >= intovf) {
+	/* Catch broken toolchains */
+	if (sizeof(long) != sizeof(void *) || (intovf + 0x7FFFFFFF >= intovf)) {
+		const char *msg;
+
+		if (sizeof(long) != sizeof(void *))
+			/* Apparently MingW64 was not made for us and can also break openssl */
+			msg = "The compiler this program was built with uses unsupported integral type sizes.\n"
+			      "Most likely it follows the unsupported LLP64 model. Never try to link HAProxy\n"
+			      "against libraries built with that compiler either! Please only use a compiler\n"
+			      "producing ILP32 or LP64 programs for both programs and libraries.\n";
+		else if (intovf + 0x7FFFFFFF >= intovf)
+			/* Catch forced CFLAGS that miss 2-complement integer overflow */
+			msg = "The source code was miscompiled by the compiler, which usually indicates that\n"
+			      "some of the CFLAGS needed to work around overzealous compiler optimizations\n"
+			      "were overwritten at build time. Please do not force CFLAGS, and read Makefile\n"
+			      "and INSTALL files to decide on the best way to pass your local build options.\n";
+		else
+			msg = "Bug in the compiler bug detection code, please report it to developers!\n";
+
 		fprintf(stderr,
 		        "FATAL ERROR: invalid code detected -- cannot go further, please recompile!\n"
-			"The source code was miscompiled by the compiler, which usually indicates that\n"
-			"some of the CFLAGS needed to work around overzealous compiler optimizations\n"
-			"were overwritten at build time. Please do not force CFLAGS, and read Makefile\n"
-			"and INSTALL files to decide on the best way to pass your local build options.\n"
-		        "\nBuild options :"
+		        "%s"
+			"\nBuild options :"
 #ifdef BUILD_TARGET
-		       "\n  TARGET  = " BUILD_TARGET
+		        "\n  TARGET  = " BUILD_TARGET
 #endif
 #ifdef BUILD_CPU
-		       "\n  CPU     = " BUILD_CPU
+		        "\n  CPU     = " BUILD_CPU
 #endif
 #ifdef BUILD_CC
-		       "\n  CC      = " BUILD_CC
+		        "\n  CC      = " BUILD_CC
 #endif
 #ifdef BUILD_CFLAGS
-		       "\n  CFLAGS  = " BUILD_CFLAGS
+		        "\n  CFLAGS  = " BUILD_CFLAGS
 #endif
 #ifdef BUILD_OPTIONS
-		       "\n  OPTIONS = " BUILD_OPTIONS
+		        "\n  OPTIONS = " BUILD_OPTIONS
 #endif
 #ifdef BUILD_DEBUG
-		       "\n  DEBUG   = " BUILD_DEBUG
+		        "\n  DEBUG   = " BUILD_DEBUG
 #endif
-		       "\n\n");
+		        "\n\n", msg);
+
 		return 1;
 	}
 
@@ -3130,6 +3178,10 @@
 				 global.maxsock);
 	}
 
+	/* update the ready date a last time to also account for final setup time */
+	gettimeofday(&date, NULL);
+	ready_date = date;
+
 	if (global.mode & (MODE_DAEMON | MODE_MWORKER | MODE_MWORKER_WAIT)) {
 		struct proxy *px;
 		struct peers *curpeers;
@@ -3194,8 +3246,9 @@
 					/* find the right mworker_proc */
 					list_for_each_entry(child, &proc_list, list) {
 						if (child->relative_pid == relative_pid &&
-						    child->reloads == 0 && child->options & PROC_O_TYPE_WORKER) {
-							child->timestamp = now.tv_sec;
+						    child->reloads == 0 && child->options & PROC_O_TYPE_WORKER &&
+						    child->pid == -1) {
+							child->timestamp = date.tv_sec;
 							child->pid = ret;
 							child->version = strdup(haproxy_version);
 							break;
@@ -3277,7 +3330,8 @@
 				if (child->ipc_fd[0] >= 0)
 					close(child->ipc_fd[0]);
 				if (child->relative_pid == relative_pid &&
-				    child->reloads == 0) {
+				    child->reloads == 0 &&
+				    child->pid == -1) {
 					/* keep this struct if this is our pid */
 					proc_self = child;
 					continue;
diff --git a/src/hlua.c b/src/hlua.c
index 0af3eb0..2716f84 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -146,7 +146,7 @@
 /* This function takes the Lua global lock. Keep this function's visibility
  * global so that it can appear in stack dumps and performance profiles!
  */
-void lua_take_global_lock()
+static inline void lua_take_global_lock()
 {
 	HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
 }
@@ -156,16 +156,44 @@
 	HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
 }
 
+/* lua lock helpers: only lock when required
+ *
+ * state_id == 0: we're operating on the main lua stack (shared between
+ * os threads), so we need to acquire the main lock
+ *
+ * If the thread already owns the lock (_hlua_locked != 0), skip the lock
+ * attempt. This could happen if we run under a protected lua environment.
+ * Not doing this could result in deadlocks because of nested locking
+ * attempts from the same thread
+ */
+static THREAD_LOCAL int _hlua_locked = 0;
+static inline void hlua_lock(struct hlua *hlua)
+{
+	if (hlua->state_id != 0)
+		return;
+	if (!_hlua_locked)
+		lua_take_global_lock();
+	_hlua_locked += 1;
+}
+static inline void hlua_unlock(struct hlua *hlua)
+{
+	if (hlua->state_id != 0)
+		return;
+	BUG_ON(_hlua_locked <= 0);
+	_hlua_locked--;
+	/* drop the lock once the lock count reaches 0 */
+	if (!_hlua_locked)
+		lua_drop_global_lock();
+}
+
 #define SET_SAFE_LJMP_L(__L, __HLUA) \
 	({ \
 		int ret; \
-		if ((__HLUA)->state_id == 0) \
-			lua_take_global_lock(); \
+		hlua_lock(__HLUA); \
 		if (setjmp(safe_ljmp_env) != 0) { \
 			lua_atpanic(__L, hlua_panic_safe); \
 			ret = 0; \
-			if ((__HLUA)->state_id == 0) \
-				lua_drop_global_lock(); \
+			hlua_unlock(__HLUA); \
 		} else { \
 			lua_atpanic(__L, hlua_panic_ljmp); \
 			ret = 1; \
@@ -179,8 +207,7 @@
 #define RESET_SAFE_LJMP_L(__L, __HLUA) \
 	do { \
 		lua_atpanic(__L, hlua_panic_safe); \
-		if ((__HLUA)->state_id == 0) \
-			lua_drop_global_lock(); \
+		hlua_unlock(__HLUA); \
 	} while(0)
 
 #define SET_SAFE_LJMP(__HLUA) \
@@ -351,7 +378,8 @@
 
 /* Used to check an Lua function type in the stack. It creates and
  * returns a reference of the function. This function throws an
- * error if the rgument is not a "function".
+ * error if the argument is not a "function".
+ * When no longer used, the ref must be released with hlua_unref()
  */
 __LJMP unsigned int hlua_checkfunction(lua_State *L, int argno)
 {
@@ -363,14 +391,59 @@
 	return luaL_ref(L, LUA_REGISTRYINDEX);
 }
 
+/* Used to check an Lua table type in the stack. It creates and
+ * returns a reference of the table. This function throws an
+ * error if the argument is not a "table".
+ * When no longer used, the ref must be released with hlua_unref()
+ */
+__LJMP unsigned int hlua_checktable(lua_State *L, int argno)
+{
+	if (!lua_istable(L, argno)) {
+		const char *msg = lua_pushfstring(L, "table expected, got %s", luaL_typename(L, argno));
+		WILL_LJMP(luaL_argerror(L, argno, msg));
+	}
+	lua_pushvalue(L, argno);
+	return luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
+/* Get a reference to the object that is at the top of the stack
+ * The referenced object will be popped from the stack
+ *
+ * The function returns the reference to the object which must
+ * be cleared using hlua_unref() when no longer used
+ */
+__LJMP int hlua_ref(lua_State *L)
+{
+	return MAY_LJMP(luaL_ref(L, LUA_REGISTRYINDEX));
+}
+
+/* Pushes a reference previously created using luaL_ref(L, LUA_REGISTRYINDEX)
+ * on <L> stack
+ * (ie: hlua_checkfunction(), hlua_checktable() or hlua_ref())
+ *
+ * When the reference is no longer used, it should be released by calling
+ * hlua_unref()
+ *
+ * <L> can be from any co-routine as long as it belongs to the same lua
+ * parent state as the one used to get the reference.
+ */
+void hlua_pushref(lua_State *L, int ref)
+{
+	lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
+}
+
-/* Return the string that is of the top of the stack. */
-const char *hlua_get_top_error_string(lua_State *L)
+/* Releases a reference previously created using luaL_ref(L, LUA_REGISTRYINDEX)
+ * (ie: hlua_checkfunction(), hlua_checktable() or hlua_ref())
+ *
+ * This will allow the reference to be reused and the referred object
+ * to be garbage collected.
+ *
+ * <L> can be from any co-routine as long as it belongs to the same lua
+ * parent state as the one used to get the reference.
+ */
+void hlua_unref(lua_State *L, int ref)
 {
-	if (lua_gettop(L) < 1)
-		return "unknown error";
-	if (lua_type(L, -1) != LUA_TSTRING)
-		return "unknown error";
-	return lua_tostring(L, -1);
+	luaL_unref(L, LUA_REGISTRYINDEX, ref);
 }
 
 __LJMP const char *hlua_traceback(lua_State *L, const char* sep)
@@ -738,7 +811,11 @@
 					break;
 
 				case ARGT_TAB:
-					argp[idx].data.prx = p;
+					if (!p->table) {
+						msg = "Mandatory argument expected";
+						goto error;
+					}
+					argp[idx].data.t = p->table;
 					argp[idx].type = ARGT_TAB;
 					argp[idx+1].type = ARGT_STOP;
 					break;
@@ -979,6 +1056,7 @@
 	return 0;
 
   error:
+	argp[idx].type = ARGT_STOP;
 	for (i = 0; i < idx; i++) {
 		if (argp[i].type == ARGT_STR)
 			chunk_destroy(&argp[i].data.str);
@@ -1049,7 +1127,7 @@
 /* This function just ensure that the yield will be always
  * returned with a timeout and permit to set some flags
  */
-__LJMP void hlua_yieldk(lua_State *L, int nresults, int ctx,
+__LJMP void hlua_yieldk(lua_State *L, int nresults, lua_KContext ctx,
                         lua_KFunction k, int timeout, unsigned int flags)
 {
 	struct hlua *hlua;
@@ -1079,21 +1157,12 @@
  * initialisation fails (example: out of memory error), the lua function
  * throws an error (longjmp).
  *
- * In some case (at least one), this function can be called from safe
- * environment, so we must not initialise it. While the support of
- * threads appear, the safe environment set a lock to ensure only one
- * Lua execution at a time. If we initialize safe environment in another
- * safe environment, we have a dead lock.
- *
- * set "already_safe" true if the context is initialized form safe
- * Lua function.
- *
  * This function manipulates two Lua stacks: the main and the thread. Only
  * the main stack can fail. The thread is not manipulated. This function
  * MUST NOT manipulate the created thread stack state, because it is not
  * protected against errors thrown by the thread stack.
  */
-int hlua_ctx_init(struct hlua *lua, int state_id, struct task *task, int already_safe)
+int hlua_ctx_init(struct hlua *lua, int state_id, struct task *task)
 {
 	lua->Mref = LUA_REFNIL;
 	lua->flags = 0;
@@ -1101,30 +1170,26 @@
 	lua->wake_time = TICK_ETERNITY;
 	lua->state_id = state_id;
 	LIST_INIT(&lua->com);
-	if (!already_safe) {
-		if (!SET_SAFE_LJMP_PARENT(lua)) {
-			lua->Tref = LUA_REFNIL;
-			return 0;
-		}
+	if (!SET_SAFE_LJMP_PARENT(lua)) {
+		lua->Tref = LUA_REFNIL;
+		return 0;
 	}
 	lua->T = lua_newthread(hlua_states[state_id]);
 	if (!lua->T) {
 		lua->Tref = LUA_REFNIL;
-		if (!already_safe)
-			RESET_SAFE_LJMP_PARENT(lua);
+		RESET_SAFE_LJMP_PARENT(lua);
 		return 0;
 	}
 	hlua_sethlua(lua);
 	lua->Tref = luaL_ref(hlua_states[state_id], LUA_REGISTRYINDEX);
 	lua->task = task;
-	if (!already_safe)
-		RESET_SAFE_LJMP_PARENT(lua);
+	RESET_SAFE_LJMP_PARENT(lua);
 	return 1;
 }
 
 /* Used to destroy the Lua coroutine when the attached stream or task
  * is destroyed. The destroy also the memory context. The struct "lua"
- * is not freed.
+ * will be freed.
  */
 void hlua_ctx_destroy(struct hlua *lua)
 {
@@ -1286,8 +1351,7 @@
 	/* Lock the whole Lua execution. This lock must be before the
 	 * label "resume_execution".
 	 */
-	if (lua->state_id == 0)
-		lua_take_global_lock();
+	hlua_lock(lua);
 
 resume_execution:
 
@@ -1434,8 +1498,7 @@
 	}
 
 	/* This is the main exit point, remove the Lua lock. */
-	if (lua->state_id == 0)
-		lua_drop_global_lock();
+	hlua_unlock(lua);
 
 	return ret;
 }
@@ -3765,7 +3828,9 @@
 	memset(&smp, 0, sizeof(smp));
 	hlua_lua2smp(L, 3, &smp);
 
-	/* Store the sample in a variable. */
+	/* Store the sample in a variable. We don't need to dup the smp, vars API
+	 * already takes care of duplicating dynamic var data.
+	 */
 	smp_set_owner(&smp, s->be, s->sess, s, 0);
 
 	if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
@@ -4250,7 +4315,9 @@
 	memset(&smp, 0, sizeof(smp));
 	hlua_lua2smp(L, 3, &smp);
 
-	/* Store the sample in a variable. */
+	/* Store the sample in a variable. We don't need to dup the smp, vars API
+	 * already takes care of duplicating dynamic var data.
+	 */
 	smp_set_owner(&smp, s->be, s->sess, s, 0);
 
 	if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
@@ -4421,7 +4488,7 @@
 	/* The message was fully consumed and no more data are expected
 	 * (EOM flag set).
 	 */
-	if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM))
+	if (htx_is_empty(htx) && (req->flags & CF_EOI))
 		stop = 1;
 
 	htx_to_buf(htx, &req->buf);
@@ -4513,7 +4580,7 @@
 	/* The message was fully consumed and no more data are expected
 	 * (EOM flag set).
 	 */
-	if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM))
+	if (htx_is_empty(htx) && (req->flags & CF_EOI))
 		len = 0;
 
 	htx_to_buf(htx, &req->buf);
@@ -5345,7 +5412,9 @@
 	memset(&smp, 0, sizeof(smp));
 	hlua_lua2smp(L, 3, &smp);
 
-	/* Store the sample in a variable. */
+	/* Store the sample in a variable. We don't need to dup the smp, vars API
+	 * already takes care of duplicating dynamic var data.
+	 */
 	smp_set_owner(&smp, htxn->p, htxn->s->sess, htxn->s, htxn->dir & SMP_OPT_DIR);
 
 	if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
@@ -6368,6 +6437,11 @@
 
 	MAY_LJMP(check_args(L, 1, "register_init"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_init: not available outside of body context"));
+	}
+
 	ref = MAY_LJMP(hlua_checkfunction(L, 1));
 
 	init = calloc(1, sizeof(*init));
@@ -6428,11 +6502,14 @@
 	task->context = hlua;
 	task->process = hlua_process_task;
 
-	if (!hlua_ctx_init(hlua, state_id, task, 1))
+	if (!hlua_ctx_init(hlua, state_id, task))
 		goto alloc_error;
 
 	/* Restore the function in the stack. */
 	lua_rawgeti(hlua->T, LUA_REGISTRYINDEX, ref);
+	/* function ref not needed anymore since it was pushed to the substack */
+	hlua_unref(L, ref);
+
 	hlua->nargs = 0;
 
 	/* Schedule task. */
@@ -6475,7 +6552,7 @@
 		}
 		HLUA_INIT(hlua);
 		stream->hlua = hlua;
-		if (!hlua_ctx_init(stream->hlua, fcn_ref_to_stack_id(fcn), stream->task, 0)) {
+		if (!hlua_ctx_init(stream->hlua, fcn_ref_to_stack_id(fcn), stream->task)) {
 			SEND_ERR(stream->be, "Lua converter '%s': can't initialize Lua context.\n", fcn->name);
 			return 0;
 		}
@@ -6543,6 +6620,10 @@
 
 		/* Convert the returned value in sample. */
 		hlua_lua2smp(stream->hlua->T, -1, smp);
+		/* dup the smp before popping the related lua value and
+		 * returning it to haproxy
+		 */
+		smp_dup(smp);
 		lua_pop(stream->hlua->T, 1);
 		return 1;
 
@@ -6612,7 +6693,7 @@
 		}
 		hlua->T = NULL;
 		stream->hlua = hlua;
-		if (!hlua_ctx_init(stream->hlua, fcn_ref_to_stack_id(fcn), stream->task, 0)) {
+		if (!hlua_ctx_init(stream->hlua, fcn_ref_to_stack_id(fcn), stream->task)) {
 			SEND_ERR(stream->be, "Lua sample-fetch '%s': can't initialize Lua context.\n", fcn->name);
 			return 0;
 		}
@@ -6678,6 +6759,10 @@
 
 		/* Convert the returned value in sample. */
 		hlua_lua2smp(stream->hlua->T, -1, smp);
+		/* dup the smp before popping the related lua value and
+		 * returning it to haproxy
+		 */
+		smp_dup(smp);
 		lua_pop(stream->hlua->T, 1);
 
 		/* Set the end of execution flag. */
@@ -6735,6 +6820,11 @@
 
 	MAY_LJMP(check_args(L, 2, "register_converters"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_converters: not available outside of body context"));
+	}
+
 	/* First argument : converter name. */
 	name = MAY_LJMP(luaL_checkstring(L, 1));
 
@@ -6814,6 +6904,11 @@
 
 	MAY_LJMP(check_args(L, 2, "register_fetches"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_fetches: not available outside of body context"));
+	}
+
 	/* First argument : sample-fetch name. */
 	name = MAY_LJMP(luaL_checkstring(L, 1));
 
@@ -6940,7 +7035,7 @@
 		}
 		HLUA_INIT(hlua);
 		s->hlua = hlua;
-		if (!hlua_ctx_init(s->hlua, fcn_ref_to_stack_id(rule->arg.hlua_rule->fcn), s->task, 0)) {
+		if (!hlua_ctx_init(s->hlua, fcn_ref_to_stack_id(rule->arg.hlua_rule->fcn), s->task)) {
 			SEND_ERR(px, "Lua action '%s': can't initialize Lua context.\n",
 			         rule->arg.hlua_rule->fcn->name);
 			goto end;
@@ -7125,7 +7220,7 @@
 	 * permits to save performances because a systematic
 	 * Lua initialization cause 5% performances loss.
 	 */
-	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task, 0)) {
+	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task)) {
 		SEND_ERR(px, "Lua applet tcp '%s': can't initialize Lua context.\n",
 		         ctx->rule->arg.hlua_rule->fcn->name);
 		return 0;
@@ -7318,7 +7413,7 @@
 	 * permits to save performances because a systematic
 	 * Lua initialization cause 5% performances loss.
 	 */
-	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task, 0)) {
+	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task)) {
 		SEND_ERR(px, "Lua applet http '%s': can't initialize Lua context.\n",
 		         ctx->rule->arg.hlua_rule->fcn->name);
 		return 0;
@@ -7661,6 +7756,11 @@
 	if (lua_gettop(L) < 3 || lua_gettop(L) > 4)
 		WILL_LJMP(luaL_error(L, "'register_action' needs between 3 and 4 arguments"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_action: not available outside of body context"));
+	}
+
 	/* First argument : converter name. */
 	name = MAY_LJMP(luaL_checkstring(L, 1));
 
@@ -7827,6 +7927,11 @@
 
 	MAY_LJMP(check_args(L, 3, "register_service"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_service: not available outside of body context"));
+	}
+
 	/* First argument : converter name. */
 	name = MAY_LJMP(luaL_checkstring(L, 1));
 
@@ -7944,7 +8049,7 @@
 	appctx->ctx.hlua_cli.task->process = hlua_applet_wakeup;
 
 	/* Initialises the Lua context */
-	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(fcn), appctx->ctx.hlua_cli.task, 0)) {
+	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(fcn), appctx->ctx.hlua_cli.task)) {
 		SEND_ERR(NULL, "Lua cli '%s': can't initialize Lua context.\n", fcn->name);
 		goto error;
 	}
@@ -8100,6 +8205,11 @@
 
 	MAY_LJMP(check_args(L, 3, "register_cli"));
 
+	if (hlua_gethlua(L)) {
+		/* runtime processing */
+		WILL_LJMP(luaL_error(L, "register_cli: not available outside of body context"));
+	}
+
 	/* First argument : an array of maximum 5 keywords. */
 	if (!lua_istable(L, 1))
 		WILL_LJMP(luaL_argerror(L, 1, "1st argument must be a table"));
@@ -8541,6 +8651,10 @@
 
 	list_for_each_entry(init, &hlua_init_functions[hlua_state_id], l) {
 		lua_rawgeti(L, LUA_REGISTRYINDEX, init->function_ref);
+		/* function ref should be released right away since it was pushed
+		 * on the stack and will not be used anymore
+		 */
+		hlua_unref(L, init->function_ref);
 
 #if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 504
 		ret = lua_resume(L, L, 0, &nres);
@@ -8779,6 +8893,13 @@
 	/* Init main lua stack. */
 	L = lua_newstate(hlua_alloc, &hlua_global_allocator);
 
+	if (!L) {
+		fprintf(stderr,
+		        "Lua init: critical error: lua_newstate() returned NULL."
+		        " This may possibly be caused by a memory allocation error.\n");
+		exit(1);
+	}
+
 	/* Initialise Lua context to NULL */
 	context = lua_getextraspace(L);
 	*context = NULL;
diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index 26aa509..010bd13 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -1305,6 +1305,7 @@
 	struct proxy *px;
 
 	px = hlua_check_proxy(L, 1);
+	/* safe to call without PROXY_LOCK - pause_proxy takes it */
 	pause_proxy(px);
 	return 0;
 }
@@ -1314,6 +1315,7 @@
 	struct proxy *px;
 
 	px = hlua_check_proxy(L, 1);
+	/* safe to call without PROXY_LOCK - resume_proxy takes it */
 	resume_proxy(px);
 	return 0;
 }
@@ -1323,6 +1325,7 @@
 	struct proxy *px;
 
 	px = hlua_check_proxy(L, 1);
+	/* safe to call without PROXY_LOCK - stop_proxy takes it */
 	stop_proxy(px);
 	return 0;
 }
diff --git a/src/hpack-dec.c b/src/hpack-dec.c
index 4fa9bfd..ed39007 100644
--- a/src/hpack-dec.c
+++ b/src/hpack-dec.c
@@ -32,6 +32,7 @@
 
 #include <import/ist.h>
 #include <haproxy/chunk.h>
+#include <haproxy/global.h>
 #include <haproxy/h2.h>
 #include <haproxy/hpack-dec.h>
 #include <haproxy/hpack-huff.h>
@@ -419,6 +420,15 @@
 			/* <name> and <value> are correctly filled here */
 		}
 
+		/* We must not accept empty header names (forbidden by the spec and used
+		 * as a list termination).
+		 */
+		if (!name.len) {
+			hpack_debug_printf("##ERR@%d##\n", __LINE__);
+			ret = -HPACK_ERR_INVALID_ARGUMENT;
+			goto leave;
+		}
+
 		/* here's what we have here :
 		 *   - name.len > 0
 		 *   - value is filled with either const data or data allocated from tmp
diff --git a/src/http.c b/src/http.c
index 0b00e47..647f433 100644
--- a/src/http.c
+++ b/src/http.c
@@ -403,7 +403,7 @@
 	case 226: return "IM Used";
 	case 300: return "Multiple Choices";
 	case 301: return "Moved Permanently";
-	case 302: return "Moved Temporarily";
+	case 302: return "Found";
 	case 303: return "See Other";
 	case 304: return "Not Modified";
 	case 305: return "Use Proxy";
@@ -468,6 +468,38 @@
 	}
 }
 
+/* Returns the ist string corresponding to the port part (without ':') in the
+ * host <host> or IST_NULL if not found.
+ */
+struct ist http_get_host_port(const struct ist host)
+{
+	char *start, *end, *ptr;
+
+	start = istptr(host);
+	end = istend(host);
+	for (ptr = end; ptr > start && isdigit((unsigned char)*--ptr););
+
+	/* no port found */
+	if (likely(*ptr != ':' || ptr+1 == end || ptr == start))
+		return IST_NULL;
+
+	return istnext(ist2(ptr, end - ptr));
+}
+
+
+/* Return non-zero if the port <port> is a default port. If the scheme <schm> is
+ * set, it is used to detect default ports (HTTP => 80 and HTTPS => 443)
+ * port. Otherwise, both are considered as default ports.
+ */
+int http_is_default_port(const struct ist schm, const struct ist port)
+{
+	if (!isttest(schm))
+		return (isteq(port, ist("443")) || isteq(port, ist("80")));
+	else
+		return (isteq(port, ist("443")) && isteqi(schm, ist("https://"))) ||
+			(isteq(port, ist("80")) && isteqi(schm, ist("http://")));
+}
+
 /* Returns non-zero if the scheme <schm> is syntactically correct according to
  * RFC3986#3.1, otherwise zero. It expects only the scheme and nothing else
  * (particularly not the following "://").
diff --git a/src/http_act.c b/src/http_act.c
index 8574a7d..59e614f 100644
--- a/src/http_act.c
+++ b/src/http_act.c
@@ -1456,7 +1456,7 @@
 static enum act_return http_action_early_hint(struct act_rule *rule, struct proxy *px,
 					      struct session *sess, struct stream *s, int flags)
 {
-	struct act_rule *prev_rule, *next_rule;
+	struct act_rule *next_rule;
 	struct channel *res = &s->res;
 	struct htx *htx = htx_from_buf(&res->buf);
 	struct buffer *value = alloc_trash_chunk();
@@ -1471,13 +1471,10 @@
 		goto error;
 	}
 
-	/* get previous and next rules */
-	prev_rule = LIST_PREV(&rule->list, typeof(rule), list);
-	next_rule = LIST_NEXT(&rule->list, typeof(rule), list);
-
-	/* if no previous rule or previous rule is not early-hint, start a new response. Otherwise,
-	 * continue to add link to a previously started response */
-	if (&prev_rule->list == s->current_rule_list || prev_rule->action_ptr != http_action_early_hint) {
+	/* if there is no pending 103 response, start a new response. Otherwise,
+	 * continue to add links to a previously started response
+	 */
+	if (s->txn->status != 103) {
 		struct htx_sl *sl;
 		unsigned int flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|
 				      HTX_SL_F_XFER_LEN|HTX_SL_F_BODYLESS);
@@ -1487,6 +1484,7 @@
 		if (!sl)
 			goto error;
 		sl->info.res.status = 103;
+		s->txn->status = 103;
 	}
 
 	/* Add the HTTP Early Hint HTTP 103 response heade */
@@ -1494,18 +1492,16 @@
 	if (!htx_add_header(htx, rule->arg.http.str, ist2(b_head(value), b_data(value))))
 		goto error;
 
-	/* if it is the last rule or the next one is not an early-hint, terminate the current
-	 * response. */
-	if (&next_rule->list == s->current_rule_list || next_rule->action_ptr != http_action_early_hint) {
-		if (!htx_add_endof(htx, HTX_BLK_EOH)) {
-			/* If an error occurred during an Early-hint rule,
-			 * remove the incomplete HTTP 103 response from the
-			 * buffer */
+	/* if it is the last rule or the next one is not an early-hint or a
+	 * conditional early-hint, terminate the current response.
+	 */
+	next_rule = LIST_NEXT(&rule->list, typeof(rule), list);
+	if (&next_rule->list == s->current_rule_list || next_rule->action_ptr != http_action_early_hint || next_rule->cond) {
+		if (!htx_add_endof(htx, HTX_BLK_EOH))
 			goto error;
-		}
-
 		if (!http_forward_proxy_resp(s, 0))
 			goto error;
+		s->txn->status = 0;
 	}
 
   leave:
@@ -1517,6 +1513,7 @@
 	 * HTTP 103 response from the buffer */
 	channel_htx_truncate(res, htx);
 	ret = ACT_RET_ERR;
+	s->txn->status = 0;
 	goto leave;
 }
 
@@ -1880,6 +1877,8 @@
 	free(redir->cookie_str);
 	list_for_each_entry_safe(lf, lfb, &redir->rdr_fmt, list) {
 		LIST_DELETE(&lf->list);
+		release_sample_expr(lf->expr);
+		free(lf->arg);
 		free(lf);
 	}
 	free(redir);
diff --git a/src/http_ana.c b/src/http_ana.c
index 48b25c3..b557da8 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -58,6 +58,7 @@
 
 static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream *s);
 static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream *s);
+static enum rule_result http_req_restrict_header_names(struct stream *s, struct htx *htx, struct proxy *px);
 
 static void http_manage_client_side_cookies(struct stream *s, struct channel *req);
 static void http_manage_server_side_cookies(struct stream *s, struct channel *res);
@@ -394,6 +395,12 @@
 		}
 	}
 
+	if (px->options2 & (PR_O2_RSTRICT_REQ_HDR_NAMES_BLK|PR_O2_RSTRICT_REQ_HDR_NAMES_DEL)) {
+		verdict = http_req_restrict_header_names(s, htx, px);
+		if (verdict == HTTP_RULE_RES_DENY)
+			goto deny;
+	}
+
 	if (conn && (conn->flags & CO_FL_EARLY_DATA) &&
 	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS))) {
 		struct http_hdr_ctx ctx;
@@ -401,7 +408,7 @@
 		ctx.blk = NULL;
 		if (!http_find_header(htx, ist("Early-Data"), &ctx, 0)) {
 			if (unlikely(!http_add_header(htx, ist("Early-Data"), ist("1"))))
-				goto return_int_err;
+				goto return_fail_rewrite;
 		}
 	}
 
@@ -546,6 +553,18 @@
 		_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
 	goto return_prx_err;
 
+ return_fail_rewrite:
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+	if (s->flags & SF_BE_ASSIGNED)
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+	/* fall through */
+
  return_int_err:
 	txn->status = 500;
 	if (!(s->flags & SF_ERR_MASK))
@@ -669,7 +688,7 @@
 		/* send unique ID if a "unique-id-header" is defined */
 		if (isttest(sess->fe->header_unique_id) &&
 		    unlikely(!http_add_header(htx, sess->fe->header_unique_id, s->unique_id)))
-				goto return_int_err;
+				goto return_fail_rewrite;
 	}
 
 	/*
@@ -702,7 +721,7 @@
 				 */
 				chunk_printf(&trash, "%d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
 				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
-					goto return_int_err;
+					goto return_fail_rewrite;
 			}
 		}
 		else if (cli_conn && conn_get_src(cli_conn) && cli_conn->src->ss_family == AF_INET6) {
@@ -724,7 +743,7 @@
 				 */
 				chunk_printf(&trash, "%s", pn);
 				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
-					goto return_int_err;
+					goto return_fail_rewrite;
 			}
 		}
 	}
@@ -752,7 +771,7 @@
 				 */
 				chunk_printf(&trash, "%d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
 				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
-					goto return_int_err;
+					goto return_fail_rewrite;
 			}
 		}
 		else if (cli_conn && conn_get_dst(cli_conn) && cli_conn->dst->ss_family == AF_INET6) {
@@ -774,7 +793,7 @@
 				 */
 				chunk_printf(&trash, "%s", pn);
 				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
-					goto return_int_err;
+					goto return_fail_rewrite;
 			}
 		}
 	}
@@ -819,6 +838,18 @@
 	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
 	return 1;
 
+ return_fail_rewrite:
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+	if (s->flags & SF_BE_ASSIGNED)
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+	/* fall through */
+
  return_int_err:
 	txn->status = 500;
 	if (!(s->flags & SF_ERR_MASK))
@@ -907,9 +938,8 @@
 {
 	struct session *sess = s->sess;
 	struct http_txn *txn = s->txn;
-	struct http_msg *msg = &s->txn->req;
 
-	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, &s->txn->req);
 
 
 	switch (http_wait_for_msg_body(s, req, s->be->timeout.httpreq, 0)) {
@@ -967,7 +997,7 @@
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_PRXCOND;
 	if (!(s->flags & SF_FINST_MASK))
-		s->flags |= (msg->msg_state < HTTP_MSG_DATA ? SF_FINST_R : SF_FINST_D);
+		s->flags |= SF_FINST_R;
 
 	req->analysers &= AN_REQ_FLT_END;
 	req->analyse_exp = TICK_ETERNITY;
@@ -1360,6 +1390,13 @@
 			if (objt_cs(s->si[1].end))
 				conn = __objt_cs(s->si[1].end)->conn;
 
+			if ((si_b->flags & SI_FL_L7_RETRY) &&
+			    (s->be->retry_type & PR_RE_DISCONNECTED) &&
+			    (!conn || conn->err_code != CO_ER_SSL_EARLY_FAILED)) {
+				if (co_data(rep) || do_l7_retry(s, si_b) == 0)
+					return 0;
+			}
+
 			/* Perform a L7 retry because server refuses the early data. */
 			if ((si_b->flags & SI_FL_L7_RETRY) &&
 			    (s->be->retry_type & PR_RE_EARLY_ERROR) &&
@@ -1955,7 +1992,7 @@
 			chunk_appendf(&trash, "; %s", s->be->cookie_attrs);
 
 		if (unlikely(!http_add_header(htx, ist("Set-Cookie"), ist2(trash.area, trash.data))))
-			goto return_int_err;
+			goto return_fail_rewrite;
 
 		txn->flags &= ~TX_SCK_MASK;
 		if (__objt_server(s->target)->cookie && (s->flags & SF_DIRECT))
@@ -1974,7 +2011,7 @@
 			txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
 
 			if (unlikely(!http_add_header(htx, ist("Cache-control"), ist("private"))))
-				goto return_int_err;
+				goto return_fail_rewrite;
 		}
 	}
 
@@ -2043,6 +2080,17 @@
 		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.denied_resp);
 	goto return_prx_err;
 
+ return_fail_rewrite:
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+	/* fall through */
+
  return_int_err:
 	txn->status = 500;
 	if (!(s->flags & SF_ERR_MASK))
@@ -2595,6 +2643,54 @@
 	return 0;
 }
 
+/* This function filters the request header names to only allow [0-9a-zA-Z-]
+ * characters. Depending on the proxy configuration, headers with a name not
+ * matching this charset are removed or the request is rejected with a
+ * 403-Forbidden response if such names are found. It returns HTTP_RULE_RES_CONT
+ * to continue the request processing or HTTP_RULE_RES_DENY if the request is
+ * rejected.
+ */
+static enum rule_result http_req_restrict_header_names(struct stream *s, struct htx *htx, struct proxy *px)
+{
+	struct htx_blk *blk;
+	enum rule_result rule_ret = HTTP_RULE_RES_CONT;
+
+	blk = htx_get_first_blk(htx);
+	while (blk) {
+		enum htx_blk_type type = htx_get_blk_type(blk);
+
+		if (type == HTX_BLK_HDR) {
+			struct ist n = htx_get_blk_name(htx, blk);
+			int i, end = istlen(n);
+
+			for (i = 0; i < end; i++) {
+				if (!isalnum((unsigned char)n.ptr[i]) && n.ptr[i] != '-') {
+					break;
+				}
+			}
+
+			if (i < end) {
+				/* Disallowed character found - block the request or remove the header */
+				if (px->options2 & PR_O2_RSTRICT_REQ_HDR_NAMES_BLK)
+					goto block;
+				blk = htx_remove_blk(htx, blk);
+				continue;
+			}
+		}
+		if (type == HTX_BLK_EOH)
+			break;
+
+		blk = htx_get_next_blk(htx, blk);
+	}
+  out:
+	return rule_ret;
+  block:
+	/* Block the request returning a 403-Forbidden response */
+	s->txn->status = 403;
+	rule_ret = HTTP_RULE_RES_DENY;
+	goto out;
+}
+
 /* Replace all headers matching the name <name>. The header value is replaced if
  * it matches the regex <re>. <str> is used for the replacement. If <full> is
  * set to 1, the full-line is matched and replaced. Otherwise, comma-separated
@@ -2604,10 +2700,11 @@
 		     const char *str, struct my_regex *re, int full)
 {
 	struct http_hdr_ctx ctx;
-	struct buffer *output = get_trash_chunk();
 
 	ctx.blk = NULL;
 	while (http_find_header(htx, name, &ctx, full)) {
+		struct buffer *output = get_trash_chunk();
+
 		if (!regex_exec_match2(re, ctx.value.ptr, ctx.value.len, MAX_MATCH, pmatch, 0))
 			continue;
 
@@ -2692,6 +2789,7 @@
 
 	if (!http_replace_res_status(htx, ist2(trash.area, trash.data), reason))
 		return -1;
+	s->txn->status = status;
 	return 0;
 }
 
@@ -3772,6 +3870,7 @@
 	struct htx *htx;
 	int has_freshness_info = 0;
 	int has_validator = 0;
+	int has_null_maxage = 0;
 
 	if (txn->status < 200) {
 		/* do not try to cache interim responses! */
@@ -3796,10 +3895,16 @@
 			txn->flags |= TX_CACHEABLE | TX_CACHE_COOK;
 			continue;
 		}
+		/* This max-age might be overridden by a s-maxage directive, do
+		 * not unset the TX_CACHEABLE yet. */
+		if (isteqi(ctx.value, ist("max-age=0"))) {
+			has_null_maxage = 1;
+			continue;
+		}
+
 		if (isteqi(ctx.value, ist("private")) ||
 		    isteqi(ctx.value, ist("no-cache")) ||
 		    isteqi(ctx.value, ist("no-store")) ||
-		    isteqi(ctx.value, ist("max-age=0")) ||
 		    isteqi(ctx.value, ist("s-maxage=0"))) {
 			txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
 			continue;
@@ -3810,11 +3915,21 @@
 			continue;
 		}
 
-		if (istmatchi(ctx.value, ist("s-maxage")) ||
-		    istmatchi(ctx.value, ist("max-age"))) {
+		if (istmatchi(ctx.value, ist("s-maxage"))) {
 			has_freshness_info = 1;
+			has_null_maxage = 0;	/* The null max-age is overridden, ignore it */
 			continue;
 		}
+		if (istmatchi(ctx.value, ist("max-age"))) {
+			has_freshness_info = 1;
+			continue;
+		}
+	}
+
+	/* We had a 'max-age=0' directive but no extra s-maxage, do not cache
+	 * the response. */
+	if (has_null_maxage) {
+		txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
 	}
 
 	/* If no freshness information could be found in Cache-Control values,
@@ -3839,7 +3954,7 @@
 	/* We won't store an entry that has neither a cache validator nor an
 	 * explicit expiration time, as suggested in RFC 7234#3. */
 	if (!has_freshness_info && !has_validator)
-		txn->flags |= TX_CACHE_IGNORE;
+		txn->flags &= ~TX_CACHEABLE;
 }
 
 /*
@@ -4155,7 +4270,7 @@
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_CLITO;
 	if (!(s->flags & SF_FINST_MASK))
-		s->flags |= SF_FINST_D;
+		s->flags |= SF_FINST_R;
 	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
 	if (sess->listener && sess->listener->counters)
 		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
@@ -4168,7 +4283,7 @@
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_SRVTO;
 	if (!(s->flags & SF_FINST_MASK))
-		s->flags |= SF_FINST_D;
+		s->flags |= SF_FINST_R;
 	stream_inc_http_fail_ctr(s);
 	http_reply_and_close(s, txn->status, http_error_message(s));
 	ret = HTTP_RULE_RES_ABRT;
@@ -4321,10 +4436,6 @@
 			 * poll for reads.
 			 */
 			channel_auto_read(chn);
-			if (b_data(&chn->buf)) {
-				DBG_TRACE_DEVEL("waiting to flush the request", STRM_EV_HTTP_ANA, s, txn);
-				return;
-			}
 			txn->req.msg_state = HTTP_MSG_TUNNEL;
 		}
 		else {
@@ -4444,10 +4555,6 @@
 		 */
 		if (txn->flags & TX_CON_WANT_TUN) {
 			channel_auto_read(chn);
-			if (b_data(&chn->buf)) {
-				DBG_TRACE_DEVEL("waiting to flush the respone", STRM_EV_HTTP_ANA, s, txn);
-				return;
-			}
 			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
 		}
 		else {
@@ -5106,6 +5213,7 @@
 		return NULL;
 	s->txn = txn;
 
+	txn->meth = HTTP_METH_OTHER;
 	txn->flags = ((cs && cs->flags & CS_FL_NOT_FIRST) ? TX_NOT_FIRST : 0);
 	txn->status = -1;
 	txn->http_reply = NULL;
@@ -5126,8 +5234,10 @@
 
 	txn->auth.method = HTTP_AUTH_UNKNOWN;
 
-	vars_init(&s->vars_txn,    SCOPE_TXN);
-	vars_init(&s->vars_reqres, SCOPE_REQ);
+	/* here we don't want to re-initialize s->vars_txn and s->vars_reqres
+	 * variable lists, because they were already initialized upon stream
+	 * creation in stream_new(), and thus may already contain some variables
+	 */
 
 	return txn;
 }
diff --git a/src/http_fetch.c b/src/http_fetch.c
index 798f7f6..6678c85 100644
--- a/src/http_fetch.c
+++ b/src/http_fetch.c
@@ -222,7 +222,7 @@
 	if (IS_HTX_STRM(s)) {
 		htx = htxbuf(&chn->buf);
 
-		if (msg->msg_state == HTTP_MSG_ERROR || (htx->flags & HTX_FL_PARSING_ERROR))
+		if (htx->flags & HTX_FL_PARSING_ERROR)
 			return NULL;
 
 		if (msg->msg_state < HTTP_MSG_BODY) {
@@ -307,7 +307,7 @@
 			if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
 				s->flags |= SF_REDIRECTABLE;
 		}
-		else
+		else if (txn->status == -1)
 			txn->status = sl->info.res.status;
 		if (sl->flags & HTX_SL_F_VER_11)
 			msg->flags |= HTTP_MSGF_VER_11;
@@ -330,27 +330,25 @@
 {
 	struct channel *chn = SMP_REQ_CHN(smp);
 	struct http_txn *txn;
+	struct htx *htx = NULL;
 	int meth;
 
-	txn = smp->strm->txn;
+	txn = (smp->strm ? smp->strm->txn : NULL);
 	if (!txn)
 		return 0;
 
 	meth = txn->meth;
-	smp->data.type = SMP_T_METH;
-	smp->data.u.meth.meth = meth;
 	if (meth == HTTP_METH_OTHER) {
-		struct htx *htx;
-		struct htx_sl *sl;
-
-		if ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) {
-			/* ensure the indexes are not affected */
-			return 0;
-		}
-
 		htx = smp_prefetch_htx(smp, chn, NULL, 1);
 		if (!htx)
 			return 0;
+		meth = txn->meth;
+	}
+
+	smp->data.type = SMP_T_METH;
+	smp->data.u.meth.meth = meth;
+	if (meth == HTTP_METH_OTHER) {
+		struct htx_sl *sl;
 
 		sl = http_get_stline(htx);
 		smp->flags |= SMP_F_CONST;
diff --git a/src/http_htx.c b/src/http_htx.c
index f0e0a43..60525bb 100644
--- a/src/http_htx.c
+++ b/src/http_htx.c
@@ -388,6 +388,9 @@
 		goto fail;
 
 	sl = http_get_stline(htx);
+	ALREADY_CHECKED(sl); /* the stline exists because http_replace_stline() succeeded */
+	sl->flags &= ~HTX_SL_F_NORMALIZED_URI;
+
 	if (!http_update_host(htx, sl, uri))
 		goto fail;
 
@@ -918,7 +921,7 @@
 	ret = h1_headers_to_hdr_list(raw.ptr, raw.ptr + raw.len,
 				     hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &h1m, &h1sl);
 	if (ret <= 0) {
-		memprintf(errmsg, "unabled to parse headers (error offset: %d)", h1m.err_pos);
+		memprintf(errmsg, "unable to parse headers (error offset: %d)", h1m.err_pos);
 		goto error;
 	}
 
@@ -1565,6 +1568,7 @@
 			fd = -1;
 			obj[objlen] = '\0';
 			reply->type = HTTP_REPLY_LOGFMT;
+			LIST_INIT(&reply->body.fmt);
 			cur_arg++;
 		}
 		else if (strcmp(args[cur_arg], "lf-string") == 0) {
@@ -1581,6 +1585,7 @@
 			obj = strdup(args[cur_arg]);
 			objlen = strlen(args[cur_arg]);
 			reply->type = HTTP_REPLY_LOGFMT;
+			LIST_INIT(&reply->body.fmt);
 			cur_arg++;
 		}
 		else if (strcmp(args[cur_arg], "hdr") == 0) {
@@ -1721,12 +1726,6 @@
 	return NULL;
 }
 
-static int uri_is_default_port(const struct ist scheme, const struct ist port)
-{
-	return (isteq(port, ist("443")) && isteqi(scheme, ist("https://"))) ||
-	        (isteq(port, ist("80")) && isteqi(scheme, ist("http://")));
-}
-
 /* Apply schemed-based normalization as described on rfc3986 on section 6.3.2.
  * Returns 0 if no error has been found else non-zero.
  *
@@ -1741,7 +1740,6 @@
 	struct http_hdr_ctx ctx;
 	struct htx_sl *sl;
 	struct ist uri, scheme, authority, host, port;
-	char *start, *end, *ptr;
 
 	sl = http_get_stline(htx);
 
@@ -1755,28 +1753,19 @@
 	if (!isttest(scheme))
 		return 0;
 
-	/* Extract the port if present in authority. To properly support ipv6
-	 * hostnames, do a reverse search on the last ':' separator as long as
-	 * digits are found.
-	 */
-	authority = http_get_authority(uri, 0);
-	start = istptr(authority);
-	end = istend(authority);
-	for (ptr = end; ptr > start && isdigit((unsigned char)*--ptr); )
-		;
-
-	/* if no port found, no normalization to proceed */
-	if (likely(*ptr != ':'))
+	/* Extract the port if present in authority */
+	authority = http_get_authority(uri, 1);
+	port = http_get_host_port(authority);
+	if (!isttest(port)) {
+		/* if no port found, no normalization to proceed */
 		return 0;
-
-	/* split host/port on the ':' separator found */
-	host = ist2(start, ptr - start);
-	port = istnext(ist2(ptr, end - ptr));
+	}
+	host = isttrim(authority, istlen(authority) - istlen(port) - 1);
 
-	if (istlen(port) && uri_is_default_port(scheme, port)) {
+	if (istlen(port) && http_is_default_port(scheme, port)) {
 		/* reconstruct the uri with removal of the port */
 		struct buffer *temp = get_trash_chunk();
-		struct ist meth, vsn, path;
+		struct ist meth, vsn;
 
 		/* meth */
 		chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl));
@@ -1787,12 +1776,10 @@
 		vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
 
 		/* reconstruct uri without port */
-		path = http_get_path(uri);
-		chunk_istcat(temp, scheme);
+		chunk_memcat(temp, uri.ptr, authority.ptr - uri.ptr);
 		chunk_istcat(temp, host);
-		chunk_istcat(temp, path);
-		uri = ist2(temp->area + meth.len + vsn.len,
-		           scheme.len + host.len + path.len);
+		chunk_memcat(temp, istend(authority), istend(uri) - istend(authority));
+		uri = ist2(temp->area + meth.len + vsn.len, host.len + uri.len - authority.len); /* uri */
 
 		http_replace_stline(htx, meth, uri, vsn);
 
diff --git a/src/http_rules.c b/src/http_rules.c
index 1b21133..52b77c6 100644
--- a/src/http_rules.c
+++ b/src/http_rules.c
@@ -301,6 +301,26 @@
 	return NULL;
 }
 
+/* completely free redirect rule */
+void http_free_redirect_rule(struct redirect_rule *rdr)
+{
+	struct logformat_node *lf, *lfb;
+
+	if (rdr->cond) {
+		prune_acl_cond(rdr->cond);
+		free(rdr->cond);
+	}
+	free(rdr->rdr_str);
+	free(rdr->cookie_str);
+	list_for_each_entry_safe(lf, lfb, &rdr->rdr_fmt, list) {
+		LIST_DELETE(&lf->list);
+		release_sample_expr(lf->expr);
+		free(lf->arg);
+		free(lf);
+	}
+	free(rdr);
+}
+
 /* Parses a redirect rule. Returns the redirect rule on success or NULL on error,
  * with <err> filled with the error message. If <use_fmt> is not null, builds a
  * dynamic log-format rule instead of a static string. Parameter <dir> indicates
@@ -309,7 +329,7 @@
 struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, struct proxy *curproxy,
                                                const char **args, char **errmsg, int use_fmt, int dir)
 {
-	struct redirect_rule *rule;
+	struct redirect_rule *rule = NULL;
 	int cur_arg;
 	int type = REDIRECT_TYPE_NONE;
 	int code = 302;
@@ -370,7 +390,7 @@
 				memprintf(errmsg,
 				          "'%s': unsupported HTTP code '%s' (must be one of 301, 302, 303, 307 or 308)",
 				          args[cur_arg - 1], args[cur_arg]);
-				return NULL;
+				goto err;
 			}
 		}
 		else if (strcmp(args[cur_arg], "drop-query") == 0) {
@@ -384,7 +404,7 @@
 			cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + cur_arg, errmsg);
 			if (!cond) {
 				memprintf(errmsg, "error in condition: %s", *errmsg);
-				return NULL;
+				goto err;
 			}
 			break;
 		}
@@ -392,32 +412,32 @@
 			memprintf(errmsg,
 			          "expects 'code', 'prefix', 'location', 'scheme', 'set-cookie', 'clear-cookie', 'drop-query' or 'append-slash' (was '%s')",
 			          args[cur_arg]);
-			return NULL;
+			goto err;
 		}
 		cur_arg++;
 	}
 
 	if (type == REDIRECT_TYPE_NONE) {
 		memprintf(errmsg, "redirection type expected ('prefix', 'location', or 'scheme')");
-		return NULL;
+		goto err;
 	}
 
 	if (dir && type != REDIRECT_TYPE_LOCATION) {
 		memprintf(errmsg, "response only supports redirect type 'location'");
-		return NULL;
+		goto err;
 	}
 
 	rule = calloc(1, sizeof(*rule));
-	if (!rule) {
-		memprintf(errmsg, "parsing [%s:%d]: out of memory.", file, linenum);
-		return NULL;
-	}
+	if (!rule)
+		goto out_of_memory;
 	rule->cond = cond;
 	LIST_INIT(&rule->rdr_fmt);
 
 	if (!use_fmt) {
 		/* old-style static redirect rule */
 		rule->rdr_str = strdup(destination);
+		if (!rule->rdr_str)
+			goto out_of_memory;
 		rule->rdr_len = strlen(destination);
 	}
 	else {
@@ -435,7 +455,7 @@
 			cap |= (dir ? SMP_VAL_BE_HRS_HDR : SMP_VAL_BE_HRQ_HDR);
 		if (!(type == REDIRECT_TYPE_PREFIX && destination[0] == '/' && destination[1] == '\0')) {
 			if (!parse_logformat_string(destination, curproxy, &rule->rdr_fmt, LOG_OPT_HTTP, cap, errmsg)) {
-				return  NULL;
+				goto err;
 			}
 			free(curproxy->conf.lfs_file);
 			curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
@@ -450,11 +470,15 @@
 		rule->cookie_len = strlen(cookie);
 		if (cookie_set) {
 			rule->cookie_str = malloc(rule->cookie_len + 10);
+			if (!rule->cookie_str)
+				goto out_of_memory;
 			memcpy(rule->cookie_str, cookie, rule->cookie_len);
 			memcpy(rule->cookie_str + rule->cookie_len, "; path=/;", 10);
 			rule->cookie_len += 9;
 		} else {
 			rule->cookie_str = malloc(rule->cookie_len + 21);
+			if (!rule->cookie_str)
+				goto out_of_memory;
 			memcpy(rule->cookie_str, cookie, rule->cookie_len);
 			memcpy(rule->cookie_str + rule->cookie_len, "; path=/; Max-Age=0;", 21);
 			rule->cookie_len += 20;
@@ -468,6 +492,18 @@
 
  missing_arg:
 	memprintf(errmsg, "missing argument for '%s'", args[cur_arg]);
+	goto err;
+ out_of_memory:
+	memprintf(errmsg, "parsing [%s:%d]: out of memory.", file, linenum);
+ err:
+	if (rule)
+		http_free_redirect_rule(rule);
+	else if (cond) {
+		/* rule not yet allocated, but cond already is */
+		prune_acl_cond(cond);
+		free(cond);
+	}
+
 	return NULL;
 }
 
diff --git a/src/listener.c b/src/listener.c
index 0ffd0fe..cd295e9 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -45,6 +45,7 @@
 /* list of the temporarily limited listeners because of lack of resource */
 static struct mt_list global_listener_queue = MT_LIST_HEAD_INIT(global_listener_queue);
 static struct task *global_listener_queue_task;
+__decl_thread(static HA_RWLOCK_T global_listener_rwlock);
 
 /* listener status for stats */
 const char* li_status_st[LI_STATE_COUNT] = {
@@ -250,6 +251,7 @@
 		case LI_LIMITED:
 			BUG_ON(l->rx.fd == -1);
 			_HA_ATOMIC_INC(&px->li_ready);
+			l->flags |= LI_F_FINALIZED;
 			break;
 		}
 	}
@@ -296,12 +298,13 @@
 }
 
 /*
- * This function completely stops a listener. It will need to operate under the
- * proxy's lock, the protocol's lock, and the listener's lock. The caller is
- * responsible for indicating in lpx, lpr, lli whether the respective locks are
- * already held (non-zero) or not (zero) so that the function picks the missing
- * ones, in this order. The proxy's listeners count is updated and the proxy is
+ * This function completely stops a listener.
+ * The proxy's listeners count is updated and the proxy is
  * disabled and woken up after the last one is gone.
+ * It will need to operate under the proxy's lock, the protocol's lock and
+ * the listener's lock. The caller is responsible for indicating in lpx,
+ * lpr, lli whether the respective locks are already held (non-zero) or
+ * not (zero) so that the function picks the missing ones, in this order.
  */
 void stop_listener(struct listener *l, int lpx, int lpr, int lli)
 {
@@ -314,7 +317,7 @@
 		return;
 	}
 
-	if (!lpx)
+	if (!lpx && px)
 		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
 
 	if (!lpr)
@@ -329,7 +332,8 @@
 		if (l->state >= LI_ASSIGNED)
 			__delete_listener(l);
 
-		proxy_cond_disable(px);
+		if (px)
+			proxy_cond_disable(px);
 	}
 
 	if (!lli)
@@ -338,7 +342,7 @@
 	if (!lpr)
 		HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
 
-	if (!lpx)
+	if (!lpx && px)
 		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
 }
 
@@ -359,19 +363,17 @@
 
 /* default function called to suspend a listener: it simply passes the call to
  * the underlying receiver. This is find for most socket-based protocols. This
- * must be called under the listener's lock. It will return non-zero on success,
- * 0 on failure. If no receiver-level suspend is provided, the operation is
- * assumed to succeed.
+ * must be called under the listener's lock. It will return < 0 in case of
+ * failure, 0 if the listener was totally stopped, or > 0 if correctly paused.
+ * If no receiver-level suspend is provided, the operation is assumed
+ * to succeed.
  */
 int default_suspend_listener(struct listener *l)
 {
-	int ret = 1;
-
 	if (!l->rx.proto->rx_suspend)
 		return 1;
 
-	ret = l->rx.proto->rx_suspend(&l->rx);
-	return ret > 0 ? ret : 0;
+	return l->rx.proto->rx_suspend(&l->rx);
 }
 
 
@@ -389,8 +391,28 @@
 
 	if (l->state == LI_ASSIGNED) {
 		char msg[100];
+		char *errmsg;
 		int err;
 
+		/* first, try to bind the receiver */
+		err = l->rx.proto->fam->bind(&l->rx, &errmsg);
+		if (err != ERR_NONE) {
+			if (err & ERR_WARN)
+				ha_warning("Resuming listener: %s\n", errmsg);
+			else if (err & ERR_ALERT)
+				ha_alert("Resuming listener: %s\n", errmsg);
+			ha_free(&errmsg);
+			if (err & (ERR_FATAL | ERR_ABORT)) {
+				ret = 0;
+				goto end;
+			}
+		}
+
+		/* then, try to listen:
+		 * for now there's still always a listening function
+		 * (same check performed in protocol_bind_all())
+		 */
+		BUG_ON(!l->rx.proto->listen);
 		err = l->rx.proto->listen(l, msg, sizeof(msg));
 		if (err & ERR_ALERT)
 			ha_alert("Resuming listener: %s\n", msg);
@@ -422,34 +444,68 @@
  * closes upon SHUT_WR and refuses to rebind. So a common validation path
  * involves SHUT_WR && listen && SHUT_RD. In case of success, the FD's polling
  * is disabled. It normally returns non-zero, unless an error is reported.
+ * suspend() may totally stop a listener if it doesn't support the PAUSED
+ * state, in which case state will be set to ASSIGNED.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function pick
+ * the missing ones, in this order.
  */
-int pause_listener(struct listener *l)
+int suspend_listener(struct listener *l, int lpx, int lli)
 {
 	struct proxy *px = l->bind_conf->frontend;
 	int ret = 1;
 
-	HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
+	if (!lpx && px)
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+
+	if (!lli)
+		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
 
 	if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
 	    !(proc_mask(l->rx.settings->bind_proc) & pid_bit))
 		goto end;
 
-	if (l->state <= LI_PAUSED)
+	if (!(l->flags & LI_F_FINALIZED) || l->state <= LI_PAUSED)
 		goto end;
 
-	if (l->rx.proto->suspend)
+	if (l->rx.proto->suspend) {
 		ret = l->rx.proto->suspend(l);
+		/* if the suspend() fails, we don't want to change the
+		 * current listener state
+		 */
+		if (ret < 0)
+			goto end;
+	}
 
 	MT_LIST_DELETE(&l->wait_queue);
 
-	listener_set_state(l, LI_PAUSED);
+	/* ret == 0 means that the suspend() has been turned into
+	 * an unbind(), meaning the listener is now stopped (ie: ABNS), we need
+	 * to report this state change properly
+	 */
+	listener_set_state(l, ((ret) ? LI_PAUSED : LI_ASSIGNED));
+
+	if (px && !(l->flags & LI_F_SUSPENDED))
+		px->li_suspended++;
+	l->flags |= LI_F_SUSPENDED;
+
+	/* at this point, everything is under control, no error should be
+	 * returned to calling function
+	 */
+	ret = 1;
 
 	if (px && !px->li_ready) {
 		ha_warning("Paused %s %s.\n", proxy_cap_str(px->cap), px->id);
 		send_log(px, LOG_WARNING, "Paused %s %s.\n", proxy_cap_str(px->cap), px->id);
 	}
   end:
-	HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+	if (!lli)
+		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+
+	if (!lpx && px)
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
+
 	return ret;
 }
 
@@ -459,17 +515,24 @@
  * or LI_FULL. 0 is returned in case of failure to resume (eg: dead socket).
  * Listeners bound to a different process are not woken up unless we're in
  * foreground mode, and are ignored. If the listener was only in the assigned
- * state, it's totally rebound. This can happen if a pause() has completely
+ * state, it's totally rebound. This can happen if a suspend() has completely
  * stopped it. If the resume fails, 0 is returned and an error might be
  * displayed.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
  */
-int resume_listener(struct listener *l)
+int resume_listener(struct listener *l, int lpx, int lli)
 {
 	struct proxy *px = l->bind_conf->frontend;
-	int was_paused = px && px->li_paused;
 	int ret = 1;
 
-	HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
+	if (!lpx && px)
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+
+	if (!lli)
+		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
 
 	/* check that another thread didn't to the job in parallel (e.g. at the
 	 * end of listen_accept() while we'd come from dequeue_all_listeners().
@@ -481,11 +544,14 @@
 	    !(proc_mask(l->rx.settings->bind_proc) & pid_bit))
 		goto end;
 
-	if (l->state == LI_READY)
+	if (!(l->flags & LI_F_FINALIZED) || l->state == LI_READY)
 		goto end;
 
-	if (l->rx.proto->resume)
+	if (l->rx.proto->resume) {
 		ret = l->rx.proto->resume(l);
+		if (!ret)
+			goto end; /* failure to resume */
+	}
 
 	if (l->maxconn && l->nbconn >= l->maxconn) {
 		l->rx.proto->disable(l);
@@ -497,17 +563,61 @@
 	listener_set_state(l, LI_READY);
 
   done:
-	if (was_paused && !px->li_paused) {
+	if (px && (l->flags & LI_F_SUSPENDED))
+		px->li_suspended--;
+	l->flags &= ~LI_F_SUSPENDED;
+
+	if (px && !px->li_suspended) {
 		ha_warning("Resumed %s %s.\n", proxy_cap_str(px->cap), px->id);
 		send_log(px, LOG_WARNING, "Resumed %s %s.\n", proxy_cap_str(px->cap), px->id);
 	}
   end:
-	HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+	if (!lli)
+		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+
+	if (!lpx && px)
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
+
+	return ret;
+}
+
+/* Same as resume_listener(), but will only work to resume from
+ * LI_FULL or LI_LIMITED states because we try to relax listeners that
+ * were temporarily restricted and not to resume inactive listeners that
+ * may have been paused or completely stopped in the meantime.
+ * Returns positive value for success and 0 for failure.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
+ */
+int relax_listener(struct listener *l, int lpx, int lli)
+{
+	struct proxy *px = l->bind_conf->frontend;
+	int ret = 1;
+
+	if (!lpx && px)
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+
+	if (!lli)
+		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
+
+	if (l->state != LI_FULL && l->state != LI_LIMITED)
+		goto end; /* listener may be suspended or even stopped */
+	ret = resume_listener(l, 1, 1);
+
+ end:
+	if (!lli)
+		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+
+	if (!lpx && px)
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
+
 	return ret;
 }
 
 /* Marks a ready listener as full so that the stream code tries to re-enable
- * it upon next close() using resume_listener().
+ * it upon next close() using relax_listener().
  */
 static void listener_full(struct listener *l)
 {
@@ -545,7 +655,7 @@
 		/* This cannot fail because the listeners are by definition in
 		 * the LI_LIMITED state.
 		 */
-		resume_listener(listener);
+		relax_listener(listener, 0, 0);
 	}
 }
 
@@ -558,7 +668,7 @@
 		/* This cannot fail because the listeners are by definition in
 		 * the LI_LIMITED state.
 		 */
-		resume_listener(listener);
+		relax_listener(listener, 0, 0);
 	}
 }
 
@@ -1084,7 +1194,7 @@
 	      (!tick_isset(global_listener_queue_task->expire) ||
 	       tick_is_expired(global_listener_queue_task->expire, now_ms))))) {
 		/* at least one thread has to this when quitting */
-		resume_listener(l);
+		relax_listener(l, 0, 0);
 
 		/* Dequeues all of the listeners waiting for a resource */
 		dequeue_all_listeners();
@@ -1103,7 +1213,7 @@
 	 * Let's put it to pause in this case.
 	 */
 	if (l->rx.proto && l->rx.proto->rx_listening(&l->rx) == 0) {
-		pause_listener(l);
+		suspend_listener(l, 0, 0);
 		goto end;
 	}
 
@@ -1112,7 +1222,9 @@
 	 * later than <expire> ahead. The listener turns to LI_LIMITED.
 	 */
 	limit_listener(l, &global_listener_queue);
+	HA_RWLOCK_RDLOCK(LISTENER_LOCK, &global_listener_rwlock);
 	task_schedule(global_listener_queue_task, expire);
+	HA_RWLOCK_RDUNLOCK(LISTENER_LOCK, &global_listener_rwlock);
 	goto end;
 
  limit_proxy:
@@ -1141,12 +1253,12 @@
 	_HA_ATOMIC_DEC(&l->thr_conn[tid]);
 
 	if (l->state == LI_FULL || l->state == LI_LIMITED)
-		resume_listener(l);
+		relax_listener(l, 0, 0);
 
 	/* Dequeues all of the listeners waiting for a resource */
 	dequeue_all_listeners();
 
-	if (!MT_LIST_ISEMPTY(&fe->listener_queue) &&
+	if (fe && !MT_LIST_ISEMPTY(&fe->listener_queue) &&
 	    (!fe->fe_sps_lim || freq_ctr_remain(&fe->fe_sess_per_sec, fe->fe_sps_lim, 0) > 0))
 		dequeue_proxy_listeners(fe);
 }
@@ -1162,6 +1274,7 @@
 	/* very simple initialization, users will queue the task if needed */
 	global_listener_queue_task->context = NULL; /* not even a context! */
 	global_listener_queue_task->process = manage_global_listener_queue;
+	HA_RWLOCK_INIT(&global_listener_rwlock);
 
 	return 0;
 }
@@ -1199,7 +1312,9 @@
 	dequeue_all_listeners();
 
  out:
+	HA_RWLOCK_WRLOCK(LISTENER_LOCK, &global_listener_rwlock);
 	t->expire = TICK_ETERNITY;
+	HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &global_listener_rwlock);
 	task_queue(t);
 	return t;
 }
diff --git a/src/log.c b/src/log.c
index c431937..c112a49 100644
--- a/src/log.c
+++ b/src/log.c
@@ -856,6 +856,10 @@
 			}
 
 			node = malloc(sizeof(*node));
+			if (!node) {
+				memprintf(err, "out of memory error");
+				goto error;
+			}
 			memcpy(node, logsrv, sizeof(struct logsrv));
 			node->ref = logsrv;
 			LIST_INIT(&node->list);
@@ -1381,17 +1385,21 @@
 	}
 
 	if (src && len) {
-		if (++len > size)
-			len = size;
+		/* escape_string and strlcpy2 will both try to add terminating NULL-byte
+		 * to dst, so we need to make sure that extra byte will fit into dst
+		 * before calling them
+		 */
 		if (node->options & LOG_OPT_ESC) {
 			char *ret;
 
-			ret = escape_string(dst, dst + len, '\\', rfc5424_escape_map, src);
+			ret = escape_string(dst, (dst + size - 1), '\\', rfc5424_escape_map, src, src + len);
 			if (ret == NULL || *ret != '\0')
 				return NULL;
 			len = ret - dst;
 		}
 		else {
+			if (++len > size)
+				len = size;
 			len = strlcpy2(dst, src, len);
 		}
 
@@ -1402,6 +1410,7 @@
 		if (size < 2)
 			return NULL;
 		*(dst++) = '-';
+		size -= 1;
 	}
 
 	if (node->options & LOG_OPT_QUOTE) {
@@ -1872,12 +1881,18 @@
  send:
 	if (logsrv->type == LOG_TARGET_BUFFER) {
 		struct ist msg;
+		size_t maxlen = logsrv->maxlen;
 
 		msg = ist2(message, size);
 		if (msg.len > logsrv->maxlen)
 			msg.len = logsrv->maxlen;
 
+		/* make room for the final '\n' which may be forcefully inserted
+		 * by tcp forwarder applet (sink_forward_io_handler)
+		 */
+		maxlen -= 1;
+
-		sent = sink_write(logsrv->sink, &msg, 1, level, logsrv->facility, metadata);
+		sent = sink_write(logsrv->sink, maxlen, &msg, 1, level, facility, metadata);
 	}
 	else if (logsrv->addr.ss_family == AF_CUST_EXISTING_FD) {
 		struct ist msg;
@@ -1890,7 +1905,7 @@
 	}
 	else {
 		int i = 0;
-		int totlen = logsrv->maxlen;
+		int totlen = logsrv->maxlen - 1; /* save space for the final '\n' */
 
 		for (i = 0 ; i < nbelem ; i++ ) {
 			iovec[i].iov_base = msg_header[i].ptr;
@@ -3418,6 +3433,7 @@
 		 */
 
 		p += 2;
+		*size -= 2;
 		/* timestamp is NILVALUE '-' */
 		if (*size > 2 && (p[0] == '-') && p[1] == ' ') {
 			metadata[LOG_META_TIME] = ist2(p, 1);
@@ -3761,7 +3777,7 @@
 	size_t size;
 
 	max_accept = l->maxaccept ? l->maxaccept : 1;
-	while (co_data(si_oc(si))) {
+	while (1) {
 		char c;
 
 		if (max_accept <= 0)
@@ -3811,7 +3827,7 @@
 			if (buf->area[to_skip - 1] != ' ')
 				goto parse_error;
 
-			msglen = strtol(trash.area, &p, 10);
+			msglen = strtol(buf->area, &p, 10);
 			if (!msglen || p != &buf->area[to_skip - 1])
 				goto parse_error;
 
@@ -3892,14 +3908,14 @@
  */
 int cfg_parse_log_forward(const char *file, int linenum, char **args, int kwm)
 {
-	int err_code = 0;
+	int err_code = ERR_NONE;
 	struct proxy *px;
 	char *errmsg = NULL;
 	const char *err = NULL;
 
 	if (strcmp(args[0], "log-forward") == 0) {
 		if (!*args[1]) {
-			ha_alert("parsing [%s:%d] : missing name for ip-forward section.\n", file, linenum);
+			ha_alert("parsing [%s:%d] : missing name for log-forward section.\n", file, linenum);
 			err_code |= ERR_ALERT | ERR_ABORT;
 			goto out;
 		}
@@ -3920,6 +3936,7 @@
 			ha_alert("Parsing [%s:%d]: log-forward section '%s' has the same name as another log-forward section declared at %s:%d.\n",
 				 file, linenum, args[1], px->conf.file, px->conf.line);
 			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
 		}
 
 		px = proxy_find_by_name(args[1], 0, 0);
@@ -3928,6 +3945,7 @@
 			         file, linenum, args[1], proxy_type_str(px),
 			         px->id, px->conf.file, px->conf.line);
 			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
 		}
 
 		px = calloc(1, sizeof *px);
@@ -3949,7 +3967,6 @@
 		px->accept = frontend_accept;
 		px->default_target = &syslog_applet.obj_type;
 		px->id = strdup(args[1]);
-
 	}
 	else if (strcmp(args[0], "maxconn") == 0) {  /* maxconn */
 		if (warnifnotcap(cfg_log_forward, PR_CAP_FE, file, linenum, args[0], " Maybe you want 'fullconn' instead ?"))
@@ -4001,9 +4018,9 @@
 			else {
 				ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
 				         file, linenum, args[0], args[1], args[2]);
-				err_code |= ERR_ALERT | ERR_FATAL;
-				goto out;
 			}
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
 		}
 		list_for_each_entry(l, &bind_conf->listeners, by_bind) {
 			l->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : MAX_ACCEPT;
@@ -4141,7 +4158,6 @@
 		}
 		else if (res) {
 			memprintf(&errmsg, "unexpected character '%c' in 'timeout client'", *res);
-			return -1;
 		}
 
 		if (res) {
@@ -4157,6 +4173,7 @@
 		goto out;
 	}
 out:
+	ha_free(&errmsg);
 	return err_code;
 }
 
diff --git a/src/mailers.c b/src/mailers.c
index 5e680f2..1a696d5 100644
--- a/src/mailers.c
+++ b/src/mailers.c
@@ -120,6 +120,7 @@
 
 		LIST_INIT(&q->email_alerts);
 		HA_SPIN_INIT(&q->lock);
+		check->obj_type = OBJ_TYPE_CHECK;
 		check->inter = mls->timeout.mail;
 		check->rise = DEF_AGENT_RISETIME;
 		check->proxy = p;
diff --git a/src/mjson.c b/src/mjson.c
index 549b0d5..73b7a57 100644
--- a/src/mjson.c
+++ b/src/mjson.c
@@ -192,7 +192,7 @@
 }
 
 static int plen2(const char *s) {
-  int i = 0, n = 0;
+  int i = 0, __attribute__((unused)) n = 0;
   while (s[i] != '\0' && s[i] != '.' && s[i] != '[')
     n++, i += s[i] == '\\' ? 2 : 1;
   // printf("PLEN: s: [%s], [%.*s] => %d\n", s, i, s, n);
@@ -724,7 +724,7 @@
 /* NOTE: strtod() implementation by Yasuhiro Matsumoto. */
 static double mystrtod(const char *str, char **end) {
   double d = 0.0;
-  int sign = 1, n = 0;
+  int sign = 1, __attribute__((unused)) n = 0;
   const char *p = str, *a = str;
 
   /* decimal part */
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index d916496..5025163 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -1399,7 +1399,7 @@
 		 * captured
 		 */
 		params->scriptname = ist2(path.ptr + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so);
-		if (!(params->mask & FCGI_SP_PATH_INFO) &&  (pmatch[2].rm_so == -1 || pmatch[2].rm_eo == -1))
+		if (!(params->mask & FCGI_SP_PATH_INFO) && !(pmatch[2].rm_so == -1 || pmatch[2].rm_eo == -1))
 			params->pathinfo = ist2(path.ptr + pmatch[2].rm_so, pmatch[2].rm_eo - pmatch[2].rm_so);
 
 	  check_index:
@@ -2188,9 +2188,10 @@
 	size = htx_get_blksz(blk);
 	if (unlikely(size == count && htx_nbblks(htx) == 1 && type == HTX_BLK_DATA)) {
 		void *old_area = mbuf->area;
+		int eom = (htx->flags & HTX_FL_EOM);
 
 		 /* Last block of the message: Reserve the size for the empty stdin record */
-		if (htx->flags & HTX_FL_EOM)
+		if (eom)
 			extra_bytes = FCGI_RECORD_HEADER_SZ;
 
 		if (b_data(mbuf)) {
@@ -2226,6 +2227,8 @@
 
 		htx = (struct htx *)buf->area;
 		htx_reset(htx);
+		if (eom)
+			goto empty_stdin;
 		goto end;
 	}
 
@@ -2275,7 +2278,9 @@
 					    b_data(&outbuf) + v.len + extra_bytes <= b_room(mbuf) &&
 					    b_data(mbuf) <= MAX_DATA_REALIGN)
 						goto realign_again;
-					v.len = b_room(&outbuf) - FCGI_RECORD_HEADER_SZ - extra_bytes;
+					v.len = (FCGI_RECORD_HEADER_SZ + extra_bytes > b_room(&outbuf)
+						 ? 0
+						 : b_room(&outbuf) - FCGI_RECORD_HEADER_SZ - extra_bytes);
 				}
 				if (!v.len || !chunk_memcat(&outbuf, v.ptr, v.len)) {
 					if (outbuf.data == FCGI_RECORD_HEADER_SZ)
@@ -2306,6 +2311,7 @@
 
 	/* Send the empty stding here to finish the message */
 	if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)) {
+	  empty_stdin:
 		TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
 		if (!fcgi_strm_send_empty_stdin(fconn, fstrm)) {
 			/* bytes already reserved for this record. It should not fail */
@@ -3025,7 +3031,7 @@
 		conn = fconn->conn;
 		TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
 
-		conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		conn_in_list = conn_get_idle_flag(conn);
 		if (conn_in_list)
 			conn_delete_from_tree(&conn->hash_node->node);
 
@@ -4184,14 +4190,14 @@
 		if (fstrm->cs)
 			chunk_appendf(msg, " .cs.flg=0x%08x .cs.data=%p",
 				      fstrm->cs->flags, fstrm->cs->data);
-		chunk_appendf(&trash, " .subs=%p", fstrm->subs);
+		chunk_appendf(msg, " .subs=%p", fstrm->subs);
 		if (fstrm->subs) {
-			chunk_appendf(&trash, "(ev=%d tl=%p", fstrm->subs->events, fstrm->subs->tasklet);
-			chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
+			chunk_appendf(msg, "(ev=%d tl=%p", fstrm->subs->events, fstrm->subs->tasklet);
+			chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
 				      fstrm->subs->tasklet->calls,
 				      fstrm->subs->tasklet->context);
-			resolve_sym_name(&trash, NULL, fstrm->subs->tasklet->process);
-			chunk_appendf(&trash, ")");
+			resolve_sym_name(msg, NULL, fstrm->subs->tasklet->process);
+			chunk_appendf(msg, ")");
 		}
 	}
 	return 0;
diff --git a/src/mux_h1.c b/src/mux_h1.c
index fd568e9..1324c20 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -935,8 +935,10 @@
 			h1c->task = NULL;
 		}
 
-		if (h1c->wait_event.tasklet)
+		if (h1c->wait_event.tasklet) {
 			tasklet_free(h1c->wait_event.tasklet);
+			h1c->wait_event.tasklet = NULL;
+		}
 
 		h1s_destroy(h1c->h1s);
 		if (conn) {
@@ -1651,7 +1653,6 @@
 
 	b_del(&h1c->ibuf, total);
 
-	htx_to_buf(htx, buf);
 	TRACE_DEVEL("incoming data parsed", H1_EV_RX_DATA, h1c->conn, h1s, htx, (size_t[]){ret});
 
 	ret = htx->data - data;
@@ -1751,6 +1752,7 @@
 	}
 
   end:
+	htx_to_buf(htx, buf);
 	TRACE_LEAVE(H1_EV_RX_DATA, h1c->conn, h1s, htx, (size_t[]){ret});
 	return ret;
 
@@ -2133,7 +2135,7 @@
 					/* EOM flag is set or empty payload (C-L to 0) and it is the last block */
 					if (htx_is_unique_blk(chn_htx, blk) &&
 					    ((chn_htx->flags & HTX_FL_EOM) || ((h1m->flags & H1_MF_CLEN) && !h1m->curr_len))) {
-						if (h1m->flags & H1_MF_CHNK) {
+						if ((h1m->flags & H1_MF_CHNK) && !(h1s->flags & H1S_F_BODYLESS_RESP)) {
 							if (!chunk_memcat(&tmp, "\r\n0\r\n\r\n", 7))
 								goto full;
 						}
@@ -2556,9 +2558,10 @@
 		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
 
 	h1c->errcode = 408;
+	ret = h1_send_error(h1c);
 	if (b_data(&h1c->ibuf) || !(sess->fe->options & PR_O_NULLNOLOG))
-		ret = h1_send_error(h1c);
-	sess_log(sess);
+		sess_log(sess);
+
   end:
 	return ret;
 }
@@ -2831,7 +2834,8 @@
 				h1s->flags |= H1S_F_REOS;
 				TRACE_STATE("read0 on connection", H1_EV_H1C_RECV, conn, h1s);
 			}
-			if ((h1c->flags & H1C_F_ST_ERROR) || ((conn->flags & CO_FL_ERROR) && !b_data(&h1c->ibuf)))
+			if ((h1c->flags & H1C_F_ST_ERROR) || ((conn->flags & CO_FL_ERROR) &&
+			      ((h1s->cs->flags & (CS_FL_EOI|CS_FL_EOS)) || !b_data(&h1c->ibuf))))
 				h1s->cs->flags |= CS_FL_ERROR;
 			TRACE_POINT(H1_EV_STRM_WAKE, h1c->conn, h1s);
 			h1_alert(h1s);
@@ -2910,7 +2914,7 @@
 		/* Remove the connection from the list, to be sure nobody attempts
 		 * to use it while we handle the I/O events
 		 */
-		conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		conn_in_list = conn_get_idle_flag(conn);
 		if (conn_in_list)
 			conn_delete_from_tree(&conn->hash_node->node);
 
@@ -3069,6 +3073,9 @@
 	struct conn_stream *cs = NULL;
 	struct h1s *h1s;
 
+	/* this connection is no more idle (if it was at all) */
+	h1c->flags &= ~H1C_F_ST_SILENT_SHUT;
+
 	TRACE_ENTER(H1_EV_STRM_NEW, conn);
 	if (h1c->flags & H1C_F_ST_ERROR) {
 		TRACE_ERROR("h1c on error", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, conn);
@@ -3153,6 +3160,11 @@
 	h1s_destroy(h1s);
 
 	if ((h1c->flags & (H1C_F_IS_BACK|H1C_F_ST_IDLE)) == (H1C_F_IS_BACK|H1C_F_ST_IDLE)) {
+		/* this connection may be killed at any moment, we want it to
+		 * die "cleanly" (i.e. only an RST).
+		 */
+		h1c->flags |= H1C_F_ST_SILENT_SHUT;
+
 		/* If there are any excess server data in the input buffer,
 		 * release it and close the connection ASAP (some data may
 		 * remain in the output buffer). This happens if a server sends
@@ -3330,6 +3342,10 @@
 	TRACE_ENTER(H1_EV_H1C_END, conn);
 	conn_xprt_shutw(conn);
 	conn_sock_shutw(conn, (h1c && !(h1c->flags & H1C_F_ST_SILENT_SHUT)));
+
+	if (h1c->wait_event.tasklet && !h1c->wait_event.events)
+		tasklet_wakeup(h1c->wait_event.tasklet);
+
 	TRACE_LEAVE(H1_EV_H1C_END, conn);
 }
 
@@ -3492,18 +3508,24 @@
 		else
 			TRACE_DEVEL("h1c obuf not allocated", H1_EV_STRM_SEND|H1_EV_H1S_BLK, h1c->conn, h1s);
 
+		if (!ret)
+			break;
+
 		if ((count - ret) > 0)
 			h1c->flags |= H1C_F_CO_MSG_MORE;
 
-		if (!ret)
-			break;
 		total += ret;
 		count -= ret;
+
 		if ((h1c->wait_event.events & SUB_RETRY_SEND) || !h1_send(h1c))
 			break;
+
+		if ((h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_WR_SH)))
+			break;
 	}
 
-	if (h1c->flags & H1C_F_ST_ERROR) {
+	if ((h1c->flags & H1C_F_ST_ERROR) || ((h1c->conn->flags & CO_FL_ERROR) &&
+	      ((cs->flags & (CS_FL_EOI|CS_FL_EOS)) || !b_data(&h1c->ibuf)))) {
 		cs->flags |= CS_FL_ERROR;
 		TRACE_ERROR("reporting error to the app-layer stream", H1_EV_STRM_SEND|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
 	}
@@ -3675,16 +3697,16 @@
 			chunk_appendf(msg, " .cs.flg=0x%08x .cs.data=%p",
 				      h1s->cs->flags, h1s->cs->data);
 
-		chunk_appendf(&trash, " .subs=%p", h1s->subs);
+		chunk_appendf(msg, " .subs=%p", h1s->subs);
 		if (h1s->subs) {
-			chunk_appendf(&trash, "(ev=%d tl=%p", h1s->subs->events, h1s->subs->tasklet);
-			chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
+			chunk_appendf(msg, "(ev=%d tl=%p", h1s->subs->events, h1s->subs->tasklet);
+			chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
 				      h1s->subs->tasklet->calls,
 				      h1s->subs->tasklet->context);
 			if (h1s->subs->tasklet->calls >= 1000000)
 				ret = 1;
-			resolve_sym_name(&trash, NULL, h1s->subs->tasklet->process);
-			chunk_appendf(&trash, ")");
+			resolve_sym_name(msg, NULL, h1s->subs->tasklet->process);
+			chunk_appendf(msg, ")");
 		}
 	}
 	return ret;
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 5916ee8..61fd1a4 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -61,6 +61,7 @@
                                             // (SHORT_READ is also excluded)
 
 #define H2_CF_DEM_SHORT_READ    0x00000200  // demux blocked on incomplete frame
+#define H2_CF_DEM_IN_PROGRESS   0x00000400  // demux in progress (dsi,dfl,dft are valid)
 
 /* other flags */
 #define H2_CF_GOAWAY_SENT       0x00001000  // a GOAWAY frame was successfully sent
@@ -104,7 +105,7 @@
 	uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
 	int32_t max_id; /* highest ID known on this connection, <0 before preface */
 	uint32_t rcvd_c; /* newly received data to ACK for the connection */
-	uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) */
+	uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) or zero */
 
 	/* states for the demux direction */
 	struct hpack_dht *ddht; /* demux dynamic header table */
@@ -633,7 +634,7 @@
 		if (h2c->errcode)
 			chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);
 
-		if (h2c->dsi >= 0 &&
+		if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
 		    (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
 			chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
 		}
@@ -986,6 +987,7 @@
 
 	h2c->proxy = prx;
 	h2c->task = NULL;
+	h2c->wait_event.tasklet = NULL;
 	h2c->idle_start = now_ms;
 	if (tick_isset(h2c->timeout)) {
 		t = task_new(tid_bit);
@@ -2756,8 +2758,11 @@
 		if (h2s->st != H2_SS_CLOSED) {
 			error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
 			/* unrecoverable error ? */
-			if (h2c->st0 >= H2_CS_ERROR)
+			if (h2c->st0 >= H2_CS_ERROR) {
+				TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+				sess_log(h2c->conn->owner);
 				goto out;
+			}
 
 			if (error == 0) {
 				/* Demux not blocked because of the stream, it is an incomplete frame */
@@ -2770,7 +2775,9 @@
 				/* Failed to decode this frame (e.g. too large request)
 				 * but the HPACK decompressor is still synchronized.
 				 */
+				sess_log(h2c->conn->owner);
 				h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
+				TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
 				h2c->st0 = H2_CS_FRAME_E;
 				goto out;
 			}
@@ -2780,6 +2787,7 @@
 		 * the data and send another RST.
 		 */
 		error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
+		sess_log(h2c->conn->owner);
 		h2s = (struct h2s*)h2_error_stream;
 		goto send_rst;
 	}
@@ -2797,8 +2805,11 @@
 	error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
 
 	/* unrecoverable error ? */
-	if (h2c->st0 >= H2_CS_ERROR)
+	if (h2c->st0 >= H2_CS_ERROR) {
+		TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+		sess_log(h2c->conn->owner);
 		goto out;
+	}
 
 	if (error <= 0) {
 		if (error == 0) {
@@ -2811,6 +2822,7 @@
 		/* Failed to decode this stream (e.g. too large request)
 		 * but the HPACK decompressor is still synchronized.
 		 */
+		sess_log(h2c->conn->owner);
 		h2s = (struct h2s*)h2_error_stream;
 		goto send_rst;
 	}
@@ -2912,8 +2924,10 @@
 	}
 
 	/* unrecoverable error ? */
-	if (h2c->st0 >= H2_CS_ERROR)
+	if (h2c->st0 >= H2_CS_ERROR) {
+		TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
 		goto fail;
+	}
 
 	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
 		/* RFC7540#5.1 */
@@ -3338,8 +3352,6 @@
 		}
 
 		if (h2c->st0 == H2_CS_FRAME_H) {
-			h2c->rcvd_s = 0;
-
 			TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
 			if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
 				h2c->flags |= H2_CF_DEM_SHORT_READ;
@@ -3357,6 +3369,16 @@
 				break;
 			}
 
+			if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
+				/* changed stream with a pending WU, need to
+				 * send it now.
+				 */
+				TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+				ret = h2c_send_strm_wu(h2c);
+				if (ret <= 0)
+					break;
+			}
+
 			padlen = 0;
 			if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
 				/* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
@@ -3409,6 +3431,7 @@
 			h2c->dft = hdr.ft;
 			h2c->dff = hdr.ff;
 			h2c->dpl = padlen;
+			h2c->flags |= H2_CF_DEM_IN_PROGRESS;
 			TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
 			h2c->st0 = H2_CS_FRAME_P;
 
@@ -3527,8 +3550,8 @@
 			HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
 
 			if (h2c->st0 == H2_CS_FRAME_A) {
-				TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
-				ret = h2c_send_strm_wu(h2c);
+				/* rcvd_s will suffice to trigger the sending of a WU */
+				h2c->st0 = H2_CS_FRAME_H;
 			}
 			break;
 
@@ -3591,13 +3614,19 @@
 			b_del(&h2c->dbuf, ret);
 			h2c->dfl -= ret;
 			if (!h2c->dfl) {
+				h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
 				TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
 				h2c->st0 = H2_CS_FRAME_H;
-				h2c->dsi = -1;
 			}
 		}
 	}
 
+	if (h2c->rcvd_s > 0 &&
+	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
+		TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
+		h2c_send_strm_wu(h2c);
+	}
+
 	if (h2c->rcvd_c > 0 &&
 	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
 		TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
@@ -3954,11 +3983,10 @@
 		conn = h2c->conn;
 		TRACE_ENTER(H2_EV_H2C_WAKE, conn);
 
-		conn_in_list = conn->flags & CO_FL_LIST_MASK;
-
 		/* Remove the connection from the list, to be sure nobody attempts
 		 * to use it while we handle the I/O events
 		 */
+		conn_in_list = conn_get_idle_flag(conn);
 		if (conn_in_list)
 			conn_delete_from_tree(&conn->hash_node->node);
 
@@ -4363,7 +4391,7 @@
 		/* refresh the timeout if none was active, so that the last
 		 * leaving stream may arm it.
 		 */
-		if (!tick_isset(h2c->task->expire))
+		if (h2c->task && !tick_isset(h2c->task->expire))
 			h2c_update_timeout(h2c);
 		return;
 	}
@@ -4889,7 +4917,8 @@
 	if (h2c->flags & H2_CF_IS_BACK)
 		outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
 	else
-		outlen = h2_make_htx_request(list, htx, &msgf, body_len);
+		outlen = h2_make_htx_request(list, htx, &msgf, body_len,
+					     !!(((const struct session *)h2c->conn->owner)->fe->options2 & PR_O2_REQBUG_OK));
 
 	if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
 		/* too large headers? this is a stream error only */
@@ -4926,6 +4955,11 @@
 		*flags |= H2_SF_HEADERS_RCVD;
 
 	if (h2c->dff & H2_F_HEADERS_END_STREAM) {
+		if (msgf & H2_MSGF_RSP_1XX) {
+			/* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
+			TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			goto fail;
+		}
 		/* no more data are expected for this message */
 		htx->flags |= HTX_FL_EOM;
 	}
@@ -6722,16 +6756,16 @@
 			chunk_appendf(msg, "(.flg=0x%08x .data=%p)",
 				      h2s->cs->flags, h2s->cs->data);
 
-		chunk_appendf(&trash, " .subs=%p", h2s->subs);
+		chunk_appendf(msg, " .subs=%p", h2s->subs);
 		if (h2s->subs) {
-			chunk_appendf(&trash, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
-			chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
+			chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
+			chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
 				      h2s->subs->tasklet->calls,
 				      h2s->subs->tasklet->context);
 			if (h2s->subs->tasklet->calls >= 1000000)
 				ret = 1;
-			resolve_sym_name(&trash, NULL, h2s->subs->tasklet->process);
-			chunk_appendf(&trash, ")");
+			resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
+			chunk_appendf(msg, ")");
 		}
 	}
 	return ret;
diff --git a/src/mworker.c b/src/mworker.c
index 7a8feda..6415fd4 100644
--- a/src/mworker.c
+++ b/src/mworker.c
@@ -130,15 +130,24 @@
 
 /*
  * unserialize the proc list from the environment
+ * Return < 0 upon error.
  */
 int mworker_env_to_proc_list()
 {
-	char *msg, *token = NULL, *s1;
+	char *env, *msg, *omsg = NULL, *token = NULL, *s1;
+	int err = 0;
 
-	msg = getenv("HAPROXY_PROCESSES");
-	if (!msg)
+	env = getenv("HAPROXY_PROCESSES");
+	if (!env)
 		return 0;
 
+	omsg = msg = strdup(env);
+	if (!msg) {
+		ha_alert("Out of memory while trying to allocate a worker process structure.");
+		err = -1;
+		goto out;
+	}
+
 	while ((token = strtok_r(msg, "|", &s1))) {
 		struct mworker_proc *child;
 		char *subtoken = NULL;
@@ -148,8 +157,9 @@
 
 		child = calloc(1, sizeof(*child));
 		if (!child) {
-			ha_alert("Out of memory while trying to allocate a worker process structure.");
-			return -1;
+			ha_alert("out of memory while trying to allocate a worker process structure.");
+			err = -1;
+			goto out;
 		}
 
 		while ((subtoken = strtok_r(token, ";", &s2))) {
@@ -171,6 +181,8 @@
 
 			} else if (strncmp(subtoken, "fd=", 3) == 0) {
 				child->ipc_fd[0] = atoi(subtoken+3);
+				if (child->ipc_fd[0] > -1)
+					global.maxsock++;
 			} else if (strncmp(subtoken, "pid=", 4) == 0) {
 				child->pid = atoi(subtoken+4);
 			} else if (strncmp(subtoken, "rpid=", 5) == 0) {
@@ -198,7 +210,9 @@
 
 	unsetenv("HAPROXY_PROCESSES");
 
-	return 0;
+out:
+	free(omsg);
+	return err;
 }
 
 /* Signal blocking and unblocking */
@@ -394,11 +408,14 @@
 	if (tid != 0)
 		return 1;
 
+	if (proc_self->ipc_fd[1] < 0) /* proc_self was incomplete and we can't find the socketpair */
+		return 1;
+
 	fcntl(proc_self->ipc_fd[1], F_SETFL, O_NONBLOCK);
 	/* In multi-tread, we need only one thread to process
 	 * events on the pipe with master
 	 */
-	fd_insert(proc_self->ipc_fd[1], fdtab[proc_self->ipc_fd[1]].owner, mworker_accept_wrapper, tid_bit);
+	fdtab[proc_self->ipc_fd[1]].iocb = mworker_accept_wrapper;
 	fd_want_recv(proc_self->ipc_fd[1]);
 	return 1;
 }
@@ -423,8 +440,10 @@
 
 		stop_proxy(curpeers->peers_fe);
 		/* disable this peer section so that it kills itself */
-		signal_unregister_handler(curpeers->sighandler);
-		task_destroy(curpeers->sync_task);
+		if (curpeers->sighandler)
+			signal_unregister_handler(curpeers->sighandler);
+		if (curpeers->sync_task)
+			task_destroy(curpeers->sync_task);
 		curpeers->sync_task = NULL;
 		task_destroy(curpeers->peers_fe->task);
 		curpeers->peers_fe->task = NULL;
@@ -485,12 +504,15 @@
 	struct stream_interface *si = appctx->owner;
 	struct mworker_proc *child;
 	int old = 0;
-	int up = now.tv_sec - proc_self->timestamp;
+	int up = date.tv_sec - proc_self->timestamp;
 	char *uptime = NULL;
 
 	if (unlikely(si_ic(si)->flags & (CF_WRITE_ERROR|CF_SHUTW)))
 		return 1;
 
+	if (up < 0) /* must never be negative because of clock drift */
+		up = 0;
+
 	chunk_reset(&trash);
 
 	chunk_printf(&trash, "#%-14s %-15s %-15s %-15s %-15s %-15s\n", "<PID>", "<type>", "<relative PID>", "<reloads>", "<uptime>", "<version>");
@@ -502,7 +524,9 @@
 
 	chunk_appendf(&trash, "# workers\n");
 	list_for_each_entry(child, &proc_list, list) {
-		up = now.tv_sec - child->timestamp;
+		up = date.tv_sec - child->timestamp;
+		if (up < 0) /* must never be negative because of clock drift */
+			up = 0;
 
 		if (!(child->options & PROC_O_TYPE_WORKER))
 			continue;
@@ -523,7 +547,9 @@
 
 		chunk_appendf(&trash, "# old workers\n");
 		list_for_each_entry(child, &proc_list, list) {
-			up = now.tv_sec - child->timestamp;
+			up = date.tv_sec - child->timestamp;
+			if (up <= 0) /* must never be negative because of clock drift */
+				up = 0;
 
 			if (!(child->options & PROC_O_TYPE_WORKER))
 				continue;
@@ -542,7 +568,9 @@
 	chunk_appendf(&trash, "# programs\n");
 	old = 0;
 	list_for_each_entry(child, &proc_list, list) {
-		up = now.tv_sec - child->timestamp;
+		up = date.tv_sec - child->timestamp;
+		if (up < 0) /* must never be negative because of clock drift */
+			up = 0;
 
 		if (!(child->options & PROC_O_TYPE_PROG))
 			continue;
@@ -559,7 +587,9 @@
 	if (old) {
 		chunk_appendf(&trash, "# old programs\n");
 		list_for_each_entry(child, &proc_list, list) {
-			up = now.tv_sec - child->timestamp;
+			up = date.tv_sec - child->timestamp;
+			if (up < 0) /* must never be negative because of clock drift */
+				up = 0;
 
 			if (!(child->options & PROC_O_TYPE_PROG))
 				continue;
diff --git a/src/namespace.c b/src/namespace.c
index 1fc8439..9cc85a3 100644
--- a/src/namespace.c
+++ b/src/namespace.c
@@ -54,6 +54,7 @@
 		entry = container_of(node, struct netns_entry, node);
 		free(entry->node.key);
 		close(entry->fd);
+		free(entry);
 		node = next;
 	}
 }
diff --git a/src/pattern.c b/src/pattern.c
index 265b05f..417841b 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -476,7 +476,7 @@
 		while (node) {
 			elt = ebmb_entry(node, struct pattern_tree, node);
 			if (elt->ref->gen_id != expr->ref->curr_gen) {
-				node = ebmb_next(node);
+				node = ebmb_next_dup(node);
 				continue;
 			}
 			if (fill) {
@@ -671,7 +671,7 @@
 		while (node) {
 			elt = ebmb_entry(node, struct pattern_tree, node);
 			if (elt->ref->gen_id != expr->ref->curr_gen) {
-				node = ebmb_next(node);
+				node = ebmb_lookup_shorter(node);
 				continue;
 			}
 			if (fill) {
@@ -982,7 +982,7 @@
 		while (node) {
 			elt = ebmb_entry(node, struct pattern_tree, node);
 			if (elt->ref->gen_id != expr->ref->curr_gen) {
-				node = ebmb_next(node);
+				node = ebmb_lookup_shorter(node);
 				continue;
 			}
 			if (fill) {
@@ -1008,7 +1008,7 @@
 		while (node) {
 			elt = ebmb_entry(node, struct pattern_tree, node);
 			if (elt->ref->gen_id != expr->ref->curr_gen) {
-				node = ebmb_next(node);
+				node = ebmb_lookup_shorter(node);
 				continue;
 			}
 			if (fill) {
@@ -1032,7 +1032,7 @@
 		while (node) {
 			elt = ebmb_entry(node, struct pattern_tree, node);
 			if (elt->ref->gen_id != expr->ref->curr_gen) {
-				node = ebmb_next(node);
+				node = ebmb_lookup_shorter(node);
 				continue;
 			}
 			if (fill) {
@@ -1069,7 +1069,7 @@
 			while (node) {
 				elt = ebmb_entry(node, struct pattern_tree, node);
 				if (elt->ref->gen_id != expr->ref->curr_gen) {
-					node = ebmb_next(node);
+					node = ebmb_lookup_shorter(node);
 					continue;
 				}
 				if (fill) {
diff --git a/src/peers.c b/src/peers.c
index 5c26751..ece789d 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -98,6 +98,7 @@
 
 #define PEER_RESYNC_TIMEOUT         5000 /* 5 seconds */
 #define PEER_RECONNECT_TIMEOUT      5000 /* 5 seconds */
+#define PEER_LOCAL_RECONNECT_TIMEOUT 500 /* 500ms */
 #define PEER_HEARTBEAT_TIMEOUT      3000 /* 3 seconds */
 
 /* flags for "show peers" */
@@ -1545,7 +1546,6 @@
 static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, int exp,
                                 char **msg_cur, char *msg_end, int msg_len, int totl)
 {
-	struct stream_interface *si = appctx->owner;
 	struct shared_table *st = p->remote_table;
 	struct stksess *ts, *newts;
 	uint32_t update;
@@ -1790,14 +1790,10 @@
 
 	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 	stktable_touch_remote(st->table, ts, 1);
-	TRACE_LEAVE(PEERS_EV_UPDTMSG, NULL, p);
-	return 1;
 
  ignore_msg:
-	/* skip consumed message */
-	co_skip(si_oc(si), totl);
-	TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG);
-	return 0;
+	TRACE_LEAVE(PEERS_EV_UPDTMSG, NULL, p);
+	return 1;
 
  malformed_unlock:
 	/* malformed message */
@@ -1904,7 +1900,6 @@
 static inline int peer_treat_definemsg(struct appctx *appctx, struct peer *p,
                                       char **msg_cur, char *msg_end, int totl)
 {
-	struct stream_interface *si = appctx->owner;
 	int table_id_len;
 	struct shared_table *st;
 	int table_type;
@@ -1978,11 +1973,9 @@
 
 	p->remote_table->remote_data = table_data;
 	p->remote_table->remote_id = table_id;
-	return 1;
 
  ignore_msg:
-	co_skip(si_oc(si), totl);
-	return 0;
+	return 1;
 
  malformed_exit:
 	/* malformed message */
@@ -2674,7 +2667,7 @@
 					}
 				}
 
-				if (si_ic(si)->flags & CF_WRITE_PARTIAL)
+				if (si_ic(si)->flags & CF_WROTE_DATA)
 					curpeer->statuscode = PEER_SESS_SC_CONNECTEDCODE;
 
 				reql = peer_getline(appctx);
@@ -2860,7 +2853,9 @@
 	fe->mode = PR_MODE_PEERS;
 	fe->maxconn = 0;
 	fe->conn_retries = CONN_RETRIES;
+	fe->timeout.connect = MS_TO_TICKS(1000);
 	fe->timeout.client = MS_TO_TICKS(5000);
+	fe->timeout.server = MS_TO_TICKS(5000);
 	fe->accept = frontend_accept;
 	fe->default_target = &peer_applet.obj_type;
 	fe->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON | PR_O2_SMARTACC;
@@ -2878,7 +2873,7 @@
 	struct stream *s;
 
 	peer->new_conn++;
-	peer->reconnect = tick_add(now_ms, MS_TO_TICKS(PEER_RECONNECT_TIMEOUT));
+	peer->reconnect = tick_add(now_ms, (stopping ? MS_TO_TICKS(PEER_LOCAL_RECONNECT_TIMEOUT) : MS_TO_TICKS(PEER_RECONNECT_TIMEOUT)));
 	peer->heartbeat = TICK_ETERNITY;
 	peer->statuscode = PEER_SESS_SC_CONNECTCODE;
 	peer->last_hdshk = now_ms;
@@ -3132,6 +3127,10 @@
 						peer_session_forceshutdown(ps);
 					}
 				}
+
+				/* Set resync timeout for the local peer and request an immediate reconnect */
+				peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+				peers->local->reconnect = now_ms;
 			}
 		}
 
@@ -3146,19 +3145,33 @@
 			}
 		}
 		else if (!ps->appctx) {
+			/* Re-arm resync timeout if necessary */
+			if (!tick_isset(peers->resync_timeout))
+				peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+
 			/* If there's no active peer connection */
-			if (ps->statuscode == 0 ||
-			    ps->statuscode == PEER_SESS_SC_SUCCESSCODE ||
-			    ps->statuscode == PEER_SESS_SC_CONNECTEDCODE ||
-			    ps->statuscode == PEER_SESS_SC_TRYAGAIN) {
-				/* connection never tried
-				 * or previous peer connection was successfully established
-				 * or previous tcp connect succeeded but init state incomplete
-				 * or during previous connect, peer replies a try again statuscode */
+			if ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FINISHED &&
+			    !tick_is_expired(peers->resync_timeout, now_ms) &&
+			    (ps->statuscode == 0 ||
+			     ps->statuscode == PEER_SESS_SC_SUCCESSCODE ||
+			     ps->statuscode == PEER_SESS_SC_CONNECTEDCODE ||
+			     ps->statuscode == PEER_SESS_SC_TRYAGAIN)) {
+				/* The resync is finished for the local peer and
+				 *   the resync timeout is not expired and
+				 *   connection never tried
+				 *   or previous peer connection was successfully established
+				 *   or previous tcp connect succeeded but init state incomplete
+				 *   or during previous connect, peer replies a try again statuscode */
 
-				/* connect to the local peer if we must push a local sync */
-				if (peers->flags & PEERS_F_DONOTSTOP) {
-					peer_session_create(peers, ps);
+				if (!tick_is_expired(ps->reconnect, now_ms)) {
+					/* reconnection timer is not expired. reschedule task for reconnect */
+					task->expire = tick_first(task->expire, ps->reconnect);
+				}
+				else  {
+					/* connect to the local peer if we must push a local sync */
+					if (peers->flags & PEERS_F_DONOTSTOP) {
+						peer_session_create(peers, ps);
+					}
 				}
 			}
 			else {
@@ -3173,6 +3186,9 @@
 			}
 		}
 		else if (ps->statuscode == PEER_SESS_SC_SUCCESSCODE ) {
+			/* Reset resync timeout during a resync */
+			peers->resync_timeout = TICK_ETERNITY;
+
 			/* current peer connection is active and established
 			 * wake up all peer handlers to push remaining local updates */
 			for (st = ps->tables; st ; st = st->next) {
diff --git a/src/pool.c b/src/pool.c
index b74ffd2..ce709f9 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -525,7 +525,7 @@
 void dump_pools_to_trash()
 {
 	struct pool_head *entry;
-	unsigned long allocated, used;
+	unsigned long long allocated, used;
 	int nbpools;
 
 	allocated = used = nbpools = 0;
@@ -534,21 +534,21 @@
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
 #endif
-		chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p%s\n",
+		chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%llu bytes), %u used, needed_avg %u, %u failures, %u users, @%p%s\n",
 			 entry->name, entry->size, entry->allocated,
-		         entry->size * entry->allocated, entry->used,
+			 (ullong)entry->size * entry->allocated, entry->used,
 		         swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
 			 entry->users, entry,
 			 (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");
 
-		allocated += entry->allocated * entry->size;
-		used += entry->used * entry->size;
+		allocated += entry->allocated * (ullong)entry->size;
+		used += entry->used * (ullong)entry->size;
 		nbpools++;
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
 #endif
 	}
-	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
+	chunk_appendf(&trash, "Total: %d pools, %llu bytes allocated, %llu used.\n",
 		 nbpools, allocated, used);
 }
 
@@ -571,24 +571,24 @@
 }
 
 /* This function returns the total amount of memory allocated in pools (in bytes) */
-unsigned long pool_total_allocated()
+unsigned long long pool_total_allocated()
 {
 	struct pool_head *entry;
-	unsigned long allocated = 0;
+	unsigned long long allocated = 0;
 
 	list_for_each_entry(entry, &pools, list)
-		allocated += entry->allocated * entry->size;
+		allocated += entry->allocated * (ullong)entry->size;
 	return allocated;
 }
 
 /* This function returns the total amount of memory used in pools (in bytes) */
-unsigned long pool_total_used()
+unsigned long long pool_total_used()
 {
 	struct pool_head *entry;
-	unsigned long used = 0;
+	unsigned long long used = 0;
 
 	list_for_each_entry(entry, &pools, list)
-		used += entry->used * entry->size;
+		used += entry->used * (ullong)entry->size;
 	return used;
 }
 
diff --git a/src/proto_sockpair.c b/src/proto_sockpair.c
index 0357552..21728eb 100644
--- a/src/proto_sockpair.c
+++ b/src/proto_sockpair.c
@@ -247,7 +247,7 @@
 
 	if (sendmsg(fd, &msghdr, 0) != sizeof(iobuf)) {
 		ha_warning("Failed to transfer socket\n");
-		return 1;
+		return -1;
 	}
 
 	return 0;
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 93efcdc..c423b88 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -754,22 +754,24 @@
 }
 
 /* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
- * was totally stopped, or > 0 if correctly suspended.
+ * was totally stopped, or > 0 if correctly suspended. Note that inherited FDs
+ * are neither suspended nor resumed, we only enable/disable polling on them.
  */
 static int tcp_suspend_receiver(struct receiver *rx)
 {
 	const struct sockaddr sa = { .sa_family = AF_UNSPEC };
 	int ret;
 
-	/* we never do that with a shared FD otherwise we'd break it in the
+	/* We never disconnect a shared FD otherwise we'd break it in the
 	 * parent process and any possible subsequent worker inheriting it.
+	 * Thus we just stop receiving from it.
 	 */
 	if (rx->flags & RX_F_INHERITED)
-		return -1;
+		goto done;
 
 	if (connect(rx->fd, &sa, sizeof(sa)) < 0)
 		goto check_already_done;
-
+ done:
 	fd_stop_recv(rx->fd);
 	return 1;
 
@@ -790,7 +792,8 @@
 }
 
 /* Resume a receiver. Returns < 0 in case of failure, 0 if the receiver
- * was totally stopped, or > 0 if correctly suspended.
+ * was totally stopped, or > 0 if correctly resumed. Note that inherited FDs
+ * are neither suspended nor resumed, we only enable/disable polling on them.
  */
 static int tcp_resume_receiver(struct receiver *rx)
 {
@@ -799,7 +802,7 @@
 	if (rx->fd < 0)
 		return 0;
 
-	if (listen(rx->fd, listener_backlog(l)) == 0) {
+	if ((rx->flags & RX_F_INHERITED) || listen(rx->fd, listener_backlog(l)) == 0) {
 		fd_want_recv(l->rx.fd);
 		return 1;
 	}
diff --git a/src/proto_udp.c b/src/proto_udp.c
index f07170f..55ef649 100644
--- a/src/proto_udp.c
+++ b/src/proto_udp.c
@@ -175,7 +175,9 @@
  * suspend the receiver, we want it to stop receiving traffic, which means that
  * the socket must be unhashed from the kernel's socket table. The simple way
  * to do this is to connect to any address that is reachable and will not be
- * used by regular traffic, and a great one is reconnecting to self.
+ * used by regular traffic, and a great one is reconnecting to self. Note that
+ * inherited FDs are neither suspended nor resumed, we only enable/disable
+ * polling on them.
  */
 int udp_suspend_receiver(struct receiver *rx)
 {
@@ -189,14 +191,14 @@
 	 * parent process and any possible subsequent worker inheriting it.
 	 */
 	if (rx->flags & RX_F_INHERITED)
-		return -1;
+		goto done;
 
 	if (getsockname(rx->fd, (struct sockaddr *)&ss, &len) < 0)
 		return -1;
 
 	if (connect(rx->fd, (struct sockaddr *)&ss, len) < 0)
 		return -1;
-
+ done:
 	/* not necessary but may make debugging clearer */
 	fd_stop_recv(rx->fd);
 	return 1;
@@ -206,7 +208,8 @@
  * was totally stopped, or > 0 if correctly suspended.
  * The principle is to reverse the change above, we'll break the connection by
  * connecting to AF_UNSPEC. The association breaks and the socket starts to
- * receive from everywhere again.
+ * receive from everywhere again. Note that inherited FDs are neither suspended
+ * nor resumed, we only enable/disable polling on them.
  */
 int udp_resume_receiver(struct receiver *rx)
 {
@@ -215,7 +218,7 @@
 	if (rx->fd < 0)
 		return 0;
 
-	if (connect(rx->fd, &sa, sizeof(sa)) < 0)
+	if (!(rx->flags & RX_F_INHERITED) && connect(rx->fd, &sa, sizeof(sa)) < 0)
 		return -1;
 
 	fd_want_recv(rx->fd);
diff --git a/src/proto_uxdg.c b/src/proto_uxdg.c
index 7eb5a62..b4583c5 100644
--- a/src/proto_uxdg.c
+++ b/src/proto_uxdg.c
@@ -30,6 +30,7 @@
 #include <haproxy/protocol.h>
 #include <haproxy/sock.h>
 #include <haproxy/sock_unix.h>
+#include <haproxy/tools.h>
 
 static int uxdg_bind_listener(struct listener *listener, char *errmsg, int errlen);
 static void uxdg_enable_listener(struct listener *listener);
@@ -95,6 +96,7 @@
 
 	if (!(listener->rx.flags & RX_F_BOUND)) {
 		msg = "receiving socket not bound";
+		err |= ERR_FATAL | ERR_ALERT;
 		goto uxdg_return;
 	}
 
@@ -102,8 +104,11 @@
 
  uxdg_return:
 	if (msg && errlen) {
-		const char *path = ((struct sockaddr_un *)&listener->rx.addr)->sun_path;
-                snprintf(errmsg, errlen, "%s [%s]", msg, path);
+		char *path_str;
+
+		path_str = sa2str((struct sockaddr_storage *)&listener->rx.addr, 0, 0);
+		snprintf(errmsg, errlen, "%s for [%s]", msg, ((path_str) ? path_str : ""));
+		ha_free(&path_str);
 	}
 	return err;
 }
@@ -125,17 +130,20 @@
 }
 
 /* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
- * was totally stopped, or > 0 if correctly suspended. Nothing is done for
- * plain unix sockets since currently it's the new process which handles
- * the renaming. Abstract sockets are completely unbound and closed so
- * there's no need to stop the poller.
+ * was totally stopped, or > 0 if correctly suspended. For plain unix sockets
+ * we only disable the listener to prevent data from being handled but nothing
+ * more is done since currently it's the new process which handles the renaming.
+ * Abstract sockets are completely unbound and closed so there's no need to stop
+ * the poller.
  */
 static int uxdg_suspend_receiver(struct receiver *rx)
 {
         struct listener *l = LIST_ELEM(rx, struct listener *, rx);
 
-        if (((struct sockaddr_un *)&rx->addr)->sun_path[0])
+        if (((struct sockaddr_un *)&rx->addr)->sun_path[0]) {
+		uxdg_disable_listener(l);
                 return 1;
+	}
 
         /* Listener's lock already held. Call lockless version of
          * unbind_listener. */
diff --git a/src/proto_uxst.c b/src/proto_uxst.c
index 621eb39..121450b 100644
--- a/src/proto_uxst.c
+++ b/src/proto_uxst.c
@@ -59,6 +59,7 @@
 	.add            = default_add_listener,
 	.unbind         = default_unbind_listener,
 	.suspend        = default_suspend_listener,
+	.resume         = default_resume_listener,
 	.accept_conn    = sock_accept_conn,
 	.ctrl_init      = sock_conn_ctrl_init,
 	.ctrl_close     = sock_conn_ctrl_close,
@@ -120,6 +121,7 @@
 
 	if (!(listener->rx.flags & RX_F_BOUND)) {
 		msg = "receiving socket not bound";
+		err |= ERR_FATAL | ERR_ALERT;
 		goto uxst_return;
 	}
 
@@ -141,8 +143,11 @@
 	close(fd);
  uxst_return:
 	if (msg && errlen) {
-		const char *path = ((struct sockaddr_un *)&listener->rx.addr)->sun_path;
-		snprintf(errmsg, errlen, "%s [%s]", msg, path);
+		char *path_str;
+
+		path_str = sa2str((struct sockaddr_storage *)&listener->rx.addr, 0, 0);
+		snprintf(errmsg, errlen, "%s for [%s]", msg, ((path_str) ? path_str : ""));
+		ha_free(&path_str);
 	}
 	return err;
 }
@@ -164,17 +169,20 @@
 }
 
 /* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
- * was totally stopped, or > 0 if correctly suspended. Nothing is done for
- * plain unix sockets since currently it's the new process which handles
- * the renaming. Abstract sockets are completely unbound and closed so
- * there's no need to stop the poller.
+ * was totally stopped, or > 0 if correctly suspended. For plain unix sockets
+ * we only disable the listener to prevent data from being handled but nothing
+ * more is done since currently it's the new process which handles the renaming.
+ * Abstract sockets are completely unbound and closed so there's no need to stop
+ * the poller.
  */
 static int uxst_suspend_receiver(struct receiver *rx)
 {
 	struct listener *l = LIST_ELEM(rx, struct listener *, rx);
 
-	if (((struct sockaddr_un *)&rx->addr)->sun_path[0])
+	if (((struct sockaddr_un *)&rx->addr)->sun_path[0]) {
+		uxst_disable_listener(l);
 		return 1;
+	}
 
 	/* Listener's lock already held. Call lockless version of
 	 * unbind_listener. */
diff --git a/src/protocol.c b/src/protocol.c
index 767e03a..7e56b9a 100644
--- a/src/protocol.c
+++ b/src/protocol.c
@@ -86,8 +86,10 @@
 				else if (lerr & ERR_WARN)
 					ha_warning("Starting %s %s: %s\n",
 						   proxy_type_str(px), px->id, errmsg);
-				ha_free(&errmsg);
 			}
+			if (lerr != ERR_NONE)
+				ha_free(&errmsg);
+
 			if (lerr & ERR_ABORT)
 				break;
 
@@ -160,10 +162,10 @@
 	HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
 }
 
-/* pauses all listeners of all registered protocols. This is typically
+/* suspends all listeners of all registered protocols. This is typically
  * used on SIG_TTOU to release all listening sockets for the time needed to
- * try to bind a new process. The listeners enter LI_PAUSED. It returns
- * ERR_NONE, with ERR_FATAL on failure.
+ * try to bind a new process. The listeners enter LI_PAUSED or LI_ASSIGNED.
+ * It returns ERR_NONE, with ERR_FATAL on failure.
  */
 int protocol_pause_all(void)
 {
@@ -175,7 +177,7 @@
 	HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
 	list_for_each_entry(proto, &protocols, list) {
 		list_for_each_entry(listener, &proto->receivers, rx.proto_list)
-			if (!pause_listener(listener))
+			if (!suspend_listener(listener, 0, 0))
 				err |= ERR_FATAL;
 	}
 	HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
@@ -197,7 +199,7 @@
 	HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
 	list_for_each_entry(proto, &protocols, list) {
 		list_for_each_entry(listener, &proto->receivers, rx.proto_list)
-			if (!resume_listener(listener))
+			if (!resume_listener(listener, 0, 0))
 				err |= ERR_FATAL;
 	}
 	HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
diff --git a/src/proxy.c b/src/proxy.c
index 5d0f1f8..b1a829e 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -32,6 +32,7 @@
 #include <haproxy/global.h>
 #include <haproxy/http_ana.h>
 #include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
 #include <haproxy/listener.h>
 #include <haproxy/log.h>
 #include <haproxy/obj_type-t.h>
@@ -112,8 +113,8 @@
 	{ "http-no-delay",                PR_O2_NODELAY,   PR_CAP_FE|PR_CAP_BE, 0, PR_MODE_HTTP },
 	{ "http-use-htx",                 0,               PR_CAP_FE|PR_CAP_BE, 0, 0 }, // deprecated
 
-	{"h1-case-adjust-bogus-client",   PR_O2_H1_ADJ_BUGCLI, PR_CAP_FE, 0, PR_MODE_HTTP },
-	{"h1-case-adjust-bogus-server",   PR_O2_H1_ADJ_BUGSRV, PR_CAP_BE, 0, PR_MODE_HTTP },
+	{"h1-case-adjust-bogus-client",   PR_O2_H1_ADJ_BUGCLI, PR_CAP_FE, 0, 0 },
+	{"h1-case-adjust-bogus-server",   PR_O2_H1_ADJ_BUGSRV, PR_CAP_BE, 0, 0 },
 	{"disable-h2-upgrade",            PR_O2_NO_H2_UPGRADE, PR_CAP_FE, 0, PR_MODE_HTTP },
 	{ NULL, 0, 0, 0 }
 };
@@ -146,6 +147,9 @@
 	struct proxy_deinit_fct *pxdf;
 	struct server_deinit_fct *srvdf;
 
+	if (!p)
+		return;
+
 	free(p->conf.file);
 	free(p->id);
 	free(p->cookie_name);
@@ -218,16 +222,7 @@
 
 	list_for_each_entry_safe(rdr, rdrb, &p->redirect_rules, list) {
 		LIST_DELETE(&rdr->list);
-		if (rdr->cond) {
-			prune_acl_cond(rdr->cond);
-			free(rdr->cond);
-		}
-		free(rdr->rdr_str);
-		list_for_each_entry_safe(lf, lfb, &rdr->rdr_fmt, list) {
-			LIST_DELETE(&lf->list);
-			free(lf);
-		}
-		free(rdr);
+		http_free_redirect_rule(rdr);
 	}
 
 	list_for_each_entry_safe(log, logb, &p->logsrvs, list) {
@@ -308,6 +303,7 @@
 			bind_conf->xprt->destroy_bind_conf(bind_conf);
 		free(bind_conf->file);
 		free(bind_conf->arg);
+		free(bind_conf->settings.interface);
 		LIST_DELETE(&bind_conf->by_fe);
 		free(bind_conf);
 	}
@@ -1532,7 +1528,7 @@
 	char *tmpmsg = NULL;
 
 	/* set default values from the specified default proxy */
-	memcpy(&curproxy->defsrv, &defproxy->defsrv, sizeof(curproxy->defsrv));
+	srv_settings_cpy(&curproxy->defsrv, &defproxy->defsrv, 0);
 
 	curproxy->disabled = defproxy->disabled;
 	curproxy->options = defproxy->options;
@@ -1891,11 +1887,40 @@
 			 * to push to a new process and
 			 * we are free to flush the table.
 			 */
-			stktable_trash_oldest(p->table, p->table->current);
-			pool_gc(NULL);
+			int budget;
+			int cleaned_up;
+
+			/* We purposely enforce a budget limitation since we don't want
+			 * to spend too much time purging old entries
+			 *
+			 * This is known to cause the watchdog to occasionally trigger if
+			 * the table is huge and all entries become available for purge
+			 * at the same time
+			 *
+			 * Moreover, we must also anticipate the pool_gc() call which
+			 * will also be much slower if there is too much work at once
+			 */
+			budget = MIN(p->table->current, (1 << 15)); /* max: 32K */
+			cleaned_up = stktable_trash_oldest(p->table, budget);
+			if (cleaned_up) {
+				/* immediately release freed memory since we are stopping */
+				pool_gc(NULL);
+				if (cleaned_up > (budget / 2)) {
+					/* most of the budget was used to purge entries,
+					 * it is very likely that there are still trashable
+					 * entries in the table, reschedule a new cleanup
+					 * attempt ASAP
+					 */
+					t->expire = TICK_ETERNITY;
+					task_wakeup(t, TASK_WOKEN_RES);
+					return t;
+				}
+			}
 		}
 		if (p->table->current) {
-			/* some entries still remain, let's recheck in one second */
+			/* some entries still remain but are not yet available
+			 * for cleanup, let's recheck in one second
+			 */
 			next = tick_first(next, tick_add(now_ms, 1000));
 		}
 	}
@@ -2058,22 +2083,29 @@
 /* Temporarily disables listening on all of the proxy's listeners. Upon
  * success, the proxy enters the PR_PAUSED state. The function returns 0
  * if it fails, or non-zero on success.
+ * The function takes the proxy's lock so it's safe to
+ * call from multiple places.
  */
 int pause_proxy(struct proxy *p)
 {
 	struct listener *l;
 
+	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
+
 	if (!(p->cap & PR_CAP_FE) || p->disabled || !p->li_ready)
-		return 1;
+		goto end;
 
 	list_for_each_entry(l, &p->conf.listeners, by_fe)
-		pause_listener(l);
+		suspend_listener(l, 1, 0);
 
 	if (p->li_ready) {
 		ha_warning("%s %s failed to enter pause mode.\n", proxy_cap_str(p->cap), p->id);
 		send_log(p, LOG_WARNING, "%s %s failed to enter pause mode.\n", proxy_cap_str(p->cap), p->id);
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
 		return 0;
 	}
+end:
+	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
 	return 1;
 }
 
@@ -2082,7 +2114,8 @@
  * to be called when going down in order to release the ports so that another
  * process may bind to them. It must also be called on disabled proxies at the
  * end of start-up. If all listeners are closed, the proxy is set to the
- * PR_STSTOPPED state. The function takes the proxy's lock so it's safe to
+ * PR_STSTOPPED state.
+ * The function takes the proxy's lock so it's safe to
  * call from multiple places.
  */
 void stop_proxy(struct proxy *p)
@@ -2106,18 +2139,22 @@
  * listeners and tries to enable them all. If any of them fails, the proxy is
  * put back to the paused state. It returns 1 upon success, or zero if an error
  * is encountered.
+ * The function takes the proxy's lock so it's safe to
+ * call from multiple places.
  */
 int resume_proxy(struct proxy *p)
 {
 	struct listener *l;
 	int fail;
 
+	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
+
 	if (p->disabled || !p->li_paused)
-		return 1;
+		goto end;
 
 	fail = 0;
 	list_for_each_entry(l, &p->conf.listeners, by_fe) {
-		if (!resume_listener(l)) {
+		if (!resume_listener(l, 1, 0)) {
 			int port;
 
 			port = get_host_port(&l->rx.addr);
@@ -2141,9 +2178,13 @@
 	}
 
 	if (fail) {
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
+		/* pause_proxy will take PROXY_LOCK */
 		pause_proxy(p);
 		return 0;
 	}
+end:
+	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
 	return 1;
 }
 
@@ -2316,8 +2357,8 @@
 	} else {
 		es = HA_ATOMIC_XCHG(&proxy->invalid_req, es);
 	}
-	free(es);
 	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &proxy->lock);
+	ha_free(&es);
 }
 
 /* Configure all proxies which lack a maxconn setting to use the global one by
@@ -2561,7 +2602,7 @@
 			             px->uuid, px->id,
 			             srv->puid, srv->id, srv_addr,
 			             srv->cur_state, srv->cur_admin, srv->uweight, srv->iweight, (long int)srv_time_since_last_change,
-			             srv->check.status, srv->check.result, srv->check.health, srv->check.state, srv->agent.state,
+			             srv->check.status, srv->check.result, srv->check.health, srv->check.state & 0x0F, srv->agent.state & 0x1F,
 			             bk_f_forced_id, srv_f_forced_id, srv->hostname ? srv->hostname : "-", srv->svc_port,
 			             srvrecord ? srvrecord : "-", srv->use_ssl, srv->check.port,
 				     srv_check_addr, srv_agent_addr, srv->agent.port);
@@ -2816,7 +2857,7 @@
 	px->maxconn = v;
 	list_for_each_entry(l, &px->conf.listeners, by_fe) {
 		if (l->state == LI_FULL)
-			resume_listener(l);
+			relax_listener(l, 1, 0);
 	}
 
 	if (px->maxconn > px->feconn)
@@ -2871,9 +2912,8 @@
 	if (!px->li_ready)
 		return cli_msg(appctx, LOG_NOTICE, "All sockets are already disabled.\n");
 
-	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+	/* pause_proxy will take PROXY_LOCK */
 	ret = pause_proxy(px);
-	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
 
 	if (!ret)
 		return cli_err(appctx, "Failed to pause frontend, check logs for precise cause.\n");
@@ -2903,9 +2943,8 @@
 	if (px->li_ready == px->li_all)
 		return cli_msg(appctx, LOG_NOTICE, "All sockets are already enabled.\n");
 
-	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+	/* resume_proxy will take PROXY_LOCK */
 	ret = resume_proxy(px);
-	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
 
 	if (!ret)
 		return cli_err(appctx, "Failed to resume frontend, check logs for precise cause (port conflict?).\n");
diff --git a/src/regex.c b/src/regex.c
index 45a7e90..19c7eda 100644
--- a/src/regex.c
+++ b/src/regex.c
@@ -372,12 +372,16 @@
 	 * We end if it is an error not related to lack of JIT support
 	 * in a case of JIT support missing pcre2_jit_compile is "no-op"
 	 */
-	if (jit < 0 && jit != PCRE2_ERROR_JIT_BADOPTION) {
-		pcre2_code_free(regex->reg);
-		memprintf(err, "regex '%s' jit compilation failed", str);
-		goto out_fail_alloc;
-	} else {
+	if (!jit)
 		regex->mfn = &pcre2_jit_match;
+	else {
+		if (jit != PCRE2_ERROR_JIT_BADOPTION) {
+			pcre2_code_free(regex->reg);
+			memprintf(err, "regex '%s' jit compilation failed", str);
+			goto out_fail_alloc;
+		}
+		else
+			regex->mfn = &pcre2_match;
 	}
 #endif
 
diff --git a/src/resolvers.c b/src/resolvers.c
index 52a1ced..b0bc6de 100644
--- a/src/resolvers.c
+++ b/src/resolvers.c
@@ -292,11 +292,11 @@
 	next = tick_add(now_ms, resolvers->timeout.resolve);
 	if (!LIST_ISEMPTY(&resolvers->resolutions.curr)) {
 		res  = LIST_NEXT(&resolvers->resolutions.curr, struct resolv_resolution *, list);
-		next = MIN(next, tick_add(res->last_query, resolvers->timeout.retry));
+		next = tick_first(next, tick_add(res->last_query, resolvers->timeout.retry));
 	}
 
 	list_for_each_entry(res, &resolvers->resolutions.wait, list)
-		next = MIN(next, tick_add(res->last_resolution, resolv_resolution_timeout(res)));
+		next = tick_first(next, tick_add(res->last_resolution, resolv_resolution_timeout(res)));
 
 	resolvers->t->expire = next;
 	task_queue(resolvers->t);
@@ -461,8 +461,17 @@
 	 * valid */
 	exp = tick_add(res->last_resolution, resolvers->hold.valid);
 	if (resolvers->t && (res->status != RSLV_STATUS_VALID ||
-	    !tick_isset(res->last_resolution) || tick_is_expired(exp, now_ms)))
+	    !tick_isset(res->last_resolution) || tick_is_expired(exp, now_ms))) {
+		/* If the resolution is not running and the requester is a
+		 * server, reset the resoltion timer to force a quick
+		 * resolution.
+		 */
+		if (res->step == RSLV_STEP_NONE &&
+		    (obj_type(req->owner) == OBJ_TYPE_SERVER ||
+		     obj_type(req->owner) == OBJ_TYPE_SRVRQ))
+			res->last_resolution = TICK_ETERNITY;
 		task_wakeup(resolvers->t, TASK_WOKEN_OTHER);
+	}
 
 	leave_resolver_code();
 }
@@ -595,6 +604,11 @@
 /* Add a resolution to the death_row. */
 static void abort_resolution(struct resolv_resolution *res)
 {
+	/* Remove the resolution from query_ids tree and from any resolvers list */
+	eb32_delete(&res->qid);
+	res->query_id = 0;
+	res->qid.key   = 0;
+
 	LIST_DEL_INIT(&res->list);
 	LIST_APPEND(&death_row, &res->list);
 }
@@ -805,6 +819,9 @@
 				srv->flags &= ~SRV_F_NO_RESOLUTION;
 				srv->srvrq_check->expire = TICK_ETERNITY;
 
+				srv->svc_port = item->port;
+				srv->flags   &= ~SRV_F_MAPPORTS;
+
 				/* Check if an Additional Record is associated to this SRV record.
 				 * Perform some sanity checks too to ensure the record can be used.
 				 * If all fine, we simply pick up the IP address found and associate
@@ -859,9 +876,6 @@
 				/* Update the server status */
 				srvrq_update_srv_status(srv, (srv->addr.ss_family != AF_INET && srv->addr.ss_family != AF_INET6));
 
-				srv->svc_port = item->port;
-				srv->flags   &= ~SRV_F_MAPPORTS;
-
 				if (!srv->resolv_opts.ignore_weight) {
 					char weight[9];
 					int ha_weight;
@@ -2430,7 +2444,6 @@
 	struct resolv_requester  *req, *reqback;
 	struct resolv_srvrq    *srvrq, *srvrqback;
 
-	enter_resolver_code();
 	list_for_each_entry_safe(resolvers, resolversback, &sec_resolvers, list) {
 		list_for_each_entry_safe(ns, nsback, &resolvers->nameservers, list) {
 			free(ns->id);
@@ -2463,7 +2476,7 @@
 				LIST_DEL_INIT(&req->list);
 				pool_free(resolv_requester_pool, req);
 			}
-			abort_resolution(res);
+			resolv_free_resolution(res);
 		}
 
 		list_for_each_entry_safe(res, resback, &resolvers->resolutions.wait, list) {
@@ -2471,7 +2484,7 @@
 				LIST_DEL_INIT(&req->list);
 				pool_free(resolv_requester_pool, req);
 			}
-			abort_resolution(res);
+			resolv_free_resolution(res);
 		}
 
 		free_proxy(resolvers->px);
@@ -2488,13 +2501,11 @@
 		LIST_DEL_INIT(&srvrq->list);
 		free(srvrq);
 	}
-
-	leave_resolver_code();
 }
 
 /* Finalizes the DNS configuration by allocating required resources and checking
  * live parameters.
- * Returns 0 on success, ERR_* flags otherwise.
+ * Returns 0 on success, 1 on error.
  */
 static int resolvers_finalize_config(void)
 {
@@ -2549,6 +2560,13 @@
 	for (px = proxies_list; px; px = px->next) {
 		struct server *srv;
 
+		if (px->disabled) {
+			/* must not run and will not work anyway since
+			 * nothing in the proxy is initialized.
+			 */
+			continue;
+		}
+
 		for (srv = px->srv; srv; srv = srv->next) {
 			struct resolvers *resolvers;
 
@@ -2598,11 +2616,11 @@
 		goto err;
 
 	leave_resolver_code();
-	return err_code;
+	return 0;
   err:
 	leave_resolver_code();
 	resolvers_deinit();
-	return err_code;
+	return 1;
 
 }
 
@@ -2925,7 +2943,12 @@
 		if (resolution->step == RSLV_STEP_RUNNING)
 			goto yield;
 		if (resolution->step == RSLV_STEP_NONE) {
-			/* We update the variable only if we have a valid response. */
+			/* We update the variable only if we have a valid
+			 * response. If the response was not received yet, we
+			 * must yield.
+			 */
+			if (resolution->status == RSLV_STATUS_NONE)
+				goto yield;
 			if (resolution->status == RSLV_STATUS_VALID) {
 				struct sample smp;
 				short ip_sin_family = 0;
@@ -3183,7 +3206,7 @@
 	px->conn_retries = 1;
 	px->timeout.server = TICK_ETERNITY;
 	px->timeout.client = TICK_ETERNITY;
-	px->timeout.connect = TICK_ETERNITY;
+	px->timeout.connect = 1000; // by default same as timeout.resolve
 	px->accept = NULL;
 	px->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON;
 	px->bind_proc = 0; /* will be filled by users */
@@ -3604,8 +3627,11 @@
 			}
 			if (args[1][2] == 't')
 				curr_resolvers->timeout.retry = tout;
-			else
+			else {
 				curr_resolvers->timeout.resolve = tout;
+				curr_resolvers->px->timeout.connect = tout;
+			}
+
 		}
 		else {
 			ha_alert("parsing [%s:%d] : '%s' expects 'retry' or 'resolve' and <time> as arguments got '%s'.\n",
diff --git a/src/ring.c b/src/ring.c
index 4f1bca2..977b58e 100644
--- a/src/ring.c
+++ b/src/ring.c
@@ -91,7 +91,6 @@
 		b_getblk(&ring->buf, area, ring->buf.data, 0);
 		area = HA_ATOMIC_XCHG(&ring->buf.area, area);
 		ring->buf.size = size;
-		ring->buf.head = 0;
 	}
 
 	HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
@@ -280,6 +279,7 @@
 	struct ring *ring = appctx->ctx.cli.p0;
 	struct buffer *buf = &ring->buf;
 	size_t ofs = appctx->ctx.cli.o0;
+	size_t last_ofs;
 	uint64_t msg_len;
 	size_t len, cnt;
 	int ret;
@@ -353,6 +353,7 @@
 
 	HA_ATOMIC_INC(b_peek(buf, ofs));
 	ofs += ring->ofs;
+	last_ofs = ring->ofs;
 	appctx->ctx.cli.o0 = ofs;
 	HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &ring->lock);
 
@@ -364,8 +365,16 @@
 			/* let's be woken up once new data arrive */
 			HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
 			LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+			ofs = ring->ofs;
 			HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
-			si_rx_endp_done(si);
+			if (ofs != last_ofs) {
+				/* more data was added into the ring between the
+				 * unlock and the lock, and the writer might not
+				 * have seen us. We need to reschedule a read.
+				 */
+				si_rx_endp_more(si);
+			} else
+				si_rx_endp_done(si);
 			ret = 0;
 		}
 		/* always drain all the request */
diff --git a/src/sample.c b/src/sample.c
index 9b16621..a294f80 100644
--- a/src/sample.c
+++ b/src/sample.c
@@ -1528,7 +1528,7 @@
 
  done:
 	line = ist2(buf->area, buf->data);
-	sink_write(sink, &line, 1, 0, 0, NULL);
+	sink_write(sink, 0, &line, 1, 0, 0, NULL);
  end:
 	free_trash_chunk(buf);
 	return 1;
@@ -2721,13 +2721,14 @@
 	if (!smp->data.u.str.data)
 		return 1;
 
-	smp->data.u.str.area = start;
 
 	/* Compute remaining size if needed
            Note: smp->data.u.str.size cannot be set to 0 */
 	if (smp->data.u.str.size)
 		smp->data.u.str.size -= start - smp->data.u.str.area;
 
+	smp->data.u.str.area = start;
+
 	return 1;
 }
 
@@ -2963,12 +2964,12 @@
 	 * +------+----------+----------+
 	 */
 	if ((a ^ b) >= 0) {
-		/* signs are different. */
+		/* signs are same. */
 		if (a < 0) {
 			if (LLONG_MIN - a > b)
 				return LLONG_MIN;
 		}
-		if (LLONG_MAX - a < b)
+		else if (LLONG_MAX - a < b)
 			return LLONG_MAX;
 	}
 	return a + b;
diff --git a/src/server.c b/src/server.c
index 22d6202..d59821a 100644
--- a/src/server.c
+++ b/src/server.c
@@ -2012,8 +2012,7 @@
 		         msg, quote, token, quote);
 }
 
-static void srv_conn_src_sport_range_cpy(struct server *srv,
-                                            struct server *src)
+static void srv_conn_src_sport_range_cpy(struct server *srv, const struct server *src)
 {
 	int range_sz;
 
@@ -2034,7 +2033,7 @@
 /*
  * Copy <src> server connection source settings to <srv> server everything needed.
  */
-static void srv_conn_src_cpy(struct server *srv, struct server *src)
+static void srv_conn_src_cpy(struct server *srv, const struct server *src)
 {
 	srv->conn_src.opts = src->conn_src.opts;
 	srv->conn_src.source_addr = src->conn_src.source_addr;
@@ -2060,7 +2059,7 @@
  * everything needed.
  */
 #if defined(USE_OPENSSL)
-static void srv_ssl_settings_cpy(struct server *srv, struct server *src)
+static void srv_ssl_settings_cpy(struct server *srv, const struct server *src)
 {
 	/* <src> is the current proxy's default server and SSL is enabled */
 	BUG_ON(src->ssl_ctx.ctx != NULL); /* the SSL_CTX must never be initialized in a default-server */
@@ -2169,7 +2168,7 @@
  * <srv_tmpl> distinguishes these two cases (must be 1 if <srv> is a template,
  * 0 if not).
  */
-static void srv_settings_cpy(struct server *srv, struct server *src, int srv_tmpl)
+void srv_settings_cpy(struct server *srv, const struct server *src, int srv_tmpl)
 {
 	/* Connection source settings copy */
 	srv_conn_src_cpy(srv, src);
@@ -2280,6 +2279,7 @@
 	if (srv_tmpl)
 		srv->srvrq = src->srvrq;
 
+	srv->netns                    = src->netns;
 	srv->check.via_socks4         = src->check.via_socks4;
 	srv->socks4_addr              = src->socks4_addr;
 }
@@ -4947,6 +4947,7 @@
 	struct proxy *px = s->proxy;
 	int prev_srv_count = s->proxy->srv_bck + s->proxy->srv_act;
 	int srv_was_stopping = (s->cur_state == SRV_ST_STOPPING) || (s->cur_admin & SRV_ADMF_DRAIN);
+	enum srv_state srv_prev_state = s->cur_state;
 	int log_level;
 	struct buffer *tmptrash = NULL;
 
@@ -4961,7 +4962,6 @@
 		s->next_admin = s->cur_admin;
 
 		if ((s->cur_state != SRV_ST_STOPPED) && (s->next_state == SRV_ST_STOPPED)) {
-			s->last_change = now.tv_sec;
 			if (s->proxy->lbprm.set_server_status_down)
 				s->proxy->lbprm.set_server_status_down(s);
 
@@ -4993,13 +4993,9 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
-
 			s->counters.down_trans++;
 		}
 		else if ((s->cur_state != SRV_ST_STOPPING) && (s->next_state == SRV_ST_STOPPING)) {
-			s->last_change = now.tv_sec;
 			if (s->proxy->lbprm.set_server_status_down)
 				s->proxy->lbprm.set_server_status_down(s);
 
@@ -5023,22 +5019,10 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
 		}
 		else if (((s->cur_state != SRV_ST_RUNNING) && (s->next_state == SRV_ST_RUNNING))
 			 || ((s->cur_state != SRV_ST_STARTING) && (s->next_state == SRV_ST_STARTING))) {
-			if (s->proxy->srv_bck == 0 && s->proxy->srv_act == 0) {
-				if (s->proxy->last_change < now.tv_sec)		// ignore negative times
-					s->proxy->down_time += now.tv_sec - s->proxy->last_change;
-				s->proxy->last_change = now.tv_sec;
-			}
-
-			if (s->cur_state == SRV_ST_STOPPED && s->last_change < now.tv_sec)	// ignore negative times
-				s->down_time += now.tv_sec - s->last_change;
 
-			s->last_change = now.tv_sec;
 			if (s->next_state == SRV_ST_STARTING && s->warmup)
 				task_schedule(s->warmup, tick_add(now_ms, MS_TO_TICKS(MAX(1000, s->slowstart / 20))));
 
@@ -5084,9 +5068,6 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
 		}
 		else if (s->cur_eweight != s->next_eweight) {
 			/* now propagate the status change to any LB algorithms */
@@ -5100,9 +5081,6 @@
 				if (px->lbprm.set_server_status_down)
 					px->lbprm.set_server_status_down(s);
 			}
-
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
 		}
 
 		s->next_admin = next_admin;
@@ -5140,13 +5118,9 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-			/* commit new admin status */
-
-			s->cur_admin = s->next_admin;
 		}
 		else {	/* server was still running */
 			check->health = 0; /* failure */
-			s->last_change = now.tv_sec;
 
 			s->next_state = SRV_ST_STOPPED;
 			if (s->proxy->lbprm.set_server_status_down)
@@ -5181,9 +5155,6 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
-
 			s->counters.down_trans++;
 		}
 	}
@@ -5203,7 +5174,6 @@
 		 * that the server might still be in drain mode, which is naturally dealt
 		 * with by the lower level functions.
 		 */
-
 		if (s->check.state & CHK_ST_ENABLED) {
 			s->check.state &= ~CHK_ST_PAUSED;
 			check->health = check->rise; /* start OK but check immediately */
@@ -5216,7 +5186,6 @@
 				s->next_state = SRV_ST_STOPPING;
 			}
 			else {
-				s->last_change = now.tv_sec;
 				s->next_state = SRV_ST_STARTING;
 				if (s->slowstart > 0) {
 					if (s->warmup)
@@ -5274,11 +5243,6 @@
 				px->lbprm.set_server_status_down(s);
 		}
 
-		if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-			set_backend_down(s->proxy);
-		else if (!prev_srv_count && (s->proxy->srv_bck || s->proxy->srv_act))
-			s->proxy->last_change = now.tv_sec;
-
 		/* If the server is set with "on-marked-up shutdown-backup-sessions",
 		 * and it's not a backup server and its effective weight is > 0,
 		 * then it can accept new connections, so we shut down all streams
@@ -5348,15 +5312,12 @@
 			}
 		}
 		/* don't report anything when leaving drain mode and remaining in maintenance */
-
-		s->cur_admin = s->next_admin;
 	}
 
 	if (!(s->next_admin & SRV_ADMF_MAINT)) {
 		if (!(s->cur_admin & SRV_ADMF_DRAIN) && (s->next_admin & SRV_ADMF_DRAIN)) {
 			/* drain state is applied only if not yet in maint */
 
-			s->last_change = now.tv_sec;
 			if (px->lbprm.set_server_status_down)
 				px->lbprm.set_server_status_down(s);
 
@@ -5384,26 +5345,14 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-
-			if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
-				set_backend_down(s->proxy);
 		}
 		else if ((s->cur_admin & SRV_ADMF_DRAIN) && !(s->next_admin & SRV_ADMF_DRAIN)) {
 			/* OK completely leaving drain mode */
-			if (s->proxy->srv_bck == 0 && s->proxy->srv_act == 0) {
-				if (s->proxy->last_change < now.tv_sec)         // ignore negative times
-					s->proxy->down_time += now.tv_sec - s->proxy->last_change;
-				s->proxy->last_change = now.tv_sec;
-			}
-
-			if (s->last_change < now.tv_sec)                        // ignore negative times
-				s->down_time += now.tv_sec - s->last_change;
-			s->last_change = now.tv_sec;
 			server_recalc_eweight(s, 0);
 
 			tmptrash = alloc_trash_chunk();
 			if (tmptrash) {
-				if (!(s->next_admin & SRV_ADMF_FDRAIN)) {
+				if (s->cur_admin & SRV_ADMF_FDRAIN) {
 					chunk_printf(tmptrash,
 						     "%sServer %s/%s is %s (leaving forced drain)",
 						     s->flags & SRV_F_BACKUP ? "Backup " : "",
@@ -5467,15 +5416,38 @@
 				free_trash_chunk(tmptrash);
 				tmptrash = NULL;
 			}
-
-			/* commit new admin status */
-
-			s->cur_admin = s->next_admin;
 		}
 	}
 
 	/* Re-set log strings to empty */
 	*s->adm_st_chg_cause = 0;
+
+	/* explicitly commit state changes (even if it was already applied implicitly
+	 * by some lb state change function), so we don't miss anything
+	 */
+	srv_lb_commit_status(s);
+
+	/* check if server stats must be updated due the the server state change */
+	if (srv_prev_state != s->cur_state) {
+		if (srv_prev_state == SRV_ST_STOPPED) {
+			/* server was down and no longer is */
+			if (s->last_change < now.tv_sec)                        // ignore negative times
+				s->down_time += now.tv_sec - s->last_change;
+		}
+		s->last_change = now.tv_sec;
+	}
+
+	/* check if backend stats must be updated due to the server state change */
+	if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
+		set_backend_down(s->proxy); /* backend going down */
+	else if (!prev_srv_count && (s->proxy->srv_bck || s->proxy->srv_act)) {
+		/* backend was down and is back up again:
+		 * no helper function, updating last_change and backend downtime stats
+		 */
+		if (s->proxy->last_change < now.tv_sec)         // ignore negative times
+			s->proxy->down_time += now.tv_sec - s->proxy->last_change;
+		s->proxy->last_change = now.tv_sec;
+	}
 }
 
 struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state)
@@ -5593,7 +5565,7 @@
 		if (srv->est_need_conns < srv->max_used_conns)
 			srv->est_need_conns = srv->max_used_conns;
 
-		srv->max_used_conns = srv->curr_used_conns;
+		HA_ATOMIC_STORE(&srv->max_used_conns, srv->curr_used_conns);
 
 		if (exceed_conns <= 0)
 			goto remove;
diff --git a/src/server_state.c b/src/server_state.c
index 18d9cd6..4ef5167 100644
--- a/src/server_state.c
+++ b/src/server_state.c
@@ -322,7 +322,7 @@
 			srv_adm_set_drain(srv);
 	}
 
-	srv->last_change = date.tv_sec - srv_last_time_change;
+	srv->last_change = now.tv_sec - srv_last_time_change;
 	srv->check.status = srv_check_status;
 	srv->check.result = srv_check_result;
 
diff --git a/src/signal.c b/src/signal.c
index 5ec7142..3d7a9a8 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -56,6 +56,9 @@
 	signal_state[sig].count++;
 	if (sig)
 		signal(sig, signal_handler); /* re-arm signal */
+
+	/* If the thread is TH_FL_SLEEPING we need to wake it */
+	wake_thread(tid);
 }
 
 /* Call handlers of all pending signals and clear counts and queue length. The
diff --git a/src/sink.c b/src/sink.c
index 0f421cc..4d811eb 100644
--- a/src/sink.c
+++ b/src/sink.c
@@ -35,6 +35,9 @@
 
 struct list sink_list = LIST_HEAD_INIT(sink_list);
 
+/* sink proxies list */
+struct proxy *sink_proxies_list;
+
 struct sink *cfg_sink;
 
 struct sink *sink_find(const char *name)
@@ -159,10 +162,13 @@
  * array <msg> to sink <sink>. Formatting according to the sink's preference is
  * done here. Lost messages are NOT accounted for. It is preferable to call
  * sink_write() instead which will also try to emit the number of dropped
- * messages when there are any. It returns >0 if it could write anything,
- * <=0 otherwise.
+ * messages when there are any. It will stop writing at <maxlen> instead of
+ * sink->maxlen if <maxlen> is positive and inferior to sink->maxlen.
+ *
+ * It returns >0 if it could write anything, <=0 otherwise.
  */
- ssize_t __sink_write(struct sink *sink, const struct ist msg[], size_t nmsg,
+ ssize_t __sink_write(struct sink *sink, size_t maxlen,
+	             const struct ist msg[], size_t nmsg,
 	             int level, int facility, struct ist *metadata)
  {
 	struct ist *pfx = NULL;
@@ -174,11 +180,13 @@
 	pfx = build_log_header(sink->fmt, level, facility, metadata, &npfx);
 
 send:
+	if (!maxlen)
+		maxlen = ~0;
 	if (sink->type == SINK_TYPE_FD) {
-		return fd_write_frag_line(sink->ctx.fd, sink->maxlen, pfx, npfx, msg, nmsg, 1);
+		return fd_write_frag_line(sink->ctx.fd, MIN(maxlen, sink->maxlen), pfx, npfx, msg, nmsg, 1);
 	}
 	else if (sink->type == SINK_TYPE_BUFFER) {
-		return ring_write(sink->ctx.ring, sink->maxlen, pfx, npfx, msg, nmsg);
+		return ring_write(sink->ctx.ring, MIN(maxlen, sink->maxlen), pfx, npfx, msg, nmsg);
 	}
 	return 0;
 }
@@ -220,7 +228,7 @@
 			metadata[LOG_META_PID] = ist2(pidstr, strlen(pidstr));
 		}
 
-		if (__sink_write(sink, msgvec, 1, LOG_NOTICE, facility, metadata) <= 0)
+		if (__sink_write(sink, 0, msgvec, 1, LOG_NOTICE, facility, metadata) <= 0)
 			return 0;
 		/* success! */
 		HA_ATOMIC_SUB(&sink->ctx.dropped, dropped);
@@ -279,7 +287,7 @@
 void sink_setup_proxy(struct proxy *px)
 {
 	px->last_change = now.tv_sec;
-	px->cap = PR_CAP_FE | PR_CAP_BE;
+	px->cap = PR_CAP_BE;
 	px->maxconn = 0;
 	px->conn_retries = 1;
 	px->timeout.server = TICK_ETERNITY;
@@ -288,6 +296,8 @@
 	px->accept = NULL;
 	px->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON | PR_O2_SMARTACC;
 	px->bind_proc = 0; /* will be filled by users */
+	px->next = sink_proxies_list;
+	sink_proxies_list = px;
 }
 
 /*
@@ -302,7 +312,7 @@
 	struct ring *ring = sink->ctx.ring;
 	struct buffer *buf = &ring->buf;
 	uint64_t msg_len;
-	size_t len, cnt, ofs;
+	size_t len, cnt, ofs, last_ofs;
 	int ret = 0;
 
 	/* if stopping was requested, close immediately */
@@ -406,6 +416,7 @@
 		HA_ATOMIC_INC(b_peek(buf, ofs));
 		ofs += ring->ofs;
 		sft->ofs = ofs;
+		last_ofs = ring->ofs;
 	}
 	HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &ring->lock);
 
@@ -413,8 +424,16 @@
 		/* let's be woken up once new data arrive */
 		HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
 		LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+		ofs = ring->ofs;
 		HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
-		si_rx_endp_done(si);
+		if (ofs != last_ofs) {
+			/* more data was added into the ring between the
+			 * unlock and the lock, and the writer might not
+			 * have seen us. We need to reschedule a read.
+			 */
+			si_rx_endp_more(si);
+		} else
+			si_rx_endp_done(si);
 	}
 	HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
 
@@ -755,7 +774,7 @@
 	size_t size = BUFSIZE;
 	struct proxy *p;
 
-	if (strcmp(args[0], "ring") == 0) { /* new peers section */
+	if (strcmp(args[0], "ring") == 0) { /* new ring section */
 		if (!*args[1]) {
 			ha_alert("parsing [%s:%d] : missing ring name.\n", file, linenum);
 			err_code |= ERR_ALERT | ERR_FATAL;
@@ -799,6 +818,12 @@
 		cfg_sink->forward_px = p;
 	}
 	else if (strcmp(args[0], "size") == 0) {
+		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)) {
+			ha_alert("parsing [%s:%d] : 'size' directive not usable with this type of sink.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
 		size = atol(args[1]);
 		if (!size) {
 			ha_alert("parsing [%s:%d] : invalid size '%s' for new sink buffer.\n", file, linenum, args[1]);
@@ -806,14 +831,27 @@
 			goto err;
 		}
 
+		if (size < cfg_sink->ctx.ring->buf.size) {
+			ha_warning("parsing [%s:%d] : ignoring new size '%llu' that is smaller than current size '%llu' for ring '%s'.\n",
+				   file, linenum, (ullong)size, (ullong)cfg_sink->ctx.ring->buf.size, cfg_sink->name);
+			err_code |= ERR_WARN;
+			goto err;
+		}
+
-		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)
-		              || !ring_resize(cfg_sink->ctx.ring, size)) {
-			ha_alert("parsing [%s:%d] : fail to set sink buffer size '%s'.\n", file, linenum, args[1]);
+		if (!ring_resize(cfg_sink->ctx.ring, size)) {
+			ha_alert("parsing [%s:%d] : fail to set sink buffer size '%llu' for ring '%s'.\n", file, linenum,
+				 (ullong)cfg_sink->ctx.ring->buf.size, cfg_sink->name);
 			err_code |= ERR_ALERT | ERR_FATAL;
 			goto err;
 		}
 	}
 	else if (strcmp(args[0],"server") == 0) {
+		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)) {
+			ha_alert("parsing [%s:%d] : unable to create server '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
 		err_code |= parse_server(file, linenum, args, cfg_sink->forward_px, NULL,
 		                         SRV_PARSE_PARSE_ADDR|SRV_PARSE_INITIAL_RESOLVE);
 	}
@@ -854,7 +892,7 @@
 				err_code |= ERR_ALERT | ERR_FATAL;
 				goto err;
 			}
-                        if (args[1][2] == 'c')
+                        if (args[1][0] == 'c')
                                 cfg_sink->forward_px->timeout.connect = tout;
                         else
                                 cfg_sink->forward_px->timeout.server = tout;
@@ -956,6 +994,10 @@
 	p->conf.args.file = p->conf.file = strdup(logsrv->conf.file);
 	p->conf.args.line = p->conf.line = logsrv->conf.line;
 
+	/* Set default connect and server timeout */
+	p->timeout.connect = MS_TO_TICKS(1000);
+	p->timeout.server = MS_TO_TICKS(5000);
+
 	/* allocate a new server to forward messages
 	 * from ring buffer
 	 */
@@ -986,8 +1028,8 @@
 	/* the servers are linked backwards
 	 * first into proxy
 	 */
-	p->srv = srv;
 	srv->next = p->srv;
+	p->srv = srv;
 
 	/* allocate sink_forward_target descriptor */
 	sft = calloc(1, sizeof(*sft));
@@ -1037,6 +1079,9 @@
 
 	return sink;
 error:
+	if (srv)
+		free_server(srv);
+
 	if (p) {
 		if (p->id)
 			free(p->id);
@@ -1046,16 +1091,6 @@
 		free(p);
 	}
 
-	if (srv) {
-		if (srv->id)
-			free(srv->id);
-		if (srv->conf.file)
-			free((void *)srv->conf.file);
-		if (srv->per_thr)
-		       free(srv->per_thr);
-		free(srv);
-	}
-
 	if (sft)
 		free(sft);
 
@@ -1088,7 +1123,7 @@
 			ha_warning("ring '%s' event max length '%u' exceeds size, forced to size '%lu'.\n",
 			           cfg_sink->name, cfg_sink->maxlen, (unsigned long)b_size(&cfg_sink->ctx.ring->buf));
 			cfg_sink->maxlen = b_size(&cfg_sink->ctx.ring->buf);
-			err_code |= ERR_ALERT;
+			err_code |= ERR_WARN;
 		}
 
 		/* prepare forward server descriptors */
@@ -1096,13 +1131,6 @@
 			srv = cfg_sink->forward_px->srv;
 			while (srv) {
 				struct sink_forward_target *sft;
-				/* init ssl if needed */
-				if (srv->use_ssl == 1 && xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv) {
-					if (xprt_get(XPRT_SSL)->prepare_srv(srv)) {
-						ha_alert("unable to prepare SSL for server '%s' in ring '%s'.\n", srv->id, cfg_sink->name);
-						err_code |= ERR_ALERT | ERR_FATAL;
-					}
-				}
 
 				/* allocate sink_forward_target descriptor */
 				sft = calloc(1, sizeof(*sft));
@@ -1121,11 +1149,16 @@
 				if (!ring_attach(cfg_sink->ctx.ring)) {
 					ha_alert("server '%s' sets too many watchers > 255 on ring '%s'.\n", srv->id, cfg_sink->name);
 					err_code |= ERR_ALERT | ERR_FATAL;
+					ha_free(&sft);
+					break;
 				}
 				cfg_sink->sft = sft;
 				srv = srv->next;
 			}
-			sink_init_forward(cfg_sink);
+			if (sink_init_forward(cfg_sink) == 0) {
+				ha_alert("error when trying to initialize sink buffer forwarding.\n");
+				err_code |= ERR_ALERT | ERR_FATAL;
+			}
 		}
 	}
 	cfg_sink = NULL;
@@ -1254,13 +1287,21 @@
 static void sink_deinit()
 {
 	struct sink *sink, *sb;
+	struct sink_forward_target *sft_next;
 
 	list_for_each_entry_safe(sink, sb, &sink_list, sink_list) {
 		if (sink->type == SINK_TYPE_BUFFER)
 			ring_free(sink->ctx.ring);
 		LIST_DELETE(&sink->sink_list);
+		task_destroy(sink->forward_task);
+		free_proxy(sink->forward_px);
 		free(sink->name);
 		free(sink->desc);
+		while (sink->sft) {
+			sft_next = sink->sft->next;
+			free(sink->sft);
+			sink->sft = sft_next;
+		}
 		free(sink);
 	}
 }
diff --git a/src/sock_inet.c b/src/sock_inet.c
index fb69981..7523617 100644
--- a/src/sock_inet.c
+++ b/src/sock_inet.c
@@ -313,6 +313,24 @@
 		}
 	}
 
+	if (ext && fd < global.maxsock && fdtab[fd].owner) {
+		/* This FD was already bound so this means that it was already
+		 * known and registered before parsing, hence it's an inherited
+		 * FD. The only reason why it's already known here is that it
+		 * has been registered multiple times (multiple listeners on the
+		 * same, or a "shards" directive on the line). There cannot be
+		 * multiple listeners on one FD but at least we can create a
+		 * new one from the original one. We won't reconfigure it,
+		 * however, as this was already done for the first one.
+		 */
+		fd = dup(fd);
+		if (fd == -1) {
+			err |= ERR_RETRYABLE | ERR_ALERT;
+			memprintf(errmsg, "cannot dup() receiving socket (%s)", strerror(errno));
+			goto bind_return;
+		}
+	}
+
 	if (fd >= global.maxsock) {
 		err |= ERR_FATAL | ERR_ABORT | ERR_ALERT;
 		memprintf(errmsg, "not enough free sockets (raise '-n' parameter)");
diff --git a/src/sock_unix.c b/src/sock_unix.c
index 9913f4f..d62c164 100644
--- a/src/sock_unix.c
+++ b/src/sock_unix.c
@@ -94,7 +94,21 @@
 
 	/* Now we have a difference. It's OK if they are within or after a
 	 * sequence of digits following a dot, and are followed by ".tmp".
+	 *
+	 * make sure to perform the check against tempname if the compared
+	 * string is in "final" format (does not end with ".XXXX.tmp").
+	 *
+	 * Examples:
+	 *     /tmp/test matches with /tmp/test.1822.tmp
+	 *     /tmp/test.1822.tmp matches with /tmp/test.XXXX.tmp
 	 */
+	if (au->sun_path[idx] == 0 || bu->sun_path[idx] == 0) {
+		if (au->sun_path[idx] == '.' || bu->sun_path[idx] == '.')
+			dot = idx; /* try to match against temp path */
+		else
+			return -1; /* invalid temp path */
+	}
+
 	if (!dot)
 		return -1;
 
@@ -227,6 +241,24 @@
 	}
 
  fd_ready:
+	if (ext && fd < global.maxsock && fdtab[fd].owner) {
+		/* This FD was already bound so this means that it was already
+		 * known and registered before parsing, hence it's an inherited
+		 * FD. The only reason why it's already known here is that it
+		 * has been registered multiple times (multiple listeners on the
+		 * same, or a "shards" directive on the line). There cannot be
+		 * multiple listeners on one FD but at least we can create a
+		 * new one from the original one. We won't reconfigure it,
+		 * however, as this was already done for the first one.
+		 */
+		fd = dup(fd);
+		if (fd == -1) {
+			err |= ERR_RETRYABLE | ERR_ALERT;
+			memprintf(errmsg, "cannot dup() receiving socket (%s)", strerror(errno));
+			goto bind_return;
+		}
+	}
+
 	if (fd >= global.maxsock) {
 		err |= ERR_FATAL | ERR_ABORT | ERR_ALERT;
 		memprintf(errmsg, "not enough free sockets (raise '-n' parameter)");
@@ -306,8 +338,13 @@
 		unlink(backname);
  bind_return:
 	if (errmsg && *errmsg) {
-		if (!ext)
-			memprintf(errmsg, "%s [%s]", *errmsg, path);
+		if (!ext) {
+			char *path_str;
+
+			path_str = sa2str((struct sockaddr_storage *)&rx->addr, 0, 0);
+			memprintf(errmsg, "%s [%s]", *errmsg, ((path_str) ? path_str : ""));
+			ha_free(&path_str);
+		}
 		else
 			memprintf(errmsg, "%s [fd %d]", *errmsg, fd);
 	}
diff --git a/src/ssl_ckch.c b/src/ssl_ckch.c
index 86cc584..1bba553 100644
--- a/src/ssl_ckch.c
+++ b/src/ssl_ckch.c
@@ -43,7 +43,6 @@
 } ckchs_transaction;
 
 
-
 /********************  cert_key_and_chain functions *************************
  * These are the functions that fills a cert_key_and_chain structure. For the
  * functions filling a SSL_CTX from a cert_key_and_chain, see ssl_sock.c
@@ -247,6 +246,7 @@
 {
 	struct buffer *fp = NULL;
 	int ret = 1;
+	struct stat st;
 
 	/* try to load the PEM */
 	if (ssl_sock_load_pem_into_ckch(path, NULL, ckch , err) != 0) {
@@ -281,14 +281,19 @@
 
 	}
 
-	/* try to load an external private key if it wasn't in the PEM */
-	if ((ckch->key == NULL) && (global_ssl.extra_files & SSL_GF_KEY)) {
-		struct stat st;
-
+	if (ckch->key == NULL) {
+		/* If no private key was found yet and we cannot look for it in extra
+		 * files, raise an error.
+		 */
+		if (!(global_ssl.extra_files & SSL_GF_KEY)) {
+			memprintf(err, "%sNo Private Key found in '%s'.\n", err && *err ? *err : "", fp->area);
+			goto end;
+		}
 
+		/* try to load an external private key if it wasn't in the PEM */
 		if (!chunk_strcat(fp, ".key") || (b_data(fp) > MAXPATHLEN)) {
 			memprintf(err, "%s '%s' filename too long'.\n",
-			          err && *err ? *err : "", fp->area);
+				  err && *err ? *err : "", fp->area);
 			ret = 1;
 			goto end;
 		}
@@ -310,6 +315,7 @@
 		b_sub(fp, strlen(".key"));
 	}
 
+
 	if (!X509_check_private_key(ckch->cert, ckch->key)) {
 		memprintf(err, "%sinconsistencies between private key and certificate loaded '%s'.\n",
 		          err && *err ? *err : "", path);
@@ -969,17 +975,18 @@
 	struct buffer *trash = alloc_trash_chunk();
 	struct ebmb_node *node;
 	struct stream_interface *si = appctx->owner;
-	struct ckch_store *ckchs;
+	struct ckch_store *ckchs = NULL;
 
 	if (trash == NULL)
 		return 1;
 
-	if (!appctx->ctx.ssl.old_ckchs) {
-		if (ckchs_transaction.old_ckchs) {
-			ckchs = ckchs_transaction.old_ckchs;
-			chunk_appendf(trash, "# transaction\n");
-			chunk_appendf(trash, "*%s\n", ckchs->path);
-		}
+	if (!appctx->ctx.ssl.old_ckchs && ckchs_transaction.old_ckchs) {
+		ckchs = ckchs_transaction.old_ckchs;
+		chunk_appendf(trash, "# transaction\n");
+		chunk_appendf(trash, "*%s\n", ckchs->path);
+		if (ci_putchk(si_ic(si), trash) == -1)
+			goto yield;
+		appctx->ctx.ssl.old_ckchs = ckchs_transaction.old_ckchs;
 	}
 
 	if (!appctx->ctx.cli.p0) {
@@ -1237,17 +1244,14 @@
 /* release function of the  `set ssl cert' command, free things and unlock the spinlock */
 static void cli_release_commit_cert(struct appctx *appctx)
 {
-	struct ckch_store *new_ckchs;
+	struct ckch_store *new_ckchs = appctx->ctx.ssl.new_ckchs;
 
 	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
 
-	if (appctx->st2 != SETCERT_ST_FIN) {
-		/* free every new sni_ctx and the new store, which are not in the trees so no spinlock there */
-		new_ckchs = appctx->ctx.ssl.new_ckchs;
-
-		/* if the allocation failed, we need to free everything from the temporary list */
+	/* free every new sni_ctx and the new store, which are not in the trees so no spinlock there */
+	if (new_ckchs)
 		ckch_store_free(new_ckchs);
-	}
+	ha_free(&appctx->ctx.ssl.err);
 }
 
 /*
@@ -1257,30 +1261,24 @@
 {
 	struct stream_interface *si = appctx->owner;
 	int y = 0;
-	char *err = NULL;
 	int errcode = 0;
 	int retval = 0;
 	struct ckch_store *old_ckchs, *new_ckchs = NULL;
 	struct ckch_inst *ckchi, *ckchis;
-	struct buffer *trash = alloc_trash_chunk();
 	struct sni_ctx *sc0, *sc0s;
 	struct crtlist_entry *entry;
 
-	if (trash == NULL)
-		goto error;
 
 	if (unlikely(si_ic(si)->flags & (CF_WRITE_ERROR|CF_SHUTW)))
-		goto error;
+		goto end;
 
 	while (1) {
 		switch (appctx->st2) {
 			case SETCERT_ST_INIT:
 				/* This state just print the update message */
-				chunk_printf(trash, "Committing %s", ckchs_transaction.path);
-				if (ci_putchk(si_ic(si), trash) == -1) {
-					si_rx_room_blk(si);
+				chunk_printf(&trash, "Committing %s", ckchs_transaction.path);
+				if (ci_putchk(si_ic(si), &trash) == -1)
 					goto yield;
-				}
 				appctx->st2 = SETCERT_ST_GEN;
 				/* fallthrough */
 			case SETCERT_ST_GEN:
@@ -1295,9 +1293,6 @@
 				old_ckchs = appctx->ctx.ssl.old_ckchs;
 				new_ckchs = appctx->ctx.ssl.new_ckchs;
 
-				if (!new_ckchs)
-					continue;
-
 				/* get the next ckchi to regenerate */
 				ckchi = appctx->ctx.ssl.next_ckchi;
 				/* we didn't start yet, set it to the first elem */
@@ -1310,25 +1305,34 @@
 					char **sni_filter = NULL;
 					int fcount = 0;
 
+					/* save the next ckchi to compute in case of yield */
+					appctx->ctx.ssl.next_ckchi = ckchi;
+
 					/* it takes a lot of CPU to creates SSL_CTXs, so we yield every 10 CKCH instances */
 					if (y >= 10) {
-						/* save the next ckchi to compute */
-						appctx->ctx.ssl.next_ckchi = ckchi;
+						si_rx_endp_more(si);
 						goto yield;
 					}
 
+					/* display one dot per new instance */
+					if (ci_putstr(si_ic(si), ".") == -1)
+						goto yield;
+
 					if (ckchi->crtlist_entry) {
 						sni_filter = ckchi->crtlist_entry->filters;
 						fcount = ckchi->crtlist_entry->fcount;
 					}
 
+					appctx->ctx.ssl.err = NULL;
 					if (ckchi->is_server_instance)
-						errcode |= ckch_inst_new_load_srv_store(new_ckchs->path, new_ckchs, &new_inst, &err);
+						errcode |= ckch_inst_new_load_srv_store(new_ckchs->path, new_ckchs, &new_inst, &appctx->ctx.ssl.err);
 					else
-						errcode |= ckch_inst_new_load_store(new_ckchs->path, new_ckchs, ckchi->bind_conf, ckchi->ssl_conf, sni_filter, fcount, &new_inst, &err);
+						errcode |= ckch_inst_new_load_store(new_ckchs->path, new_ckchs, ckchi->bind_conf, ckchi->ssl_conf, sni_filter, fcount, &new_inst, &appctx->ctx.ssl.err);
 
-					if (errcode & ERR_CODE)
+					if (errcode & ERR_CODE) {
+						appctx->st2 = SETCERT_ST_ERROR;
 						goto error;
+					}
 
 					/* if the previous ckchi was used as the default */
 					if (ckchi->is_default)
@@ -1339,8 +1343,10 @@
 					/* Create a new SSL_CTX and link it to the new instance. */
 					if (new_inst->is_server_instance) {
 						retval = ssl_sock_prepare_srv_ssl_ctx(ckchi->server, new_inst->ctx);
-						if (retval)
+						if (retval) {
+							appctx->st2 = SETCERT_ST_ERROR;
 							goto error;
+						}
 					}
 
 					/* create the link to the crtlist_entry */
@@ -1350,15 +1356,15 @@
 					/* this iterate on the newly generated SNIs in the new instance to prepare their SSL_CTX */
 					list_for_each_entry_safe(sc0, sc0s, &new_inst->sni_ctx, by_ckch_inst) {
 						if (!sc0->order) { /* we initialized only the first SSL_CTX because it's the same in the other sni_ctx's */
-							errcode |= ssl_sock_prepare_ctx(ckchi->bind_conf, ckchi->ssl_conf, sc0->ctx, &err);
-							if (errcode & ERR_CODE)
+							appctx->ctx.ssl.err = NULL;
+							errcode |= ssl_sock_prepare_ctx(ckchi->bind_conf, ckchi->ssl_conf, sc0->ctx, &appctx->ctx.ssl.err);
+							if (errcode & ERR_CODE) {
+								appctx->st2 = SETCERT_ST_ERROR;
 								goto error;
+							}
 						}
 					}
 
-
-					/* display one dot per new instance */
-					chunk_appendf(trash, ".");
 					/* link the new ckch_inst to the duplicate */
 					LIST_APPEND(&new_ckchs->ckch_inst, &new_inst->by_ckchs);
 					y++;
@@ -1371,9 +1377,6 @@
 				old_ckchs = appctx->ctx.ssl.old_ckchs;
 				new_ckchs = appctx->ctx.ssl.new_ckchs;
 
-				if (!new_ckchs)
-					continue;
-
 				/* get the list of crtlist_entry in the old store, and update the pointers to the store */
 				LIST_SPLICE(&new_ckchs->crtlist_entry, &old_ckchs->crtlist_entry);
 				list_for_each_entry(entry, &new_ckchs->crtlist_entry, by_ckch_store) {
@@ -1436,45 +1439,37 @@
 				/* Replace the old ckchs by the new one */
 				ckch_store_free(old_ckchs);
 				ebst_insert(&ckchs_tree, &new_ckchs->node);
+				appctx->ctx.ssl.old_ckchs =  appctx->ctx.ssl.new_ckchs = NULL;
+				appctx->st2 = SETCERT_ST_SUCCESS;
+				/* fallthrough */
+			case SETCERT_ST_SUCCESS:
+				if (ci_putstr(si_ic(si), "\nSuccess!\n") == -1)
+					goto yield;
 				appctx->st2 = SETCERT_ST_FIN;
 				/* fallthrough */
 			case SETCERT_ST_FIN:
 				/* we achieved the transaction, we can set everything to NULL */
-				ha_free(&ckchs_transaction.path);
 				ckchs_transaction.new_ckchs = NULL;
 				ckchs_transaction.old_ckchs = NULL;
+				ckchs_transaction.path = NULL;
 				goto end;
+
+			case SETCERT_ST_ERROR:
+			  error:
+				chunk_printf(&trash, "\n%sFailed!\n", appctx->ctx.ssl.err);
+				if (ci_putchk(si_ic(si), &trash) == -1)
+					goto yield;
+				appctx->st2 = SETCERT_ST_FIN;
+				break;
 		}
 	}
 end:
-
-	chunk_appendf(trash, "\n");
-	if (errcode & ERR_WARN)
-		chunk_appendf(trash, "%s", err);
-	chunk_appendf(trash, "Success!\n");
-	if (ci_putchk(si_ic(si), trash) == -1)
-		si_rx_room_blk(si);
-	free_trash_chunk(trash);
 	/* success: call the release function and don't come back */
 	return 1;
+
 yield:
-	/* store the state */
-	if (ci_putchk(si_ic(si), trash) == -1)
-		si_rx_room_blk(si);
-	free_trash_chunk(trash);
-	si_rx_endp_more(si); /* let's come back later */
+	si_rx_room_blk(si);
 	return 0; /* should come back */
-
-error:
-	/* spin unlock and free are done in the release  function */
-	if (trash) {
-		chunk_appendf(trash, "\n%sFailed!\n", err);
-		if (ci_putchk(si_ic(si), trash) == -1)
-			si_rx_room_blk(si);
-		free_trash_chunk(trash);
-	}
-	/* error: call the release function and don't come back */
-	return 1;
 }
 
 /*
@@ -1641,16 +1636,6 @@
 		goto end;
 	}
 
-	if (!appctx->ctx.ssl.path) {
-	/* this is a new transaction, set the path of the transaction */
-		appctx->ctx.ssl.path = strdup(appctx->ctx.ssl.old_ckchs->path);
-		if (!appctx->ctx.ssl.path) {
-			memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
-			errcode |= ERR_ALERT | ERR_FATAL;
-			goto end;
-		}
-	}
-
 	old_ckchs = appctx->ctx.ssl.old_ckchs;
 
 	/* duplicate the ckch store */
@@ -1678,7 +1663,7 @@
 	/* if there wasn't a transaction, update the old ckchs */
 	if (!ckchs_transaction.old_ckchs) {
 		ckchs_transaction.old_ckchs = appctx->ctx.ssl.old_ckchs;
-		ckchs_transaction.path = appctx->ctx.ssl.path;
+		ckchs_transaction.path = appctx->ctx.ssl.old_ckchs->path;
 		err = memprintf(&err, "Transaction created for certificate %s!\n", ckchs_transaction.path);
 	} else {
 		err = memprintf(&err, "Transaction updated for certificate %s!\n", ckchs_transaction.path);
@@ -1703,8 +1688,6 @@
 
 		appctx->ctx.ssl.old_ckchs = NULL;
 
-		ha_free(&appctx->ctx.ssl.path);
-
 		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
 		return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
 	} else {
@@ -1745,7 +1728,7 @@
 	ckch_store_free(ckchs_transaction.new_ckchs);
 	ckchs_transaction.new_ckchs = NULL;
 	ckchs_transaction.old_ckchs = NULL;
-	ha_free(&ckchs_transaction.path);
+	ckchs_transaction.path = NULL;
 
 	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
 
@@ -1821,6 +1804,11 @@
 
 	filename = args[3];
 
+	if (ckchs_transaction.path && strcmp(ckchs_transaction.path, filename) == 0) {
+		memprintf(&err, "ongoing transaction for the certificate '%s'", filename);
+		goto error;
+	}
+
 	store = ckchs_lookup(filename);
 	if (store == NULL) {
 		memprintf(&err, "certificate '%s' doesn't exist!\n", filename);
diff --git a/src/ssl_crtlist.c b/src/ssl_crtlist.c
index 6b400e2..894a690 100644
--- a/src/ssl_crtlist.c
+++ b/src/ssl_crtlist.c
@@ -117,6 +117,15 @@
 		if (!dst->ecdhe)
 			goto error;
 	}
+
+	dst->ssl_methods_cfg.flags = src->ssl_methods_cfg.flags;
+	dst->ssl_methods_cfg.min = src->ssl_methods_cfg.min;
+	dst->ssl_methods_cfg.max = src->ssl_methods_cfg.max;
+
+	dst->ssl_methods.flags = src->ssl_methods.flags;
+	dst->ssl_methods.min = src->ssl_methods.min;
+	dst->ssl_methods.max = src->ssl_methods.max;
+
 	return dst;
 
 error:
@@ -379,6 +388,11 @@
 	*crt_path = args[0];
 
 	if (ssl_b) {
+		if (ssl_b > 1) {
+			memprintf(err, "parsing [%s:%d]: malformated line, filters can't be between filename and options!", file, linenum);
+			cfgerr |= ERR_WARN;
+		}
+
 		ssl_conf = calloc(1, sizeof *ssl_conf);
 		if (!ssl_conf) {
 			memprintf(err, "not enough memory!");
@@ -1012,8 +1026,9 @@
 {
 	struct crtlist_entry *entry = appctx->ctx.cli.p1;
 
-	if (appctx->st2 != SETCERT_ST_FIN) {
+	if (entry) {
 		struct ckch_inst *inst, *inst_s;
+
 		/* upon error free the ckch_inst and everything inside */
 		ebpt_delete(&entry->node);
 		LIST_DELETE(&entry->by_crtlist);
@@ -1027,8 +1042,8 @@
 		free(entry->ssl_conf);
 		free(entry);
 	}
-
 	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+	ha_free(&appctx->ctx.cli.err);
 }
 
 
@@ -1045,30 +1060,23 @@
 	struct crtlist *crtlist = appctx->ctx.cli.p0;
 	struct crtlist_entry *entry = appctx->ctx.cli.p1;
 	struct ckch_store *store = entry->node.key;
-	struct buffer *trash = alloc_trash_chunk();
 	struct ckch_inst *new_inst;
-	char *err = NULL;
 	int i = 0;
 	int errcode = 0;
 
-	if (trash == NULL)
-		goto error;
-
 	/* for each bind_conf which use the crt-list, a new ckch_inst must be
 	 * created.
 	 */
 	if (unlikely(si_ic(si)->flags & (CF_WRITE_ERROR|CF_SHUTW)))
-		goto error;
+		goto end;
 
 	while (1) {
 		switch (appctx->st2) {
 			case SETCERT_ST_INIT:
 				/* This state just print the update message */
-				chunk_printf(trash, "Inserting certificate '%s' in crt-list '%s'", store->path, crtlist->node.key);
-				if (ci_putchk(si_ic(si), trash) == -1) {
-					si_rx_room_blk(si);
+				chunk_printf(&trash, "Inserting certificate '%s' in crt-list '%s'", store->path, crtlist->node.key);
+				if (ci_putchk(si_ic(si), &trash) == -1)
 					goto yield;
-				}
 				appctx->st2 = SETCERT_ST_GEN;
 				/* fallthrough */
 			case SETCERT_ST_GEN:
@@ -1079,28 +1087,39 @@
 					struct bind_conf *bind_conf = bind_conf_node->bind_conf;
 					struct sni_ctx *sni;
 
+					appctx->ctx.cli.p2 = bind_conf_node;
+
 					/* yield every 10 generations */
 					if (i > 10) {
-						appctx->ctx.cli.p2 = bind_conf_node;
+						si_rx_endp_more(si); /* let's come back later */
 						goto yield;
 					}
 
+					/* display one dot for each new instance */
+					if (ci_putstr(si_ic(si), ".") == -1)
+						goto yield;
+
 					/* we don't support multi-cert bundles, only simple ones */
-					errcode |= ckch_inst_new_load_store(store->path, store, bind_conf, entry->ssl_conf, entry->filters, entry->fcount, &new_inst, &err);
-					if (errcode & ERR_CODE)
+					appctx->ctx.cli.err = NULL;
+					errcode |= ckch_inst_new_load_store(store->path, store, bind_conf, entry->ssl_conf, entry->filters, entry->fcount, &new_inst, &appctx->ctx.cli.err);
+					if (errcode & ERR_CODE) {
+						appctx->st2 = SETCERT_ST_ERROR;
 						goto error;
+					}
 
 					/* we need to initialize the SSL_CTX generated */
 					/* this iterate on the newly generated SNIs in the new instance to prepare their SSL_CTX */
 					list_for_each_entry(sni, &new_inst->sni_ctx, by_ckch_inst) {
 						if (!sni->order) { /* we initialized only the first SSL_CTX because it's the same in the other sni_ctx's */
-							errcode |= ssl_sock_prepare_ctx(bind_conf, new_inst->ssl_conf, sni->ctx, &err);
-							if (errcode & ERR_CODE)
+							appctx->ctx.cli.err = NULL;
+							errcode |= ssl_sock_prepare_ctx(bind_conf, new_inst->ssl_conf, sni->ctx, &appctx->ctx.cli.err);
+							if (errcode & ERR_CODE) {
+								appctx->st2 = SETCERT_ST_ERROR;
 								goto error;
+							}
 						}
 					}
-					/* display one dot for each new instance */
-					chunk_appendf(trash, ".");
+
 					i++;
 					LIST_APPEND(&store->ckch_inst, &new_inst->by_ckchs);
 					LIST_APPEND(&entry->ckch_inst, &new_inst->by_crtlist_entry);
@@ -1109,46 +1128,52 @@
 				appctx->st2 = SETCERT_ST_INSERT;
 				/* fallthrough */
 			case SETCERT_ST_INSERT:
-				/* insert SNIs in bind_conf */
+				/* the insertion is called for every instance of the store, not
+				 * only the one we generated.
+				 * But the ssl_sock_load_cert_sni() skip the sni already
+				 * inserted. Not every instance has a bind_conf, it could be
+				 * the store of a server so we should be careful */
+
 				list_for_each_entry(new_inst, &store->ckch_inst, by_ckchs) {
+					if (!new_inst->bind_conf) /* this is a server instance */
+						continue;
 					HA_RWLOCK_WRLOCK(SNI_LOCK, &new_inst->bind_conf->sni_lock);
 					ssl_sock_load_cert_sni(new_inst, new_inst->bind_conf);
 					HA_RWLOCK_WRUNLOCK(SNI_LOCK, &new_inst->bind_conf->sni_lock);
 				}
 				entry->linenum = ++crtlist->linecount;
+				appctx->ctx.cli.p1 = NULL;
+				appctx->st2 = SETCERT_ST_SUCCESS;
+				/* fallthrough */
+			case SETCERT_ST_SUCCESS:
+				chunk_reset(&trash);
+				chunk_appendf(&trash, "\n");
+				if (appctx->ctx.cli.err)
+					chunk_appendf(&trash, "%s", appctx->ctx.cli.err);
+				chunk_appendf(&trash, "Success!\n");
+				if (ci_putchk(si_ic(si), &trash) == -1)
+					goto yield;
 				appctx->st2 = SETCERT_ST_FIN;
 				goto end;
+
+			case SETCERT_ST_ERROR:
+			  error:
+				chunk_printf(&trash, "\n%sFailed!\n", appctx->ctx.cli.err);
+				if (ci_putchk(si_ic(si), &trash) == -1)
+					goto yield;
+				goto end;
+
+			default:
+				goto end;
 		}
 	}
 
 end:
-	chunk_appendf(trash, "\n");
-	if (errcode & ERR_WARN)
-		chunk_appendf(trash, "%s", err);
-	chunk_appendf(trash, "Success!\n");
-	if (ci_putchk(si_ic(si), trash) == -1)
-		si_rx_room_blk(si);
-	free_trash_chunk(trash);
 	/* success: call the release function and don't come back */
 	return 1;
 yield:
-	/* store the state */
-	if (ci_putchk(si_ic(si), trash) == -1)
-		si_rx_room_blk(si);
-	free_trash_chunk(trash);
-	si_rx_endp_more(si); /* let's come back later */
+	si_rx_room_blk(si);
 	return 0; /* should come back */
-
-error:
-	/* spin unlock and free are done in the release function */
-	if (trash) {
-		chunk_appendf(trash, "\n%sFailed!\n", err);
-		if (ci_putchk(si_ic(si), trash) == -1)
-			si_rx_room_blk(si);
-		free_trash_chunk(trash);
-	}
-	/* error: call the release function and don't come back */
-	return 1;
 }
 
 
diff --git a/src/ssl_sample.c b/src/ssl_sample.c
index 5509e1f..f1ee39d 100644
--- a/src/ssl_sample.c
+++ b/src/ssl_sample.c
@@ -1198,6 +1198,9 @@
 	char *src = NULL;
 	const char *sfx;
 
+	if (global_ssl.keylog <= 0)
+		return 0;
+
 	conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
 	       smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL;
 
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 7f74977..e69812f 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -751,8 +751,14 @@
 	}
 
 	SSL_get_all_async_fds(ssl, all_fd, &num_all_fds);
-	for (i=0 ; i < num_all_fds ; i++)
-		fd_stop_both(all_fd[i]);
+	for (i=0 ; i < num_all_fds ; i++) {
+		/* We want to remove the fd from the fdtab
+		 * but we flag it to disown because the
+		 * close is performed by the engine itself
+		 */
+		fdtab[all_fd[i]].state |= FD_DISOWN;
+		fd_delete(all_fd[i]);
+	}
 
 	/* Now we can safely call SSL_free, no more pending job in engines */
 	SSL_free(ssl);
@@ -782,8 +788,14 @@
 	SSL_get_changed_async_fds(ssl, add_fd, &num_add_fds, del_fd, &num_del_fds);
 
 	/* We remove unused fds from the fdtab */
-	for (i=0 ; i < num_del_fds ; i++)
-		fd_stop_both(del_fd[i]);
+	for (i=0 ; i < num_del_fds ; i++) {
+		/* We want to remove the fd from the fdtab
+		 * but we flag it to disown because the
+		 * close is performed by the engine itself
+		 */
+		fdtab[del_fd[i]].state |= FD_DISOWN;
+		fd_delete(del_fd[i]);
+	}
 
 	/* We add new fds to the fdtab */
 	for (i=0 ; i < num_add_fds ; i++) {
@@ -1321,7 +1333,7 @@
 	if (!ocsp ||
 	    !ocsp->response.area ||
 	    !ocsp->response.data ||
-	    (ocsp->expire < now.tv_sec))
+	    (ocsp->expire < date.tv_sec))
 		return SSL_TLSEXT_ERR_NOACK;
 
 	ssl_buf = OPENSSL_malloc(ocsp->response.data);
@@ -1493,7 +1505,7 @@
 		OCSP_CERTID_free(cid);
 
 	if (ocsp)
-		free(ocsp);
+		ssl_sock_free_ocsp(ocsp);
 
 	if (warn)
 		free(warn);
@@ -1614,7 +1626,8 @@
 			ctx->xprt_st |= SSL_SOCK_CAEDEPTH_TO_ST(depth);
 		}
 
-		if (err < 64 && __objt_listener(conn->target)->bind_conf->ca_ignerr & (1ULL << err)) {
+		if (err <= SSL_MAX_VFY_ERROR_CODE &&
+		    cert_ignerr_bitfield_get(__objt_listener(conn->target)->bind_conf->ca_ignerr_bitfield, err)) {
 			ssl_sock_dump_errors(conn);
 			ERR_clear_error();
 			return 1;
@@ -1628,7 +1641,8 @@
 		ctx->xprt_st |= SSL_SOCK_CRTERROR_TO_ST(err);
 
 	/* check if certificate error needs to be ignored */
-	if (err < 64 && __objt_listener(conn->target)->bind_conf->crt_ignerr & (1ULL << err)) {
+	if (err <= SSL_MAX_VFY_ERROR_CODE &&
+	    cert_ignerr_bitfield_get(__objt_listener(conn->target)->bind_conf->crt_ignerr_bitfield, err)) {
 		ssl_sock_dump_errors(conn);
 		ERR_clear_error();
 		return 1;
@@ -3112,6 +3126,8 @@
  *     ERR_FATAL in any fatal error case
  *     ERR_ALERT if the reason of the error is available in err
  *     ERR_WARN if a warning is available into err
+ * The caller is responsible of freeing the newly built or newly refcounted
+ * find_chain element.
  * The value 0 means there is no error nor warning and
  * the operation succeed.
  */
@@ -3133,13 +3149,13 @@
 	}
 
 	if (ckch->chain) {
-		*find_chain = ckch->chain;
+		*find_chain = X509_chain_up_ref(ckch->chain);
 	} else {
 		/* Find Certificate Chain in global */
 		struct issuer_chain *issuer;
 		issuer = ssl_get0_issuer_chain(ckch->cert);
 		if (issuer)
-			*find_chain = issuer->chain;
+			*find_chain = X509_chain_up_ref(issuer->chain);
 	}
 
 	if (!*find_chain) {
@@ -3159,14 +3175,11 @@
 #else
 	{ /* legacy compat (< openssl 1.0.2) */
 		X509 *ca;
-		STACK_OF(X509) *chain;
-		chain = X509_chain_up_ref(*find_chain);
-		while ((ca = sk_X509_shift(chain)))
+		while ((ca = sk_X509_shift(*find_chain)))
 			if (!SSL_CTX_add_extra_chain_cert(ctx, ca)) {
 				memprintf(err, "%sunable to load chain certificate into SSL Context '%s'.\n",
 					  err && *err ? *err : "", path);
 				X509_free(ca);
-				sk_X509_pop_free(chain, X509_free);
 				errcode |= ERR_ALERT | ERR_FATAL;
 				goto end;
 			}
@@ -3254,6 +3267,7 @@
 #endif
 
  end:
+	sk_X509_pop_free(find_chain, X509_free);
 	return errcode;
 }
 
@@ -3291,6 +3305,7 @@
 	}
 
 end:
+	sk_X509_pop_free(find_chain, X509_free);
 	return errcode;
 }
 
@@ -3976,6 +3991,7 @@
 	if (oldsh_ssl_sess != sh_ssl_sess) {
 		 /* NOTE: Row couldn't be in use because we lock read & write function */
 		/* release the reserved row */
+		first->len = 0; /* the len must be liberated in order not to call the release callback on it */
 		shctx_row_dec_hot(ssl_shctx, first);
 		/* replace the previous session already in the tree */
 		sh_ssl_sess = oldsh_ssl_sess;
@@ -4455,7 +4471,9 @@
 	SSL_CTX_set_msg_callback(ctx, ssl_sock_msgcbk);
 #endif
 #ifdef HAVE_SSL_KEYLOG
-	SSL_CTX_set_keylog_callback(ctx, SSL_CTX_keylog);
+	/* only activate the keylog callback if it was required to prevent performance loss */
+	if (global_ssl.keylog > 0)
+		SSL_CTX_set_keylog_callback(ctx, SSL_CTX_keylog);
 #endif
 
 #if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
@@ -5930,7 +5948,7 @@
 			return NULL;
 		}
 		conn = ctx->conn;
-		conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		conn_in_list = conn_get_idle_flag(conn);
 		if (conn_in_list)
 			conn_delete_from_tree(&conn->hash_node->node);
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@@ -6326,8 +6344,14 @@
 			 * because the fd is  owned by the engine.
 			 * the engine is responsible to close
 			 */
-			for (i=0 ; i < num_all_fds ; i++)
-				fd_stop_both(all_fd[i]);
+			for (i=0 ; i < num_all_fds ; i++) {
+				/* We want to remove the fd from the fdtab
+				 * but we flag it to disown because the
+				 * close is performed by the engine itself
+				 */
+				fdtab[all_fd[i]].state |= FD_DISOWN;
+				fd_delete(all_fd[i]);
+			}
 		}
 #endif
 		SSL_free(ctx->ssl);
@@ -6669,8 +6693,8 @@
 				break;
 			}
 		}
-		AUTHORITY_KEYID_free(akid);
 	}
+	AUTHORITY_KEYID_free(akid);
 	return issuer;
 }
 
diff --git a/src/stats.c b/src/stats.c
index ceb6bd8..d1f3daa 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -258,6 +258,8 @@
 	[ST_F_NEED_CONN_EST]                 = { .name = "need_conn_est",               .desc = "Estimated needed number of connections"},
 	[ST_F_UWEIGHT]                       = { .name = "uweight",                     .desc = "Server's user weight, or sum of active servers' user weights for a backend" },
 	[ST_F_AGG_SRV_CHECK_STATUS]          = { .name = "agg_server_check_status",     .desc = "Backend's aggregated gauge of servers' state check status" },
+	[ST_F_AGG_SRV_STATUS ]               = { .name = "agg_server_status",           .desc = "Backend's aggregated gauge of servers' status" },
+	[ST_F_AGG_CHECK_STATUS]              = { .name = "agg_check_status",            .desc = "Backend's aggregated gauge of servers' state check status" },
 };
 
 /* one line of info */
@@ -688,13 +690,13 @@
 			goto err;
 	}
 
-	if (!chunk_strcat(out, "]"))
+	if (!chunk_strcat(out, "]\n"))
 		goto err;
 	return 1;
 
 err:
 	chunk_reset(out);
-	chunk_appendf(out, "{\"errorStr\":\"output buffer too short\"}");
+	chunk_appendf(out, "{\"errorStr\":\"output buffer too short\"}\n");
 	return 0;
 }
 
@@ -1693,9 +1695,18 @@
 			case ST_F_DSES:
 				metric = mkf_u64(FN_COUNTER, px->fe_counters.denied_sess);
 				break;
-			case ST_F_STATUS:
-				metric = mkf_str(FO_STATUS, px->disabled ? "STOP" : "OPEN");
+			case ST_F_STATUS: {
+				const char *state;
+
+				if (px->disabled)
+					state = "STOP";
+				else if (!px->li_ready)
+					state = "PAUSED";
+				else
+					state = "OPEN";
+				metric = mkf_str(FO_STATUS, state);
 				break;
+			}
 			case ST_F_PID:
 				metric = mkf_u32(FO_KEY, relative_pid);
 				break;
@@ -2657,9 +2668,13 @@
 					chunk_appendf(out, " (%d/%d)", nbup, nbsrv);
 				metric = mkf_str(FO_STATUS, fld);
 				break;
-			case ST_F_AGG_SRV_CHECK_STATUS:
+			case ST_F_AGG_SRV_CHECK_STATUS:   // DEPRECATED
+			case ST_F_AGG_SRV_STATUS:
 				metric = mkf_u32(FN_GAUGE, 0);
 				break;
+			case ST_F_AGG_CHECK_STATUS:
+				metric = mkf_u32(FN_GAUGE, 0);
+				break;
 			case ST_F_WEIGHT:
 				metric = mkf_u32(FN_AVG, (px->lbprm.tot_weight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv);
 				break;
@@ -3607,7 +3622,7 @@
  */
 static void stats_dump_json_end()
 {
-	chunk_strcat(&trash, "]");
+	chunk_strcat(&trash, "]\n");
 }
 
 /* Uses <appctx.ctx.stats.obj1> as a pointer to the current proxy and <obj2> as
@@ -4401,9 +4416,9 @@
 	info[INF_MEMMAX_MB]                      = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_memmax);
 	info[INF_MEMMAX_BYTES]                   = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_memmax * 1048576L);
 	info[INF_POOL_ALLOC_MB]                  = mkf_u32(0, (unsigned)(pool_total_allocated() / 1048576L));
-	info[INF_POOL_ALLOC_BYTES]               = mkf_u32(0, pool_total_allocated());
+	info[INF_POOL_ALLOC_BYTES]               = mkf_u64(0, pool_total_allocated());
 	info[INF_POOL_USED_MB]                   = mkf_u32(0, (unsigned)(pool_total_used() / 1048576L));
-	info[INF_POOL_USED_BYTES]                = mkf_u32(0, pool_total_used());
+	info[INF_POOL_USED_BYTES]                = mkf_u64(0, pool_total_used());
 	info[INF_POOL_FAILED]                    = mkf_u32(FN_COUNTER, pool_total_failures());
 	info[INF_ULIMIT_N]                       = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_nofile);
 	info[INF_MAXSOCK]                        = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxsock);
@@ -4709,6 +4724,7 @@
 		chunk_appendf(out,
 			      "{\"errorStr\":\"output buffer too short\"}");
 	}
+	chunk_appendf(out, "\n");
 }
 
 /* This function dumps the schema onto the stream interface's read buffer.
diff --git a/src/stick_table.c b/src/stick_table.c
index 665a815..851ae10 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -222,7 +222,18 @@
 			ts->exp.key = ts->expire;
 			eb32_insert(&t->exps, &ts->exp);
 
-			if (!eb || eb->key > ts->exp.key)
+			/* the update might have jumped beyond the next element,
+			 * possibly causing a wrapping. We need to check whether
+			 * the next element should be used instead. If the next
+			 * element doesn't exist it means we're on the right
+			 * side and have to check the first one then. If it
+			 * exists and is closer, we must use it, otherwise we
+			 * use the current one.
+			 */
+			if (!eb)
+				eb = eb32_first(&t->exps);
+
+			if (!eb || tick_is_lt(ts->exp.key, eb->key))
 				eb = &ts->exp;
 
 			continue;
@@ -554,11 +565,12 @@
 	return ts;
 }
 /*
- * Trash expired sticky sessions from table <t>. The next expiration date is
- * returned.
+ * Task processing function to trash expired sticky sessions. A pointer to the
+ * task itself is returned since it never dies.
  */
-static int stktable_trash_expired(struct stktable *t)
+struct task *process_table_expire(struct task *task, void *context, unsigned int state)
 {
+	struct stktable *t = context;
 	struct stksess *ts;
 	struct eb32_node *eb;
 	int looped = 0;
@@ -604,7 +616,18 @@
 			ts->exp.key = ts->expire;
 			eb32_insert(&t->exps, &ts->exp);
 
+			/* the update might have jumped beyond the next element,
+			 * possibly causing a wrapping. We need to check whether
+			 * the next element should be used instead. If the next
+			 * element doesn't exist it means we're on the right
+			 * side and have to check the first one then. If it
+			 * exists and is closer, we must use it, otherwise we
+			 * use the current one.
+			 */
+			if (!eb)
+				eb = eb32_first(&t->exps);
+
-			if (!eb || eb->key > ts->exp.key)
+			if (!eb || tick_is_lt(ts->exp.key, eb->key))
 				eb = &ts->exp;
 			continue;
 		}
@@ -618,19 +641,8 @@
 	/* We have found no task to expire in any tree */
 	t->exp_next = TICK_ETERNITY;
 out_unlock:
+	task->expire = t->exp_next;
 	HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
-	return t->exp_next;
-}
-
-/*
- * Task processing function to trash expired sticky sessions. A pointer to the
- * task itself is returned since it never dies.
- */
-struct task *process_table_expire(struct task *task, void *context, unsigned int state)
-{
-	struct stktable *t = context;
-
-	task->expire = stktable_trash_expired(t);
 	return task;
 }
 
@@ -679,7 +691,7 @@
  * Returns 0 on successful parsing, else 1.
  * <myidx> is set at next configuration <args> index.
  */
-int stktable_parse_type(char **args, int *myidx, unsigned long *type, size_t *key_size)
+int stktable_parse_type(char **args, int *myidx, unsigned long *type, size_t *key_size, const char *file, int linenum)
 {
 	for (*type = 0; *type < SMP_TYPES; (*type)++) {
 		if (!stktable_types[*type].kw)
@@ -692,10 +704,14 @@
 
 		if (stktable_types[*type].flags & STK_F_CUSTOM_KEYSIZE) {
 			if (strcmp("len", args[*myidx]) == 0) {
+				char *stop;
+
 				(*myidx)++;
-				*key_size = atol(args[*myidx]);
-				if (!*key_size)
-					break;
+				*key_size = strtol(args[*myidx], &stop, 10);
+				if (*stop != '\0' || !*key_size) {
+					ha_alert("parsing [%s:%d] : 'len' expects a positive integer argument.\n", file, linenum);
+					return 1;
+				}
 				if (*type == SMP_T_STR) {
 					/* null terminated string needs +1 for '\0'. */
 					(*key_size)++;
@@ -705,6 +721,7 @@
 		}
 		return 0;
 	}
+	ha_alert("parsing [%s:%d] : %s: unknown type '%s'.\n", file, linenum, args[0], args[*myidx]);
 	return 1;
 }
 
@@ -850,9 +867,7 @@
 		}
 		else if (strcmp(args[idx], "type") == 0) {
 			idx++;
-			if (stktable_parse_type(args, &idx, &t->type, &t->key_size) != 0) {
-				ha_alert("parsing [%s:%d] : %s: unknown type '%s'.\n",
-					 file, linenum, args[0], args[idx]);
+			if (stktable_parse_type(args, &idx, &t->type, &t->key_size, file, linenum) != 0) {
 				err_code |= ERR_ALERT | ERR_FATAL;
 				goto out;
 			}
diff --git a/src/stream.c b/src/stream.c
index c241642..f81d6fa 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -451,8 +451,17 @@
 	s->req_cap = NULL;
 	s->res_cap = NULL;
 
-	/* Initialise all the variables contexts even if not used.
+	/* Initialize all the variables contexts even if not used.
 	 * This permits to prune these contexts without errors.
+	 *
+	 * We need to make sure that those lists are not re-initialized
+	 * by stream-dependant underlying code because we could lose
+	 * track of already defined variables, leading to data inconsistency
+	 * and memory leaks...
+	 *
+	 * For reference: we had a very old bug caused by vars_txn and
+	 * vars_reqres being accidentally re-initialized in http_create_txn()
+	 * (https://github.com/haproxy/haproxy/issues/1935)
 	 */
 	vars_init(&s->vars_txn,    SCOPE_TXN);
 	vars_init(&s->vars_reqres, SCOPE_REQ);
@@ -1435,7 +1444,7 @@
 		struct dict_entry *de;
 		struct stktable *t = s->store[i].table;
 
-		if (objt_server(s->target) && __objt_server(s->target)->flags & SRV_F_NON_STICK) {
+		if (!objt_server(s->target) || (__objt_server(s->target)->flags & SRV_F_NON_STICK)) {
 			stksess_free(s->store[i].table, s->store[i].ts);
 			s->store[i].ts = NULL;
 			continue;
@@ -1448,24 +1457,25 @@
 		}
 		s->store[i].ts = NULL;
 
-		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
-		ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
-		stktable_data_cast(ptr, server_id) = __objt_server(s->target)->puid;
-		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
-
 		if (t->server_key_type == STKTABLE_SRV_NAME)
 			key = __objt_server(s->target)->id;
 		else if (t->server_key_type == STKTABLE_SRV_ADDR)
 			key = __objt_server(s->target)->addr_node.key;
 		else
-			continue;
+			key = NULL;
 
 		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
-		de = dict_insert(&server_key_dict, key);
-		if (de) {
-			ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
-			stktable_data_cast(ptr, server_key) = de;
+		ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
+		stktable_data_cast(ptr, server_id) = __objt_server(s->target)->puid;
+
+		if (key) {
+			de = dict_insert(&server_key_dict, key);
+			if (de) {
+				ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
+				stktable_data_cast(ptr, server_key) = de;
+			}
 		}
+
 		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 
 		stktable_touch_local(t, ts, 1);
diff --git a/src/stream_interface.c b/src/stream_interface.c
index 830db34..d2f4752 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -154,6 +154,30 @@
 	channel_shutr_now(oc);
 }
 
+/* Conditionally forward the close to the write side. It returns 1 if it can be
+ * forwarded. It is the caller's responsibility to forward the close to the write
+ * side. Otherwise, 0 is returned. In this case, the CF_SHUTW_NOW flag may be set on
+ * the channel if we are only waiting for the outgoing data to be flushed.
+ */
+static inline int si_cond_forward_shutw(struct stream_interface *si)
+{
+	/* The close must not be forwarded */
+	if (!(si_ic(si)->flags & CF_SHUTR) || !(si->flags & SI_FL_NOHALF))
+		return 0;
+
+	if (!channel_is_empty(si_ic(si))) {
+		/* the close to the write side cannot be forwarded now because
+		 * we should flush outgoing data first. But instruct the output
+		 * channel it should be done ASAP.
+		 */
+		channel_shutw_now(si_oc(si));
+		return 0;
+	}
+
+	/* the close can be immediately forwarded to the write side */
+	return 1;
+}
+
 /*
  * This function performs a shutdown-read on a detached stream interface in a
  * connected or init state (it does nothing for other states). It either shuts
@@ -178,7 +202,7 @@
 		si->state = SI_ST_DIS;
 		si->exp = TICK_ETERNITY;
 	}
-	else if (si->flags & SI_FL_NOHALF) {
+	else if (si_cond_forward_shutw(si)) {
 		/* we want to immediately forward this close to the write side */
 		return stream_int_shutw(si);
 	}
@@ -950,6 +974,22 @@
 		return;
 
 	si_cs_send(cs);
+
+	if (likely(oc->flags & CF_WRITE_ACTIVITY)) {
+		struct channel *ic = si_ic(si);
+
+		if (tick_isset(ic->rex) && !(si->flags & SI_FL_INDEP_STR)) {
+			/* Note: to prevent the client from expiring read timeouts
+			 * during writes, we refresh it. We only do this if the
+			 * interface is not configured for "independent streams",
+			 * because for some applications it's better not to do this,
+			 * for instance when continuously exchanging small amounts
+			 * of data which can fill the socket buffers long before a
+			 * write timeout is detected.
+			 */
+			ic->rex = tick_add_ifset(now_ms, ic->rto);
+		}
+	}
 }
 
 /* Updates at once the channel flags, and timers of both stream interfaces of a
@@ -1023,7 +1063,7 @@
 		si->state = SI_ST_DIS;
 		si->exp = TICK_ETERNITY;
 	}
-	else if (si->flags & SI_FL_NOHALF) {
+	else if (si_cond_forward_shutw(si)) {
 		/* we want to immediately forward this close to the write side */
 		return stream_int_shutw_conn(si);
 	}
@@ -1572,7 +1612,7 @@
 	if (oc->flags & CF_SHUTW)
 		goto do_close;
 
-	if (si->flags & SI_FL_NOHALF) {
+	if (si_cond_forward_shutw(si)) {
 		/* we want to immediately forward this close to the write side */
 		/* force flag on ssl to keep stream in cache */
 		cs_shutw(cs, CS_SHW_SILENT);
@@ -1660,7 +1700,7 @@
 		si->state = SI_ST_DIS;
 		si->exp = TICK_ETERNITY;
 	}
-	else if (si->flags & SI_FL_NOHALF) {
+	else if (si_cond_forward_shutw(si)) {
 		/* we want to immediately forward this close to the write side */
 		return stream_int_shutw_applet(si);
 	}
diff --git a/src/task.c b/src/task.c
index 88dd2d0..3bd4a24 100644
--- a/src/task.c
+++ b/src/task.c
@@ -731,16 +731,31 @@
 	 */
 	max_total = max[TL_URGENT] + max[TL_NORMAL] + max[TL_BULK] + max[TL_HEAVY];
 	if (!max_total)
-		return;
+		goto leave;
 
 	for (queue = 0; queue < TL_CLASSES; queue++)
 		max[queue]  = ((unsigned)max_processed * max[queue] + max_total - 1) / max_total;
 
-	/* The heavy queue must never process more than one task at once
-	 * anyway.
+	/* The heavy queue must never process more than very few tasks at once
+	 * anyway. We set the limit to 1 if running on low_latency scheduling,
+	 * given that we know that other values can have an impact on latency
+	 * (~500us end-to-end connection achieved at 130kcps in SSL), 1 + one
+	 * per 1024 tasks if there is at least one non-heavy task while still
+	 * respecting the ratios above, or 1 + one per 128 tasks if only heavy
+	 * tasks are present. This allows to drain excess SSL handshakes more
+	 * efficiently if the queue becomes congested.
 	 */
-	if (max[TL_HEAVY] > 1)
-		max[TL_HEAVY] = 1;
+	if (max[TL_HEAVY] > 1) {
+		if (global.tune.options & GTUNE_SCHED_LOW_LATENCY)
+			budget = 1;
+		else if (tt->tl_class_mask & ~(1 << TL_HEAVY))
+			budget = 1 + tt->rq_total / 1024;
+		else
+			budget = 1 + tt->rq_total / 128;
+
+		if (max[TL_HEAVY] > budget)
+			max[TL_HEAVY] = budget;
+	}
 
 	lrq = grq = NULL;
 
@@ -841,6 +856,7 @@
 	if (max_processed > 0 && thread_has_tasks())
 		goto not_done_yet;
 
+ leave:
 	if (tt->tl_class_mask)
 		activity[tid].long_rq++;
 }
diff --git a/src/tcp_rules.c b/src/tcp_rules.c
index f525895..cf8324c 100644
--- a/src/tcp_rules.c
+++ b/src/tcp_rules.c
@@ -112,10 +112,14 @@
 	 * - if one rule returns KO, then return KO
 	 */
 
-	if ((req->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR)) || channel_full(req, global.tune.maxrewrite) ||
+	if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) || channel_full(req, global.tune.maxrewrite) ||
 	    si_rx_blocked_room(chn_prod(req)) ||
-	    !s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+	    !s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
 		partial = SMP_OPT_FINAL;
+		/* Action may yield while the inspect_delay is not expired and there is no read error */
+		if ((req->flags & CF_READ_ERROR) || !s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+			act_opts |= ACT_OPT_FINAL;
+	}
 	else
 		partial = 0;
 
@@ -148,12 +152,8 @@
 		if (ret) {
 			act_opts |= ACT_OPT_FIRST;
 resume_execution:
-
 			/* Always call the action function if defined */
 			if (rule->action_ptr) {
-				if (partial & SMP_OPT_FINAL)
-					act_opts |= ACT_OPT_FINAL;
-
 				switch (rule->action_ptr(rule, s->be, s->sess, s, act_opts)) {
 					case ACT_RET_CONT:
 						break;
@@ -162,7 +162,7 @@
 						goto end;
 					case ACT_RET_YIELD:
 						s->current_rule = rule;
-						if (partial & SMP_OPT_FINAL) {
+						if (act_opts & ACT_OPT_FINAL) {
 							send_log(s->be, LOG_WARNING,
 								 "Internal error: yield not allowed if the inspect-delay expired "
 								 "for the tcp-request content actions.");
@@ -268,10 +268,14 @@
 	 * - if one rule returns OK, then return OK
 	 * - if one rule returns KO, then return KO
 	 */
-	if ((rep->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR)) || channel_full(rep, global.tune.maxrewrite) ||
+	if ((rep->flags & (CF_SHUTR|CF_READ_ERROR)) || channel_full(rep, global.tune.maxrewrite) ||
 	    si_rx_blocked_room(chn_prod(rep)) ||
-	    !s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+	    !s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
 		partial = SMP_OPT_FINAL;
+		/* Action may yield while the inspect_delay is not expired and there is no read error */
+		if ((rep->flags & CF_READ_ERROR) || !s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+			act_opts |= ACT_OPT_FINAL;
+	}
 	else
 		partial = 0;
 
@@ -307,9 +311,6 @@
 resume_execution:
 			/* Always call the action function if defined */
 			if (rule->action_ptr) {
-				if (partial & SMP_OPT_FINAL)
-					act_opts |= ACT_OPT_FINAL;
-
 				switch (rule->action_ptr(rule, s->be, s->sess, s, act_opts)) {
 					case ACT_RET_CONT:
 						break;
@@ -318,7 +319,7 @@
 						goto end;
 					case ACT_RET_YIELD:
 						s->current_rule = rule;
-						if (partial & SMP_OPT_FINAL) {
+						if (act_opts & ACT_OPT_FINAL) {
 							send_log(s->be, LOG_WARNING,
 								 "Internal error: yield not allowed if the inspect-delay expired "
 								 "for the tcp-response content actions.");
diff --git a/src/tcp_sample.c b/src/tcp_sample.c
index 8242fa4..d574534 100644
--- a/src/tcp_sample.c
+++ b/src/tcp_sample.c
@@ -413,9 +413,9 @@
  * instance v4/v6 must be declared v4.
  */
 static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
-	{ "bc_dst",      smp_fetch_dst,   0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+	{ "bc_dst",      smp_fetch_dst,   0, NULL, SMP_T_IPV4, SMP_USE_L4SRV },
 	{ "bc_dst_port", smp_fetch_dport, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
-	{ "bc_src",      smp_fetch_src,   0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+	{ "bc_src",      smp_fetch_src,   0, NULL, SMP_T_IPV4, SMP_USE_L4SRV },
 	{ "bc_src_port", smp_fetch_sport, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
 
 	{ "dst",      smp_fetch_dst,   0, NULL, SMP_T_IPV4, SMP_USE_L4CLI },
diff --git a/src/tcpcheck.c b/src/tcpcheck.c
index 59d7688..8eefc4e 100644
--- a/src/tcpcheck.c
+++ b/src/tcpcheck.c
@@ -6,6 +6,7 @@
  * Copyright 2013 Baptiste Assmann <bedis9@gmail.com>
  * Copyright 2020 Gaetan Rivet <grive@u256.net>
  * Copyright 2020 Christopher Faulet <cfaulet@haproxy.com>
+ * Crown Copyright 2022 Defence Science and Technology Laboratory <dstlipgroup@dstl.gov.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -1190,10 +1191,10 @@
 	if (proto && proto->connect) {
 		int flags = 0;
 
-		if (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK)
-			flags |= CONNECT_HAS_DATA;
-		if (!next || next->action != TCPCHK_ACT_EXPECT)
+		if (!next)
 			flags |= CONNECT_DELACK_ALWAYS;
+		if (connect->options & TCPCHK_OPT_HAS_DATA)
+			flags |= (CONNECT_HAS_DATA|CONNECT_DELACK_ALWAYS);
 		status = proto->connect(conn, flags);
 	}
 
@@ -1414,7 +1415,7 @@
 		    (istlen(vsn) == 8 && (*(vsn.ptr+5) > '1' || (*(vsn.ptr+5) == '1' && *(vsn.ptr+7) >= '1'))))
 			slflags |= HTX_SL_F_VER_11;
 		slflags |= (HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN);
-		if (!isttest(send->http.body))
+		if (!(send->http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) && !isttest(send->http.body))
 			slflags |= HTX_SL_F_BODYLESS;
 
 		htx = htx_from_buf(&check->bo);
@@ -1461,12 +1462,18 @@
 		}
 		else
 			body = send->http.body;
-		clen = ist((!istlen(body) ? "0" : ultoa(istlen(body))));
 
-		if ((!connection_hdr && !htx_add_header(htx, ist("Connection"), ist("close"))) ||
-		    !htx_add_header(htx, ist("Content-length"), clen))
+		if (!connection_hdr && !htx_add_header(htx, ist("Connection"), ist("close")))
 			goto error_htx;
 
+		if ((send->http.meth.meth != HTTP_METH_OPTIONS &&
+		     send->http.meth.meth != HTTP_METH_GET &&
+		     send->http.meth.meth != HTTP_METH_HEAD &&
+		     send->http.meth.meth != HTTP_METH_DELETE) || istlen(body)) {
+			clen = ist((!istlen(body) ? "0" : ultoa(istlen(body))));
+			if (!htx_add_header(htx, ist("Content-length"), clen))
+				goto error_htx;
+		}
 
 		if (!htx_add_endof(htx, HTX_BLK_EOH) ||
 		    (istlen(body) && !htx_add_data_atonce(htx, body)))
@@ -3533,10 +3540,12 @@
 		new->send.http.vsn = IST_NULL;
 	}
 
-	free_tcpcheck_http_hdrs(&old->send.http.hdrs);
-	list_for_each_entry_safe(hdr, bhdr, &new->send.http.hdrs, list) {
-		LIST_DELETE(&hdr->list);
-		LIST_APPEND(&old->send.http.hdrs, &hdr->list);
+	if (!LIST_ISEMPTY(&new->send.http.hdrs)) {
+		free_tcpcheck_http_hdrs(&old->send.http.hdrs);
+		list_for_each_entry_safe(hdr, bhdr, &new->send.http.hdrs, list) {
+			LIST_DELETE(&hdr->list);
+			LIST_APPEND(&old->send.http.hdrs, &hdr->list);
+		}
 	}
 
 	if (!(new->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) && isttest(new->send.http.body)) {
@@ -3734,6 +3743,8 @@
 	 * comment is assigned to the following rule(s).
 	 */
 	list_for_each_entry_safe(chk, back, px->tcpcheck_rules.list, list) {
+		struct tcpcheck_rule *next;
+
 		if (chk->action != prev_action && prev_action != TCPCHK_ACT_COMMENT)
 			ha_free(&comment);
 
@@ -3748,6 +3759,9 @@
 		case TCPCHK_ACT_CONNECT:
 			if (!chk->comment && comment)
 				chk->comment = strdup(comment);
+			next = get_next_tcpcheck_rule(&px->tcpcheck_rules, chk);
+			if (next && next->action == TCPCHK_ACT_SEND)
+				chk->connect.options |= TCPCHK_OPT_HAS_DATA;
 			/* fall through */
 		case TCPCHK_ACT_ACTION_KW:
 			ha_free(&comment);
@@ -4336,8 +4350,7 @@
 	chk->index = 3;
 	LIST_APPEND(&rs->rules, &chk->list);
 
-	chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^2[0-9]{2}[- \r]",
-				               "min-recv", "4",
+	chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^(2[0-9]{2}-[^\r]*\r\n)*2[0-9]{2}[ \r]",
 				               "error-status", "L7STS",
 				               "on-error", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
 				               "on-success", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
@@ -4351,6 +4364,32 @@
 	chk->index = 4;
 	LIST_APPEND(&rs->rules, &chk->list);
 
+        /* Send an SMTP QUIT to ensure clean disconnect (issue 1812), and expect a 2xx response code */
+
+        chk = parse_tcpcheck_send((char *[]){"tcp-check", "send", "QUIT\r\n", ""},
+                                  1, curpx, &rs->rules, file, line, &errmsg);
+        if (!chk) {
+                ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+                goto error;
+        }
+        chk->index = 5;
+        LIST_APPEND(&rs->rules, &chk->list);
+
+        chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^2[0-9]{2}[- \r]",
+                                               "min-recv", "4",
+                                               "error-status", "L7STS",
+                                               "on-error", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                                               "on-success", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                                               "status-code", "res.payload(0,3)",
+                                               ""},
+                                    1, curpx, &rs->rules, TCPCHK_RULES_SMTP_CHK, file, line, &errmsg);
+        if (!chk) {
+                ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+                goto error;
+        }
+        chk->index = 6;
+        LIST_APPEND(&rs->rules, &chk->list);
+
   ruleset_found:
 	rules->list = &rs->rules;
 	rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
@@ -4483,7 +4522,7 @@
 	chk->index = 2;
 	LIST_APPEND(&rs->rules, &chk->list);
 
-	chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rbinary", "^52000000(08|0A|0C)000000(00|02|03|04|05|06)",
+	chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rbinary", "^52000000[A-Z0-9]{2}000000(00|02|03|04|05|06|07|09|0A)",
 				               "min-recv", "9",
 				               "error-status", "L7STS",
 				               "on-success", "PostgreSQL server is ok",
diff --git a/src/thread.c b/src/thread.c
index bc655bd..262c3bf 100644
--- a/src/thread.c
+++ b/src/thread.c
@@ -169,7 +169,9 @@
 	HA_RWLOCK_INIT(l);
 }
 
-/* returns the number of CPUs the current process is enabled to run on */
+/* returns the number of CPUs the current process is enabled to run on,
+ * regardless of any MAX_THREADS limitation.
+ */
 static int thread_cpus_enabled()
 {
 	int ret = 1;
@@ -190,7 +192,6 @@
 #endif
 #endif
 	ret = MAX(ret, 1);
-	ret = MIN(ret, MAX_THREADS);
 	return ret;
 }
 
@@ -243,6 +244,7 @@
 	preload_libgcc_s();
 
 	thread_cpus_enabled_at_boot = thread_cpus_enabled();
+	thread_cpus_enabled_at_boot = MIN(thread_cpus_enabled_at_boot, MAX_THREADS);
 
 	memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
 		  MAX_THREADS, thread_cpus_enabled_at_boot);
diff --git a/src/time.c b/src/time.c
index d6ab185..6335dc4 100644
--- a/src/time.c
+++ b/src/time.c
@@ -18,12 +18,14 @@
 #include <haproxy/ticks.h>
 #include <haproxy/tools.h>
 
+struct timeval start_date;                   /* the process's start date */
+struct timeval ready_date;                   /* date when the process was considered ready */
+
 THREAD_LOCAL unsigned int   now_ms;          /* internal date in milliseconds (may wrap) */
 THREAD_LOCAL unsigned int   samp_time;       /* total elapsed time over current sample */
 THREAD_LOCAL unsigned int   idle_time;       /* total idle time over current sample */
 THREAD_LOCAL struct timeval now;             /* internal date is a monotonic function of real clock */
 THREAD_LOCAL struct timeval date;            /* the real current date */
-struct timeval start_date;      /* the process's start date */
 THREAD_LOCAL struct timeval before_poll;     /* system date before calling poll() */
 THREAD_LOCAL struct timeval after_poll;      /* system date after leaving poll() */
 
@@ -298,6 +300,7 @@
 	now.tv_usec = (uint)old_now;
 	samp_time = idle_time = 0;
 	ti->idle_pct = 100;
+	ti->prev_cpu_time  = now_cpu_time();
 	tv_update_date(0, 1);
 }
 
diff --git a/src/tools.c b/src/tools.c
index bbd6a76..bec0a73 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -95,6 +95,9 @@
  */
 THREAD_LOCAL unsigned int statistical_prng_state = 2463534242U;
 
+/* set to true if this is a static build */
+int build_is_static = 0;
+
 /*
  * unsigned long long ASCII representation
  *
@@ -953,12 +956,12 @@
 
 	str2 = back = env_expand(strdup(str));
 	if (str2 == NULL) {
-		memprintf(err, "out of memory in '%s'\n", __FUNCTION__);
+		memprintf(err, "out of memory in '%s'", __FUNCTION__);
 		goto out;
 	}
 
 	if (!*str2) {
-		memprintf(err, "'%s' resolves to an empty address (environment variable missing?)\n", str);
+		memprintf(err, "'%s' resolves to an empty address (environment variable missing?)", str);
 		goto out;
 	}
 
@@ -1077,14 +1080,14 @@
 
 		new_fd = strtol(str2, &endptr, 10);
 		if (!*str2 || new_fd < 0 || *endptr) {
-			memprintf(err, "file descriptor '%s' is not a valid integer in '%s'\n", str2, str);
+			memprintf(err, "file descriptor '%s' is not a valid integer in '%s'", str2, str);
 			goto out;
 		}
 
 		/* just verify that it's a socket */
 		addr_len = sizeof(ss2);
 		if (getsockname(new_fd, (struct sockaddr *)&ss2, &addr_len) == -1) {
-			memprintf(err, "cannot use file descriptor '%d' : %s.\n", new_fd, strerror(errno));
+			memprintf(err, "cannot use file descriptor '%d' : %s.", new_fd, strerror(errno));
 			goto out;
 		}
 
@@ -1096,7 +1099,7 @@
 
 		new_fd = strtol(str2, &endptr, 10);
 		if (!*str2 || new_fd < 0 || *endptr) {
-			memprintf(err, "file descriptor '%s' is not a valid integer in '%s'\n", str2, str);
+			memprintf(err, "file descriptor '%s' is not a valid integer in '%s'", str2, str);
 			goto out;
 		}
 
@@ -1106,14 +1109,14 @@
 
 			addr_len = sizeof(ss);
 			if (getsockname(new_fd, (struct sockaddr *)&ss, &addr_len) == -1) {
-				memprintf(err, "cannot use file descriptor '%d' : %s.\n", new_fd, strerror(errno));
+				memprintf(err, "cannot use file descriptor '%d' : %s.", new_fd, strerror(errno));
 				goto out;
 			}
 
 			addr_len = sizeof(type);
 			if (getsockopt(new_fd, SOL_SOCKET, SO_TYPE, &type, &addr_len) != 0 ||
 			    (type == SOCK_STREAM) != (sock_type == SOCK_STREAM)) {
-				memprintf(err, "socket on file descriptor '%d' is of the wrong type.\n", new_fd);
+				memprintf(err, "socket on file descriptor '%d' is of the wrong type.", new_fd);
 				goto out;
 			}
 
@@ -1122,7 +1125,7 @@
 			((struct sockaddr_in *)&ss)->sin_addr.s_addr = new_fd;
 			((struct sockaddr_in *)&ss)->sin_port = 0;
 		} else {
-			memprintf(err, "a file descriptor is not acceptable here in '%s'\n", str);
+			memprintf(err, "a file descriptor is not acceptable here in '%s'", str);
 			goto out;
 		}
 	}
@@ -1141,7 +1144,7 @@
 
 		adr_len = strlen(str2);
 		if (adr_len > max_path_len) {
-			memprintf(err, "socket path '%s' too long (max %d)\n", str, max_path_len);
+			memprintf(err, "socket path '%s' too long (max %d)", str, max_path_len);
 			goto out;
 		}
 
@@ -1232,7 +1235,7 @@
 			porta = porth;
 		}
 		else if (*port1) { /* other any unexpected char */
-			memprintf(err, "invalid character '%c' in port number '%s' in '%s'\n", *port1, port1, str);
+			memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *port1, port1, str);
 			goto out;
 		}
 		else if (opts & PA_O_PORT_MAND) {
@@ -1248,7 +1251,7 @@
 		if (str2ip2(str2, &ss, 0) == NULL) {
 			if ((!(opts & PA_O_RESOLVE) && !fqdn) ||
 			    ((opts & PA_O_RESOLVE) && str2ip2(str2, &ss, 1) == NULL)) {
-				memprintf(err, "invalid address: '%s' in '%s'\n", str2, str);
+				memprintf(err, "invalid address: '%s' in '%s'", str2, str);
 				goto out;
 			}
 
@@ -1263,11 +1266,11 @@
 	}
 
 	if (ctrl_type == SOCK_STREAM && !(opts & PA_O_STREAM)) {
-		memprintf(err, "stream-type socket not acceptable in '%s'\n", str);
+		memprintf(err, "stream-type socket not acceptable in '%s'", str);
 		goto out;
 	}
 	else if (ctrl_type == SOCK_DGRAM && !(opts & PA_O_DGRAM)) {
-		memprintf(err, "dgram-type socket not acceptable in '%s'\n", str);
+		memprintf(err, "dgram-type socket not acceptable in '%s'", str);
 		goto out;
 	}
 
@@ -1346,7 +1349,7 @@
 	default:
 		return NULL;
 	}
-	inet_ntop(addr->ss_family, ptr, buffer, get_addr_len(addr));
+	inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer));
 	if (map_ports)
 		return memprintf(&out, "%s:%+d", buffer, port);
 	else
@@ -1932,7 +1935,8 @@
 
 /*
  * Tries to prefix characters tagged in the <map> with the <escape>
- * character. The input <string> must be zero-terminated. The result will
+ * character. The input <string> is processed until <string_stop>
+ * is reached or a NUL byte is encountered. The result will
  * be stored between <start> (included) and <stop> (excluded). This
  * function will always try to terminate the resulting string with a '\0'
  * before <stop>, and will return its position if the conversion
@@ -1940,11 +1944,11 @@
  */
 char *escape_string(char *start, char *stop,
 		    const char escape, const long *map,
-		    const char *string)
+		    const char *string, const char *string_stop)
 {
 	if (start < stop) {
 		stop--; /* reserve one byte for the final '\0' */
-		while (start < stop && *string != '\0') {
+		while (start < stop && string < string_stop && *string != '\0') {
 			if (!ha_bit_test((unsigned char)(*string), map))
 				*start++ = *string;
 			else {
@@ -4796,6 +4800,8 @@
 			break;
 		}
 	}
+#elif defined(__sun)
+	ret = getexecname();
 #endif
 	return ret;
 }
@@ -4824,6 +4830,26 @@
 	return ret;
 }
 
+/* Sets build_is_static to true if we detect a static build. Some older glibcs
+ * tend to crash inside dlsym() in static builds, but tests show that at least
+ * dladdr() still works (and will fail to resolve anything of course). Thus we
+ * try to determine if we're on a static build to avoid calling dlsym() in this
+ * case.
+ */
+void check_if_static_build()
+{
+	Dl_info dli = { };
+	size_t size = 0;
+
+	/* Now let's try to be smarter */
+	if (!dladdr_and_size(&main, &dli, &size))
+		build_is_static = 1;
+	else
+		build_is_static = 0;
+}
+
+INITCALL0(STG_PREPARE, check_if_static_build);
+
 /* Tries to retrieve the address of the first occurrence symbol <name>.
  * Note that NULL in return is not always an error as a symbol may have that
  * address in special situations.
@@ -4833,7 +4859,8 @@
 	void *ptr = NULL;
 
 #ifdef RTLD_DEFAULT
-	ptr = dlsym(RTLD_DEFAULT, name);
+	if (!build_is_static)
+		ptr = dlsym(RTLD_DEFAULT, name);
 #endif
 	return ptr;
 }
@@ -4848,7 +4875,8 @@
 	void *ptr = NULL;
 
 #ifdef RTLD_NEXT
-	ptr = dlsym(RTLD_NEXT, name);
+	if (!build_is_static)
+		ptr = dlsym(RTLD_NEXT, name);
 #endif
 	return ptr;
 }
diff --git a/src/trace.c b/src/trace.c
index 4c5270c..96d422a 100644
--- a/src/trace.c
+++ b/src/trace.c
@@ -136,7 +136,7 @@
 	}
 	if (check) {
 		srv = check->server;
-		be = srv->proxy;
+		be = (srv ? srv->proxy : NULL);
 	}
 
 	if (!srv && conn)
@@ -240,7 +240,7 @@
 	}
 
 	if (src->sink)
-		sink_write(src->sink, line, words, 0, 0, NULL);
+		sink_write(src->sink, 0, line, words, 0, 0, NULL);
 
  end:
 	/* check if we need to stop the trace now */