blob: 5b07ae091773d2acf3dcdf5666989e3e9b00527b [file] [log] [blame]
/*
 * HTTP/2 mux-demux for connections
 *
 * Copyright 2017 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau63617db2021-10-06 18:23:40 +020014#include <import/ebmbtree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020015#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020017#include <haproxy/connection.h>
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +020018#include <haproxy/dynbuf.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020019#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020020#include <haproxy/hpack-dec.h>
21#include <haproxy/hpack-enc.h>
22#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020023#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020024#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020025#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020026#include <haproxy/log.h>
Willy Tarreau6c0fadf2022-09-12 19:07:51 +020027#include <haproxy/mux_h2-t.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020028#include <haproxy/net_helper.h>
Frédéric Lécaille9969adb2023-01-18 11:52:21 +010029#include <haproxy/proxy.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020030#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010031#include <haproxy/stats.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020032#include <haproxy/stconn.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020033#include <haproxy/stream.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020034#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020035
36
/* dummy streams returned for closed, error, refused and idle states */
static const struct h2s *h2_closed_stream;
static const struct h2s *h2_error_stream;
static const struct h2s *h2_refused_stream;
static const struct h2s *h2_idle_stream;
Willy Tarreau5ab6b572017-09-22 08:05:00 +020043
Willy Tarreau6c0fadf2022-09-12 19:07:51 +020044/**** H2 connection descriptor ****/
Willy Tarreau5ab6b572017-09-22 08:05:00 +020045struct h2c {
46 struct connection *conn;
47
48 enum h2_cs st0; /* mux state */
49 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
50
51 /* 16 bit hole here */
52 uint32_t flags; /* connection flags: H2_CF_* */
Willy Tarreau2e2083a2019-01-31 10:34:07 +010053 uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020054 int32_t max_id; /* highest ID known on this connection, <0 before preface */
55 uint32_t rcvd_c; /* newly received data to ACK for the connection */
Willy Tarreau617592c2022-06-08 16:32:22 +020056 uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) or zero */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020057
58 /* states for the demux direction */
59 struct hpack_dht *ddht; /* demux dynamic header table */
Willy Tarreauc9fa0482018-07-10 17:43:27 +020060 struct buffer dbuf; /* demux buffer */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020061
62 int32_t dsi; /* demux stream ID (<0 = idle) */
63 int32_t dfl; /* demux frame length (if dsi >= 0) */
64 int8_t dft; /* demux frame type (if dsi >= 0) */
65 int8_t dff; /* demux frame flags (if dsi >= 0) */
Willy Tarreau05e5daf2017-12-11 15:17:36 +010066 uint8_t dpl; /* demux pad length (part of dfl), init to 0 */
67 /* 8 bit hole here */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020068 int32_t last_sid; /* last processed stream ID for GOAWAY, <0 before preface */
69
70 /* states for the mux direction */
Willy Tarreau51330962019-05-26 09:38:07 +020071 struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020072 int32_t miw; /* mux initial window size for all new streams */
73 int32_t mws; /* mux window size. Can be negative. */
74 int32_t mfs; /* mux's max frame size */
75
Willy Tarreauea392822017-10-31 10:02:25 +010076 int timeout; /* idle timeout duration in ticks */
Willy Tarreau599391a2017-11-24 10:16:00 +010077 int shut_timeout; /* idle timeout duration in ticks after GOAWAY was sent */
Willy Tarreauf279a2f2023-05-30 15:42:35 +020078 int idle_start; /* date of the last time the connection went idle (no stream + empty mbuf), or the start of current http req */
Willy Tarreau15a47332022-03-18 15:57:34 +010079 /* 32-bit hole here */
Willy Tarreau49745612017-12-03 18:56:02 +010080 unsigned int nb_streams; /* number of streams in the tree */
Willy Tarreau36c22322022-05-27 10:41:24 +020081 unsigned int nb_sc; /* number of attached stream connectors */
Willy Tarreaud64a3eb2019-01-23 10:22:21 +010082 unsigned int nb_reserved; /* number of reserved streams */
Willy Tarreaue9634bd2019-01-23 10:25:10 +010083 unsigned int stream_cnt; /* total number of streams seen */
Willy Tarreau0b37d652018-10-03 10:33:02 +020084 struct proxy *proxy; /* the proxy this connection was created for */
Willy Tarreauea392822017-10-31 10:02:25 +010085 struct task *task; /* timeout management task */
Amaury Denoyellec92697d2020-10-27 17:16:01 +010086 struct h2_counters *px_counters; /* h2 counters attached to proxy */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020087 struct eb_root streams_by_id; /* all active streams by their ID */
88 struct list send_list; /* list of blocked streams requesting to send */
89 struct list fctl_list; /* list of streams blocked by connection's fctl */
Willy Tarreau9edf6db2019-10-02 10:49:59 +020090 struct list blocked_list; /* list of streams blocked for other reasons (e.g. sfctl, dep) */
Willy Tarreau44e973f2018-03-01 17:49:30 +010091 struct buffer_wait buf_wait; /* wait list for buffer allocations */
Olivier Houchardfa8aa862018-10-10 18:25:41 +020092 struct wait_event wait_event; /* To be used if we're waiting for I/Os */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020093};
94
Willy Tarreau2c249eb2019-05-13 18:06:17 +020095
Willy Tarreau18312642017-10-11 07:57:07 +020096/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
Christopher Fauletfafd1b02020-11-03 18:25:52 +010097 * it is being processed in the internal HTTP representation (HTX).
Willy Tarreau18312642017-10-11 07:57:07 +020098 */
99struct h2s {
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200100 struct sedesc *sd;
Olivier Houchardf502aca2018-12-14 19:42:40 +0100101 struct session *sess;
Willy Tarreau18312642017-10-11 07:57:07 +0200102 struct h2c *h2c;
Willy Tarreau18312642017-10-11 07:57:07 +0200103 struct eb32_node by_id; /* place in h2c's streams_by_id */
Willy Tarreau18312642017-10-11 07:57:07 +0200104 int32_t id; /* stream ID */
105 uint32_t flags; /* H2_SF_* */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +0200106 int sws; /* stream window size, to be added to the mux's initial window size */
Willy Tarreau18312642017-10-11 07:57:07 +0200107 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
108 enum h2_ss st;
Willy Tarreau9c5e22e2018-09-11 19:22:14 +0200109 uint16_t status; /* HTTP response status */
Willy Tarreau1915ca22019-01-24 11:49:37 +0100110 unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
Olivier Houchard638b7992018-08-16 15:41:52 +0200111 struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
Willy Tarreau4596fe22022-05-17 19:07:51 +0200112 struct wait_event *subs; /* recv wait_event the stream connector associated is waiting on (via h2_subscribe) */
Olivier Houchardfa8aa862018-10-10 18:25:41 +0200113 struct list list; /* To be used when adding in h2c->send_list or h2c->fctl_lsit */
Willy Tarreau5723f292020-01-10 15:16:57 +0100114 struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to send an RST after we failed to,
115 * in case there's no other subscription to do it */
Amaury Denoyelle74162742020-12-11 17:53:05 +0100116
117 char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
Willy Tarreau18312642017-10-11 07:57:07 +0200118};
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200119
/* descriptor for an h2 frame header */
struct h2_fh {
	uint32_t len; /* length, host order, 24 bits */
	uint32_t sid; /* stream id, host order, 31 bits */
	uint8_t ft;   /* frame type */
	uint8_t ff;   /* frame flags */
};
127
Willy Tarreau12ae2122019-08-08 18:23:12 +0200128/* trace source and events */
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200129static void h2_trace(enum trace_level level, uint64_t mask, \
130 const struct trace_source *src,
131 const struct ist where, const struct ist func,
132 const void *a1, const void *a2, const void *a3, const void *a4);
Willy Tarreau12ae2122019-08-08 18:23:12 +0200133
134/* The event representation is split like this :
135 * strm - application layer
136 * h2s - internal H2 stream
137 * h2c - internal H2 connection
138 * conn - external connection
139 *
140 */
141static const struct trace_event h2_trace_events[] = {
142#define H2_EV_H2C_NEW (1ULL << 0)
Willy Tarreau87951942019-08-30 07:34:36 +0200143 { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200144#define H2_EV_H2C_RECV (1ULL << 1)
Willy Tarreau87951942019-08-30 07:34:36 +0200145 { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200146#define H2_EV_H2C_SEND (1ULL << 2)
Willy Tarreau87951942019-08-30 07:34:36 +0200147 { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200148#define H2_EV_H2C_FCTL (1ULL << 3)
Willy Tarreau87951942019-08-30 07:34:36 +0200149 { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200150#define H2_EV_H2C_BLK (1ULL << 4)
Willy Tarreau87951942019-08-30 07:34:36 +0200151 { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200152#define H2_EV_H2C_WAKE (1ULL << 5)
Willy Tarreau87951942019-08-30 07:34:36 +0200153 { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200154#define H2_EV_H2C_END (1ULL << 6)
Willy Tarreau87951942019-08-30 07:34:36 +0200155 { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200156#define H2_EV_H2C_ERR (1ULL << 7)
Willy Tarreau87951942019-08-30 07:34:36 +0200157 { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200158#define H2_EV_RX_FHDR (1ULL << 8)
Willy Tarreau87951942019-08-30 07:34:36 +0200159 { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200160#define H2_EV_RX_FRAME (1ULL << 9)
Willy Tarreau87951942019-08-30 07:34:36 +0200161 { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200162#define H2_EV_RX_EOI (1ULL << 10)
Willy Tarreau87951942019-08-30 07:34:36 +0200163 { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200164#define H2_EV_RX_PREFACE (1ULL << 11)
Willy Tarreau87951942019-08-30 07:34:36 +0200165 { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200166#define H2_EV_RX_DATA (1ULL << 12)
Willy Tarreau87951942019-08-30 07:34:36 +0200167 { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200168#define H2_EV_RX_HDR (1ULL << 13)
Willy Tarreau87951942019-08-30 07:34:36 +0200169 { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200170#define H2_EV_RX_PRIO (1ULL << 14)
Willy Tarreau87951942019-08-30 07:34:36 +0200171 { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200172#define H2_EV_RX_RST (1ULL << 15)
Willy Tarreau87951942019-08-30 07:34:36 +0200173 { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200174#define H2_EV_RX_SETTINGS (1ULL << 16)
Willy Tarreau87951942019-08-30 07:34:36 +0200175 { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200176#define H2_EV_RX_PUSH (1ULL << 17)
Willy Tarreau87951942019-08-30 07:34:36 +0200177 { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200178#define H2_EV_RX_PING (1ULL << 18)
Willy Tarreau87951942019-08-30 07:34:36 +0200179 { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200180#define H2_EV_RX_GOAWAY (1ULL << 19)
Willy Tarreau87951942019-08-30 07:34:36 +0200181 { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200182#define H2_EV_RX_WU (1ULL << 20)
Willy Tarreau87951942019-08-30 07:34:36 +0200183 { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200184#define H2_EV_RX_CONT (1ULL << 21)
Willy Tarreau87951942019-08-30 07:34:36 +0200185 { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200186#define H2_EV_TX_FRAME (1ULL << 22)
Willy Tarreau87951942019-08-30 07:34:36 +0200187 { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200188#define H2_EV_TX_EOI (1ULL << 23)
Willy Tarreau87951942019-08-30 07:34:36 +0200189 { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200190#define H2_EV_TX_PREFACE (1ULL << 24)
Willy Tarreau87951942019-08-30 07:34:36 +0200191 { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200192#define H2_EV_TX_DATA (1ULL << 25)
Willy Tarreau87951942019-08-30 07:34:36 +0200193 { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200194#define H2_EV_TX_HDR (1ULL << 26)
Willy Tarreau87951942019-08-30 07:34:36 +0200195 { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200196#define H2_EV_TX_PRIO (1ULL << 27)
Willy Tarreau87951942019-08-30 07:34:36 +0200197 { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200198#define H2_EV_TX_RST (1ULL << 28)
Willy Tarreau87951942019-08-30 07:34:36 +0200199 { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200200#define H2_EV_TX_SETTINGS (1ULL << 29)
Willy Tarreau87951942019-08-30 07:34:36 +0200201 { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200202#define H2_EV_TX_PUSH (1ULL << 30)
Willy Tarreau87951942019-08-30 07:34:36 +0200203 { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200204#define H2_EV_TX_PING (1ULL << 31)
Willy Tarreau87951942019-08-30 07:34:36 +0200205 { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200206#define H2_EV_TX_GOAWAY (1ULL << 32)
Willy Tarreau87951942019-08-30 07:34:36 +0200207 { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200208#define H2_EV_TX_WU (1ULL << 33)
Willy Tarreau87951942019-08-30 07:34:36 +0200209 { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200210#define H2_EV_TX_CONT (1ULL << 34)
Willy Tarreau87951942019-08-30 07:34:36 +0200211 { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200212#define H2_EV_H2S_NEW (1ULL << 35)
Willy Tarreau87951942019-08-30 07:34:36 +0200213 { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200214#define H2_EV_H2S_RECV (1ULL << 36)
Willy Tarreau87951942019-08-30 07:34:36 +0200215 { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200216#define H2_EV_H2S_SEND (1ULL << 37)
Willy Tarreau87951942019-08-30 07:34:36 +0200217 { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200218#define H2_EV_H2S_FCTL (1ULL << 38)
Willy Tarreau87951942019-08-30 07:34:36 +0200219 { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200220#define H2_EV_H2S_BLK (1ULL << 39)
Willy Tarreau87951942019-08-30 07:34:36 +0200221 { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200222#define H2_EV_H2S_WAKE (1ULL << 40)
Willy Tarreau87951942019-08-30 07:34:36 +0200223 { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200224#define H2_EV_H2S_END (1ULL << 41)
Willy Tarreau87951942019-08-30 07:34:36 +0200225 { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200226#define H2_EV_H2S_ERR (1ULL << 42)
Willy Tarreau87951942019-08-30 07:34:36 +0200227 { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200228#define H2_EV_STRM_NEW (1ULL << 43)
Willy Tarreau87951942019-08-30 07:34:36 +0200229 { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200230#define H2_EV_STRM_RECV (1ULL << 44)
Willy Tarreau87951942019-08-30 07:34:36 +0200231 { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200232#define H2_EV_STRM_SEND (1ULL << 45)
Willy Tarreau87951942019-08-30 07:34:36 +0200233 { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200234#define H2_EV_STRM_FULL (1ULL << 46)
Willy Tarreau87951942019-08-30 07:34:36 +0200235 { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200236#define H2_EV_STRM_WAKE (1ULL << 47)
Willy Tarreau87951942019-08-30 07:34:36 +0200237 { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200238#define H2_EV_STRM_SHUT (1ULL << 48)
Willy Tarreau87951942019-08-30 07:34:36 +0200239 { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200240#define H2_EV_STRM_END (1ULL << 49)
Willy Tarreau87951942019-08-30 07:34:36 +0200241 { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200242#define H2_EV_STRM_ERR (1ULL << 50)
Willy Tarreau87951942019-08-30 07:34:36 +0200243 { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200244#define H2_EV_PROTO_ERR (1ULL << 51)
Willy Tarreau87951942019-08-30 07:34:36 +0200245 { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200246 { }
247};
248
249static const struct name_desc h2_trace_lockon_args[4] = {
250 /* arg1 */ { /* already used by the connection */ },
251 /* arg2 */ { .name="h2s", .desc="H2 stream" },
252 /* arg3 */ { },
253 /* arg4 */ { }
254};
255
256static const struct name_desc h2_trace_decoding[] = {
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200257#define H2_VERB_CLEAN 1
258 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
259#define H2_VERB_MINIMAL 2
Willy Tarreau12ae2122019-08-08 18:23:12 +0200260 { .name="minimal", .desc="report only h2c/h2s state and flags, no real decoding" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200261#define H2_VERB_SIMPLE 3
Willy Tarreau12ae2122019-08-08 18:23:12 +0200262 { .name="simple", .desc="add request/response status line or frame info when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200263#define H2_VERB_ADVANCED 4
Willy Tarreau12ae2122019-08-08 18:23:12 +0200264 { .name="advanced", .desc="add header fields or frame decoding when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200265#define H2_VERB_COMPLETE 5
Willy Tarreau12ae2122019-08-08 18:23:12 +0200266 { .name="complete", .desc="add full data dump when available" },
267 { /* end */ }
268};
269
Willy Tarreau6eb3d372021-04-10 19:29:26 +0200270static struct trace_source trace_h2 __read_mostly = {
Willy Tarreau12ae2122019-08-08 18:23:12 +0200271 .name = IST("h2"),
272 .desc = "HTTP/2 multiplexer",
273 .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200274 .default_cb = h2_trace,
Willy Tarreau12ae2122019-08-08 18:23:12 +0200275 .known_events = h2_trace_events,
276 .lockon_args = h2_trace_lockon_args,
277 .decoding = h2_trace_decoding,
278 .report_events = ~0, // report everything by default
279};
280
281#define TRACE_SOURCE &trace_h2
282INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
283
/* h2 stats module */
enum {
	H2_ST_HEADERS_RCVD,
	H2_ST_DATA_RCVD,
	H2_ST_SETTINGS_RCVD,
	H2_ST_RST_STREAM_RCVD,
	H2_ST_GOAWAY_RCVD,

	H2_ST_CONN_PROTO_ERR,
	H2_ST_STRM_PROTO_ERR,
	H2_ST_RST_STREAM_RESP,
	H2_ST_GOAWAY_RESP,

	H2_ST_OPEN_CONN,
	H2_ST_OPEN_STREAM,
	H2_ST_TOTAL_CONN,
	H2_ST_TOTAL_STREAM,

	H2_STATS_COUNT /* must be the last member of the enum */
};
304
305static struct name_desc h2_stats[] = {
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100306 [H2_ST_HEADERS_RCVD] = { .name = "h2_headers_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100307 .desc = "Total number of received HEADERS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100308 [H2_ST_DATA_RCVD] = { .name = "h2_data_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100309 .desc = "Total number of received DATA frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100310 [H2_ST_SETTINGS_RCVD] = { .name = "h2_settings_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100311 .desc = "Total number of received SETTINGS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100312 [H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100313 .desc = "Total number of received RST_STREAM frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100314 [H2_ST_GOAWAY_RCVD] = { .name = "h2_goaway_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100315 .desc = "Total number of received GOAWAY frames" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100316
317 [H2_ST_CONN_PROTO_ERR] = { .name = "h2_detected_conn_protocol_errors",
318 .desc = "Total number of connection protocol errors" },
319 [H2_ST_STRM_PROTO_ERR] = { .name = "h2_detected_strm_protocol_errors",
320 .desc = "Total number of stream protocol errors" },
321 [H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100322 .desc = "Total number of RST_STREAM sent on detected error" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100323 [H2_ST_GOAWAY_RESP] = { .name = "h2_goaway_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100324 .desc = "Total number of GOAWAY sent on detected error" },
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100325
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100326 [H2_ST_OPEN_CONN] = { .name = "h2_open_connections",
327 .desc = "Count of currently open connections" },
328 [H2_ST_OPEN_STREAM] = { .name = "h2_backend_open_streams",
329 .desc = "Count of currently open streams" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100330 [H2_ST_TOTAL_CONN] = { .name = "h2_total_connections",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100331 .desc = "Total number of connections" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100332 [H2_ST_TOTAL_STREAM] = { .name = "h2_backend_total_streams",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100333 .desc = "Total number of streams" },
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100334};
335
static struct h2_counters {
	long long headers_rcvd;    /* total number of HEADERS frame received */
	long long data_rcvd;       /* total number of DATA frame received */
	long long settings_rcvd;   /* total number of SETTINGS frame received */
	long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
	long long goaway_rcvd;     /* total number of GOAWAY frame received */

	long long conn_proto_err;  /* total number of protocol errors detected */
	long long strm_proto_err;  /* total number of protocol errors detected */
	long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
	long long goaway_resp;     /* total number of GOAWAY frame sent on error */

	long long open_conns;      /* count of currently open connections */
	long long open_streams;    /* count of currently open streams */
	long long total_conns;     /* total number of connections */
	long long total_streams;   /* total number of streams */
} h2_counters;
353
354static void h2_fill_stats(void *data, struct field *stats)
355{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100356 struct h2_counters *counters = data;
357
358 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
359 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
360 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
361 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
362 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100363
364 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
365 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
366 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
367 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100368
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100369 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
370 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
371 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
372 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100373}
374
375static struct stats_module h2_stats_module = {
376 .name = "h2",
377 .fill_stats = h2_fill_stats,
378 .stats = h2_stats,
379 .stats_count = H2_STATS_COUNT,
380 .counters = &h2_counters,
381 .counters_size = sizeof(h2_counters),
382 .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
383 .clearable = 1,
384};
385
386INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
387
Willy Tarreau8ceae722018-11-26 11:58:30 +0100388/* the h2c connection pool */
389DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));
390
391/* the h2s stream pool */
392DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));
393
Willy Tarreaudc572362018-12-12 08:08:05 +0100394/* The default connection window size is 65535, it may only be enlarged using
395 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
396 * we'll pretend we already received the difference between the two to send
397 * an equivalent window update to enlarge it to 2G-1.
398 */
399#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)
400
Willy Tarreau455d5682019-05-24 19:42:18 +0200401/* maximum amount of data we're OK with re-aligning for buffer optimizations */
402#define MAX_DATA_REALIGN 1024
403
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200404/* a few settings from the global section */
405static int h2_settings_header_table_size = 4096; /* initial value */
Willy Tarreau9d7abda2023-04-17 15:04:34 +0200406static int h2_settings_initial_window_size = 65536; /* default initial value */
407static int h2_be_settings_initial_window_size = 0; /* backend's default initial value */
408static int h2_fe_settings_initial_window_size = 0; /* frontend's default initial value */
Willy Tarreauca1027c2023-04-18 15:57:03 +0200409static unsigned int h2_settings_max_concurrent_streams = 100; /* default value */
410static unsigned int h2_be_settings_max_concurrent_streams = 0; /* backend value */
411static unsigned int h2_fe_settings_max_concurrent_streams = 0; /* frontend value */
Willy Tarreaua24b35c2019-02-21 13:24:36 +0100412static int h2_settings_max_frame_size = 0; /* unset */
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200413
Willy Tarreau4869ed52023-10-13 18:11:59 +0200414/* other non-protocol settings */
415static unsigned int h2_fe_max_total_streams = 0; /* frontend value */
416
Willy Tarreaub22b5f02022-05-10 14:57:16 +0200417/* a dummy closed endpoint */
Willy Tarreauea59b022022-05-17 17:53:22 +0200418static const struct sedesc closed_ep = {
Willy Tarreauc1054922022-05-18 07:43:52 +0200419 .sc = NULL,
Willy Tarreaub605c422022-05-17 17:04:55 +0200420 .flags = SE_FL_DETACHED,
Willy Tarreaub22b5f02022-05-10 14:57:16 +0200421};
422
/* a dummy closed stream carrying the STREAM_CLOSED error code and the
 * RST_RCVD flag (read-only, shared)
 */
static const struct h2s *h2_closed_stream = &(const struct h2s){
	.sd = (struct sedesc *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_STREAM_CLOSED,
	.flags = H2_SF_RST_RCVD,
	.id = 0,
};
432
/* a dummy closed stream returning a PROTOCOL_ERROR error (read-only, shared) */
static const struct h2s *h2_error_stream = &(const struct h2s){
	.sd = (struct sedesc *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_PROTOCOL_ERROR,
	.flags = 0,
	.id = 0,
};
442
/* a dummy closed stream returning a REFUSED_STREAM error (read-only, shared) */
static const struct h2s *h2_refused_stream = &(const struct h2s){
	.sd = (struct sedesc *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_REFUSED_STREAM,
	.flags = 0,
	.id = 0,
};
452
/* and a dummy idle stream for use with any unannounced stream
 * (read-only, shared; reports STREAM_CLOSED if an error code is needed)
 */
static const struct h2s *h2_idle_stream = &(const struct h2s){
	.sd = (struct sedesc *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_IDLE,
	.errcode = H2_ERR_STREAM_CLOSED,
	.id = 0,
};
461
Willy Tarreaude4a5382023-10-17 08:25:19 +0200462
Willy Tarreau144f84a2021-03-02 16:09:26 +0100463struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
Olivier Houchardd4dd22d2018-08-17 18:39:46 +0200464static int h2_send(struct h2c *h2c);
465static int h2_recv(struct h2c *h2c);
Olivier Houchard7505f942018-08-21 18:10:44 +0200466static int h2_process(struct h2c *h2c);
Willy Tarreau691d5032021-01-20 14:55:01 +0100467/* h2_io_cb is exported to see it resolved in "show fd" */
Willy Tarreau144f84a2021-03-02 16:09:26 +0100468struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
Willy Tarreau0b559072018-02-26 15:22:17 +0100469static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
Willy Tarreau7cfbb812023-01-26 16:02:01 +0100470static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
Willy Tarreaua56a6de2018-02-26 15:59:07 +0100471static int h2_frt_transfer_data(struct h2s *h2s);
Willy Tarreau144f84a2021-03-02 16:09:26 +0100472struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
Willy Tarreau36c22322022-05-27 10:41:24 +0200473static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess);
Willy Tarreau8b2757c2018-12-19 17:36:48 +0100474static void h2s_alert(struct h2s *h2s);
Willy Tarreaude4a5382023-10-17 08:25:19 +0200475static inline void h2_remove_from_list(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200476
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200477/* returns the stconn associated to the H2 stream */
478static forceinline struct stconn *h2s_sc(const struct h2s *h2s)
479{
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200480 return h2s->sd->sc;
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200481}
482
/* the H2 traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
 * that arg3, if non-null, is either of type htx for tx headers, or of type
 * buffer for everything else.
 */
static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct h2c *h2c = conn ? conn->ctx : NULL;
	const struct h2s *h2s = a2;
	const struct buffer *buf = a3;
	const struct htx *htx;
	int pos;

	if (!h2c) // nothing to add
		return;

	/* above H2_VERB_CLEAN, dump connection and stream identification info */
	if (src->verbosity > H2_VERB_CLEAN) {
		chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));

		if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
			conn_append_debug_info(&trace_buf, conn, " : ");

		if (h2c->errcode)
			chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);

		if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
		    (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
			chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
		}

		if (h2s) {
			/* the dummy streams (idle/closed/refused/error) are
			 * printed symbolically instead of by pointer.
			 */
			if (h2s->id <= 0)
				chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
			if (h2s == h2_idle_stream)
				chunk_appendf(&trace_buf, " h2s=IDL");
			else if (h2s != h2_closed_stream && h2s != h2_refused_stream && h2s != h2_error_stream)
				chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
			else if (h2c->dsi > 0) // don't show that before sid is known
				chunk_appendf(&trace_buf, " h2s=CLO");
			if (h2s->id && h2s->errcode)
				chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
		}
	}

	/* Let's dump decoded requests and responses right after parsing. They
	 * are traced at level USER with a few recognizable flags.
	 */
	if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
	     mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
		htx = htxbuf(buf); // recv req/res
	else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
		htx = a3; // send req/res
	else
		htx = NULL;

	/* only dump the start line (method/uri/version or version/status/reason) */
	if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
		const struct htx_blk *blk = htx_get_blk(htx, pos);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
		else if (type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}
}
560
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200561
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100562/* Detect a pending read0 for a H2 connection. It happens if a read0 was
563 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
564 * to parse pending data, confirming no more progress is possible because
565 * we're facing a truncated frame. The function returns 1 to report a read0
566 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200567 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100568static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200569{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100570 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200571}
572
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200573/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreau34395832022-03-18 14:59:54 +0100574 * connection may expire when it has no attached streams. As long as streams
575 * are attached, the application layer is responsible for timeout management,
576 * and each layer will detach when it doesn't want to wait anymore. When the
577 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200578 */
579static inline int h2c_may_expire(const struct h2c *h2c)
580{
Willy Tarreau36c22322022-05-27 10:41:24 +0200581 return !h2c->nb_sc;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200582}
583
Willy Tarreauca1027c2023-04-18 15:57:03 +0200584/* returns the number of max concurrent streams permitted on a connection,
585 * depending on its side (frontend or backend), falling back to the default
586 * h2_settings_max_concurrent_streams. It may even be zero.
587 */
588static inline int h2c_max_concurrent_streams(const struct h2c *h2c)
589{
590 int ret;
591
592 ret = (h2c->flags & H2_CF_IS_BACK) ?
593 h2_be_settings_max_concurrent_streams :
594 h2_fe_settings_max_concurrent_streams;
595
596 ret = ret ? ret : h2_settings_max_concurrent_streams;
597 return ret;
598}
599
600
/* update h2c timeout if needed. When no stream connector remains attached,
 * this function takes over timeout management: it picks the http-keep-alive,
 * http-request, shut or regular data timeout depending on the connection's
 * progress, and also anticipates the soft-stop close-spread window. The task
 * is then requeued. Does nothing if the connection has no task.
 */
static void h2c_update_timeout(struct h2c *h2c)
{
	int is_idle_conn = 0;

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	if (!h2c->task)
		goto leave;

	if (h2c_may_expire(h2c)) {
		/* no more streams attached */
		if (br_data(h2c->mbuf)) {
			/* pending output data: always the regular data timeout */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
		} else {
			/* no stream, no output data */
			if (!(h2c->flags & H2_CF_IS_BACK)) {
				int to;

				if (h2c->max_id > 0 && !b_data(&h2c->dbuf) &&
				    tick_isset(h2c->proxy->timeout.httpka)) {
					/* idle after having seen one stream => keep-alive */
					to = h2c->proxy->timeout.httpka;
				} else {
					/* before first request, or started to deserialize a
					 * new req => http-request.
					 */
					to = h2c->proxy->timeout.httpreq;
				}

				/* the timeout is relative to when the connection went idle */
				h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
				is_idle_conn = 1;
			}

			if (h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED)) {
				/* GOAWAY sent (or failed), closing in progress */
				int exp = tick_add_ifset(now_ms, h2c->shut_timeout);

				h2c->task->expire = tick_first(h2c->task->expire, exp);
				is_idle_conn = 1;
			}

			/* if a timeout above was not set, fall back to the default one */
			if (!tick_isset(h2c->task->expire))
				h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
		}

		if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
		    is_idle_conn && tick_isset(global.close_spread_end)) {
			/* If a soft-stop is in progress and a close-spread-time
			 * is set, we want to spread idle connection closing roughly
			 * evenly across the defined window. This should only
			 * act on idle frontend connections.
			 * If the window end is already in the past, we wake the
			 * timeout task up immediately so that it can be closed.
			 */
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* We don't need to reset the expire if it would
				 * already happen before the close window end.
				 */
				if (tick_isset(h2c->task->expire) &&
				    tick_is_le(global.close_spread_end, h2c->task->expire)) {
					/* Set an expire value shorter than the current value
					 * because the close spread window end comes earlier.
					 */
					h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
				}
			}
			else {
				/* We are past the soft close window end, wake the timeout
				 * task up immediately.
				 */
				task_wakeup(h2c->task, TASK_WOKEN_TIMER);
			}
		}

	} else {
		/* streams are still attached: the app layer manages the timeout */
		h2c->task->expire = TICK_ETERNITY;
	}
	task_queue(h2c->task);
 leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
}
686
Olivier Houchard7a977432019-03-21 15:47:13 +0100687static __inline int
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200688h2c_is_dead(const struct h2c *h2c)
Olivier Houchard7a977432019-03-21 15:47:13 +0100689{
690 if (eb_is_empty(&h2c->streams_by_id) && /* don't close if streams exist */
Christopher Fauletff7925d2022-10-11 19:12:40 +0200691 ((h2c->flags & H2_CF_ERROR) || /* errors close immediately */
Christopher Faulet21fb6bd2023-03-28 12:16:53 +0200692 (h2c->flags & H2_CF_ERR_PENDING && h2c->st0 < H2_CS_FRAME_H) || /* early error during connect */
Olivier Houchard7a977432019-03-21 15:47:13 +0100693 (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
694 (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
Willy Tarreau662fafc2019-05-26 09:43:07 +0200695 (!br_data(h2c->mbuf) && /* mux buffer empty, also process clean events below */
Christopher Fauletff7925d2022-10-11 19:12:40 +0200696 ((h2c->flags & H2_CF_RCVD_SHUT) ||
Olivier Houchard7a977432019-03-21 15:47:13 +0100697 (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
698 return 1;
699
700 return 0;
Olivier Houchard7a977432019-03-21 15:47:13 +0100701}
702
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200703/*****************************************************/
704/* functions below are for dynamic buffer management */
705/*****************************************************/
706
Willy Tarreau315d8072017-12-10 22:17:57 +0100707/* indicates whether or not the we may call the h2_recv() function to attempt
708 * to receive data into the buffer and/or demux pending data. The condition is
709 * a bit complex due to some API limits for now. The rules are the following :
710 * - if an error or a shutdown was detected on the connection and the buffer
711 * is empty, we must not attempt to receive
712 * - if the demux buf failed to be allocated, we must not try to receive and
713 * we know there is nothing pending
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100714 * - if no flag indicates a blocking condition, we may attempt to receive,
715 * regardless of whether the demux buffer is full or not, so that only
716 * de demux part decides whether or not to block. This is needed because
717 * the connection API indeed prevents us from re-enabling receipt that is
718 * already enabled in a polled state, so we must always immediately stop
719 * as soon as the demux can't proceed so as never to hit an end of read
720 * with data pending in the buffers.
Willy Tarreau315d8072017-12-10 22:17:57 +0100721 * - otherwise must may not attempt
722 */
723static inline int h2_recv_allowed(const struct h2c *h2c)
724{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200725 if (b_data(&h2c->dbuf) == 0 &&
Christopher Fauletff7925d2022-10-11 19:12:40 +0200726 ((h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR)) || h2c->st0 >= H2_CS_ERROR))
Willy Tarreau315d8072017-12-10 22:17:57 +0100727 return 0;
728
729 if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100730 !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
Willy Tarreau315d8072017-12-10 22:17:57 +0100731 return 1;
732
733 return 0;
734}
735
Willy Tarreau47b515a2018-12-21 16:09:41 +0100736/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200737static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100738{
739 if (!h2_recv_allowed(h2c))
740 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200741 if ((!consider_buffer || !b_data(&h2c->dbuf))
742 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100743 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200744 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100745}
746
747
Willy Tarreau4596fe22022-05-17 19:07:51 +0200748/* returns true if the front connection has too many stream connectors attached */
Willy Tarreau36c22322022-05-27 10:41:24 +0200749static inline int h2_frt_has_too_many_sc(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200750{
Willy Tarreauca1027c2023-04-18 15:57:03 +0200751 return h2c->nb_sc > h2c_max_concurrent_streams(h2c);
Willy Tarreauf2101912018-07-19 10:11:38 +0200752}
753
/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
 * flags are used to figure what buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 * Note: the three allocation attempts below are tried in priority order
 * (demux buffer, then mux buffer, then stream rx buffer) and only the first
 * pending one is served per call.
 */
static int h2_buf_available(void *target)
{
	struct h2c *h2c = target;
	struct h2s *h2s;

	/* demux (input) buffer was awaited */
	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
		h2c->flags &= ~H2_CF_DEM_DALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	/* mux (output) buffer was awaited */
	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
		h2c->flags &= ~H2_CF_MUX_MALLOC;

		/* the demux was itself blocked on mux buffer room */
		if (h2c->flags & H2_CF_DEM_MROOM) {
			h2c->flags &= ~H2_CF_DEM_MROOM;
			h2c_restart_reading(h2c, 1);
		}
		return 1;
	}

	/* rx buffer of the stream currently being demuxed was awaited */
	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) &&
	    b_alloc(&h2s->rxbuf)) {
		h2c->flags &= ~H2_CF_DEM_SALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	return 0;
}
790
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200791static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200792{
793 struct buffer *buf = NULL;
794
Willy Tarreau2b718102021-04-21 07:32:39 +0200795 if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100796 unlikely((buf = b_alloc(bptr)) == NULL)) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100797 h2c->buf_wait.target = h2c;
798 h2c->buf_wait.wakeup_cb = h2_buf_available;
Willy Tarreaub4e34762021-09-30 19:02:18 +0200799 LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
Willy Tarreau14398122017-09-22 14:26:04 +0200800 }
801 return buf;
802}
803
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200804static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200805{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200806 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100807 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100808 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200809 }
810}
811
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200812static inline void h2_release_mbuf(struct h2c *h2c)
813{
814 struct buffer *buf;
815 unsigned int count = 0;
816
817 while (b_size(buf = br_head_pick(h2c->mbuf))) {
818 b_free(buf);
819 count++;
820 }
821 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100822 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200823}
824
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100825/* returns the number of allocatable outgoing streams for the connection taking
826 * the last_sid and the reserved ones into account.
827 */
828static inline int h2_streams_left(const struct h2c *h2c)
829{
830 int ret;
831
832 /* consider the number of outgoing streams we're allowed to create before
833 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
834 * nb_reserved is the number of streams which don't yet have an ID.
835 */
836 ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
837 ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
838 if (ret < 0)
839 ret = 0;
840 return ret;
841}
842
Willy Tarreau00f18a32019-01-26 12:19:01 +0100843/* returns the number of streams in use on a connection to figure if it's
Willy Tarreau36c22322022-05-27 10:41:24 +0200844 * idle or not. We check nb_sc and not nb_streams as the caller will want
Willy Tarreau00f18a32019-01-26 12:19:01 +0100845 * to know if it was the last one after a detach().
846 */
847static int h2_used_streams(struct connection *conn)
848{
849 struct h2c *h2c = conn->ctx;
850
Willy Tarreau36c22322022-05-27 10:41:24 +0200851 return h2c->nb_sc;
Willy Tarreau00f18a32019-01-26 12:19:01 +0100852}
853
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100854/* returns the number of concurrent streams available on the connection */
Olivier Houchardd540b362018-11-05 18:37:53 +0100855static int h2_avail_streams(struct connection *conn)
856{
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100857 struct server *srv = objt_server(conn->target);
Willy Tarreau3d2ee552018-12-19 14:12:10 +0100858 struct h2c *h2c = conn->ctx;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100859 int ret1, ret2;
Olivier Houchardd540b362018-11-05 18:37:53 +0100860
Willy Tarreau6afec462019-01-28 06:40:19 +0100861 /* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
862 * streams on the connection.
863 */
864 if (h2c->last_sid >= 0)
865 return 0;
866
Willy Tarreauc61966f2019-10-31 15:10:03 +0100867 if (h2c->st0 >= H2_CS_ERROR)
868 return 0;
869
Willy Tarreau86949782019-01-31 10:42:05 +0100870 /* note: may be negative if a SETTINGS frame changes the limit */
871 ret1 = h2c->streams_limit - h2c->nb_streams;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100872
873 /* we must also consider the limit imposed by stream IDs */
874 ret2 = h2_streams_left(h2c);
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100875 ret1 = MIN(ret1, ret2);
Willy Tarreau86949782019-01-31 10:42:05 +0100876 if (ret1 > 0 && srv && srv->max_reuse >= 0) {
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100877 ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
878 ret1 = MIN(ret1, ret2);
879 }
880 return ret1;
Olivier Houchardd540b362018-11-05 18:37:53 +0100881}
882
/* Unconditionally produce a trace of the header. Please do not call this one
 * and use h2_trace_header() instead which first checks if traces are enabled.
 * The header's name is truncated to 256 chars and its value to 1024 chars;
 * truncation is then indicated in the output. The trash chunk is clobbered.
 */
void _h2_trace_header(const struct ist hn, const struct ist hv,
                      uint64_t mask, const struct ist trc_loc, const char *func,
                      const struct h2c *h2c, const struct h2s *h2s)
{
	struct ist n_ist, v_ist;
	const char *c_str, *s_str;

	chunk_reset(&trash);
	/* connection identification part (may be empty) */
	c_str = chunk_newstr(&trash);
	if (h2c) {
		chunk_appendf(&trash, "h2c=%p(%c,%s) ",
			      h2c, (h2c->flags & H2_CF_IS_BACK) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
	}

	/* stream identification part (may be empty) */
	s_str = chunk_newstr(&trash);
	if (h2s) {
		if (h2s->id <= 0)
			chunk_appendf(&trash, "dsi=%d ", h2s->h2c->dsi);
		chunk_appendf(&trash, "h2s=%p(%d,%s) ", h2s, h2s->id, h2s_st_to_str(h2s->st));
	}
	else if (h2c)
		chunk_appendf(&trash, "dsi=%d ", h2c->dsi);

	/* copy up to 256 chars of the header's name into the trash */
	n_ist = ist2(chunk_newstr(&trash), 0);
	istscpy(&n_ist, hn, 256);
	trash.data += n_ist.len;
	if (n_ist.len != hn.len)
		chunk_appendf(&trash, " (... +%ld)", (long)(hn.len - n_ist.len));

	/* copy up to 1024 chars of the header's value into the trash */
	v_ist = ist2(chunk_newstr(&trash), 0);
	istscpy(&v_ist, hv, 1024);
	trash.data += v_ist.len;
	if (v_ist.len != hv.len)
		chunk_appendf(&trash, " (... +%ld)", (long)(hv.len - v_ist.len));

	TRACE_PRINTF_LOC(TRACE_LEVEL_USER, mask, trc_loc, func,
	                 (h2c ? h2c->conn : 0), 0, 0, 0,
	                 "%s%s%s %s: %s", c_str, s_str,
	                 (mask & H2_EV_TX_HDR) ? "sndh" : "rcvh",
	                 n_ist.ptr, v_ist.ptr);
}
927
928/* produce a trace of the header after checking that tracing is enabled */
929static inline void h2_trace_header(const struct ist hn, const struct ist hv,
930 uint64_t mask, const struct ist trc_loc, const char *func,
931 const struct h2c *h2c, const struct h2s *h2s)
932{
933 if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
934 TRACE_ENABLED(TRACE_LEVEL_USER, mask, h2c ? h2c->conn : 0, h2s, 0, 0))
935 _h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
936}
937
938/* hpack-encode header name <hn> and value <hv>, possibly emitting a trace if
939 * currently enabled. This is done on behalf of function <func> at <trc_loc>
940 * passed as ist(TRC_LOC), h2c <h2c>, and h2s <h2s>, all of which may be NULL.
941 * The trace is only emitted if the header is emitted (in which case non-zero
942 * is returned). The trash is modified. In the traces, the header's name will
943 * be truncated to 256 chars and the header's value to 1024 chars.
944 */
945static inline int h2_encode_header(struct buffer *buf, const struct ist hn, const struct ist hv,
946 uint64_t mask, const struct ist trc_loc, const char *func,
947 const struct h2c *h2c, const struct h2s *h2s)
948{
949 int ret;
950
951 ret = hpack_encode_header(buf, hn, hv);
952 if (ret)
953 h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
954
955 return ret;
956}
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200957
Willy Tarreau62f52692017-10-08 23:01:42 +0200958/*****************************************************************/
959/* functions below are dedicated to the mux setup and management */
960/*****************************************************************/
961
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200962/* Initialize the mux once it's attached. For outgoing connections, the context
963 * is already initialized before installing the mux, so we detect incoming
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200964 * connections from the fact that the context is still NULL (even during mux
965 * upgrades). <input> is always used as Input buffer and may contain data. It is
966 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200967 */
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200968static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
969 struct buffer *input)
Willy Tarreau32218eb2017-09-22 08:07:25 +0200970{
971 struct h2c *h2c;
Willy Tarreauea392822017-10-31 10:02:25 +0100972 struct task *t = NULL;
Christopher Fauletf81ef032019-10-04 15:19:43 +0200973 void *conn_ctx = conn->ctx;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200974
Christopher Fauletf81ef032019-10-04 15:19:43 +0200975 TRACE_ENTER(H2_EV_H2C_NEW);
Willy Tarreau7838a792019-08-12 18:42:03 +0200976
Willy Tarreaubafbe012017-11-24 17:34:44 +0100977 h2c = pool_alloc(pool_head_h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +0200978 if (!h2c)
mildiscd2d7de2018-10-02 16:44:18 +0200979 goto fail_no_h2c;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200980
Christopher Faulete9b70722019-04-08 10:46:02 +0200981 if (conn_is_back(conn)) {
Willy Tarreau01b44822018-10-03 14:26:37 +0200982 h2c->flags = H2_CF_IS_BACK;
983 h2c->shut_timeout = h2c->timeout = prx->timeout.server;
984 if (tick_isset(prx->timeout.serverfin))
985 h2c->shut_timeout = prx->timeout.serverfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100986
987 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
988 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200989 } else {
990 h2c->flags = H2_CF_NONE;
991 h2c->shut_timeout = h2c->timeout = prx->timeout.client;
992 if (tick_isset(prx->timeout.clientfin))
993 h2c->shut_timeout = prx->timeout.clientfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100994
995 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
996 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200997 }
Willy Tarreau3f133572017-10-31 19:21:06 +0100998
Willy Tarreau0b37d652018-10-03 10:33:02 +0200999 h2c->proxy = prx;
Willy Tarreau33400292017-11-05 11:23:40 +01001000 h2c->task = NULL;
Willy Tarreau389ab0d2023-03-20 19:16:04 +01001001 h2c->wait_event.tasklet = NULL;
Willy Tarreau15a47332022-03-18 15:57:34 +01001002 h2c->idle_start = now_ms;
Willy Tarreau3f133572017-10-31 19:21:06 +01001003 if (tick_isset(h2c->timeout)) {
Willy Tarreaubeeabf52021-10-01 18:23:30 +02001004 t = task_new_here();
Willy Tarreau3f133572017-10-31 19:21:06 +01001005 if (!t)
1006 goto fail;
1007
1008 h2c->task = t;
1009 t->process = h2_timeout_task;
1010 t->context = h2c;
1011 t->expire = tick_add(now_ms, h2c->timeout);
1012 }
Willy Tarreauea392822017-10-31 10:02:25 +01001013
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001014 h2c->wait_event.tasklet = tasklet_new();
1015 if (!h2c->wait_event.tasklet)
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001016 goto fail;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001017 h2c->wait_event.tasklet->process = h2_io_cb;
1018 h2c->wait_event.tasklet->context = h2c;
Willy Tarreau4f6516d2018-12-19 13:59:17 +01001019 h2c->wait_event.events = 0;
Amaury Denoyelled3a88c12021-05-03 10:47:51 +02001020 if (!conn_is_back(conn)) {
1021 /* Connection might already be in the stopping_list if subject
1022 * to h1->h2 upgrade.
1023 */
1024 if (!LIST_INLIST(&conn->stopping_list)) {
1025 LIST_APPEND(&mux_stopping_data[tid].list,
1026 &conn->stopping_list);
1027 }
1028 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001029
Willy Tarreau2bdcc702020-05-19 11:31:11 +02001030 h2c->ddht = hpack_dht_alloc();
Willy Tarreau32218eb2017-09-22 08:07:25 +02001031 if (!h2c->ddht)
1032 goto fail;
1033
1034 /* Initialise the context. */
1035 h2c->st0 = H2_CS_PREFACE;
1036 h2c->conn = conn;
Willy Tarreauca1027c2023-04-18 15:57:03 +02001037 h2c->streams_limit = h2c_max_concurrent_streams(h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001038 h2c->max_id = -1;
1039 h2c->errcode = H2_ERR_NO_ERROR;
Willy Tarreau97aaa672018-12-23 09:49:04 +01001040 h2c->rcvd_c = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001041 h2c->rcvd_s = 0;
Willy Tarreau49745612017-12-03 18:56:02 +01001042 h2c->nb_streams = 0;
Willy Tarreau36c22322022-05-27 10:41:24 +02001043 h2c->nb_sc = 0;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001044 h2c->nb_reserved = 0;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001045 h2c->stream_cnt = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001046
Christopher Faulet51f73eb2019-04-08 11:22:47 +02001047 h2c->dbuf = *input;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001048 h2c->dsi = -1;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001049
Willy Tarreau32218eb2017-09-22 08:07:25 +02001050 h2c->last_sid = -1;
1051
Willy Tarreau51330962019-05-26 09:38:07 +02001052 br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
Willy Tarreau32218eb2017-09-22 08:07:25 +02001053 h2c->miw = 65535; /* mux initial window size */
1054 h2c->mws = 65535; /* mux window size */
1055 h2c->mfs = 16384; /* initial max frame size */
Willy Tarreau751f2d02018-10-05 09:35:00 +02001056 h2c->streams_by_id = EB_ROOT;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001057 LIST_INIT(&h2c->send_list);
1058 LIST_INIT(&h2c->fctl_list);
Willy Tarreau9edf6db2019-10-02 10:49:59 +02001059 LIST_INIT(&h2c->blocked_list);
Willy Tarreau90f366b2021-02-20 11:49:49 +01001060 LIST_INIT(&h2c->buf_wait.list);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001061
Christopher Fauletf81ef032019-10-04 15:19:43 +02001062 conn->ctx = h2c;
1063
Willy Tarreau8e6f7492021-06-16 17:47:24 +02001064 TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);
1065
Willy Tarreau3f133572017-10-31 19:21:06 +01001066 if (t)
1067 task_queue(t);
Willy Tarreauea392822017-10-31 10:02:25 +01001068
Willy Tarreau01b44822018-10-03 14:26:37 +02001069 if (h2c->flags & H2_CF_IS_BACK) {
1070 /* FIXME: this is temporary, for outgoing connections we need
1071 * to immediately allocate a stream until the code is modified
Willy Tarreau36c22322022-05-27 10:41:24 +02001072 * so that the caller calls ->attach(). For now the outgoing sc
Christopher Fauletf81ef032019-10-04 15:19:43 +02001073 * is stored as conn->ctx by the caller and saved in conn_ctx.
Willy Tarreau01b44822018-10-03 14:26:37 +02001074 */
1075 struct h2s *h2s;
1076
Christopher Fauletf81ef032019-10-04 15:19:43 +02001077 h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
Willy Tarreau01b44822018-10-03 14:26:37 +02001078 if (!h2s)
1079 goto fail_stream;
1080 }
1081
Frédéric Lécaille9969adb2023-01-18 11:52:21 +01001082 proxy_inc_fe_cum_sess_ver_ctr(sess->listener, prx, 2);
Willy Tarreau4781b152021-04-06 13:53:36 +02001083 HA_ATOMIC_INC(&h2c->px_counters->open_conns);
1084 HA_ATOMIC_INC(&h2c->px_counters->total_conns);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001085
Willy Tarreau0f383582018-10-03 14:22:21 +02001086 /* prepare to read something */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02001087 h2c_restart_reading(h2c, 1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001088 TRACE_LEAVE(H2_EV_H2C_NEW, conn);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001089 return 0;
Willy Tarreau01b44822018-10-03 14:26:37 +02001090 fail_stream:
1091 hpack_dht_free(h2c->ddht);
mildiscd2d7de2018-10-02 16:44:18 +02001092 fail:
Willy Tarreauf6562792019-05-07 19:05:35 +02001093 task_destroy(t);
Tim Duesterhusb1ec21d2023-04-22 17:47:32 +02001094 tasklet_free(h2c->wait_event.tasklet);
Willy Tarreaubafbe012017-11-24 17:34:44 +01001095 pool_free(pool_head_h2c, h2c);
mildiscd2d7de2018-10-02 16:44:18 +02001096 fail_no_h2c:
Willy Tarreau3b990fe2022-01-12 17:24:26 +01001097 if (!conn_is_back(conn))
1098 LIST_DEL_INIT(&conn->stopping_list);
Christopher Fauletf81ef032019-10-04 15:19:43 +02001099 conn->ctx = conn_ctx; /* restore saved ctx */
1100 TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001101 return -1;
1102}
1103
Willy Tarreau751f2d02018-10-05 09:35:00 +02001104/* returns the next allocatable outgoing stream ID for the H2 connection, or
1105 * -1 if no more is allocatable.
1106 */
1107static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
1108{
1109 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +01001110
1111 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +02001112 id = -1;
1113 return id;
1114}
1115
Willy Tarreau2373acc2017-10-12 17:35:14 +02001116/* returns the stream associated with id <id> or NULL if not found */
1117static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1118{
1119 struct eb32_node *node;
1120
Willy Tarreau751f2d02018-10-05 09:35:00 +02001121 if (id == 0)
1122 return (struct h2s *)h2_closed_stream;
1123
Willy Tarreau2a856182017-05-16 15:20:39 +02001124 if (id > h2c->max_id)
1125 return (struct h2s *)h2_idle_stream;
1126
Willy Tarreau2373acc2017-10-12 17:35:14 +02001127 node = eb32_lookup(&h2c->streams_by_id, id);
1128 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001129 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001130
1131 return container_of(node, struct h2s, by_id);
1132}
1133
/* release function. This one should be called to free all resources allocated
 * to the mux: HPACK table, demux/mux buffers, tasks and tasklets, and finally
 * the h2c itself. If a connection is still attached, it is shut down, closed
 * and freed as well (after its destroy_cb, if any). The connection pointer is
 * saved first because the h2c is freed before the connection is handled.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_END);

	hpack_dht_free(h2c->ddht);

	/* stop waiting for a buffer if we were queued for one */
	if (LIST_INLIST(&h2c->buf_wait.list))
		LIST_DEL_INIT(&h2c->buf_wait.list);

	h2_release_buf(h2c, &h2c->dbuf);
	h2_release_mbuf(h2c);

	if (h2c->task) {
		/* detach the timeout task and wake it so it can terminate
		 * itself (its context is cleared first) — NOTE(review):
		 * assumes the task handler self-destroys on NULL context.
		 */
		h2c->task->context = NULL;
		task_wakeup(h2c->task, TASK_WOKEN_OTHER);
		h2c->task = NULL;
	}
	tasklet_free(h2c->wait_event.tasklet);
	/* drop any pending transport-layer subscription before freeing */
	if (conn && h2c->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
					&h2c->wait_event);

	HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

	pool_free(pool_head_h2c, h2c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1194
1195
Willy Tarreau71681172017-10-23 14:39:06 +02001196/******************************************************/
1197/* functions below are for the H2 protocol processing */
1198/******************************************************/
1199
1200/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001201static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001202{
1203 return h2s ? h2s->id : 0;
1204}
1205
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001206/* returns the sum of the stream's own window size and the mux's initial
1207 * window, which together form the stream's effective window size.
1208 */
1209static inline int h2s_mws(const struct h2s *h2s)
1210{
1211 return h2s->sws + h2s->h2c->miw;
1212}
1213
/* marks an error on the connection. Before settings are sent, we must not send
 * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
 * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
 * even try. The connection state is switched to H2_CS_ERROR last so the
 * SETTINGS1 comparison below still sees the pre-error state.
 */
static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
{
	TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
	h2c->errcode = err;
	/* too early in the handshake to emit a GOAWAY: forbid it outright */
	if (h2c->st0 < H2_CS_SETTINGS1)
		h2c->flags |= H2_CF_GOAWAY_FAILED;
	h2c->st0 = H2_CS_ERROR;
}
1227
/* marks an error on the stream. It may also update an already closed stream
 * (e.g. to report an error after an RST was received). Does nothing for
 * streams without an ID (dummy/reserved) or already in the ERROR state, so
 * the first reported error code is preserved.
 */
static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
{
	if (h2s->id && h2s->st != H2_SS_ERROR) {
		TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
		h2s->errcode = err;
		/* only move forward to ERROR, never downgrade a later state */
		if (h2s->st < H2_SS_ERROR)
			h2s->st = H2_SS_ERROR;
		se_fl_set_error(h2s->sd);
	}
}
1241
Willy Tarreau7e094452018-12-19 18:08:52 +01001242/* attempt to notify the data layer of recv availability */
1243static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1244{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001245 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001246 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001247 tasklet_wakeup(h2s->subs->tasklet);
1248 h2s->subs->events &= ~SUB_RETRY_RECV;
1249 if (!h2s->subs->events)
1250 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001251 }
1252}
1253
1254/* attempt to notify the data layer of send availability */
1255static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1256{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001257 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001258 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001259 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001260 tasklet_wakeup(h2s->subs->tasklet);
1261 h2s->subs->events &= ~SUB_RETRY_SEND;
1262 if (!h2s->subs->events)
1263 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001264 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001265 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1266 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1267 tasklet_wakeup(h2s->shut_tl);
1268 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001269}
1270
/* alerts the data layer, trying to wake it up by all means, following
 * this sequence :
 *   - if the h2s' data layer is subscribed to recv, then it's woken up for recv
 *   - if it's subscribed to send, then it's woken up for send
 *   - if it was subscribed to neither, its ->wake() callback is called
 * It is safe to call this function with a closed stream which doesn't have a
 * stream connector anymore.
 */
static void __maybe_unused h2s_alert(struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);

	/* a pending shutdown counts as a subscription: the notify helpers
	 * will wake the shut tasklet in that case.
	 */
	if (h2s->subs ||
	    (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
		h2s_notify_recv(h2s);
		h2s_notify_send(h2s);
	}
	else if (h2s_sc(h2s) && h2s_sc(h2s)->app_ops->wake != NULL) {
		/* last resort: the application-level wake callback */
		TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
		h2s_sc(h2s)->app_ops->wake(h2s_sc(h2s));
	}

	TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
}
1295
Willy Tarreaue4820742017-07-27 13:37:23 +02001296/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001297static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001298{
1299 uint8_t *out = frame;
1300
1301 *out = len >> 16;
1302 write_n16(out + 1, len);
1303}
1304
/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
 * the caller's responsibility to verify that there are at least <bytes> bytes
 * available in the buffer's input prior to calling this function. The buffer
 * is assumed not to hold any output data. The two-segment read (up to the
 * wrapping point, then from the buffer origin) is delegated to readv_bytes().
 */
static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
                                                   const struct buffer *b, int o)
{
	readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
1316
/* reads a 16-bit value in network byte order at relative offset <o> of buffer
 * <b>, dealing with wrapping; same prerequisites as h2_get_buf_bytes().
 */
static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
{
	return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
1321
/* reads a 32-bit value in network byte order at relative offset <o> of buffer
 * <b>, dealing with wrapping; same prerequisites as h2_get_buf_bytes().
 */
static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
{
	return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
1326
/* reads a 64-bit value in network byte order at relative offset <o> of buffer
 * <b>, dealing with wrapping; same prerequisites as h2_get_buf_bytes().
 */
static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
{
	return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
1331
1332
/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
 * The algorithm is not obvious. It turns out that H2 headers are neither
 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
 * may wrap so each byte read must be checked. The header is formed like this :
 *
 *  b0         b1        b2       b3   b4         b5..b8
 *  +----------+---------+--------+----+----+----------------------+
 *  |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
 *  +----------+---------+--------+----+----+----------------------+
 *
 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
 * we get the sid properly aligned and ordered, and 16 bits of len properly
 * ordered as well. The type and flags can be extracted using bit shifts from
 * the word, and only one extra read is needed to fetch len[16:23].
 * Returns zero if some bytes are missing, otherwise non-zero on success. The
 * buffer is assumed not to contain any output data.
 */
static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
{
	uint64_t w;

	/* a full 9-byte header must be available from offset <o> */
	if (b_data(b) < o + 9)
		return 0;

	/* w layout, MSB to LSB: len[15:8] len[7:0] type flag sid[31:0] */
	w = h2_get_n64(b, o + 1);
	h->len = *(uint8_t*)b_peek(b, o) << 16;
	h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
	h->ff = w >> 32;
	h->ft = w >> 40;
	h->len += w >> 48;
	return 1;
}
1365
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above. The caller must have verified their presence.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	b_del(b, 9);
}
1373
1374/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001375static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001376{
1377 int ret;
1378
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001379 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001380 if (ret > 0)
1381 h2_skip_frame_hdr(b);
1382 return ret;
1383}
1384
Willy Tarreaucb985a42019-10-07 16:56:34 +02001385
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	size_t remain = b->data - 9;               /* payload bytes, excluding the 9-byte header */
	int extra_frames = (remain - 1) / mfs;     /* number of CONTINUATION frames needed */
	size_t fsize;
	char *fptr;
	int frame;

	/* already fits within a single frame: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	for (frame = extra_frames; frame; frame--) {
		/* fragment size is in [1..mfs]; the last fragment takes the
		 * remainder so that all preceding ones are exactly <mfs>.
		 */
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data: shift this fragment right to make room for its
		 * own 9-byte header (earlier fragments shift less).
		 */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; only the last CONTINUATION carries
		 * the END_HEADERS flag.
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		/* copy the stream ID from the initial frame */
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial HEADERS frame no longer ends the header block and now
	 * only carries the first <remain> bytes.
	 */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1431
1432
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained. Note that
 * it does explicitly support being called with a partially initialized h2s
 * (e.g. sd==NULL).
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* streams without an ID are accounted as reserved */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		if (h2s->sd && h2s_sc(h2s)) {
			/* let the data layer learn about the closure unless
			 * it already saw EOS or still has pending rx data.
			 */
			if (!se_fl_test(h2s->sd, SE_FL_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	h2s->st = H2_SS_CLOSED;
}
1456
/* Check h2c and h2s flags to evaluate if EOI/EOS/ERR_PENDING/ERROR flags must
 * be set on the SE. EOI is set when the end of stream was received; EOS is
 * set for tunnels, on read0 or when the stream is closed; EOS without EOI
 * means the input was truncated, which is reported as an error.
 */
static inline void h2s_propagate_term_flags(struct h2c *h2c, struct h2s *h2s)
{
	if (h2s->flags & H2_SF_ES_RCVD) {
		se_fl_set(h2s->sd, SE_FL_EOI);
		/* Add EOS flag for tunnel */
		if (h2s->flags & H2_SF_BODY_TUNNEL)
			se_fl_set(h2s->sd, SE_FL_EOS);
	}
	if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED) {
		se_fl_set(h2s->sd, SE_FL_EOS);
		/* closed without end-of-input: truncated message */
		if (!se_fl_test(h2s->sd, SE_FL_EOI))
			se_fl_set(h2s->sd, SE_FL_ERROR);
	}
	/* promote a pending error to a definitive one */
	if (se_fl_test(h2s->sd, SE_FL_ERR_PENDING))
		se_fl_set(h2s->sd, SE_FL_ERROR);
}
1476
/* detaches an H2 stream from its H2C and releases it to the H2S pool.
 * h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread.
 */
static void h2s_destroy(struct h2s *h2s)
{
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		/* a buffer was freed: let a waiter know one is available */
		offer_buffers(NULL, 1);
	}

	/* neutralize any remaining subscription before freeing */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	h2_remove_from_list(h2s);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	BUG_ON(h2s->sd && !se_fl_test(h2s->sd, SE_FL_ORPHAN));
	sedesc_free(h2s->sd);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1512
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function. A non-positive <id> denotes a reserved
 * stream and is accounted separately (nb_reserved).
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* tasklet used to perform deferred shutdowns (h2_deferred_shut) */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->sd = NULL;
	h2s->sws = 0;
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	h2s->by_id.key = h2s->id = id;
	/* positive IDs raise the connection's high watermark */
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1567
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it. <flags> are the H2_SF_* stream flags already known at
 * creation time (e.g. tunnel / extended-connect indications). Also enforces
 * the advertised MAX_CONCURRENT_STREAMS limit and updates the session's
 * timing information for logging.
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse streams beyond the advertised concurrency limit; still
	 * account the request (and error) on the session for logging.
	 */
	if (h2c->nb_streams >= h2c_max_concurrent_streams(h2c)) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		session_inc_http_req_ctr(sess);
		session_inc_http_err_ctr(sess);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	h2s->sd = sedesc_new();
	if (!h2s->sd)
		goto out_close;
	h2s->sd->se   = h2s;
	h2s->sd->conn = h2c->conn;
	se_fl_set(h2s->sd, SE_FL_T_MUX | SE_FL_ORPHAN | SE_FL_NOT_FIRST);
	/* The request is not finished, don't expect data from the opposite side
	 * yet
	 */
	if (!(h2c->dff & (H2_F_HEADERS_END_STREAM| H2_F_DATA_END_STREAM)) && !(flags & H2_SF_BODY_TUNNEL))
		se_expect_no_data(h2s->sd);

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		se_fl_set(h2s->sd, SE_FL_WEBSOCKET);

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;

	if (!sc_new_from_endp(h2s->sd, sess, input))
		goto out_close;

	h2c->nb_sc++;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->accept_ts   = now_ns;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_sc(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1647
/* allocates a new stream associated to stream connector <sc> on the h2c
 * connection and returns it, or NULL in case of memory allocation error or if
 * the highest possible stream ID was reached.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* respect the peer-advertised concurrent streams limit */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* stream IDs are finite; once exhausted the connection cannot open
	 * any more streams.
	 */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* bind the stream connector to this mux stream; on failure the
	 * freshly created h2s must be released before reporting the error.
	 */
	if (sc_attach_mux(sc, h2s, h2c->conn) < 0) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		h2s_destroy(h2s);
		h2s = NULL;
		goto out;
	}
	h2s->sd = sc->sedesc;   /* share the connector's endpoint descriptor */
	h2s->sess = sess;
	h2c->nb_sc++;

	/* on the backend we can afford to only count total streams upon success */
	h2c->stream_cnt++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
1695
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes.
 *
 * The frame is first assembled into a local buffer (header + each non-default
 * setting as a 6-byte identifier/value pair), then copied into the mux buffer
 * ring in one operation so it is never emitted partially.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int iws;
	int mfs;
	int mcs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	             "\x00\x00\x00"      /* length : 0 for now */
	             "\x04\x00"          /* type : 4 (settings), flags : 0 */
	             "\x00\x00\x00\x00", /* stream ID : 0 */
	             9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* only advertise values differing from the protocol defaults
	 * (RFC7540#6.5.2) to keep the frame small.
	 */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	/* the initial window size may be set per side (frontend/backend),
	 * falling back to the common tunable when the side one is zero.
	 */
	iws = (h2c->flags & H2_CF_IS_BACK) ?
	      h2_be_settings_initial_window_size:
	      h2_fe_settings_initial_window_size;
	iws = iws ? iws : h2_settings_initial_window_size;

	if (iws != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, iws);
		chunk_memcat(&buf, str, 6);
	}

	mcs = h2c_max_concurrent_streams(h2c);
	if (mcs != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, mcs);
		chunk_memcat(&buf, str, 6);
	}

	/* clamp the advertised max frame size to the buffer size; a zero
	 * configuration means "use the buffer size".
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* now patch the frame header with the real payload length */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* mux buffer allocation failed: mark it and block the demuxer */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1804
Willy Tarreau52eed752017-09-22 15:05:09 +02001805/* Try to receive a connection preface, then upon success try to send our
1806 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
1807 * missing data. It may return an error in h2c.
1808 */
1809static int h2c_frt_recv_preface(struct h2c *h2c)
1810{
1811 int ret1;
Willy Tarreaube5b7152017-09-25 16:25:39 +02001812 int ret2;
Willy Tarreau52eed752017-09-22 15:05:09 +02001813
Willy Tarreau7838a792019-08-12 18:42:03 +02001814 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
1815
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001816 ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));
Willy Tarreau52eed752017-09-22 15:05:09 +02001817
1818 if (unlikely(ret1 <= 0)) {
Christopher Fauletb5f7b522021-07-26 12:06:53 +02001819 if (!ret1)
1820 h2c->flags |= H2_CF_DEM_SHORT_READ;
Christopher Fauletff7925d2022-10-11 19:12:40 +02001821 if (ret1 < 0 || (h2c->flags & H2_CF_RCVD_SHUT)) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01001822 TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02001823 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreauee4684f2021-06-17 08:08:48 +02001824 if (b_data(&h2c->dbuf) ||
1825 !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
1826 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Amaury Denoyellea8879232020-10-27 17:16:03 +01001827 }
Willy Tarreau7838a792019-08-12 18:42:03 +02001828 ret2 = 0;
1829 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02001830 }
1831
Willy Tarreau7f0cc492018-10-08 07:13:08 +02001832 ret2 = h2c_send_settings(h2c);
Willy Tarreaube5b7152017-09-25 16:25:39 +02001833 if (ret2 > 0)
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001834 b_del(&h2c->dbuf, ret1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001835 out:
1836 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreaube5b7152017-09-25 16:25:39 +02001837 return ret2;
Willy Tarreau52eed752017-09-22 15:05:09 +02001838}
1839
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* mux buffer allocation failed: mark it and block the demuxer */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* tail buffer full: try to append a new one */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	/* preface present in the buffer: follow up with our SETTINGS frame */
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1882
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case, nor will it send one if settings were not sent (e.g. still waiting for
 * a preface) so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	/* never retry after a definitive failure, and never emit GOAWAY
	 * before our SETTINGS went out; claim success so callers stop here.
	 */
	if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
		ret = 1; // claim that it worked
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	/* payload: last processed stream ID followed by the error code */
	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the requesting side (stream or demuxer) */
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1964
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be updated to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* nothing to do for a missing or already closed stream */
	if (!h2s || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block this stream on mux room */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* whether sent or deliberately skipped, the stream is now closed */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2031
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* the demuxed stream ID is used, not h2s->id, since <h2s> may be a
	 * dummy stream (see the function comment above).
	 */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the demuxer on mux room */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* only real streams (non-zero ID) record the RST and transition to
	 * closed; dummy placeholder streams keep their state.
	 */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	/* note: the counter is bumped on all exit paths, including when the
	 * frame could not be emitted yet.
	 */
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2099
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	/* in these states the local side already indicated end of stream */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block this stream on mux room */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		/* frame committed: remember that ES was sent */
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* tail buffer full: try to append a new one */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
2150
/* wake a specific stream and assign its stream connector some SE_FL_* flags
 * among SE_FL_ERR_PENDING and SE_FL_ERROR if needed. The stream's state
 * is automatically updated accordingly. If the stream is orphaned, it is
 * destroyed.
 */
static void h2s_wake_one_stream(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);

	if (!h2s_sc(h2s)) {
		/* this stream was already orphaned */
		h2s_destroy(h2s);
		TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
		return;
	}

	/* a pending read0 means the remote side will send nothing more:
	 * advance the stream state machine accordingly.
	 */
	if (h2c_read0_pending(h2s->h2c)) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* report an error on the endpoint if the connection is broken, or if
	 * a GOAWAY was received whose last_sid excludes this stream (unborn
	 * streams have id 0 and are excluded too).
	 */
	if (h2s->h2c->st0 >= H2_CS_ERROR || (h2s->h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) ||
	    (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
		se_fl_set_error(h2s->sd);

		if (h2s->st < H2_SS_ERROR)
			h2s->st = H2_SS_ERROR;
	}

	h2s_alert(h2s);
	TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
}
2187
2188/* wake the streams attached to the connection, whose id is greater than <last>
2189 * or unassigned.
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002190 */
Willy Tarreau23482912019-05-07 15:23:14 +02002191static void h2_wake_some_streams(struct h2c *h2c, int last)
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002192{
2193 struct eb32_node *node;
2194 struct h2s *h2s;
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002195
Willy Tarreau7838a792019-08-12 18:42:03 +02002196 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
2197
Christopher Fauletf02ca002019-03-07 16:21:34 +01002198 /* Wake all streams with ID > last */
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002199 node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
2200 while (node) {
2201 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002202 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002203 h2s_wake_one_stream(h2s);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002204 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01002205
Christopher Fauletf02ca002019-03-07 16:21:34 +01002206 /* Wake all streams with unassigned ID (ID == 0) */
2207 node = eb32_lookup(&h2c->streams_by_id, 0);
2208 while (node) {
2209 h2s = container_of(node, struct h2s, by_id);
2210 if (h2s->id > 0)
2211 break;
2212 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002213 h2s_wake_one_stream(h2s);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002214 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002215
2216 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002217}
2218
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002219/* Wake up all blocked streams whose window size has become positive after the
2220 * mux's initial window was adjusted. This should be done after having processed
2221 * SETTINGS frames which have updated the mux's initial window size.
Willy Tarreau3421aba2017-07-27 15:41:03 +02002222 */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002223static void h2c_unblock_sfctl(struct h2c *h2c)
Willy Tarreau3421aba2017-07-27 15:41:03 +02002224{
2225 struct h2s *h2s;
2226 struct eb32_node *node;
2227
Willy Tarreau7838a792019-08-12 18:42:03 +02002228 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
2229
Willy Tarreau3421aba2017-07-27 15:41:03 +02002230 node = eb32_first(&h2c->streams_by_id);
2231 while (node) {
2232 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002233 if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002234 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002235 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002236 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2237 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002238 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002239 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02002240 node = eb32_next(node);
2241 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002242
2243 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002244}
2245
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	if (h2c->dff & H2_F_SETTINGS_ACK) {
		/* peer's ACK of our own SETTINGS: it must carry no payload
		 * (RFC7540#6.5), otherwise it's a frame size error.
		 */
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only: wait until the whole payload is buffered */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: a sequence of 6-byte records, each made of a
	 * 16-bit identifier followed by a 32-bit value; identifiers not
	 * handled below are silently ignored as required by the spec.
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t  arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			/* valid range is 2^14 to 2^24-1 (RFC7540#6.5.2) */
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* only note the change here; the HPACK table update
			 * is processed later based on this flag.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			/* only 0 or 1 are permitted (RFC7540#6.5.2) */
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2c_max_concurrent_streams(h2c))
					arg = h2c_max_concurrent_streams(h2c);
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* peer advertises RFC8441 extended CONNECT support */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	/* connection-level error: log it on the frontend side, then mark the
	 * connection in error with the code computed above.
	 */
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2336
2337/* try to send an ACK for a settings frame on the connection. Returns > 0 on
2338 * success or one of the h2_status values.
2339 */
2340static int h2c_ack_settings(struct h2c *h2c)
2341{
2342 struct buffer *res;
2343 char str[9];
Willy Tarreau7838a792019-08-12 18:42:03 +02002344 int ret = 0;
2345
2346 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002347
Willy Tarreau9c218e72019-05-26 10:08:28 +02002348 memcpy(str,
2349 "\x00\x00\x00" /* length : 0 (no data) */
2350 "\x04" "\x01" /* type : 4, flags : ACK */
2351 "\x00\x00\x00\x00" /* stream ID */, 9);
2352
Willy Tarreaubcc45952019-05-26 10:05:50 +02002353 res = br_tail(h2c->mbuf);
Willy Tarreau9c218e72019-05-26 10:08:28 +02002354 retry:
Willy Tarreaubcc45952019-05-26 10:05:50 +02002355 if (!h2_get_buf(h2c, res)) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02002356 h2c->flags |= H2_CF_MUX_MALLOC;
2357 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002358 goto out;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002359 }
2360
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002361 ret = b_istput(res, ist2(str, 9));
Willy Tarreau3421aba2017-07-27 15:41:03 +02002362 if (unlikely(ret <= 0)) {
2363 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002364 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2365 goto retry;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002366 h2c->flags |= H2_CF_MUX_MFULL;
2367 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002368 }
2369 else {
2370 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002371 ret = 0;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002372 }
2373 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002374 out:
2375 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002376 return ret;
2377}
2378
Willy Tarreaucf68c782017-10-10 17:11:41 +02002379/* processes a PING frame and schedules an ACK if needed. The caller must pass
2380 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002381 * missing data. The caller must have already verified frame length
2382 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002383 */
2384static int h2c_handle_ping(struct h2c *h2c)
2385{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002386 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002387 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002388 h2c->st0 = H2_CS_FRAME_A;
2389 return 1;
2390}
2391
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002392/* Try to send a window update for stream id <sid> and value <increment>.
2393 * Returns > 0 on success or zero on missing room or failure. It may return an
2394 * error in h2c.
2395 */
2396static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
2397{
2398 struct buffer *res;
2399 char str[13];
Willy Tarreau7838a792019-08-12 18:42:03 +02002400 int ret = 0;
2401
2402 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002403
Willy Tarreau9c218e72019-05-26 10:08:28 +02002404 /* length: 4, type: 8, flags: none */
2405 memcpy(str, "\x00\x00\x04\x08\x00", 5);
2406 write_n32(str + 5, sid);
2407 write_n32(str + 9, increment);
2408
Willy Tarreaubcc45952019-05-26 10:05:50 +02002409 res = br_tail(h2c->mbuf);
Willy Tarreau9c218e72019-05-26 10:08:28 +02002410 retry:
Willy Tarreaubcc45952019-05-26 10:05:50 +02002411 if (!h2_get_buf(h2c, res)) {
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002412 h2c->flags |= H2_CF_MUX_MALLOC;
2413 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002414 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002415 }
2416
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002417 ret = b_istput(res, ist2(str, 13));
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002418 if (unlikely(ret <= 0)) {
2419 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002420 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2421 goto retry;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002422 h2c->flags |= H2_CF_MUX_MFULL;
2423 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002424 }
2425 else {
2426 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002427 ret = 0;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002428 }
2429 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002430 out:
2431 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002432 return ret;
2433}
2434
2435/* try to send pending window update for the connection. It's safe to call it
2436 * with no pending updates. Returns > 0 on success or zero on missing room or
2437 * failure. It may return an error in h2c.
2438 */
2439static int h2c_send_conn_wu(struct h2c *h2c)
2440{
2441 int ret = 1;
2442
Willy Tarreau7838a792019-08-12 18:42:03 +02002443 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2444
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002445 if (h2c->rcvd_c <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002446 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002447
Willy Tarreau97aaa672018-12-23 09:49:04 +01002448 if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
2449 /* increase the advertised connection window to 2G on
2450 * first update.
2451 */
2452 h2c->flags |= H2_CF_WINDOW_OPENED;
2453 h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
2454 }
2455
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002456 /* send WU for the connection */
2457 ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
2458 if (ret > 0)
2459 h2c->rcvd_c = 0;
2460
Willy Tarreau7838a792019-08-12 18:42:03 +02002461 out:
2462 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002463 return ret;
2464}
2465
2466/* try to send pending window update for the current dmux stream. It's safe to
2467 * call it with no pending updates. Returns > 0 on success or zero on missing
2468 * room or failure. It may return an error in h2c.
2469 */
2470static int h2c_send_strm_wu(struct h2c *h2c)
2471{
2472 int ret = 1;
2473
Willy Tarreau7838a792019-08-12 18:42:03 +02002474 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2475
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002476 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002477 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002478
2479 /* send WU for the stream */
2480 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2481 if (ret > 0)
2482 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002483 out:
2484 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002485 return ret;
2486}
2487
Willy Tarreaucf68c782017-10-10 17:11:41 +02002488/* try to send an ACK for a ping frame on the connection. Returns > 0 on
2489 * success, 0 on missing data or one of the h2_status values.
2490 */
2491static int h2c_ack_ping(struct h2c *h2c)
2492{
2493 struct buffer *res;
2494 char str[17];
Willy Tarreau7838a792019-08-12 18:42:03 +02002495 int ret = 0;
2496
2497 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002498
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002499 if (b_data(&h2c->dbuf) < 8)
Willy Tarreau7838a792019-08-12 18:42:03 +02002500 goto out;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002501
Willy Tarreaucf68c782017-10-10 17:11:41 +02002502 memcpy(str,
2503 "\x00\x00\x08" /* length : 8 (same payload) */
2504 "\x06" "\x01" /* type : 6, flags : ACK */
2505 "\x00\x00\x00\x00" /* stream ID */, 9);
2506
2507 /* copy the original payload */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002508 h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002509
Willy Tarreau9c218e72019-05-26 10:08:28 +02002510 res = br_tail(h2c->mbuf);
2511 retry:
2512 if (!h2_get_buf(h2c, res)) {
2513 h2c->flags |= H2_CF_MUX_MALLOC;
2514 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002515 goto out;
Willy Tarreau9c218e72019-05-26 10:08:28 +02002516 }
2517
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002518 ret = b_istput(res, ist2(str, 17));
Willy Tarreaucf68c782017-10-10 17:11:41 +02002519 if (unlikely(ret <= 0)) {
2520 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002521 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2522 goto retry;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002523 h2c->flags |= H2_CF_MUX_MFULL;
2524 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002525 }
2526 else {
2527 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002528 ret = 0;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002529 }
2530 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002531 out:
2532 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002533 return ret;
2534}
2535
/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
 * Returns > 0 on success or zero on missing data. It may return an error in
 * h2c or h2s. The caller must have already verified frame length and stream ID
 * validity. Described in RFC7540#6.9.
 */
static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
{
	int32_t inc;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	inc = h2_get_n32(&h2c->dbuf, 0);

	if (h2c->dsi != 0) {
		/* stream window update (dsi is the demuxed stream ID) */

		/* it's not an error to receive WU on a closed stream */
		if (h2s->st == H2_SS_CLOSED)
			goto done;

		/* a zero increment is a stream protocol error (RFC7540#6.9) */
		if (!inc) {
			TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}

		/* reject window overflow past 2^31-1: a positive window that
		 * would wrap negative after adding <inc> (RFC7540#6.9.1).
		 */
		if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
			TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
			error = H2_ERR_FLOW_CONTROL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}

		h2s->sws += inc;
		if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
			/* the stream was blocked on stream flow control and
			 * now has credit again: unblock it and requeue it in
			 * the send list if it was waiting to send or shut.
			 */
			h2s->flags &= ~H2_SF_BLK_SFCTL;
			LIST_DEL_INIT(&h2s->list);
			if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
			    h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
				LIST_APPEND(&h2c->send_list, &h2s->list);
		}
	}
	else {
		/* connection window update (stream 0) */
		if (!inc) {
			TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto conn_err;
		}

		/* same overflow rule as above, on the connection window */
		if (h2c->mws >= 0 && h2c->mws + inc < 0) {
			error = H2_ERR_FLOW_CONTROL_ERROR;
			goto conn_err;
		}

		h2c->mws += inc;
	}

 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 1;

 conn_err:
	/* connection-level error: the whole connection is doomed */
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 0;

 strm_err:
	/* stream-level error: only this stream is reset, the connection
	 * switches to the frame error state to emit the RST.
	 */
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
	TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 0;
}
2619
Willy Tarreaue96b0922017-10-30 00:28:29 +01002620/* processes a GOAWAY frame, and signals all streams whose ID is greater than
Willy Tarreaub860c732019-01-30 15:39:55 +01002621 * the last ID. Returns > 0 on success or zero on missing data. The caller must
2622 * have already verified frame length and stream ID validity. Described in
2623 * RFC7540#6.8.
Willy Tarreaue96b0922017-10-30 00:28:29 +01002624 */
2625static int h2c_handle_goaway(struct h2c *h2c)
2626{
Willy Tarreaue96b0922017-10-30 00:28:29 +01002627 int last;
2628
Willy Tarreau7838a792019-08-12 18:42:03 +02002629 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002630 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002631 if (b_data(&h2c->dbuf) < h2c->dfl) {
2632 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002633 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002634 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002635 }
Willy Tarreaue96b0922017-10-30 00:28:29 +01002636
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002637 last = h2_get_n32(&h2c->dbuf, 0);
2638 h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
Willy Tarreau11cc2d62017-12-03 10:27:47 +01002639 if (h2c->last_sid < 0)
2640 h2c->last_sid = last;
Willy Tarreau23482912019-05-07 15:23:14 +02002641 h2_wake_some_streams(h2c, last);
Willy Tarreau7838a792019-08-12 18:42:03 +02002642 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002643 return 1;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002644}
2645
Willy Tarreau92153fc2017-12-03 19:46:19 +01002646/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002647 * invalid. Returns > 0 on success or zero on missing data. It may return an
2648 * error in h2c. The caller must have already verified frame length and stream
2649 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002650 */
2651static int h2c_handle_priority(struct h2c *h2c)
2652{
Willy Tarreau7838a792019-08-12 18:42:03 +02002653 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2654
Willy Tarreau92153fc2017-12-03 19:46:19 +01002655 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002656 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002657 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002658 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002659 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002660 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002661
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002662 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002663 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002664 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002665 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002666 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002667 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002668 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002669 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002670 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002671 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002672}
2673
/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
 * Returns > 0 on success or zero on missing data. The caller must have already
 * verified frame length and stream ID validity. Described in RFC7540#6.4.
 */
static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);

	/* process full frame only: the 4-byte error code must be buffered */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		return 0;
	}

	/* late RST, already handled */
	if (h2s->st == H2_SS_CLOSED) {
		TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		return 1;
	}

	/* record the peer's error code and close the stream */
	h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
	h2s_close(h2s);

	if (h2s_sc(h2s)) {
		/* a stream connector is still attached: mark it in error and
		 * wake it up so the upper layer notices the reset.
		 */
		se_fl_set_error(h2s->sd);
		h2s_alert(h2s);
	}

	h2s->flags |= H2_SF_RST_RCVD;
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
	return 1;
}
2707
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002708/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2709 * It may return an error in h2c or h2s. The caller must consider that the
2710 * return value is the new h2s in case one was allocated (most common case).
2711 * Described in RFC7540#6.2. Most of the
Willy Tarreau13278b42017-10-13 19:23:14 +02002712 * errors here are reported as connection errors since it's impossible to
2713 * recover from such errors after the compression context has been altered.
2714 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002715static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
Willy Tarreau13278b42017-10-13 19:23:14 +02002716{
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002717 struct buffer rxbuf = BUF_NULL;
Willy Tarreau4790f7c2019-01-24 11:33:02 +01002718 unsigned long long body_len = 0;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002719 uint32_t flags = 0;
Willy Tarreau13278b42017-10-13 19:23:14 +02002720 int error;
2721
Willy Tarreau7838a792019-08-12 18:42:03 +02002722 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2723
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002724 if (!b_size(&h2c->dbuf)) {
2725 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002726 goto out; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002727 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002728
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002729 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2730 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002731 goto out; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002732 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002733
2734 /* now either the frame is complete or the buffer is complete */
2735 if (h2s->st != H2_SS_IDLE) {
Willy Tarreau88d138e2019-01-02 19:38:14 +01002736 /* The stream exists/existed, this must be a trailers frame */
2737 if (h2s->st != H2_SS_CLOSED) {
Willy Tarreau7cfbb812023-01-26 16:02:01 +01002738 error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002739 /* unrecoverable error ? */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002740 if (h2c->st0 >= H2_CS_ERROR) {
Willy Tarreau17c630b2023-01-19 23:58:11 +01002741 TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002742 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002743 goto out;
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002744 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002745
Christopher Faulet485da0b2021-10-08 08:56:00 +02002746 if (error == 0) {
2747 /* Demux not blocked because of the stream, it is an incomplete frame */
2748 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2749 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002750 goto out; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002751 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002752
2753 if (error < 0) {
2754 /* Failed to decode this frame (e.g. too large request)
2755 * but the HPACK decompressor is still synchronized.
2756 */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002757 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002758 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
Willy Tarreau17c630b2023-01-19 23:58:11 +01002759 TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002760 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau88d138e2019-01-02 19:38:14 +01002761 goto out;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002762 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01002763 goto done;
2764 }
Willy Tarreaudf1cc5d2023-10-20 17:47:33 +02002765 /* the stream was already killed by an RST, let's consume
Willy Tarreau1f035502019-01-30 11:44:07 +01002766 * the data and send another RST.
2767 */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01002768 error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002769 sess_log(h2c->conn->owner);
Willy Tarreau1f035502019-01-30 11:44:07 +01002770 h2s = (struct h2s*)h2_error_stream;
Willy Tarreaudf1cc5d2023-10-20 17:47:33 +02002771 TRACE_USER("rcvd H2 trailers on closed stream", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
Willy Tarreau1f035502019-01-30 11:44:07 +01002772 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002773 }
2774 else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
2775 /* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
2776 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002777 TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau4781b152021-04-06 13:53:36 +02002778 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau22de8d32018-09-05 19:55:58 +02002779 sess_log(h2c->conn->owner);
Willy Tarreaud6e5cde2023-10-20 18:38:34 +02002780 session_inc_http_req_ctr(h2c->conn->owner);
2781 session_inc_http_err_ctr(h2c->conn->owner);
Willy Tarreau13278b42017-10-13 19:23:14 +02002782 goto conn_err;
2783 }
Willy Tarreau4869ed52023-10-13 18:11:59 +02002784 else if (h2c->flags & H2_CF_DEM_TOOMANY) {
Willy Tarreau36c22322022-05-27 10:41:24 +02002785 goto out; // IDLE but too many sc still present
Willy Tarreau4869ed52023-10-13 18:11:59 +02002786 }
2787 else if (h2_fe_max_total_streams &&
2788 h2c->stream_cnt >= h2_fe_max_total_streams + h2c_max_concurrent_streams(h2c)) {
2789 /* We've already told this client we were going to close a
2790 * while ago and apparently it didn't care, so it's time to
2791 * stop processing its requests for real.
2792 */
2793 error = H2_ERR_ENHANCE_YOUR_CALM;
2794 TRACE_STATE("Stream limit violated", H2_EV_STRM_SHUT, h2c->conn);
2795 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
2796 sess_log(h2c->conn->owner);
2797 session_inc_http_req_ctr(h2c->conn->owner);
2798 session_inc_http_err_ctr(h2c->conn->owner);
2799 goto conn_err;
2800 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002801
Willy Tarreau7cfbb812023-01-26 16:02:01 +01002802 error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002803
Willy Tarreaufb25b6e2024-01-18 17:01:45 +01002804 if (error == 0) {
2805 /* No error but missing data for demuxing, it is an incomplete frame */
2806 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2807 h2c->flags |= H2_CF_DEM_SHORT_READ;
2808 goto out;
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002809 }
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002810
Willy Tarreaufb25b6e2024-01-18 17:01:45 +01002811 /* Now we cannot roll back and we won't come back here anymore for this
2812 * stream, so this stream ID is open from a protocol perspective, even
2813 * if incomplete or broken, we want to count it as attempted.
2814 */
2815 if (h2c->dsi > h2c->max_id)
2816 h2c->max_id = h2c->dsi;
2817 h2c->stream_cnt++;
Willy Tarreau25919232019-01-03 14:48:18 +01002818
Willy Tarreaufb25b6e2024-01-18 17:01:45 +01002819 if (error < 0) {
2820 /* Failed to decode this stream. This might be due to a
2821 * recoverable error affecting only the stream (e.g. too large
2822 * request for buffer, that leaves the HPACK decompressor still
2823 * synchronized), or a non-recoverable error such as an invalid
2824 * frame type sequence (e.g. other frame type interleaved with
2825 * CONTINUATION), in which h2c_dec_hdrs() has already set the
2826 * error code in the connection and counted it in the relevant
2827 * stats. We still count a req error in both cases.
Willy Tarreau25919232019-01-03 14:48:18 +01002828 */
Willy Tarreaufb25b6e2024-01-18 17:01:45 +01002829 sess_log(h2c->conn->owner);
2830 session_inc_http_req_ctr(h2c->conn->owner);
2831 session_inc_http_err_ctr(h2c->conn->owner);
2832
2833 if (h2c->st0 >= H2_CS_ERROR) {
2834 TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
2835 goto out;
2836 }
2837
2838 /* recoverable stream error (e.g. too large request) */
Willy Tarreaudf1cc5d2023-10-20 17:47:33 +02002839 TRACE_USER("rcvd unparsable H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
Willy Tarreau54310dc2024-01-12 18:36:57 +01002840 goto strm_err;
Willy Tarreau25919232019-01-03 14:48:18 +01002841 }
2842
Willy Tarreau29268e92021-06-17 08:29:14 +02002843 TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);
2844
Willy Tarreaudf1cc5d2023-10-20 17:47:33 +02002845 /* Note: we don't emit any other logs below because if we return
Willy Tarreaua8e49542018-10-03 18:53:55 +02002846 * positively from h2c_frt_stream_new(), the stream will report the error,
2847 * and if we return in error, h2c_frt_stream_new() will emit the error.
Christopher Faulet7d013e72020-12-15 16:56:50 +01002848 *
2849 * Xfer the rxbuf to the stream. On success, the new stream owns the
2850 * rxbuf. On error, it is released here.
Willy Tarreau22de8d32018-09-05 19:55:58 +02002851 */
Amaury Denoyelle90ac6052021-10-18 14:45:49 +02002852 h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
Willy Tarreau13278b42017-10-13 19:23:14 +02002853 if (!h2s) {
Willy Tarreau96a10c22018-12-23 18:30:44 +01002854 h2s = (struct h2s*)h2_refused_stream;
Willy Tarreaudf1cc5d2023-10-20 17:47:33 +02002855 TRACE_USER("refused H2 req. ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
Willy Tarreau96a10c22018-12-23 18:30:44 +01002856 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002857 }
2858
2859 h2s->st = H2_SS_OPEN;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002860 h2s->flags |= flags;
Willy Tarreau1915ca22019-01-24 11:49:37 +01002861 h2s->body_len = body_len;
Christopher Fauletc2f1d0e2023-05-24 11:34:45 +02002862 h2s_propagate_term_flags(h2c, h2s);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002863
Willy Tarreau88d138e2019-01-02 19:38:14 +01002864 done:
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002865 if (h2s->flags & H2_SF_ES_RCVD) {
Willy Tarreaufc10f592019-01-30 19:28:32 +01002866 if (h2s->st == H2_SS_OPEN)
2867 h2s->st = H2_SS_HREM;
2868 else
2869 h2s_close(h2s);
Willy Tarreau13278b42017-10-13 19:23:14 +02002870 }
Willy Tarreau0d6e5d22023-02-20 17:05:10 +01002871 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau4869ed52023-10-13 18:11:59 +02002872 goto leave;
Willy Tarreau13278b42017-10-13 19:23:14 +02002873
2874 conn_err:
2875 h2c_error(h2c, error);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002876 out:
2877 h2_release_buf(h2c, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002878 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau4869ed52023-10-13 18:11:59 +02002879 h2s = NULL;
2880 goto leave;
Willy Tarreau96a10c22018-12-23 18:30:44 +01002881
Willy Tarreau54310dc2024-01-12 18:36:57 +01002882 strm_err:
Willy Tarreau54310dc2024-01-12 18:36:57 +01002883 h2s = (struct h2s*)h2_error_stream;
2884
Willy Tarreau96a10c22018-12-23 18:30:44 +01002885 send_rst:
2886 /* make the demux send an RST for the current stream. We may only
2887 * do this if we're certain that the HEADERS frame was properly
2888 * decompressed so that the HPACK decoder is still kept up to date.
2889 */
2890 h2_release_buf(h2c, &rxbuf);
2891 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002892
Willy Tarreau7838a792019-08-12 18:42:03 +02002893 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau4869ed52023-10-13 18:11:59 +02002894
2895 leave:
2896 if (h2_fe_max_total_streams && h2c->stream_cnt >= h2_fe_max_total_streams) {
2897 /* we've had enough streams on this connection, time to renew it.
2898 * In order to gracefully do this, we'll advertise a stream limit
2899 * of the current one plus the max concurrent streams value in the
2900 * GOAWAY frame, so that we're certain that the client is aware of
2901 * the limit before creating a new stream, but knows we won't harm
2902 * the streams in flight. Remember that client stream IDs are odd
2903 * so we apply twice the concurrent streams value to the current
2904 * ID.
2905 */
2906 if (h2c->last_sid <= 0 ||
2907 h2c->last_sid > h2c->max_id + 2 * h2c_max_concurrent_streams(h2c)) {
2908 /* not set yet or was too high */
2909 h2c->last_sid = h2c->max_id + 2 * h2c_max_concurrent_streams(h2c);
2910 h2c_send_goaway_error(h2c, NULL);
2911 }
2912 }
2913
Willy Tarreau96a10c22018-12-23 18:30:44 +01002914 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002915}
2916
/* processes a HEADERS frame received on a backend connection (i.e. a server
 * response, see the "bck" prefix and the "rcvd H2 response" traces below).
 * Returns h2s on success or NULL on missing data. It may return an error in
 * h2c or h2s. Described in RFC7540#6.2. Most of the errors here are reported
 * as connection errors since it's impossible to recover from such errors
 * after the compression context has been altered.
 */
static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer rxbuf = BUF_NULL;          /* scratch buffer for the already-closed stream case */
	unsigned long long body_len = 0;
	uint32_t flags = 0;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* wait until either the whole frame is buffered or the demux buffer
	 * is full (it cannot grow any further).
	 */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	if (h2s->st != H2_SS_CLOSED) {
		error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
	}
	else {
		/* the stream was already killed by an RST, let's consume
		 * the data and send another RST. The frame must still be
		 * decoded (into the local scratch rxbuf) so that the HPACK
		 * decompression context stays synchronized.
		 */
		error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
		h2s = (struct h2s*)h2_error_stream;
		h2c->st0 = H2_CS_FRAME_E;
		goto send_rst;
	}

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR) {
		TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		goto fail;
	}

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#5.1: response HEADERS are only expected on a stream
		 * that is open or half-closed (local).
		 */
		TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto fail; // missing data
		}

		/* stream error : send RST_STREAM */
		TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	/* Headers were fully decoded: update the stream state. An upper-layer
	 * error takes precedence over a normal end-of-stream transition.
	 */
	if (se_fl_test(h2s->sd, SE_FL_ERROR) && h2s->st < H2_SS_ERROR)
		h2s->st = H2_SS_ERROR;
	else if (h2s->flags & H2_SF_ES_RCVD) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* Unblock busy server h2s waiting for the response headers to validate
	 * the tunnel establishment or the end of the response of an aborted
	 * tunnel
	 */
	if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
3023
/* processes a DATA frame. Returns > 0 on success or zero on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.1.
 */
static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
{
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* note that empty DATA frames are perfectly valid and sometimes used
	 * to signal an end of stream (with the ES flag).
	 */

	if (!b_size(&h2c->dbuf) && h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* wait until either the whole frame is buffered or the demux buffer
	 * is full (it cannot grow any further).
	 */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#6.1: DATA only valid on open/half-closed(local) streams */
		error = H2_ERR_STREAM_CLOSED;
		goto strm_err;
	}

	if (!(h2s->flags & H2_SF_HEADERS_RCVD)) {
		/* RFC9113#8.1: The header section must be received before the message content */
		TRACE_ERROR("Unexpected DATA frame before the message headers", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	/* dfl-dpl is the frame's payload minus padding; it must never exceed
	 * the announced content-length that remains to be received.
	 */
	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
		/* RFC7540#8.1.2 */
		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	if (!(h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
		/* a tunnel attempt was aborted but the client still tries to send
		 * some raw data. Thus the stream is closed with the CANCEL error.
		 * Here we take care that it is not an empty DATA frame with the ES
		 * flag. The error is only raised if ES was already sent to the
		 * client because, depending on the scheduling, these data may have
		 * been sent before the server response and are not handled here.
		 */
		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (!h2_frt_transfer_data(h2s))
		goto fail;

	/* call the upper layers to process the frame, then let the upper layer
	 * notify the stream about any change.
	 */
	if (!h2s_sc(h2s)) {
		/* The upper layer has already closed, this may happen on
		 * 4xx/redirects during POST, or when receiving a response
		 * from an H2 server after the client has aborted.
		 */
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st >= H2_SS_ERROR) {
		/* stream error : send RST_STREAM */
		h2c->st0 = H2_CS_FRAME_E;
	}

	/* check for completion : the callee will change this to FRAME_A or
	 * FRAME_H once done.
	 */
	if (h2c->st0 == H2_CS_FRAME_P)
		goto fail;

	/* last frame */
	if (h2c->dff & H2_F_DATA_END_STREAM) {
		h2s->flags |= H2_SF_ES_RCVD;
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);

		/* ES received while content-length bytes are still expected */
		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
			/* RFC7540#8.1.2 */
			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}
	}

	/* Unblock busy server h2s waiting for the end of the response for an
	 * aborted tunnel
	 */
	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;

 strm_err:
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
3148
/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
 * valid for the current stream state. This is needed only after parsing the
 * frame header but in practice it can be performed at any time during
 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 */
static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);

	if (h2s->st == H2_SS_IDLE &&
	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
		 * this state MUST be treated as a connection error
		 */
		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
			/* only log if no other stream can report the error */
			sess_log(h2c->conn->owner);
		}
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
		/* on the backend side a server must not open streams itself;
		 * only PUSH_PROMISE would be permitted here
		 */
		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
		 * this state MUST be treated as a stream error.
		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
		 * PUSH_PROMISE/CONTINUATION cause connection errors.
		 */
		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		else {
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		}
		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* Below the management of frames received in closed state is a
	 * bit hackish because the spec makes strong differences between
	 * streams closed by receiving RST, sending RST, and seeing ES
	 * in both directions. In addition to this, the creation of a
	 * new stream reusing the identifier of a closed one will be
	 * detected here. Given that we cannot keep track of all closed
	 * streams forever, we consider that unknown closed streams were
	 * closed on RST received, which allows us to respond with an
	 * RST without breaking the connection (eg: to abort a transfer).
	 * Some frames have to be silently ignored as well.
	 */
	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* #5.1.1: The identifier of a newly
			 * established stream MUST be numerically
			 * greater than all streams that the initiating
			 * endpoint has opened or reserved. This
			 * governs streams that are opened using a
			 * HEADERS frame and streams that are reserved
			 * using PUSH_PROMISE. An endpoint that
			 * receives an unexpected stream identifier
			 * MUST respond with a connection error.
			 */
			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		if (h2s->flags & H2_SF_RST_RCVD &&
		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
			/* RFC7540#5.1:closed: an endpoint that
			 * receives any frame other than PRIORITY after
			 * receiving a RST_STREAM MUST treat that as a
			 * stream error of type STREAM_CLOSED.
			 *
			 * Note that old streams fall into this category
			 * and will lead to an RST being sent.
			 *
			 * However, we cannot generalize this to all frame types. Those
			 * carrying compression state must still be processed before
			 * being dropped or we'll desynchronize the decoder. This can
			 * happen with request trailers received after sending an
			 * RST_STREAM, or with header/trailers responses received after
			 * sending RST_STREAM (aborted stream).
			 *
			 * In addition, since our CLOSED streams always carry the
			 * RST_RCVD bit, we don't want to accidentally catch valid
			 * frames for a closed stream, i.e. RST/PRIO/WU.
			 */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
			h2c->st0 = H2_CS_FRAME_E;
			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		/* RFC7540#5.1:closed: if this state is reached as a
		 * result of sending a RST_STREAM frame, the peer that
		 * receives the RST_STREAM might have already sent
		 * frames on the stream that cannot be withdrawn. An
		 * endpoint MUST ignore frames that it receives on
		 * closed streams after it has sent a RST_STREAM
		 * frame. An endpoint MAY choose to limit the period
		 * over which it ignores frames and treat frames that
		 * arrive after this time as being in error.
		 */
		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
			/* RFC7540#5.1:closed: any frame other than
			 * PRIO/WU/RST in this state MUST be treated as
			 * a connection error
			 */
			if (h2c->dft != H2_FT_RST_STREAM &&
			    h2c->dft != H2_FT_PRIORITY &&
			    h2c->dft != H2_FT_WINDOW_UPDATE) {
				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
				return 0;
			}
		}
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
	return 1;
}
3285
Willy Tarreaubc933932017-10-09 16:21:43 +02003286/* process Rx frames to be demultiplexed */
3287static void h2_process_demux(struct h2c *h2c)
3288{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003289 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003290 struct h2_fh hdr;
3291 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003292 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003293
Willy Tarreau7838a792019-08-12 18:42:03 +02003294 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3295
Willy Tarreau081d4722017-05-16 21:51:05 +02003296 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003297 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003298
3299 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3300 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003301 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003302 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003303 goto out;
3304
Willy Tarreau52eed752017-09-22 15:05:09 +02003305 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3306 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003307 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003308 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003309 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauee4684f2021-06-17 08:08:48 +02003310 if (b_data(&h2c->dbuf) ||
Christopher Faulet3f35da22021-07-26 10:18:35 +02003311 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauee4684f2021-06-17 08:08:48 +02003312 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003313 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003314 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003315 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003316 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003317
3318 h2c->max_id = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02003319 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreaued2b9d92022-08-18 15:30:41 +02003320 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau52eed752017-09-22 15:05:09 +02003321 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003322
3323 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003324 /* ensure that what is pending is a valid SETTINGS frame
3325 * without an ACK.
3326 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003327 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003328 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003329 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003330 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003331 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003332 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003333 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003334 if (!(h2c->flags & H2_CF_IS_BACK))
3335 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003336 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003337 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003338 }
3339
3340 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3341 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003342 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003343 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3344 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003345 if (!(h2c->flags & H2_CF_IS_BACK))
3346 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003347 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003348 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003349 }
3350
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003351 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003352 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003353 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003354 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3355 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003356 if (!(h2c->flags & H2_CF_IS_BACK))
3357 sess_log(h2c->conn->owner);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003358 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003359 }
3360
Willy Tarreau3bf69182018-12-21 15:34:50 +01003361 /* that's OK, switch to FRAME_P to process it. This is
3362 * a SETTINGS frame whose header has already been
3363 * deleted above.
3364 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003365 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003366 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003367 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003368 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003369 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003370
3371 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003372 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003373 int ret = 0;
3374
Willy Tarreau7838a792019-08-12 18:42:03 +02003375 if (!b_data(&h2c->dbuf)) {
3376 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003377 h2c->flags |= H2_CF_DEM_SHORT_READ;
3378 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003379 }
3380
3381 if (h2c->st0 >= H2_CS_ERROR) {
3382 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003383 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003384 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003385
3386 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003387 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003388 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3389 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003390 break;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003391 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003392
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003393 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003394 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003395 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003396 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003397 /* only log if no other stream can report the error */
3398 sess_log(h2c->conn->owner);
3399 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003400 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003401 break;
3402 }
3403
Willy Tarreau617592c2022-06-08 16:32:22 +02003404 if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
3405 /* changed stream with a pending WU, need to
3406 * send it now.
3407 */
3408 TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
3409 ret = h2c_send_strm_wu(h2c);
3410 if (ret <= 0)
3411 break;
3412 }
3413
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003414 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003415 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3416 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3417 * we read the pad length and drop it from the remaining
3418 * payload (one byte + the 9 remaining ones = 10 total
3419 * removed), so we have a frame payload starting after the
3420 * pad len. Flow controlled frames (DATA) also count the
3421 * padlen in the flow control, so it must be adjusted.
3422 */
3423 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003424 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003425 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003426 if (!(h2c->flags & H2_CF_IS_BACK))
3427 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003428 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003429 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003430 }
3431 hdr.len--;
3432
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003433 if (b_data(&h2c->dbuf) < 10) {
3434 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003435 break; // missing padlen
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003436 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003437
3438 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3439
3440 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003441 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003442 /* RFC7540#6.1 : pad length = length of
3443 * frame payload or greater => error.
3444 */
3445 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003446 if (!(h2c->flags & H2_CF_IS_BACK))
3447 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003448 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003449 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003450 }
3451
3452 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3453 h2c->rcvd_c++;
3454 h2c->rcvd_s++;
3455 }
3456 b_del(&h2c->dbuf, 1);
3457 }
3458 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003459
3460 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003461 h2c->dfl = hdr.len;
3462 h2c->dsi = hdr.sid;
3463 h2c->dft = hdr.ft;
3464 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003465 h2c->dpl = padlen;
Willy Tarreau0f458712022-08-18 11:19:57 +02003466 h2c->flags |= H2_CF_DEM_IN_PROGRESS;
Willy Tarreau73db4342019-09-25 07:28:44 +02003467 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003468 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003469
3470 /* check for minimum basic frame format validity */
3471 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3472 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003473 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003474 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003475 if (!(h2c->flags & H2_CF_IS_BACK))
3476 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003477 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003478 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003479 }
Willy Tarreau15a47332022-03-18 15:57:34 +01003480
3481 /* transition to HEADERS frame ends the keep-alive idle
Willy Tarreauf279a2f2023-05-30 15:42:35 +02003482 * timer and starts the http-request idle delay. It uses
3483 * the idle_start timer as well.
Willy Tarreau15a47332022-03-18 15:57:34 +01003484 */
3485 if (hdr.ft == H2_FT_HEADERS)
3486 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003487 }
3488
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003489 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3490 * H2_CS_FRAME_P indicates an incomplete previous operation
3491 * (most often the first attempt) and requires some validity
3492 * checks for the frame and the current state. The two other
3493 * ones are set after completion (or abortion) and must skip
3494 * validity checks.
3495 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003496 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3497
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003498 if (tmp_h2s != h2s && h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003499 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003500 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003501 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003502 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003503 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003504 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003505 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003506 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003507 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003508 }
3509 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003510
Willy Tarreau63864812019-08-07 14:25:20 +02003511 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003512 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3513 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003514 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003515 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003516
Willy Tarreau7e98c052017-10-10 15:56:59 +02003517 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003518 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003519 if (h2c->st0 == H2_CS_FRAME_P) {
3520 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003521 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003522 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003523 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003524
Willy Tarreau7838a792019-08-12 18:42:03 +02003525 if (h2c->st0 == H2_CS_FRAME_A) {
3526 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003527 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003528 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003529 break;
3530
Willy Tarreaucf68c782017-10-10 17:11:41 +02003531 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003532 if (h2c->st0 == H2_CS_FRAME_P) {
3533 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003534 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003535 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003536
Willy Tarreau7838a792019-08-12 18:42:03 +02003537 if (h2c->st0 == H2_CS_FRAME_A) {
3538 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003539 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003540 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003541 break;
3542
Willy Tarreau26f95952017-07-27 17:18:30 +02003543 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003544 if (h2c->st0 == H2_CS_FRAME_P) {
3545 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003546 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003547 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003548 break;
3549
Willy Tarreau61290ec2017-10-17 08:19:21 +02003550 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003551 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003552 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3553 * frames' parsers consume all following CONTINUATION
3554 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003555 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003556 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003557 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003558 if (!(h2c->flags & H2_CF_IS_BACK))
3559 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003560 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003561 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003562
Willy Tarreau13278b42017-10-13 19:23:14 +02003563 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003564 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003565 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003566 if (h2c->flags & H2_CF_IS_BACK)
3567 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3568 else
3569 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003570 if (tmp_h2s) {
3571 h2s = tmp_h2s;
3572 ret = 1;
3573 }
3574 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003575 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003576 break;
3577
Willy Tarreau454f9052017-10-26 19:40:35 +02003578 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003579 if (h2c->st0 == H2_CS_FRAME_P) {
3580 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003581 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003582 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003583 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003584
Willy Tarreau7838a792019-08-12 18:42:03 +02003585 if (h2c->st0 == H2_CS_FRAME_A) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003586 /* rcvd_s will suffice to trigger the sending of a WU */
3587 h2c->st0 = H2_CS_FRAME_H;
Willy Tarreau7838a792019-08-12 18:42:03 +02003588 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003589 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003590
Willy Tarreau92153fc2017-12-03 19:46:19 +01003591 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003592 if (h2c->st0 == H2_CS_FRAME_P) {
3593 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003594 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003595 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003596 break;
3597
Willy Tarreaucd234e92017-08-18 10:59:39 +02003598 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003599 if (h2c->st0 == H2_CS_FRAME_P) {
3600 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003601 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003602 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003603 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003604 break;
3605
Willy Tarreaue96b0922017-10-30 00:28:29 +01003606 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003607 if (h2c->st0 == H2_CS_FRAME_P) {
3608 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003609 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003610 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003611 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003612 break;
3613
Willy Tarreau1c661982017-10-30 13:52:01 +01003614 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003615 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003616 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003617 /* drop frames that we ignore. They may be larger than
3618 * the buffer so we drain all of their contents until
3619 * we reach the end.
3620 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003621 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3622 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003623 h2c->dfl -= ret;
3624 ret = h2c->dfl == 0;
3625 }
3626
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003627 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003628 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003629 if (h2s->st == H2_SS_ERROR) {
3630 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003631 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003632 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003633
Willy Tarreau7838a792019-08-12 18:42:03 +02003634 if (h2c->st0 == H2_CS_FRAME_E) {
3635 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003636 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003637 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003638
Willy Tarreau7e98c052017-10-10 15:56:59 +02003639 /* error or missing data condition met above ? */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003640 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003641 break;
3642
3643 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003644 if (h2c->dfl)
3645 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003646 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3647 b_del(&h2c->dbuf, ret);
3648 h2c->dfl -= ret;
3649 if (!h2c->dfl) {
Willy Tarreau0f458712022-08-18 11:19:57 +02003650 h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
Christopher Faulet5112a602019-09-26 16:38:28 +02003651 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3652 h2c->st0 = H2_CS_FRAME_H;
Christopher Faulet5112a602019-09-26 16:38:28 +02003653 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003654 }
3655 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003656
Willy Tarreau617592c2022-06-08 16:32:22 +02003657 if (h2c->rcvd_s > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003658 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003659 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
3660 h2c_send_strm_wu(h2c);
3661 }
3662
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003663 if (h2c->rcvd_c > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003664 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003665 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003666 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003667 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003668
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003669 done:
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003670 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3671 if (h2c->flags & H2_CF_RCVD_SHUT)
3672 h2c->flags |= H2_CF_END_REACHED;
3673 }
3674
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003675 if (h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003676 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003677 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003678 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003679 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003680 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003681 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003682 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003683 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003684 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003685 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003686
Willy Tarreau7838a792019-08-12 18:42:03 +02003687 if (old_iw != h2c->miw) {
3688 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003689 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003690 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003691
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003692 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003693 out:
3694 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003695 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003696}
3697
Willy Tarreau989539b2020-01-10 17:01:29 +01003698/* resume each h2s eligible for sending in list head <head> */
3699static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3700{
3701 struct h2s *h2s, *h2s_back;
3702
3703 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3704
3705 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3706 if (h2c->mws <= 0 ||
3707 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3708 h2c->st0 >= H2_CS_ERROR)
3709 break;
3710
3711 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003712
Willy Tarreaud9464162020-01-10 18:25:07 +01003713 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003714 continue;
3715
Willy Tarreau5723f292020-01-10 15:16:57 +01003716 /* If the sender changed his mind and unsubscribed, let's just
3717 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003718 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003719 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3720 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003721 LIST_DEL_INIT(&h2s->list);
3722 continue;
3723 }
3724
Willy Tarreauf96508a2020-01-10 11:12:48 +01003725 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003726 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003727 tasklet_wakeup(h2s->subs->tasklet);
3728 h2s->subs->events &= ~SUB_RETRY_SEND;
3729 if (!h2s->subs->events)
3730 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003731 }
3732 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3733 tasklet_wakeup(h2s->shut_tl);
3734 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003735 }
3736
3737 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3738}
3739
Willy Tarreaude4a5382023-10-17 08:25:19 +02003740/* removes a stream from the list it may be in. If a stream has recently been
3741 * appended to the send_list, it might have been waiting on this one when
3742 * entering h2_snd_buf() and expecting it to complete before starting to send
3743 * in turn. For this reason we check (and clear) H2_CF_WAIT_INLIST to detect
3744 * this condition, and we try to resume sending streams if it happens. Note
3745 * that we don't need to do it for fctl_list as this list is relevant before
3746 * (only consulted after) a window update on the connection, and not because
3747 * of any competition with other streams.
3748 */
3749static inline void h2_remove_from_list(struct h2s *h2s)
3750{
3751 struct h2c *h2c = h2s->h2c;
3752
3753 if (!LIST_INLIST(&h2s->list))
3754 return;
3755
3756 LIST_DEL_INIT(&h2s->list);
3757 if (h2c->flags & H2_CF_WAIT_INLIST) {
3758 h2c->flags &= ~H2_CF_WAIT_INLIST;
3759 h2_resume_each_sending_h2s(h2c, &h2c->send_list);
3760 }
3761}
3762
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end (i.e. made all possible progress), 0 if it had to stop because the
 * mux is blocked (out0 path below).
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	/* before the first frame header can be sent/processed, a backend
	 * connection must emit its client preface.
	 */
	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			/* preface sent, now waiting for the peer's SETTINGS */
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates: first the
	 * per-stream one (rcvd_s), then the connection-level one (rcvd_c),
	 * unless the mux buffer is already full or missing.
	 */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2c->flags &= ~H2_CF_WAIT_INLIST;
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	/* on a fresh fatal error (H2_CS_ERROR), try to emit a GOAWAY once a
	 * stream was created (max_id >= 0), then escalate to H2_CS_ERROR2 to
	 * mark the GOAWAY as sent (or abandoned). If the mux is blocked, stay
	 * in H2_CS_ERROR so the GOAWAY attempt is retried later (out0 path).
	 */
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3823
Willy Tarreau62f52692017-10-08 23:01:42 +02003824
/* Attempt to read data into the demux buffer, and subscribe for receipt if
 * none is available. Returns non-zero if any data was received, or if a
 * shutdown or error was reported on the connection, otherwise zero.
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* already subscribed for receipt: don't read again, just report
	 * whether previously received data is still pending in dbuf.
	 */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	/* make sure we have a demux buffer; on failure, flag the allocation
	 * blockage so we're woken up once a buffer becomes available.
	 */
	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	/* only call the transport layer if there is room to receive into */
	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		/* nothing received while room was available: subscribe to be
		 * woken up on the next readiness event.
		 */
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	/* latch a pending read0 / connection error into the h2c flags so the
	 * demuxer can act on them even after the transport state changes.
	 */
	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}
	if (h2c->conn->flags & CO_FL_ERROR) {
		TRACE_DATA("connection error", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_ERROR;
	}

	/* release an empty buffer back to the pool, it's of no use for now */
	if (!b_data(buf)) {
		h2_release_buf(h2c, &h2c->dbuf);
		goto end;
	}

	if (b_data(buf) == buf->size) {
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

 end:
	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR));
}
3902
/* Try to send data if possible.
 * Returns non-zero if some data were sent or if the connection is in error
 * (H2_CF_ERR_PENDING or H2_CF_ERROR), otherwise zero.
 */
static int h2_send(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	int done;
	int sent = 0;

	TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);

	if (h2c->flags & (H2_CF_ERROR|H2_CF_ERR_PENDING)) {
		/* connection already in error: upgrade a pending error to a
		 * definitive one if the shutdown was received, drop pending
		 * output and report "something happened" to the caller.
		 */
		TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
		h2c->idle_start = now_ms;
		return 1;
	}

	/* This loop is quite simple : it tries to fill as much as it can from
	 * pending streams into the existing buffer until it's reportedly full
	 * or the end of send requests is reached. Then it tries to send this
	 * buffer's contents out, marks it not full if at least one byte could
	 * be sent, and tries again.
	 *
	 * The snd_buf() function normally takes a "flags" argument which may
	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
	 * data immediately comes and CO_SFL_STREAMER to indicate that the
	 * connection is streaming lots of data (used to increase TLS record
	 * size at the expense of latency). The former can be sent any time
	 * there's a buffer full flag, as it indicates at least one stream
	 * attempted to send and failed so there are pending data. An
	 * alternative would be to set it as long as there's an active stream
	 * but that would be problematic for ACKs until we have an absolute
	 * guarantee that all waiters have at least one byte to send. The
	 * latter should possibly not be set for now.
	 */

	done = 0;
	while (!(conn->flags & CO_FL_WAIT_XPRT) && !done) {
		unsigned int flags = 0;
		unsigned int released = 0;
		struct buffer *buf;
		uint to_send;

		/* fill as much as we can into the current buffer */
		while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
			done = h2_process_mux(h2c);

		if (h2c->flags & H2_CF_MUX_MALLOC)
			done = 1; // we won't go further without extra buffers

		if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
		    (h2c->flags & H2_CF_GOAWAY_FAILED))
			break;

		if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))
			flags |= CO_SFL_MSG_MORE;

		to_send = br_count(h2c->mbuf);
		if (to_send > 1) {
			/* usually we want to emit small TLS records to speed
			 * up the decoding on the client. That's what is being
			 * done by default. However if there is more than one
			 * buffer being allocated, we're streaming large data
			 * so we stick to large records.
			 */
			flags |= CO_SFL_STREAMER;
		}

		/* flush each allocated mbuf in turn; a partial or refused
		 * write stops the loop and will cause a later retry.
		 */
		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				/* MSG_MORE is also advertised while more than one
				 * buffer remains queued behind this one.
				 */
				int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf),
							      flags | (to_send > 1 ? CO_SFL_MSG_MORE : 0));
				if (!ret) {
					done = 1;
					break;
				}
				sent = 1;
				to_send--;
				TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
				b_del(buf, ret);
				if (b_data(buf)) {
					done = 1;
					break;
				}
			}
			b_free(buf);
			released++;
		}

		if (released)
			offer_buffers(NULL, released);

		/* Normally if wrote at least one byte, the buffer is not full
		 * anymore. However, if it was marked full because all of its
		 * buffers were used, we don't want to instantly wake up many
		 * streams because we'd create a thundering herd effect, notably
		 * when data are flushed in small chunks. Instead we wait for
		 * the buffer to be decongested again before allowing to send
		 * again. It also has the added benefit of not pumping more
		 * data from the other side when it's known that this one is
		 * still congested.
		 */
		if (sent && br_single(h2c->mbuf))
			h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
	}

	if (conn->flags & CO_FL_ERROR) {
		/* transport-level error: mark it pending, make it definitive
		 * if the read side is already shut, and drop pending output.
		 */
		h2c->flags |= H2_CF_ERR_PENDING;
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
	}

	/* We're not full anymore, so we can wake any task that are waiting
	 * for us.
	 */
	if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H) {
		h2c->flags &= ~H2_CF_WAIT_INLIST;
		h2_resume_each_sending_h2s(h2c, &h2c->send_list);
	}

	/* We're done, no more to send */
	if (!(conn->flags & CO_FL_WAIT_XPRT) && !br_data(h2c->mbuf)) {
		TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
		/* with no stream connector left and the mbuf drained, the
		 * connection becomes idle again from now on.
		 */
		if (h2c->flags & H2_CF_MBUF_HAS_DATA && !h2c->nb_sc) {
			h2c->flags &= ~H2_CF_MBUF_HAS_DATA;
			h2c->idle_start = now_ms;
		}
		goto end;
	}

	if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
		TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
	}
	TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
end:
	return sent || (h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR));
}
4045
/* this is the tasklet referenced in h2c->wait_event.tasklet. It performs the
 * pending send/recv I/O for the connection and runs h2_process() when needed.
 * Returns <t> when the tasklet remains valid, or NULL when it was freed
 * (connection taken over by another thread) or the connection was destroyed
 * by h2_process().
 */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
{
	struct connection *conn;
	struct tasklet *tl = (struct tasklet *)t;
	int conn_in_list;
	struct h2c *h2c = ctx;
	int ret = 0;

	if (state & TASK_F_USR1) {
		/* the tasklet was idling on an idle connection, it might have
		 * been stolen, let's be careful!
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (t->context == NULL) {
			/* The connection has been taken over by another thread,
			 * we're no longer responsible for it, so just free the
			 * tasklet, and do nothing.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			tasklet_free(tl);
			t = NULL;
			goto leave;
		}
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);

		/* Remove the connection from the list, to be sure nobody attempts
		 * to use it while we handle the I/O events
		 */
		conn_in_list = conn_get_idle_flag(conn);
		if (conn_in_list)
			conn_delete_from_tree(&conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	} else {
		/* we're certain the connection was not in an idle list */
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);
		conn_in_list = 0;
	}

	/* only perform the I/O directions not already subscribed for retry */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		ret = h2_send(h2c);
	if (!(h2c->wait_event.events & SUB_RETRY_RECV))
		ret |= h2_recv(h2c);
	if (ret || b_data(&h2c->dbuf))
		ret = h2_process(h2c);

	/* If we were in an idle list, we want to add it back into it,
	 * unless h2_process() returned -1, which means it has destroyed
	 * the connection (testing !ret is enough, if h2_process() wasn't
	 * called then ret will be 0 anyway).
	 */
	if (ret < 0)
		t = NULL;

	if (!ret && conn_in_list) {
		struct server *srv = objt_server(conn->target);

		/* put the connection back into the same idle tree it came from */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (conn_in_list == CO_FL_SAFE_LIST)
			eb64_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node);
		else
			eb64_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return t;
}
Willy Tarreaua2af5122017-10-09 11:56:46 +02004118
/* callback called on any event by the connection handler.
 * It applies changes and returns zero, or < 0 if it wants immediate
 * destruction of the connection (which normally does not happen in h2).
 */
static int h2_process(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_WAKE, conn);

	/* demux pending input unless demuxing is blocked; flush the demux
	 * buffer on fatal error and clear the "full" flag once room is back.
	 */
	if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
	    (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
		h2_process_demux(h2c);

		if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_ERROR))
			b_reset(&h2c->dbuf);

		if (!b_full(&h2c->dbuf))
			h2c->flags &= ~H2_CF_DEM_DFULL;
	}
	h2_send(h2c);

	if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
		int send_goaway = 1;
		/* If a close-spread-time option is set, we want to avoid
		 * closing all the active HTTP2 connections at once so we add a
		 * random factor that will spread the closing.
		 */
		if (tick_isset(global.close_spread_end)) {
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* This should increase the closing rate the
				 * further along the window we are. */
				send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
			}
		}
		else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
			send_goaway = 0; /* let the client close his connection himself */
		/* frontend is stopping, reload likely in progress, let's try
		 * to announce a graceful shutdown if not yet done. We don't
		 * care if it fails, it will be tried again later.
		 */
		if (send_goaway) {
			TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
			if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
				if (h2c->last_sid < 0)
					h2c->last_sid = (1U << 31) - 1;
				h2c_send_goaway_error(h2c, NULL);
			}
		}
	}

	/*
	 * If we received early data, and the handshake is done, wake
	 * any stream that was waiting for it.
	 */
	if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
		struct eb32_node *node;
		struct h2s *h2s;

		h2c->flags |= H2_CF_WAIT_FOR_HS;
		node = eb32_lookup_ge(&h2c->streams_by_id, 1);

		while (node) {
			h2s = container_of(node, struct h2s, by_id);
			if (se_fl_test(h2s->sd, SE_FL_WAIT_FOR_HS))
				h2s_notify_recv(h2s);
			node = eb32_next(node);
		}
	}

	/* terminal conditions: error, read0, fatal protocol error, failed
	 * GOAWAY, or graceful shutdown completed (all streams past last_sid
	 * gone). Wake the streams up so they can detach.
	 */
	if ((h2c->flags & H2_CF_ERROR) || h2c_read0_pending(h2c) ||
	    h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
	    (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
	     h2c->max_id >= h2c->last_sid)) {
		h2_wake_some_streams(h2c, 0);

		if (eb_is_empty(&h2c->streams_by_id)) {
			/* no more stream, kill the connection now */
			h2_release(h2c);
			TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
			return -1;
		}

		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}
	else if (h2c->st0 == H2_CS_ERROR) {
		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}

	/* release now-useless buffers to the pools */
	if (!b_data(&h2c->dbuf))
		h2_release_buf(h2c, &h2c->dbuf);

	if (h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
	    (h2c->st0 != H2_CS_ERROR &&
	     !br_data(h2c->mbuf) &&
	     (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
	     ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
		h2_release_mbuf(h2c);

	h2c_update_timeout(h2c);
	h2_send(h2c);
	TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
	return 0;
}
4235
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004236/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004237static int h2_wake(struct connection *conn)
4238{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004239 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02004240 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004241
Willy Tarreau7838a792019-08-12 18:42:03 +02004242 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4243 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01004244 if (ret >= 0)
4245 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02004246 TRACE_LEAVE(H2_EV_H2C_WAKE);
4247 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004248}
4249
/* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed. If the
 * MUX buffer still has lying data or is not allocatable, the connection is
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * GOAWAY frame.
 * Returns <t> when the timer was rearmed or is not expired, NULL once the
 * task was destroyed.
 */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
{
	struct h2c *h2c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);

	if (h2c) {
		/* Make sure nobody stole the connection from us */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

		/* Somebody already stole the connection from us, so we should not
		 * free it, we just have to free the task.
		 */
		if (!t->context) {
			h2c = NULL;
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			goto do_leave;
		}

		if (!expired) {
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
			return t;
		}

		if (!h2c_may_expire(h2c)) {
			/* we do still have streams but all of them are idle, waiting
			 * for the data layer, so we must not enforce the timeout here.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			t->expire = TICK_ETERNITY;
			return t;
		}

		/* We're about to destroy the connection, so make sure nobody attempts
		 * to steal it from us.
		 */
		if (h2c->conn->flags & CO_FL_LIST_MASK)
			conn_delete_from_tree(&h2c->conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

do_leave:
	task_destroy(t);

	if (!h2c) {
		/* resources were already deleted */
		TRACE_DEVEL("leaving (no more h2c)", H2_EV_H2C_WAKE);
		return NULL;
	}

	/* the timeout really expired: put the connection in error and wake
	 * the streams up so they can detach.
	 */
	h2c->task = NULL;
	h2c_error(h2c, H2_ERR_NO_ERROR);
	h2_wake_some_streams(h2c, 0);

	if (br_data(h2c->mbuf)) {
		/* don't even try to send a GOAWAY, the buffer is stuck */
		h2c->flags |= H2_CF_GOAWAY_FAILED;
	}

	/* try to send but no need to insist */
	h2c->last_sid = h2c->max_id;
	if (h2c_send_goaway_error(h2c, NULL) <= 0)
		h2c->flags |= H2_CF_GOAWAY_FAILED;

	/* best-effort flush of the mbuf ring so the GOAWAY has a chance to
	 * leave before the connection is torn down.
	 */
	if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
		unsigned int released = 0;
		struct buffer *buf;

		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
				if (!ret)
					break;
				b_del(buf, ret);
				if (b_data(buf))
					break;
				b_free(buf);
				released++;
			}
		}

		if (released)
			offer_buffers(NULL, released);
	}

	/* in any case this connection must not be considered idle anymore */
	if (h2c->conn->flags & CO_FL_LIST_MASK) {
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(&h2c->conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

	/* either we can release everything now or it will be done later once
	 * the last stream closes.
	 */
	if (eb_is_empty(&h2c->streams_by_id))
		h2_release(h2c);

	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return NULL;
}
4361
4362
Willy Tarreau62f52692017-10-08 23:01:42 +02004363/*******************************************/
4364/* functions below are used by the streams */
4365/*******************************************/
4366
4367/*
4368 * Attach a new stream to a connection
4369 * (Used for outgoing connections)
4370 */
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004371static int h2_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
Willy Tarreau62f52692017-10-08 23:01:42 +02004372{
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004373 struct h2s *h2s;
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004374 struct h2c *h2c = conn->ctx;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004375
Willy Tarreau7838a792019-08-12 18:42:03 +02004376 TRACE_ENTER(H2_EV_H2S_NEW, conn);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004377 h2s = h2c_bck_stream_new(h2c, sd->sc, sess);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004378 if (!h2s) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004379 TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Christopher Faulete00ad352021-12-16 14:44:31 +01004380 return -1;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004381 }
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004382
4383 /* the connection is not idle anymore, let's mark this */
4384 HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004385 xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004386
Willy Tarreau7838a792019-08-12 18:42:03 +02004387 TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
Christopher Faulete00ad352021-12-16 14:44:31 +01004388 return 0;
Willy Tarreau62f52692017-10-08 23:01:42 +02004389}
4390
Willy Tarreau4596fe22022-05-17 19:07:51 +02004391/* Retrieves the first valid stream connector from this connection, or returns
4392 * NULL. We have to scan because we may have some orphan streams. It might be
Willy Tarreaufafd3982018-11-18 21:29:20 +01004393 * beneficial to scan backwards from the end to reduce the likeliness to find
4394 * orphans.
4395 */
Willy Tarreaud1373532022-05-27 11:00:59 +02004396static struct stconn *h2_get_first_sc(const struct connection *conn)
Willy Tarreaufafd3982018-11-18 21:29:20 +01004397{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004398 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004399 struct h2s *h2s;
4400 struct eb32_node *node;
4401
4402 node = eb32_first(&h2c->streams_by_id);
4403 while (node) {
4404 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau7be4ee02022-05-18 07:31:41 +02004405 if (h2s_sc(h2s))
4406 return h2s_sc(h2s);
Willy Tarreaufafd3982018-11-18 21:29:20 +01004407 node = eb32_next(node);
4408 }
4409 return NULL;
4410}
4411
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004412static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4413{
4414 int ret = 0;
4415 struct h2c *h2c = conn->ctx;
4416
4417 switch (mux_ctl) {
4418 case MUX_STATUS:
4419 /* Only consider the mux to be ready if we're done with
4420 * the preface and settings, and we had no error.
4421 */
4422 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4423 ret |= MUX_STATUS_READY;
4424 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004425 case MUX_EXIT_STATUS:
4426 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004427 default:
4428 return -1;
4429 }
4430}
4431
Willy Tarreau62f52692017-10-08 23:01:42 +02004432/*
Olivier Houchard060ed432018-11-06 16:32:42 +01004433 * Destroy the mux and the associated connection, if it is no longer used
4434 */
Christopher Faulet73c12072019-04-08 11:23:22 +02004435static void h2_destroy(void *ctx)
Olivier Houchard060ed432018-11-06 16:32:42 +01004436{
Christopher Faulet73c12072019-04-08 11:23:22 +02004437 struct h2c *h2c = ctx;
Olivier Houchard060ed432018-11-06 16:32:42 +01004438
Willy Tarreau7838a792019-08-12 18:42:03 +02004439 TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
Christopher Faulet4e610962022-04-14 11:23:50 +02004440 if (eb_is_empty(&h2c->streams_by_id)) {
4441 BUG_ON(h2c->conn->ctx != h2c);
Christopher Faulet73c12072019-04-08 11:23:22 +02004442 h2_release(h2c);
Christopher Faulet4e610962022-04-14 11:23:50 +02004443 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004444 TRACE_LEAVE(H2_EV_H2C_END);
Olivier Houchard060ed432018-11-06 16:32:42 +01004445}
4446
4447/*
Willy Tarreau62f52692017-10-08 23:01:42 +02004448 * Detach the stream from the connection and possibly release the connection.
4449 */
static void h2_detach(struct sedesc *sd)
{
	struct h2s *h2s = sd->se;
	struct h2c *h2c;
	struct session *sess;

	TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);

	if (!h2s) {
		TRACE_LEAVE(H2_EV_STRM_END);
		return;
	}

	/* there's no txbuf so we're certain not to be able to send anything */
	h2s->flags &= ~H2_SF_NOTIFIED;

	sess = h2s->sess;
	h2c = h2s->h2c;
	h2c->nb_sc--;
	/* last attached stream connector gone and nothing pending in the mux
	 * buffer: record the start of the idle period now.
	 */
	if (!h2c->nb_sc && !br_data(h2c->mbuf))
		h2c->idle_start = now_ms;

	if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
	    !h2_frt_has_too_many_sc(h2c)) {
		/* frontend connection was blocking new streams creation */
		h2c->flags &= ~H2_CF_DEM_TOOMANY;
		h2c_restart_reading(h2c, 1);
	}

	/* this stream may be blocked waiting for some data to leave (possibly
	 * an ES or RST frame), so orphan it in this case.
	 */
	if (!(h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) &&
	    (h2c->st0 < H2_CS_ERROR) &&
	    (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
	    ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
		TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
		/* refresh the timeout if none was active, so that the last
		 * leaving stream may arm it.
		 */
		if (h2c->task && !tick_isset(h2c->task->expire))
			h2c_update_timeout(h2c);
		return;
	}

	if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi)) {
		/* unblock the connection if it was blocked on this
		 * stream.
		 */
		h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
		h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
		h2c_restart_reading(h2c, 1);
	}

	h2s_destroy(h2s);

	/* Backend connection: decide whether the connection may be kept for
	 * reuse. Private connections go through the session, shared ones go
	 * to the server's idle/avail trees. Each branch below may destroy
	 * the connection or give it away, in which case it returns instead
	 * of falling through to the common closing logic at the end.
	 */
	if (h2c->flags & H2_CF_IS_BACK) {
		if (!(h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERR_PENDING|H2_CF_ERROR))) {
			if (h2c->conn->flags & CO_FL_PRIVATE) {
				/* Add the connection in the session server list, if not already done */
				if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
					h2c->conn->owner = NULL;
					if (eb_is_empty(&h2c->streams_by_id)) {
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
				}
				if (eb_is_empty(&h2c->streams_by_id)) {
					if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
						/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
						TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
						return;
					}
				}
			}
			else {
				if (eb_is_empty(&h2c->streams_by_id)) {
					/* If the connection is owned by the session, first remove it
					 * from its list
					 */
					if (h2c->conn->owner) {
						session_unown_conn(h2c->conn->owner, h2c->conn);
						h2c->conn->owner = NULL;
					}

					/* mark that the tasklet may lose its context to another thread and
					 * that the handler needs to check it under the idle conns lock.
					 */
					HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
					xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);

					if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
						/* The server doesn't want it, let's kill the connection right away */
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
					/* At this point, the connection has been added to the
					 * server idle list, so another thread may already have
					 * hijacked it, so we can't do anything with it.
					 */
					TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
					return;

				}
				else if (!h2c->conn->hash_node->node.node.leaf_p &&
					 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
					 !LIST_INLIST(&h2c->conn->session_list)) {
					/* still some streams left: make the connection
					 * available again for new outgoing streams.
					 */
					eb64_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
						    &h2c->conn->hash_node->node);
				}
			}
		}
	}

	/* We don't want to close right now unless we're removing the
	 * last stream, and either the connection is in error, or it
	 * reached the ID already specified in a GOAWAY frame received
	 * or sent (as seen by last_sid >= 0).
	 */
	if (h2c_is_dead(h2c)) {
		/* no more stream will come, kill it now */
		TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
		h2_release(h2c);
	}
	else if (h2c->task) {
		h2c_update_timeout(h2c);
		TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
	}
	else
		TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
}
4583
/* Performs a synchronous or asynchronous shutr(). When the RST (or the
 * connection-level abort) cannot be emitted immediately, the stream is
 * queued with H2_SF_WANT_SHUTR set so that h2_deferred_shut() retries it.
 */
static void h2_do_shutr(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	if (h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	/* a pending shutw must be processed first; defer this shutr with it */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		goto add_to_list;

	/* a connstream may require us to immediately kill the whole connection
	 * for example because of a "tcp-request content reject" rule that is
	 * normally used to limit abuse. In this case we schedule a goaway to
	 * close the connection.
	 */
	if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
	    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
		TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
		h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
	}
	else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
		/* Nothing was sent yet for this stream, so reset with
		 * REFUSED_STREAM error to let the client retry the
		 * request.
		 */
		TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_REFUSED_STREAM);
	}
	else {
		/* a final response was already provided, we don't want this
		 * stream anymore. This may happen when the server responds
		 * before the end of an upload and closes quickly (redirect,
		 * deny, ...)
		 */
		h2s_error(h2s, H2_ERR_CANCEL);
	}

	/* try to emit the RST_STREAM frame now; <= 0 means it could not be
	 * sent (mux full/busy), so defer it.
	 */
	if (!(h2s->flags & H2_SF_RST_SENT) &&
	    h2s_send_rst_stream(h2c, h2s) <= 0)
		goto add_to_list;

	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);
	h2s_close(h2s);
 done:
	h2s->flags &= ~H2_SF_WANT_SHUTR;
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
add_to_list:
	/* Let the handler know we want to shutr, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTR;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4652
/* Performs a synchronous or asynchronous shutw(). A clean close (empty DATA
 * frame with ES) is attempted when possible, otherwise the stream is reset.
 * When the frame cannot be emitted immediately, the stream is queued with
 * H2_SF_WANT_SHUTW set so that h2_deferred_shut() retries it.
 */
static void h2_do_shutw(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->st != H2_SS_ERROR &&
	    (h2s->flags & (H2_SF_HEADERS_SENT | H2_SF_MORE_HTX_DATA)) == H2_SF_HEADERS_SENT) {
		/* we can cleanly close using an empty data frame only after headers
		 * and if no more data is expected to be sent.
		 */
		if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
		    h2_send_empty_data_es(h2s) <= 0)
			goto add_to_list;

		/* ES sent: fully closed if the remote side already closed,
		 * otherwise half-closed (local).
		 */
		if (h2s->st == H2_SS_HREM)
			h2s_close(h2s);
		else
			h2s->st = H2_SS_HLOC;
	} else {
		/* a connstream may require us to immediately kill the whole connection
		 * for example because of a "tcp-request content reject" rule that is
		 * normally used to limit abuse. In this case we schedule a goaway to
		 * close the connection.
		 */
		if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
		    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
			TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
			h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
		}
		else if (h2s->flags & H2_SF_MORE_HTX_DATA) {
			/* some unsent data were pending (e.g. abort during an upload),
			 * let's send a CANCEL.
			 */
			TRACE_STATE("shutw before end of data, sending CANCEL", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		else {
			/* Nothing was sent yet for this stream, so reset with
			 * REFUSED_STREAM error to let the client retry the
			 * request.
			 */
			TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_REFUSED_STREAM);
		}

		/* try to emit the RST_STREAM frame now; <= 0 means it could
		 * not be sent (mux full/busy), so defer it.
		 */
		if (!(h2s->flags & H2_SF_RST_SENT) &&
		    h2s_send_rst_stream(h2c, h2s) <= 0)
			goto add_to_list;

		h2s_close(h2s);
	}

	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);

	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);

 done:
	h2s->flags &= ~H2_SF_WANT_SHUTW;
	return;

 add_to_list:
	/* Let the handler know we want to shutw, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTW;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4736
Willy Tarreau5723f292020-01-10 15:16:57 +01004737/* This is the tasklet referenced in h2s->shut_tl, it is used for
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004738 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
4739 * and prevented the last frame from being emitted.
4740 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01004741struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004742{
4743 struct h2s *h2s = ctx;
Willy Tarreau88bdba32019-05-13 18:17:53 +02004744 struct h2c *h2c = h2s->h2c;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004745
Willy Tarreau7838a792019-08-12 18:42:03 +02004746 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4747
Willy Tarreau5723f292020-01-10 15:16:57 +01004748 if (h2s->flags & H2_SF_NOTIFIED) {
4749 /* some data processing remains to be done first */
4750 goto end;
4751 }
4752
Willy Tarreau2c249eb2019-05-13 18:06:17 +02004753 if (h2s->flags & H2_SF_WANT_SHUTW)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004754 h2_do_shutw(h2s);
4755
Willy Tarreau2c249eb2019-05-13 18:06:17 +02004756 if (h2s->flags & H2_SF_WANT_SHUTR)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004757 h2_do_shutr(h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004758
Willy Tarreau88bdba32019-05-13 18:17:53 +02004759 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
Olivier Houchardafc7cb82019-03-25 14:08:01 +01004760 /* We're done trying to send, remove ourself from the send_list */
Willy Tarreaude4a5382023-10-17 08:25:19 +02004761 h2_remove_from_list(h2s);
Olivier Houchard7a977432019-03-21 15:47:13 +01004762
Willy Tarreau7be4ee02022-05-18 07:31:41 +02004763 if (!h2s_sc(h2s)) {
Willy Tarreau88bdba32019-05-13 18:17:53 +02004764 h2s_destroy(h2s);
Willy Tarreau74163142021-03-13 11:30:19 +01004765 if (h2c_is_dead(h2c)) {
Willy Tarreau88bdba32019-05-13 18:17:53 +02004766 h2_release(h2c);
Willy Tarreau74163142021-03-13 11:30:19 +01004767 t = NULL;
4768 }
Willy Tarreau88bdba32019-05-13 18:17:53 +02004769 }
Olivier Houchard7a977432019-03-21 15:47:13 +01004770 }
Willy Tarreau5723f292020-01-10 15:16:57 +01004771 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02004772 TRACE_LEAVE(H2_EV_STRM_SHUT);
Willy Tarreau74163142021-03-13 11:30:19 +01004773 return t;
Willy Tarreau62f52692017-10-08 23:01:42 +02004774}
4775
Willy Tarreau4596fe22022-05-17 19:07:51 +02004776/* shutr() called by the stream connector (mux_ops.shutr) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004777static void h2_shutr(struct stconn *sc, enum co_shr_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004778{
Willy Tarreau36c22322022-05-27 10:41:24 +02004779 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004780
Willy Tarreau7838a792019-08-12 18:42:03 +02004781 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004782 if (mode)
4783 h2_do_shutr(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004784 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004785}
4786
Willy Tarreau4596fe22022-05-17 19:07:51 +02004787/* shutw() called by the stream connector (mux_ops.shutw) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004788static void h2_shutw(struct stconn *sc, enum co_shw_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004789{
Willy Tarreau36c22322022-05-27 10:41:24 +02004790 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004791
Willy Tarreau7838a792019-08-12 18:42:03 +02004792 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004793 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004794 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004795}
4796
Christopher Faulet9b79a102019-07-15 11:22:56 +02004797/* Decode the payload of a HEADERS frame and produce the HTX request or response
4798 * depending on the connection's side. Returns a positive value on success, a
4799 * negative value on failure, or 0 if it couldn't proceed. May report connection
4800 * errors in h2c->errcode if the frame is non-decodable and the connection
4801 * unrecoverable. In absence of connection error when a failure is reported, the
4802 * caller must assume a stream error.
Willy Tarreauea18f862018-12-22 20:19:26 +01004803 *
4804 * The function may fold CONTINUATION frames into the initial HEADERS frame
4805 * by removing padding and next frame header, then moving the CONTINUATION
4806 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
4807 * leaving a hole between the main frame and the beginning of the next one.
4808 * The possibly remaining incomplete or next frame at the end may be moved
4809 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
4810 * HEADERS frames are unwrapped into a temporary buffer before decoding.
4811 *
4812 * A buffer at the beginning of processing may look like this :
4813 *
4814 * ,---.---------.-----.--------------.--------------.------.---.
4815 * |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
4816 * `---^---------^-----^--------------^--------------^------^---'
4817 * | | <-----> | |
4818 * area | dpl | wrap
4819 * |<--------------> |
4820 * | dfl |
4821 * |<-------------------------------------------------->|
4822 * head data
4823 *
4824 * Padding is automatically overwritten when folding, participating to the
4825 * hole size after dfl :
4826 *
4827 * ,---.------------------------.-----.--------------.------.---.
4828 * |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
4829 * `---^------------------------^-----^--------------^------^---'
4830 * | | <-----> | |
4831 * area | hole | wrap
4832 * |<-----------------------> |
4833 * | dfl |
4834 * |<-------------------------------------------------->|
4835 * head data
4836 *
4837 * Please note that the HEADERS frame is always deprived from its PADLEN byte
4838 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
4839 * bit.
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004840 *
4841 * The <flags> field must point to either the stream's flags or to a copy of it
4842 * so that the function can update the following flags :
4843 * - H2_SF_DATA_CLEN when content-length is seen
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004844 * - H2_SF_HEADERS_RCVD once the frame is successfully decoded
Willy Tarreau88d138e2019-01-02 19:38:14 +01004845 *
4846 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
4847 * decoding, in order to detect if we're dealing with a headers or a trailers
4848 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
Willy Tarreau13278b42017-10-13 19:23:14 +02004849 */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01004850static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
Willy Tarreau13278b42017-10-13 19:23:14 +02004851{
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004852 const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
Willy Tarreau83061a82018-07-13 11:56:34 +02004853 struct buffer *tmp = get_trash_chunk();
Christopher Faulete4ab11b2019-06-11 15:05:37 +02004854 struct http_hdr list[global.tune.max_http_hdr * 2];
Willy Tarreau83061a82018-07-13 11:56:34 +02004855 struct buffer *copy = NULL;
Willy Tarreau174b06a2018-04-25 18:13:58 +02004856 unsigned int msgf;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004857 struct htx *htx = NULL;
Willy Tarreauea18f862018-12-22 20:19:26 +01004858 int flen; // header frame len
4859 int hole = 0;
Willy Tarreau86277d42019-01-02 15:36:11 +01004860 int ret = 0;
4861 int outlen;
Willy Tarreau13278b42017-10-13 19:23:14 +02004862 int wrap;
Willy Tarreau13278b42017-10-13 19:23:14 +02004863
Willy Tarreau7838a792019-08-12 18:42:03 +02004864 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
4865
Willy Tarreauea18f862018-12-22 20:19:26 +01004866next_frame:
4867 if (b_data(&h2c->dbuf) - hole < h2c->dfl)
4868 goto leave; // incomplete input frame
4869
4870 /* No END_HEADERS means there's one or more CONTINUATION frames. In
4871 * this case, we'll try to paste it immediately after the initial
4872 * HEADERS frame payload and kill any possible padding. The initial
4873 * frame's length will be increased to represent the concatenation
4874 * of the two frames. The next frame is read from position <tlen>
4875 * and written at position <flen> (minus padding if some is present).
4876 */
4877 if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
4878 struct h2_fh hdr;
4879 int clen; // CONTINUATION frame's payload length
4880
Willy Tarreau7838a792019-08-12 18:42:03 +02004881 TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004882 if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
4883 /* no more data, the buffer may be full, either due to
4884 * too large a frame or because of too large a hole that
4885 * we're going to compact at the end.
4886 */
4887 goto leave;
4888 }
4889
4890 if (hdr.ft != H2_FT_CONTINUATION) {
4891 /* RFC7540#6.10: frame of unexpected type */
Willy Tarreau7838a792019-08-12 18:42:03 +02004892 TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004893 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004894 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004895 goto fail;
4896 }
4897
4898 if (hdr.sid != h2c->dsi) {
4899 /* RFC7540#6.10: frame of different stream */
Willy Tarreau7838a792019-08-12 18:42:03 +02004900 TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004901 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004902 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004903 goto fail;
4904 }
4905
4906 if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
4907 /* RFC7540#4.2: invalid frame length */
Willy Tarreau7838a792019-08-12 18:42:03 +02004908 TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004909 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4910 goto fail;
4911 }
4912
		/* detect when we must stop aggregating frames */
4914 h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;
4915
4916 /* Take as much as we can of the CONTINUATION frame's payload */
4917 clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
4918 if (clen > hdr.len)
4919 clen = hdr.len;
4920
4921 /* Move the frame's payload over the padding, hole and frame
4922 * header. At least one of hole or dpl is null (see diagrams
		 * above). The hole moves after the new aggregated frame.
4924 */
4925 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
Christopher Fauletcb1847c2021-04-21 11:11:21 +02004926 h2c->dfl += hdr.len - h2c->dpl;
Willy Tarreauea18f862018-12-22 20:19:26 +01004927 hole += h2c->dpl + 9;
4928 h2c->dpl = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02004929 TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004930 goto next_frame;
4931 }
4932
4933 flen = h2c->dfl - h2c->dpl;
Willy Tarreau68472622017-12-11 18:36:37 +01004934
Willy Tarreau13278b42017-10-13 19:23:14 +02004935 /* if the input buffer wraps, take a temporary copy of it (rare) */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004936 wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
Willy Tarreau13278b42017-10-13 19:23:14 +02004937 if (wrap < h2c->dfl) {
Willy Tarreau68dd9852017-07-03 14:44:26 +02004938 copy = alloc_trash_chunk();
4939 if (!copy) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004940 TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
Willy Tarreau68dd9852017-07-03 14:44:26 +02004941 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
4942 goto fail;
4943 }
Willy Tarreau843b7cb2018-07-13 10:54:26 +02004944 memcpy(copy->area, b_head(&h2c->dbuf), wrap);
4945 memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
4946 hdrs = (uint8_t *) copy->area;
Willy Tarreau13278b42017-10-13 19:23:14 +02004947 }
4948
Willy Tarreau13278b42017-10-13 19:23:14 +02004949 /* Skip StreamDep and weight for now (we don't support PRIORITY) */
4950 if (h2c->dff & H2_F_HEADERS_PRIORITY) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004951 if (read_n32(hdrs) == h2c->dsi) {
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004952 /* RFC7540#5.3.1 : stream dep may not depend on itself */
Willy Tarreau7838a792019-08-12 18:42:03 +02004953 TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004954 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004955 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreaua0d11b62018-09-05 18:30:05 +02004956 goto fail;
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004957 }
4958
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004959 if (flen < 5) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004960 TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004961 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4962 goto fail;
4963 }
4964
Willy Tarreau13278b42017-10-13 19:23:14 +02004965 hdrs += 5; // stream dep = 4, weight = 1
4966 flen -= 5;
4967 }
4968
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004969 if (!h2_get_buf(h2c, rxbuf)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004970 TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Willy Tarreau937f7602018-02-26 15:22:17 +01004971 h2c->flags |= H2_CF_DEM_SALLOC;
Willy Tarreau86277d42019-01-02 15:36:11 +01004972 goto leave;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004973 }
Willy Tarreau13278b42017-10-13 19:23:14 +02004974
Willy Tarreau937f7602018-02-26 15:22:17 +01004975 /* we can't retry a failed decompression operation so we must be very
4976 * careful not to take any risks. In practice the output buffer is
4977 * always empty except maybe for trailers, in which case we simply have
4978 * to wait for the upper layer to finish consuming what is available.
4979 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02004980 htx = htx_from_buf(rxbuf);
4981 if (!htx_is_empty(htx)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004982 TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02004983 h2c->flags |= H2_CF_DEM_SFULL;
4984 goto leave;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004985 }
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004986
Willy Tarreau25919232019-01-03 14:48:18 +01004987 /* past this point we cannot roll back in case of error */
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004988 outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
4989 sizeof(list)/sizeof(list[0]), tmp);
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01004990
4991 if (outlen > 0 &&
4992 (TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
4993 TRACE_ENABLED(TRACE_LEVEL_USER, H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, 0, 0)) {
4994 struct ist n;
4995 int i;
4996
4997 for (i = 0; list[i].n.len; i++) {
4998 n = list[i].n;
4999
5000 if (!isttest(n)) {
5001 /* this is in fact a pseudo header whose number is in n.len */
5002 n = h2_phdr_to_ist(n.len);
5003 }
5004
5005 h2_trace_header(n, list[i].v, H2_EV_RX_FRAME|H2_EV_RX_HDR,
5006 ist(TRC_LOC), __FUNCTION__, h2c, NULL);
5007 }
5008 }
5009
Willy Tarreau59a10fb2017-11-21 20:03:02 +01005010 if (outlen < 0) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005011 TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau59a10fb2017-11-21 20:03:02 +01005012 h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
5013 goto fail;
5014 }
5015
Willy Tarreau25919232019-01-03 14:48:18 +01005016 /* The HPACK decompressor was updated, let's update the input buffer and
5017 * the parser's state to commit these changes and allow us to later
5018 * fail solely on the stream if needed.
5019 */
5020 b_del(&h2c->dbuf, h2c->dfl + hole);
5021 h2c->dfl = hole = 0;
5022 h2c->st0 = H2_CS_FRAME_H;
5023
Willy Tarreau59a10fb2017-11-21 20:03:02 +01005024 /* OK now we have our header list in <list> */
Willy Tarreau880f5802019-01-03 08:10:14 +01005025 msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005026 msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
Amaury Denoyelle74162742020-12-11 17:53:05 +01005027 /* If an Extended CONNECT has been sent on this stream, set message flag
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005028 * to convert 200 response to 101 htx response */
Amaury Denoyelle74162742020-12-11 17:53:05 +01005029 msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01005030
Willy Tarreau88d138e2019-01-02 19:38:14 +01005031 if (*flags & H2_SF_HEADERS_RCVD)
5032 goto trailers;
5033
5034 /* This is the first HEADERS frame so it's a headers block */
Christopher Faulet9b79a102019-07-15 11:22:56 +02005035 if (h2c->flags & H2_CF_IS_BACK)
Amaury Denoyelle74162742020-12-11 17:53:05 +01005036 outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005037 else
Willy Tarreaub6be1a42023-08-08 15:38:28 +02005038 outlen = h2_make_htx_request(list, htx, &msgf, body_len,
5039 !!(((const struct session *)h2c->conn->owner)->fe->options2 & PR_O2_REQBUG_OK));
Willy Tarreau59a10fb2017-11-21 20:03:02 +01005040
Christopher Faulet3d875582021-04-26 17:46:13 +02005041 if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
Willy Tarreau25919232019-01-03 14:48:18 +01005042 /* too large headers? this is a stream error only */
Willy Tarreau577fc272023-08-08 15:27:02 +02005043 TRACE_STATE("message headers too large or invalid", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
Christopher Faulet3d875582021-04-26 17:46:13 +02005044 htx->flags |= HTX_FL_PARSING_ERROR;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01005045 goto fail;
5046 }
Willy Tarreau13278b42017-10-13 19:23:14 +02005047
Willy Tarreau174b06a2018-04-25 18:13:58 +02005048 if (msgf & H2_MSGF_BODY) {
5049 /* a payload is present */
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01005050 if (msgf & H2_MSGF_BODY_CL) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01005051 *flags |= H2_SF_DATA_CLEN;
Christopher Faulet9b79a102019-07-15 11:22:56 +02005052 htx->extra = *body_len;
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01005053 }
Willy Tarreau174b06a2018-04-25 18:13:58 +02005054 }
Christopher Faulet7d247f02020-12-02 14:26:36 +01005055 if (msgf & H2_MSGF_BODYLESS_RSP)
5056 *flags |= H2_SF_BODYLESS_RESP;
Willy Tarreau174b06a2018-04-25 18:13:58 +02005057
Christopher Fauletd0db4232021-01-22 11:46:30 +01005058 if (msgf & H2_MSGF_BODY_TUNNEL)
5059 *flags |= H2_SF_BODY_TUNNEL;
5060 else {
5061 /* Abort the tunnel attempt, if any */
5062 if (*flags & H2_SF_BODY_TUNNEL)
5063 *flags |= H2_SF_TUNNEL_ABRT;
5064 *flags &= ~H2_SF_BODY_TUNNEL;
5065 }
5066
Willy Tarreau88d138e2019-01-02 19:38:14 +01005067 done:
Christopher Faulet0b465482019-02-19 15:14:23 +01005068 /* indicate that a HEADERS frame was received for this stream, except
5069 * for 1xx responses. For 1xx responses, another HEADERS frame is
5070 * expected.
5071 */
5072 if (!(msgf & H2_MSGF_RSP_1XX))
5073 *flags |= H2_SF_HEADERS_RCVD;
Willy Tarreau6cc85a52019-01-02 15:49:20 +01005074
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005075 if (h2c->dff & H2_F_HEADERS_END_STREAM) {
Christopher Faulet827a6292022-12-22 09:47:01 +01005076 if (msgf & H2_MSGF_RSP_1XX) {
5077 /* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
5078 TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
5079 goto fail;
5080 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005081 /* no more data are expected for this message */
5082 htx->flags |= HTX_FL_EOM;
Christopher Faulet1a60a662023-05-24 11:02:50 +02005083 *flags |= H2_SF_ES_RCVD;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005084 }
Willy Tarreau937f7602018-02-26 15:22:17 +01005085
Amaury Denoyelleefe22762020-12-11 17:53:08 +01005086 if (msgf & H2_MSGF_EXT_CONNECT)
5087 *flags |= H2_SF_EXT_CONNECT_RCVD;
5088
Willy Tarreau86277d42019-01-02 15:36:11 +01005089 /* success */
5090 ret = 1;
5091
Willy Tarreau68dd9852017-07-03 14:44:26 +02005092 leave:
Willy Tarreau86277d42019-01-02 15:36:11 +01005093 /* If there is a hole left and it's not at the end, we are forced to
Willy Tarreauea18f862018-12-22 20:19:26 +01005094 * move the remaining data over it.
5095 */
5096 if (hole) {
5097 if (b_data(&h2c->dbuf) > h2c->dfl + hole)
5098 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
5099 b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
5100 b_sub(&h2c->dbuf, hole);
5101 }
5102
Christopher Faulete3ec2172023-12-13 15:36:52 +01005103 if (b_full(&h2c->dbuf) && h2c->dfl && (!htx || htx_is_empty(htx))) {
Willy Tarreauea18f862018-12-22 20:19:26 +01005104 /* too large frames */
5105 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau86277d42019-01-02 15:36:11 +01005106 ret = -1;
Willy Tarreauea18f862018-12-22 20:19:26 +01005107 }
5108
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01005109 if (htx)
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01005110 htx_to_buf(htx, rxbuf);
Willy Tarreau68dd9852017-07-03 14:44:26 +02005111 free_trash_chunk(copy);
Willy Tarreau7838a792019-08-12 18:42:03 +02005112 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau86277d42019-01-02 15:36:11 +01005113 return ret;
5114
Willy Tarreau68dd9852017-07-03 14:44:26 +02005115 fail:
Willy Tarreau86277d42019-01-02 15:36:11 +01005116 ret = -1;
Willy Tarreau68dd9852017-07-03 14:44:26 +02005117 goto leave;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005118
5119 trailers:
5120 /* This is the last HEADERS frame hence a trailer */
Willy Tarreau88d138e2019-01-02 19:38:14 +01005121 if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
5122 /* It's a trailer but it's missing ES flag */
Willy Tarreau7838a792019-08-12 18:42:03 +02005123 TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005124 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02005125 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005126 goto fail;
5127 }
5128
Christopher Faulet9b79a102019-07-15 11:22:56 +02005129 /* Trailers terminate a DATA sequence */
Willy Tarreau7838a792019-08-12 18:42:03 +02005130 if (h2_make_htx_trailers(list, htx) <= 0) {
5131 TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005132 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02005133 }
Christopher Faulet1a60a662023-05-24 11:02:50 +02005134 *flags |= H2_SF_ES_RCVD;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005135 goto done;
Willy Tarreau13278b42017-10-13 19:23:14 +02005136}
5137
/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
 * parser state is automatically updated. Returns > 0 if it could completely
 * send the current frame, 0 if it couldn't complete, in which case
 * SE_FL_RCV_MORE must be checked to know if some data remain pending (an empty
 * DATA frame can return 0 as a valid result). Stream errors are reported in
 * h2s->errcode and connection errors in h2c->errcode. The caller must already
 * have checked the frame header and ensured that the frame was complete or the
 * buffer full. It changes the frame state to FRAME_A once done.
 */
static int h2_frt_transfer_data(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	int block;                      /* free/contiguous space, used as a copy limit */
	unsigned int flen = 0;          /* frame payload bytes remaining (minus padding) */
	struct htx *htx = NULL;
	struct buffer *scbuf;           /* stream-side rx buffer receiving the HTX data */
	unsigned int sent;              /* bytes actually appended to the HTX message */

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* clear the "stream buffer full" demux blocking flag before a new
	 * attempt; it will be set again below if we fail to make progress.
	 */
	h2c->flags &= ~H2_CF_DEM_SFULL;

	scbuf = h2_get_buf(h2c, &h2s->rxbuf);
	if (!scbuf) {
		/* no rx buffer available yet: block the demux until one is
		 * allocated for this stream.
		 */
		h2c->flags |= H2_CF_DEM_SALLOC;
		TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	htx = htx_from_buf(scbuf);

try_again:
	/* dfl is the remaining frame length, dpl the padding length; the
	 * transferable payload is the difference.
	 */
	flen = h2c->dfl - h2c->dpl;
	if (!flen)
		goto end_transfer;

	if (flen > b_data(&h2c->dbuf)) {
		/* only part of the payload was received so far; transfer what
		 * we have, or stop here if nothing is available yet.
		 */
		flen = b_data(&h2c->dbuf);
		if (!flen)
			goto fail;
	}

	block = htx_free_data_space(htx);
	if (!block) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	if (flen > block)
		flen = block;

	/* here, flen is the max we can copy into the output buffer */
	block = b_contig_data(&h2c->dbuf, 0);
	if (flen > block)
		flen = block;

	sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
	TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);

	/* update the demux buffer, the remaining frame length and the
	 * connection/stream flow-control accounting with what was consumed.
	 */
	b_del(&h2c->dbuf, sent);
	h2c->dfl -= sent;
	h2c->rcvd_c += sent;
	h2c->rcvd_s += sent; // warning, this can also affect the closed streams!

	if (h2s->flags & H2_SF_DATA_CLEN) {
		/* a content-length is advertised: keep the remaining body
		 * length in sync and expose it via htx->extra.
		 */
		h2s->body_len -= sent;
		htx->extra = h2s->body_len;
	}

	if (sent < flen) {
		/* htx_add_data() took less than offered: the rx buffer is full */
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}

	goto try_again;

 end_transfer:
	/* here we're done with the frame, all the payload (except padding) was
	 * transferred.
	 */

	if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
		/* no more data are expected for this message. This adds the EOM
		 * flag but only on the response path or if no tunnel attempt
		 * was aborted. Otherwise (request path + tunnel aborted), the
		 * EOM was already reported.
		 */
		if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
			/* htx may be empty if receiving an empty DATA frame. */
			if (!htx_set_eom(htx))
				goto fail;
		}
	}

	/* the padding also counts against the flow-control windows */
	h2c->rcvd_c += h2c->dpl;
	h2c->rcvd_s += h2c->dpl;
	h2c->dpl = 0;
	h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
	htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;
 fail:
	if (htx)
		htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
5245
/* Try to send a HEADERS frame matching HTX response present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_snd_fhdrs(struct h2s *h2s, struct htx *htx)
{
	struct http_hdr list[global.tune.max_http_hdr];  /* VLA sized by config */
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;           /* work area mapped over the mux buffer tail */
	struct buffer *mbuf;            /* current mux (output) buffer */
	struct htx_sl *sl;              /* the response start line */
	enum htx_blk_type type;
	int es_now = 0;                 /* set when END_STREAM must be added to this frame */
	int ret = 0;                    /* number of HTX bytes consumed, 0 on block/error */
	int hdr;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);

	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);

		if (type == HTX_BLK_UNUSED)
			continue;

		if (type == HTX_BLK_EOH)
			break;

		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				/* last slot is reserved for the end-of-list marker */
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}

			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else if (type == HTX_BLK_RES_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			h2s->status = sl->info.res.status;
			if (h2s->status == 204 || h2s->status == 304)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (h2s->status < 100 || h2s->status > 999) {
				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			else if (h2s->status == 101) {
				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
					h2s->status = 200;
					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
				}
				else {
					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
					goto fail;
				}
			}
			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
				/* Abort the tunnel attempt */
				h2s->flags &= ~H2_SF_BODY_TUNNEL;
				h2s->flags |= H2_SF_TUNNEL_ABRT;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}

	/* The start-line must be defined */
	BUG_ON(!sl);

	/* marker for end of headers */
	list[hdr].n = ist("");

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		/* no mux buffer available: block until one is allocated */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}

	chunk_reset(&outbuf);

	/* map <outbuf> over the contiguous free space at the buffer's tail;
	 * realign the buffer if that space is too small only because of
	 * wrapping (9 bytes are needed for the H2 frame header).
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9)
		goto full;

	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
		 * dynamic table size update so that some clients are not
		 * confused. In practice we only need to send the DTSU when the
		 * advertised size is lower than the current one, and since we
		 * don't use it and don't care about the default 4096 bytes,
		 * we only ack it with a zero size thus we at most have to deal
		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
		 * below for the whole context and interoperability risks:
		 * https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
		 */
		if (b_room(&outbuf) < 1)
			goto full;
		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes

		/* let's not update the flags now but only once the buffer is
		 * really committed.
		 */
	}

	/* encode status, which necessarily is the first one */
	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}

	if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED) {
		char sts[4];

		h2_trace_header(ist(":status"), ist(ultoa_r(h2s->status, sts, sizeof(sts))),
		                H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__,
		                h2c, h2s);
	}

	/* encode all headers, stop at empty name */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(list[hdr].n, ist("connection")) ||
		    isteq(list[hdr].n, ist("proxy-connection")) ||
		    isteq(list[hdr].n, ist("keep-alive")) ||
		    isteq(list[hdr].n, ist("upgrade")) ||
		    isteq(list[hdr].n, ist("transfer-encoding")))
			continue;

		/* Skip all pseudo-headers */
		if (*(list[hdr].n.ptr) == ':')
			continue;

		if (isteq(list[hdr].n, ist("")))
			break; // end

		if (!h2_encode_header(&outbuf, list[hdr].n, list[hdr].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
		                      ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);

	if (outbuf.data > h2c->mfs + 9) {
		/* the headers exceed the max frame size: split into a HEADERS
		 * frame followed by CONTINUATION frame(s).
		 */
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);

	/* remove all header blocks including the EOH and compute the
	 * corresponding size.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}

	if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
		/* Response already closed: add END_STREAM */
		es_now = 1;
	}
	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
		/* EOM+empty: we may need to add END_STREAM except for 1xx
		 * responses and tunneled response.
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
			es_now = 1;
	}

	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, outbuf.data);
	h2c->flags |= H2_CF_MBUF_HAS_DATA;

	/* indicates the HEADERS frame was sent, except for 1xx responses. For
	 * 1xx responses, another HEADERS frame is expected.
	 */
	if (h2s->status >= 200)
		h2s->flags |= H2_SF_HEADERS_SENT;

	if (h2c->flags & H2_CF_SHTS_UPDATED) {
		/* was sent above */
		h2c->flags |= H2_CF_DTSU_EMITTED;
		h2c->flags &= ~H2_CF_SHTS_UPDATED;
	}

	if (es_now) {
		/* the stream is half-closed (local) or fully closed now */
		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);
	}

	/* OK we could properly deliver the response */
 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* try to allocate another mux buffer in the ring before giving up */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5510
Willy Tarreau80739692018-10-05 11:35:57 +02005511/* Try to send a HEADERS frame matching HTX request present in HTX message
5512 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5513 * must check the stream's status to detect any error which might have happened
5514 * subsequently to a successful send. The htx blocks are automatically removed
5515 * from the message. The htx message is assumed to be valid since produced from
5516 * the internal code, hence it contains a start line, an optional series of
5517 * header blocks and an end of header, otherwise an invalid frame could be
5518 * emitted and the resulting htx message could be left in an inconsistent state.
5519 */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01005520static size_t h2s_snd_bhdrs(struct h2s *h2s, struct htx *htx)
Willy Tarreau80739692018-10-05 11:35:57 +02005521{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005522 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau80739692018-10-05 11:35:57 +02005523 struct h2c *h2c = h2s->h2c;
5524 struct htx_blk *blk;
Willy Tarreau80739692018-10-05 11:35:57 +02005525 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005526 struct buffer *mbuf;
Willy Tarreau80739692018-10-05 11:35:57 +02005527 struct htx_sl *sl;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005528 struct ist meth, uri, auth, host = IST_NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005529 enum htx_blk_type type;
5530 int es_now = 0;
5531 int ret = 0;
5532 int hdr;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005533 int extended_connect = 0;
Willy Tarreau80739692018-10-05 11:35:57 +02005534
Willy Tarreau7838a792019-08-12 18:42:03 +02005535 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5536
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005537 /* get the start line (we do have one) and the rest of the headers,
5538 * that we dump starting at header 0 */
5539 sl = NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005540 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005541 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005542 type = htx_get_blk_type(blk);
5543
5544 if (type == HTX_BLK_UNUSED)
5545 continue;
5546
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005547 if (type == HTX_BLK_EOH)
Willy Tarreau80739692018-10-05 11:35:57 +02005548 break;
5549
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005550 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005551 BUG_ON(!sl); /* The start-line must be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005552 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5553 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5554 goto fail;
5555 }
Willy Tarreau80739692018-10-05 11:35:57 +02005556
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005557 list[hdr].n = htx_get_blk_name(htx, blk);
5558 list[hdr].v = htx_get_blk_value(htx, blk);
Christopher Faulet67d58092019-10-02 10:51:38 +02005559
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005560 /* Skip header if same name is used to add the server name */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005561 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
5562 isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005563 continue;
Christopher Faulet67d58092019-10-02 10:51:38 +02005564
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005565 /* Convert connection: upgrade to Extended connect from rfc 8441 */
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005566 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005567 /* rfc 7230 #6.1 Connection = list of tokens */
5568 struct ist connection_ist = list[hdr].v;
5569 do {
5570 if (isteqi(iststop(connection_ist, ','),
5571 ist("upgrade"))) {
Amaury Denoyelle0df04362021-10-18 09:43:29 +02005572 if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
5573 TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5574 goto fail;
5575 }
5576
Amaury Denoyellee0c258c2021-10-18 10:05:16 +02005577 TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005578 h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
5579 sl->info.req.meth = HTTP_METH_CONNECT;
5580 meth = ist("CONNECT");
5581
5582 extended_connect = 1;
5583 break;
5584 }
5585
5586 connection_ist = istadv(istfind(connection_ist, ','), 1);
5587 } while (istlen(connection_ist));
5588 }
5589
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005590 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005591 /* rfc 7230 #6.7 Upgrade = list of protocols
5592 * rfc 8441 #4 Extended connect = :protocol is single-valued
5593 *
5594 * only first HTTP/1 protocol is preserved
5595 */
5596 const struct ist protocol = iststop(list[hdr].v, ',');
5597 /* upgrade_protocol field is 16 bytes long in h2s */
5598 istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
5599 }
5600
5601 if (isteq(list[hdr].n, ist("host")))
5602 host = list[hdr].v;
5603
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005604 hdr++;
5605 }
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005606 else if (type == HTX_BLK_REQ_SL) {
5607 BUG_ON(sl); /* Only one start-line expected */
5608 sl = htx_get_blk_ptr(htx, blk);
5609 meth = htx_sl_req_meth(sl);
5610 uri = htx_sl_req_uri(sl);
5611 if (sl->info.req.meth == HTTP_METH_HEAD)
5612 h2s->flags |= H2_SF_BODYLESS_RESP;
5613 if (unlikely(uri.len == 0)) {
5614 TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5615 goto fail;
5616 }
5617 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005618 else {
5619 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5620 goto fail;
5621 }
Willy Tarreau80739692018-10-05 11:35:57 +02005622 }
5623
	/* The start-line must be defined */
5625 BUG_ON(!sl);
5626
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005627 /* Now add the server name to a header (if requested) */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005628 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005629 struct server *srv = objt_server(h2c->conn->target);
5630
5631 if (srv) {
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005632 list[hdr].n = h2c->proxy->server_id_hdr_name;
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005633 list[hdr].v = ist(srv->id);
5634 hdr++;
5635 }
5636 }
5637
Willy Tarreau80739692018-10-05 11:35:57 +02005638 /* marker for end of headers */
5639 list[hdr].n = ist("");
5640
Willy Tarreau9c218e72019-05-26 10:08:28 +02005641 mbuf = br_tail(h2c->mbuf);
5642 retry:
5643 if (!h2_get_buf(h2c, mbuf)) {
5644 h2c->flags |= H2_CF_MUX_MALLOC;
5645 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005646 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005647 return 0;
5648 }
5649
Willy Tarreau80739692018-10-05 11:35:57 +02005650 chunk_reset(&outbuf);
5651
5652 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005653 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5654 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005655 break;
5656 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005657 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau80739692018-10-05 11:35:57 +02005658 }
5659
5660 if (outbuf.size < 9)
5661 goto full;
5662
5663 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5664 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5665 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5666 outbuf.data = 9;
5667
5668 /* encode the method, which necessarily is the first one */
Willy Tarreaubdabc3a2018-12-10 18:25:11 +01005669 if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005670 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005671 goto realign_again;
5672 goto full;
5673 }
5674
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005675 h2_trace_header(ist(":method"), meth, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
5676
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005677 auth = ist(NULL);
5678
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005679 /* RFC7540 #8.3: the CONNECT method must have :
5680 * - :authority set to the URI part (host:port)
5681 * - :method set to CONNECT
5682 * - :scheme and :path omitted
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005683 *
5684 * Note that this is not applicable in case of the Extended CONNECT
5685 * protocol from rfc 8441.
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005686 */
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005687 if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005688 auth = uri;
5689
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005690 if (!h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5691 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005692 /* output full */
5693 if (b_space_wraps(mbuf))
5694 goto realign_again;
5695 goto full;
5696 }
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005697
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005698 h2s->flags |= H2_SF_BODY_TUNNEL;
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005699 } else {
5700 /* other methods need a :scheme. If an authority is known from
5701 * the request line, it must be sent, otherwise only host is
5702 * sent. Host is never sent as the authority.
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005703 *
5704 * This code is also applicable for Extended CONNECT protocol
5705 * from rfc 8441.
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005706 */
5707 struct ist scheme = { };
Christopher Faulet3b44c542019-06-14 10:46:51 +02005708
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005709 if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
5710 /* the URI seems to start with a scheme */
5711 int len = 1;
5712
5713 while (len < uri.len && uri.ptr[len] != ':')
5714 len++;
5715
5716 if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
5717 /* make the uri start at the authority now */
Tim Duesterhus9f75ed12021-03-02 18:57:26 +01005718 scheme = ist2(uri.ptr, len);
Tim Duesterhus154374c2021-03-02 18:57:27 +01005719 uri = istadv(uri, len + 3);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005720
5721 /* find the auth part of the URI */
Tim Duesterhus92c696e2021-02-28 16:11:36 +01005722 auth = ist2(uri.ptr, 0);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005723 while (auth.len < uri.len && auth.ptr[auth.len] != '/')
5724 auth.len++;
5725
Tim Duesterhus154374c2021-03-02 18:57:27 +01005726 uri = istadv(uri, auth.len);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005727 }
5728 }
5729
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005730 /* For Extended CONNECT, the :authority must be present.
5731 * Use host value for it.
5732 */
5733 if (unlikely(extended_connect) && isttest(host))
5734 auth = host;
5735
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005736 if (!scheme.len) {
5737 /* no explicit scheme, we're using an origin-form URI,
5738 * probably from an H1 request transcoded to H2 via an
5739 * external layer, then received as H2 without authority.
5740 * So we have to look up the scheme from the HTX flags.
5741 * In such a case only http and https are possible, and
5742 * https is the default (sent by browsers).
5743 */
5744 if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
5745 scheme = ist("http");
5746 else
5747 scheme = ist("https");
5748 }
Christopher Faulet3b44c542019-06-14 10:46:51 +02005749
5750 if (!hpack_encode_scheme(&outbuf, scheme)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005751 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005752 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005753 goto realign_again;
5754 goto full;
5755 }
Willy Tarreau80739692018-10-05 11:35:57 +02005756
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005757 if (auth.len &&
5758 !h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5759 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005760 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005761 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005762 goto realign_again;
5763 goto full;
5764 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005765
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005766 /* encode the path. RFC7540#8.1.2.3: if path is empty it must
5767 * be sent as '/' or '*'.
5768 */
5769 if (unlikely(!uri.len)) {
5770 if (sl->info.req.meth == HTTP_METH_OPTIONS)
5771 uri = ist("*");
5772 else
5773 uri = ist("/");
Willy Tarreau053c1572019-02-01 16:13:59 +01005774 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005775
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005776 if (!hpack_encode_path(&outbuf, uri)) {
5777 /* output full */
5778 if (b_space_wraps(mbuf))
5779 goto realign_again;
5780 goto full;
5781 }
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005782
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005783 h2_trace_header(ist(":path"), uri, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
5784
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005785 /* encode the pseudo-header protocol from rfc8441 if using
5786 * Extended CONNECT method.
5787 */
5788 if (unlikely(extended_connect)) {
5789 const struct ist protocol = ist(h2s->upgrade_protocol);
5790 if (isttest(protocol)) {
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005791 if (!h2_encode_header(&outbuf, ist(":protocol"), protocol, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5792 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005793 /* output full */
5794 if (b_space_wraps(mbuf))
5795 goto realign_again;
5796 goto full;
5797 }
5798 }
5799 }
Willy Tarreau80739692018-10-05 11:35:57 +02005800 }
5801
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005802 /* encode all headers, stop at empty name. Host is only sent if we
5803 * do not provide an authority.
5804 */
Willy Tarreau80739692018-10-05 11:35:57 +02005805 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005806 struct ist n = list[hdr].n;
5807 struct ist v = list[hdr].v;
5808
Willy Tarreau80739692018-10-05 11:35:57 +02005809 /* these ones do not exist in H2 and must be dropped. */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005810 if (isteq(n, ist("connection")) ||
5811 (auth.len && isteq(n, ist("host"))) ||
5812 isteq(n, ist("proxy-connection")) ||
5813 isteq(n, ist("keep-alive")) ||
5814 isteq(n, ist("upgrade")) ||
5815 isteq(n, ist("transfer-encoding")))
Willy Tarreau80739692018-10-05 11:35:57 +02005816 continue;
5817
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005818 if (isteq(n, ist("te"))) {
5819 /* "te" may only be sent with "trailers" if this value
5820 * is present, otherwise it must be deleted.
5821 */
5822 v = istist(v, ist("trailers"));
Tim Duesterhus7b5777d2021-03-02 18:57:28 +01005823 if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005824 continue;
5825 v = ist("trailers");
5826 }
5827
Christopher Faulet86d144c2019-08-14 16:32:25 +02005828 /* Skip all pseudo-headers */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005829 if (*(n.ptr) == ':')
Christopher Faulet86d144c2019-08-14 16:32:25 +02005830 continue;
5831
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005832 if (isteq(n, ist("")))
Willy Tarreau80739692018-10-05 11:35:57 +02005833 break; // end
5834
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005835 if (!h2_encode_header(&outbuf, n, v, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005836 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005837 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005838 goto realign_again;
5839 goto full;
5840 }
5841 }
5842
Willy Tarreaucb985a42019-10-07 16:56:34 +02005843 /* update the frame's size */
5844 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5845
5846 if (outbuf.data > h2c->mfs + 9) {
5847 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5848 /* output full */
5849 if (b_space_wraps(mbuf))
5850 goto realign_again;
5851 goto full;
5852 }
5853 }
5854
Willy Tarreau3a537072021-06-17 08:40:04 +02005855 TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5856
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005857 /* remove all header blocks including the EOH and compute the
5858 * corresponding size.
Willy Tarreau80739692018-10-05 11:35:57 +02005859 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005860 ret = 0;
5861 blk = htx_get_head_blk(htx);
5862 while (blk) {
5863 type = htx_get_blk_type(blk);
5864 ret += htx_get_blksz(blk);
5865 blk = htx_remove_blk(htx, blk);
5866 /* The removed block is the EOH */
5867 if (type == HTX_BLK_EOH)
5868 break;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005869 }
Willy Tarreau80739692018-10-05 11:35:57 +02005870
Willy Tarreau95acc8b2022-05-27 16:14:10 +02005871 if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005872 /* Request already closed: add END_STREAM */
Willy Tarreau80739692018-10-05 11:35:57 +02005873 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005874 }
5875 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
5876 /* EOM+empty: we may need to add END_STREAM (except for CONNECT
5877 * request)
5878 */
5879 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5880 es_now = 1;
5881 }
Willy Tarreau80739692018-10-05 11:35:57 +02005882
Willy Tarreau80739692018-10-05 11:35:57 +02005883 if (es_now)
5884 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5885
5886 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005887 b_add(mbuf, outbuf.data);
Willy Tarreau936db562023-10-18 11:39:43 +02005888 h2c->flags |= H2_CF_MBUF_HAS_DATA;
Willy Tarreau80739692018-10-05 11:35:57 +02005889 h2s->flags |= H2_SF_HEADERS_SENT;
5890 h2s->st = H2_SS_OPEN;
5891
Willy Tarreau80739692018-10-05 11:35:57 +02005892 if (es_now) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005893 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02005894 // trim any possibly pending data (eg: inconsistent content-length)
5895 h2s->flags |= H2_SF_ES_SENT;
5896 h2s->st = H2_SS_HLOC;
5897 }
5898
Willy Tarreau80739692018-10-05 11:35:57 +02005899 end:
5900 return ret;
5901 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005902 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5903 goto retry;
Willy Tarreau80739692018-10-05 11:35:57 +02005904 h2c->flags |= H2_CF_MUX_MFULL;
5905 h2s->flags |= H2_SF_BLK_MROOM;
5906 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005907 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005908 goto end;
5909 fail:
5910 /* unparsable HTX messages, too large ones to be produced in the local
5911 * list etc go here (unrecoverable errors).
5912 */
5913 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5914 ret = 0;
5915 goto end;
5916}
5917
/* Try to send a DATA frame matching HTTP response present in HTX structure
 * present in <buf>, for stream <h2s>. Returns the number of bytes sent. The
 * caller must check the stream's status to detect any error which might have
 * happened subsequently to a successful send. Returns the number of data bytes
 * consumed, or zero if nothing done.
 */
static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	struct buffer outbuf;
	struct buffer *mbuf;
	size_t total = 0;  /* data bytes consumed from <htx>, returned to the caller */
	int es_now = 0;    /* set once the END_STREAM flag must be put on this frame */
	int bsize; /* htx block size */
	int fsize; /* h2 frame size */
	struct htx_blk *blk;
	enum htx_blk_type type;
	int trunc_out; /* non-zero if truncated on out buf */

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	htx = htx_from_buf(buf);

	/* We only come here with HTX_BLK_DATA blocks */

 new_frame:
	if (!count || htx_is_empty(htx))
		goto end;

	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
		/* The response HEADERS frame was not received yet. Thus the
		 * tunnel is not fully established yet. In this situation, we
		 * block data sending.
		 */
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}
	else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
		/* A tunnel attempt was aborted but there is pending raw data to xfer to the
		 * server. Thus the stream is closed with the CANCEL error. The error will be
		 * reported to the upper layer as a server abort. But at this stage there is
		 * nothing more we can do. We just wait for the end of the response to be sure
		 * to not truncate it.
		 */
		if (!(h2s->flags & H2_SF_ES_RCVD)) {
			TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			h2s->flags |= H2_SF_BLK_MBUSY;
		}
		else {
			TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		goto end;
	}

	/* only the head DATA block is considered per iteration */
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	trunc_out = 0;
	if (type != HTX_BLK_DATA)
		goto end;

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto end;
	}

	/* Perform some optimizations to reduce the number of buffer copies.
	 * First, if the mux's buffer is empty and the htx area contains
	 * exactly one data block of the same size as the requested count, and
	 * this count fits within the frame size, the stream's window size, and
	 * the connection's window size, then it's possible to simply swap the
	 * caller's buffer with the mux's output buffer and adjust offsets and
	 * length to match the entire DATA HTX block in the middle. In this
	 * case we perform a true zero-copy operation from end-to-end. This is
	 * the situation that happens all the time with large files. Second, if
	 * this is not possible, but the mux's output buffer is empty, we still
	 * have an opportunity to avoid the copy to the intermediary buffer, by
	 * making the intermediary buffer's area point to the output buffer's
	 * area. In this case we want to skip the HTX header to make sure that
	 * copies remain aligned and that this operation remains possible all
	 * the time. This goes for headers, data blocks and any data extracted
	 * from the HTX blocks.
	 */
	if (unlikely(fsize == count &&
	             htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
	             fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
		void *old_area = mbuf->area;

		if (b_data(mbuf)) {
			/* Too bad there are data left there. We're willing to memcpy/memmove
			 * up to 1/4 of the buffer, which means that it's OK to copy a large
			 * frame into a buffer containing few data if it needs to be realigned,
			 * and that it's also OK to copy few data without realigning. Otherwise
			 * we'll pretend the mbuf is full and wait for it to become empty.
			 */
			if (fsize + 9 <= b_room(mbuf) &&
			    (b_data(mbuf) <= b_size(mbuf) / 4 ||
			     (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
				TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
				goto copy;
			}

			/* cannot swap nor append: try the next buffer in the ring */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;

			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}

		if (htx->flags & HTX_FL_EOM) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
		/* map an H2 frame to the HTX block so that we can put the
		 * frame header there.
		 */
		*mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
		outbuf.area = b_head(mbuf);

		/* prepend an H2 DATA frame header just before the DATA block */
		memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
		write_n32(outbuf.area + 5, h2s->id); // 4 bytes
		if (es_now)
			outbuf.area[4] |= H2_F_DATA_END_STREAM;
		h2_set_frame_size(outbuf.area, fsize);

		/* update windows */
		h2s->sws -= fsize;
		h2c->mws -= fsize;

		/* and exchange with our old area */
		buf->area = old_area;
		buf->data = buf->head = 0;
		total += fsize;
		fsize = 0;
		h2c->flags |= H2_CF_MBUF_HAS_DATA;

		TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto out;
	}

 copy:
	/* for DATA and EOM we'll have to emit a frame, even if empty */

	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9) {
		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
	memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	/* we have in <fsize> the exact number of bytes we need to copy from
	 * the HTX buffer. We need to check this against the connection's and
	 * the stream's send windows, and to ensure that this fits in the max
	 * frame size and in the buffer's available space minus 9 bytes (for
	 * the frame header). The connection's flow control is applied last so
	 * that we can use a separate list of streams which are immediately
	 * unblocked on window opening. Note: we don't implement padding.
	 */

	if (!fsize)
		goto send_empty;

	if (h2s_mws(h2s) <= 0) {
		h2s->flags |= H2_SF_BLK_SFCTL;
		if (LIST_INLIST(&h2s->list))
			h2_remove_from_list(h2s);
		LIST_APPEND(&h2c->blocked_list, &h2s->list);
		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > count)
		fsize = count;

	if (fsize > h2s_mws(h2s))
		fsize = h2s_mws(h2s); // >0

	if (h2c->mfs && fsize > h2c->mfs)
		fsize = h2c->mfs; // >0

	if (fsize + 9 > outbuf.size) {
		/* It doesn't fit at once. If it at least fits once split and
		 * the amount of data to move is low, let's defragment the
		 * buffer now.
		 */
		if (b_space_wraps(mbuf) &&
		    (fsize + 9 <= b_room(mbuf)) &&
		    b_data(mbuf) <= MAX_DATA_REALIGN)
			goto realign_again;
		fsize = outbuf.size - 9;
		trunc_out = 1;

		if (fsize <= 0) {
			/* no need to send an empty frame here */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}
	}

	if (h2c->mws <= 0) {
		h2s->flags |= H2_SF_BLK_MFCTL;
		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > h2c->mws)
		fsize = h2c->mws;

	/* now let's copy this into the output buffer */
	memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
	h2s->sws -= fsize;
	h2c->mws -= fsize;
	count -= fsize;

 send_empty:
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, fsize);

	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		htx_remove_blk(htx, blk);
		if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

	if (es_now)
		outbuf.area[4] |= H2_F_DATA_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, fsize + 9);
	h2c->flags |= H2_CF_MBUF_HAS_DATA;

 out:
	if (es_now) {
		/* stream state advances once END_STREAM was emitted */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);

		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	}
	else if (fsize) {
		if (fsize == bsize) {
			TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto new_frame;
		}
		else if (trunc_out) {
			/* we've truncated this block */
			goto new_frame;
		}
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
6218
Christopher Faulet991febd2020-12-02 15:17:31 +01006219/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
6220 * ES flag set for stream <h2s>. This function is called for response known to
6221 * have no payload. Only DATA blocks are skipped. This means the trailers are
Ilya Shipitsinacf84592021-02-06 22:29:08 +05006222 * still emitted. The caller must check the stream's status to detect any error
Christopher Faulet991febd2020-12-02 15:17:31 +01006223 * which might have happened subsequently to a successful send. Returns the
6224 * number of data bytes consumed, or zero if nothing done.
6225 */
6226static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
6227{
6228 struct h2c *h2c = h2s->h2c;
6229 struct htx *htx;
6230 int bsize; /* htx block size */
6231 int fsize; /* h2 frame size */
6232 struct htx_blk *blk;
6233 enum htx_blk_type type;
6234 size_t total = 0;
6235
6236 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6237
Christopher Faulet991febd2020-12-02 15:17:31 +01006238 htx = htx_from_buf(buf);
6239
6240 next_data:
6241 if (!count || htx_is_empty(htx))
6242 goto end;
6243 blk = htx_get_head_blk(htx);
6244 type = htx_get_blk_type(blk);
6245 bsize = htx_get_blksz(blk);
6246 fsize = bsize;
6247 if (type != HTX_BLK_DATA)
6248 goto end;
6249
6250 if (fsize > count)
6251 fsize = count;
6252
6253 if (fsize != bsize)
6254 goto skip_data;
6255
6256 if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
6257 goto skip_data;
6258
6259 /* Here, it is the last block and it is also the end of the message. So
6260 * we can emit an empty DATA frame with the ES flag set
6261 */
6262 if (h2_send_empty_data_es(h2s) <= 0)
6263 goto end;
6264
6265 if (h2s->st == H2_SS_OPEN)
6266 h2s->st = H2_SS_HLOC;
6267 else
6268 h2s_close(h2s);
6269
6270 skip_data:
6271 /* consume incoming HTX block */
6272 total += fsize;
6273 if (fsize == bsize) {
6274 TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6275 htx_remove_blk(htx, blk);
6276 goto next_data;
6277 }
6278 else {
6279 /* we've truncated this block */
6280 htx_cut_data_blk(htx, blk, fsize);
6281 }
6282
6283 end:
6284 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6285 return total;
6286}
6287
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006288/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
6289 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
6290 * processed. The caller must check the stream's status to detect any error
6291 * which might have happened subsequently to a successful send. The htx blocks
6292 * are automatically removed from the message. The htx message is assumed to be
6293 * valid since produced from the internal code. Processing stops when meeting
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006294 * the EOT, which *is* removed. All trailers are processed at once and sent as a
6295 * single frame. The ES flag is always set.
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006296 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006297static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006298{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02006299 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006300 struct h2c *h2c = h2s->h2c;
6301 struct htx_blk *blk;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006302 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02006303 struct buffer *mbuf;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006304 enum htx_blk_type type;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006305 int ret = 0;
6306 int hdr;
6307 int idx;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006308
Willy Tarreau7838a792019-08-12 18:42:03 +02006309 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
6310
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006311 /* get trailers. */
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006312 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006313 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006314 type = htx_get_blk_type(blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006315
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006316 if (type == HTX_BLK_UNUSED)
6317 continue;
6318
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006319 if (type == HTX_BLK_EOT)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006320 break;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006321 if (type == HTX_BLK_TLR) {
6322 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
6323 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
6324 goto fail;
6325 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006326
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006327 list[hdr].n = htx_get_blk_name(htx, blk);
6328 list[hdr].v = htx_get_blk_value(htx, blk);
6329 hdr++;
6330 }
6331 else {
6332 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006333 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02006334 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006335 }
6336
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006337 /* marker for end of trailers */
6338 list[hdr].n = ist("");
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006339
Willy Tarreau9c218e72019-05-26 10:08:28 +02006340 mbuf = br_tail(h2c->mbuf);
6341 retry:
6342 if (!h2_get_buf(h2c, mbuf)) {
6343 h2c->flags |= H2_CF_MUX_MALLOC;
6344 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006345 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02006346 goto end;
6347 }
6348
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006349 chunk_reset(&outbuf);
6350
6351 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02006352 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
6353 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006354 break;
6355 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02006356 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006357 }
6358
6359 if (outbuf.size < 9)
6360 goto full;
6361
6362 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
6363 memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
6364 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
6365 outbuf.data = 9;
6366
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006367 /* encode all headers */
6368 for (idx = 0; idx < hdr; idx++) {
6369 /* these ones do not exist in H2 or must not appear in
6370 * trailers and must be dropped.
6371 */
6372 if (isteq(list[idx].n, ist("host")) ||
6373 isteq(list[idx].n, ist("content-length")) ||
6374 isteq(list[idx].n, ist("connection")) ||
6375 isteq(list[idx].n, ist("proxy-connection")) ||
6376 isteq(list[idx].n, ist("keep-alive")) ||
6377 isteq(list[idx].n, ist("upgrade")) ||
6378 isteq(list[idx].n, ist("te")) ||
6379 isteq(list[idx].n, ist("transfer-encoding")))
6380 continue;
6381
Christopher Faulet86d144c2019-08-14 16:32:25 +02006382 /* Skip all pseudo-headers */
6383 if (*(list[idx].n.ptr) == ':')
6384 continue;
6385
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01006386 if (!h2_encode_header(&outbuf, list[idx].n, list[idx].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
6387 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006388 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006389 if (b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006390 goto realign_again;
6391 goto full;
6392 }
6393 }
6394
Willy Tarreau5121e5d2019-05-06 15:13:41 +02006395 if (outbuf.data == 9) {
6396 /* here we have a problem, we have nothing to emit (either we
6397 * received an empty trailers block followed or we removed its
6398 * contents above). Because of this we can't send a HEADERS
6399 * frame, so we have to cheat and instead send an empty DATA
6400 * frame conveying the ES flag.
Willy Tarreau67b8cae2019-02-21 18:16:35 +01006401 */
6402 outbuf.area[3] = H2_FT_DATA;
6403 outbuf.area[4] = H2_F_DATA_END_STREAM;
6404 }
6405
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006406 /* update the frame's size */
6407 h2_set_frame_size(outbuf.area, outbuf.data - 9);
6408
Willy Tarreau572d9f52019-10-11 16:58:37 +02006409 if (outbuf.data > h2c->mfs + 9) {
6410 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
6411 /* output full */
6412 if (b_space_wraps(mbuf))
6413 goto realign_again;
6414 goto full;
6415 }
6416 }
6417
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006418 /* commit the H2 response */
Willy Tarreau7838a792019-08-12 18:42:03 +02006419 TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006420 b_add(mbuf, outbuf.data);
Willy Tarreau936db562023-10-18 11:39:43 +02006421 h2c->flags |= H2_CF_MBUF_HAS_DATA;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006422 h2s->flags |= H2_SF_ES_SENT;
6423
6424 if (h2s->st == H2_SS_OPEN)
6425 h2s->st = H2_SS_HLOC;
6426 else
6427 h2s_close(h2s);
6428
6429 /* OK we could properly deliver the response */
6430 done:
Willy Tarreaufb07b3f2019-05-06 11:23:29 +02006431 /* remove all header blocks till the end and compute the corresponding size. */
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006432 ret = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006433 blk = htx_get_head_blk(htx);
6434 while (blk) {
6435 type = htx_get_blk_type(blk);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006436 ret += htx_get_blksz(blk);
6437 blk = htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006438 /* The removed block is the EOT */
6439 if (type == HTX_BLK_EOT)
6440 break;
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006441 }
6442
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006443 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006444 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006445 return ret;
6446 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02006447 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
6448 goto retry;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006449 h2c->flags |= H2_CF_MUX_MFULL;
6450 h2s->flags |= H2_SF_BLK_MROOM;
6451 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006452 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006453 goto end;
6454 fail:
6455 /* unparsable HTX messages, too large ones to be produced in the local
6456 * list etc go here (unrecoverable errors).
6457 */
6458 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
6459 ret = 0;
6460 goto end;
6461}
6462
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006463/* Called from the upper layer, to subscribe <es> to events <event_type>. The
6464 * event subscriber <es> is not allowed to change from a previous call as long
6465 * as at least one event is still subscribed. The <event_type> must only be a
6466 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006467 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006468static int h2_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006469{
Willy Tarreau36c22322022-05-27 10:41:24 +02006470 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006471 struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006472
Willy Tarreau7838a792019-08-12 18:42:03 +02006473 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006474
6475 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006476 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006477
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006478 es->events |= event_type;
6479 h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006480
6481 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006482 TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006483
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006484 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006485 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
Olivier Houchardf8338152019-05-14 17:50:32 +02006486 if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006487 !LIST_INLIST(&h2s->list)) {
Willy Tarreaude4a5382023-10-17 08:25:19 +02006488 if (h2s->flags & H2_SF_BLK_MFCTL) {
6489 TRACE_DEVEL("Adding to fctl list", H2_EV_STRM_SEND, h2c->conn, h2s);
Willy Tarreau2b718102021-04-21 07:32:39 +02006490 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreaude4a5382023-10-17 08:25:19 +02006491 }
6492 else {
6493 TRACE_DEVEL("Adding to send list", H2_EV_STRM_SEND, h2c->conn, h2s);
Willy Tarreau2b718102021-04-21 07:32:39 +02006494 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaude4a5382023-10-17 08:25:19 +02006495 }
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006496 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02006497 }
Willy Tarreau7838a792019-08-12 18:42:03 +02006498 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006499 return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006500}
6501
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006502/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6503 * The <es> pointer is not allowed to differ from the one passed to the
6504 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006505 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006506static int h2_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006507{
Willy Tarreau36c22322022-05-27 10:41:24 +02006508 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006509
Willy Tarreau7838a792019-08-12 18:42:03 +02006510 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006511
6512 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006513 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006514
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006515 es->events &= ~event_type;
6516 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006517 h2s->subs = NULL;
6518
6519 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006520 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006521
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006522 if (event_type & SUB_RETRY_SEND) {
Frédéric Lécaille67fda162022-06-30 12:01:54 +02006523 TRACE_DEVEL("unsubscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006524 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006525 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
Willy Tarreaude4a5382023-10-17 08:25:19 +02006526 h2_remove_from_list(h2s);
Olivier Houchardd846c262018-10-19 17:24:29 +02006527 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006528
Willy Tarreau7838a792019-08-12 18:42:03 +02006529 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006530 return 0;
6531}
6532
6533
Christopher Faulet564e39c2021-09-21 15:50:55 +02006534/* Called from the upper layer, to receive data
6535 *
6536 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
6537 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
6538 * means the caller wants to flush input data (from the mux buffer and the
6539 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
6540 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
6541 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
6542 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
6543 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
6544 * copy as much data as possible.
6545 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006546static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
Olivier Houchard511efea2018-08-16 15:30:32 +02006547{
Willy Tarreau36c22322022-05-27 10:41:24 +02006548 struct h2s *h2s = __sc_mux_strm(sc);
Willy Tarreau082f5592018-11-25 08:03:32 +01006549 struct h2c *h2c = h2s->h2c;
Willy Tarreau86724e22018-12-01 23:19:43 +01006550 struct htx *h2s_htx = NULL;
6551 struct htx *buf_htx = NULL;
Olivier Houchard511efea2018-08-16 15:30:32 +02006552 size_t ret = 0;
6553
Willy Tarreau7838a792019-08-12 18:42:03 +02006554 TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);
6555
Olivier Houchard511efea2018-08-16 15:30:32 +02006556 /* transfer possibly pending data to the upper layer */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006557 h2s_htx = htx_from_buf(&h2s->rxbuf);
Christopher Fauletec361bb2022-02-21 15:12:54 +01006558 if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
Christopher Faulet9b79a102019-07-15 11:22:56 +02006559 /* Here htx_to_buf() will set buffer data to 0 because
6560 * the HTX is empty.
6561 */
6562 htx_to_buf(h2s_htx, &h2s->rxbuf);
6563 goto end;
6564 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02006565 ret = h2s_htx->data;
6566 buf_htx = htx_from_buf(buf);
Willy Tarreau7196dd62019-03-05 10:51:11 +01006567
Christopher Faulet9b79a102019-07-15 11:22:56 +02006568 /* <buf> is empty and the message is small enough, swap the
6569 * buffers. */
6570 if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01006571 htx_to_buf(buf_htx, buf);
6572 htx_to_buf(h2s_htx, &h2s->rxbuf);
Christopher Faulet9b79a102019-07-15 11:22:56 +02006573 b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
6574 goto end;
Willy Tarreau86724e22018-12-01 23:19:43 +01006575 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02006576
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006577 htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);
Christopher Faulet9b79a102019-07-15 11:22:56 +02006578
6579 if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
6580 buf_htx->flags |= HTX_FL_PARSING_ERROR;
6581 if (htx_is_empty(buf_htx))
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006582 se_fl_set(h2s->sd, SE_FL_EOI);
Willy Tarreau86724e22018-12-01 23:19:43 +01006583 }
Christopher Faulet72722c02023-02-23 14:26:34 +01006584 else if (htx_is_empty(h2s_htx)) {
Christopher Faulet42432f32020-11-20 17:43:16 +01006585 buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
Christopher Faulet72722c02023-02-23 14:26:34 +01006586 }
6587
Christopher Faulet9b79a102019-07-15 11:22:56 +02006588 buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
6589 htx_to_buf(buf_htx, buf);
6590 htx_to_buf(h2s_htx, &h2s->rxbuf);
6591 ret -= h2s_htx->data;
6592
Christopher Faulet37070b22019-02-14 15:12:14 +01006593 end:
Olivier Houchard638b7992018-08-16 15:41:52 +02006594 if (b_data(&h2s->rxbuf))
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006595 se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
Olivier Houchard511efea2018-08-16 15:30:32 +02006596 else {
Christopher Faulet34f81d52023-05-04 16:41:37 +02006597 if (!(h2c->flags & H2_CF_IS_BACK) && (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_ES_RCVD))) {
Christopher Faulet4403cdf2023-05-04 15:49:12 +02006598 /* If request ES is reported to the upper layer, it means the
6599 * H2S now expects data from the opposite side.
6600 */
6601 se_expect_data(h2s->sd);
6602 }
6603
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006604 se_fl_clr(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
Christopher Faulet531dd052023-05-24 11:14:38 +02006605 h2s_propagate_term_flags(h2c, h2s);
Olivier Houchard638b7992018-08-16 15:41:52 +02006606 if (b_size(&h2s->rxbuf)) {
6607 b_free(&h2s->rxbuf);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +01006608 offer_buffers(NULL, 1);
Olivier Houchard638b7992018-08-16 15:41:52 +02006609 }
Olivier Houchard511efea2018-08-16 15:30:32 +02006610 }
6611
Willy Tarreau082f5592018-11-25 08:03:32 +01006612 if (ret && h2c->dsi == h2s->id) {
6613 /* demux is blocking on this stream's buffer */
6614 h2c->flags &= ~H2_CF_DEM_SFULL;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02006615 h2c_restart_reading(h2c, 1);
Willy Tarreau082f5592018-11-25 08:03:32 +01006616 }
Christopher Faulet37070b22019-02-14 15:12:14 +01006617
Willy Tarreau7838a792019-08-12 18:42:03 +02006618 TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard511efea2018-08-16 15:30:32 +02006619 return ret;
6620}
6621
Olivier Houchardd846c262018-10-19 17:24:29 +02006622
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006623/* Called from the upper layer, to send data from buffer <buf> for no more than
6624 * <count> bytes. Returns the number of bytes effectively sent. Some status
Willy Tarreau4596fe22022-05-17 19:07:51 +02006625 * flags may be updated on the stream connector.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006626 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006627static size_t h2_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
Willy Tarreau62f52692017-10-08 23:01:42 +02006628{
Willy Tarreau36c22322022-05-27 10:41:24 +02006629 struct h2s *h2s = __sc_mux_strm(sc);
Willy Tarreau1dc41e72018-06-14 13:21:28 +02006630 size_t total = 0;
Willy Tarreau5dd17352018-06-14 13:33:30 +02006631 size_t ret;
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006632 struct htx *htx;
6633 struct htx_blk *blk;
6634 enum htx_blk_type btype;
6635 uint32_t bsize;
6636 int32_t idx;
Willy Tarreau9e5ae1d2017-10-17 19:58:20 +02006637
Willy Tarreau7838a792019-08-12 18:42:03 +02006638 TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
6639
Olivier Houchardd360ac62019-03-22 17:37:16 +01006640 /* If we were not just woken because we wanted to send but couldn't,
6641 * and there's somebody else that is waiting to send, do nothing,
6642 * we will subscribe later and be put at the end of the list
6643 */
Willy Tarreaud9464162020-01-10 18:25:07 +01006644 if (!(h2s->flags & H2_SF_NOTIFIED) &&
Willy Tarreau7838a792019-08-12 18:42:03 +02006645 (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
Willy Tarreaude4a5382023-10-17 08:25:19 +02006646 if (LIST_INLIST(&h2s->list))
6647 TRACE_DEVEL("stream already waiting, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
6648 else {
6649 TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
6650 h2s->h2c->flags |= H2_CF_WAIT_INLIST;
6651 }
Olivier Houchardd360ac62019-03-22 17:37:16 +01006652 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006653 }
Willy Tarreaud9464162020-01-10 18:25:07 +01006654 h2s->flags &= ~H2_SF_NOTIFIED;
Olivier Houchard998410a2019-04-15 19:23:37 +02006655
Willy Tarreau7838a792019-08-12 18:42:03 +02006656 if (h2s->h2c->st0 < H2_CS_FRAME_H) {
6657 TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
Willy Tarreau6bf641a2018-10-08 09:43:03 +02006658 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006659 }
Willy Tarreau6bf641a2018-10-08 09:43:03 +02006660
Willy Tarreaucab22952019-10-31 15:48:18 +01006661 if (h2s->h2c->st0 >= H2_CS_ERROR) {
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006662 se_fl_set(h2s->sd, SE_FL_ERROR);
Willy Tarreaucab22952019-10-31 15:48:18 +01006663 TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
6664 return 0;
6665 }
6666
Christopher Faulet9b79a102019-07-15 11:22:56 +02006667 htx = htx_from_buf(buf);
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006668
Willy Tarreau0bad0432018-06-14 16:54:01 +02006669 if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
Willy Tarreauc4312d32017-11-07 12:01:53 +01006670 h2s->flags |= H2_SF_OUTGOING_DATA;
6671
Christopher Faulet2e47e3a2023-01-13 11:40:24 +01006672 if (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
Willy Tarreau48770452022-08-18 16:03:51 +02006673 h2s->flags |= H2_SF_MORE_HTX_DATA;
6674 else
6675 h2s->flags &= ~H2_SF_MORE_HTX_DATA;
6676
Willy Tarreau751f2d02018-10-05 09:35:00 +02006677 if (h2s->id == 0) {
6678 int32_t id = h2c_get_next_sid(h2s->h2c);
6679
6680 if (id < 0) {
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006681 se_fl_set(h2s->sd, SE_FL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02006682 TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Willy Tarreau751f2d02018-10-05 09:35:00 +02006683 return 0;
6684 }
6685
6686 eb32_delete(&h2s->by_id);
6687 h2s->by_id.key = h2s->id = id;
6688 h2s->h2c->max_id = id;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01006689 h2s->h2c->nb_reserved--;
Willy Tarreau751f2d02018-10-05 09:35:00 +02006690 eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
6691 }
6692
Christopher Faulet9b79a102019-07-15 11:22:56 +02006693 while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
6694 count && !htx_is_empty(htx)) {
6695 idx = htx_get_head(htx);
6696 blk = htx_get_blk(htx, idx);
6697 btype = htx_get_blk_type(blk);
6698 bsize = htx_get_blksz(blk);
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006699
Christopher Faulet9b79a102019-07-15 11:22:56 +02006700 switch (btype) {
Willy Tarreau80739692018-10-05 11:35:57 +02006701 case HTX_BLK_REQ_SL:
6702 /* start-line before headers */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01006703 ret = h2s_snd_bhdrs(h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02006704 if (ret > 0) {
6705 total += ret;
6706 count -= ret;
6707 if (ret < bsize)
6708 goto done;
6709 }
6710 break;
6711
Willy Tarreau115e83b2018-12-01 19:17:53 +01006712 case HTX_BLK_RES_SL:
6713 /* start-line before headers */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01006714 ret = h2s_snd_fhdrs(h2s, htx);
Willy Tarreau115e83b2018-12-01 19:17:53 +01006715 if (ret > 0) {
6716 total += ret;
6717 count -= ret;
6718 if (ret < bsize)
6719 goto done;
6720 }
6721 break;
6722
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006723 case HTX_BLK_DATA:
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006724 /* all these cause the emission of a DATA frame (possibly empty) */
Christopher Faulet991febd2020-12-02 15:17:31 +01006725 if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
6726 (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
6727 ret = h2s_skip_data(h2s, buf, count);
6728 else
6729 ret = h2s_make_data(h2s, buf, count);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006730 if (ret > 0) {
Willy Tarreau98de12a2018-12-12 07:03:00 +01006731 htx = htx_from_buf(buf);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006732 total += ret;
6733 count -= ret;
6734 if (ret < bsize)
6735 goto done;
6736 }
6737 break;
6738
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006739 case HTX_BLK_TLR:
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006740 case HTX_BLK_EOT:
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006741 /* This is the first trailers block, all the subsequent ones */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006742 ret = h2s_make_trailers(h2s, htx);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006743 if (ret > 0) {
6744 total += ret;
6745 count -= ret;
6746 if (ret < bsize)
6747 goto done;
6748 }
6749 break;
6750
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006751 default:
6752 htx_remove_blk(htx, blk);
6753 total += bsize;
6754 count -= bsize;
6755 break;
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006756 }
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006757 }
6758
Christopher Faulet9b79a102019-07-15 11:22:56 +02006759 done:
Willy Tarreau2b778482019-05-06 15:00:22 +02006760 if (h2s->st >= H2_SS_HLOC) {
Willy Tarreau00610962018-07-19 10:58:28 +02006761 /* trim any possibly pending data after we close (extra CR-LF,
6762 * unprocessed trailers, abnormal extra data, ...)
6763 */
Willy Tarreau0bad0432018-06-14 16:54:01 +02006764 total += count;
6765 count = 0;
Willy Tarreau00610962018-07-19 10:58:28 +02006766 }
6767
Willy Tarreauc6795ca2017-11-07 09:43:06 +01006768 /* RST are sent similarly to frame acks */
Willy Tarreau02492192017-12-07 15:59:29 +01006769 if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006770 TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02006771 se_fl_set_error(h2s->sd);
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +01006772 if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
Willy Tarreau00dd0782018-03-01 16:31:34 +01006773 h2s_close(h2s);
Willy Tarreauc6795ca2017-11-07 09:43:06 +01006774 }
6775
Christopher Faulet9b79a102019-07-15 11:22:56 +02006776 htx_to_buf(htx, buf);
Olivier Houchardd846c262018-10-19 17:24:29 +02006777
Olivier Houchard7505f942018-08-21 18:10:44 +02006778 if (total > 0) {
Tim Duesterhus12a08d82020-12-21 19:40:16 +01006779 if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006780 TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02006781 tasklet_wakeup(h2s->h2c->wait_event.tasklet);
Tim Duesterhus12a08d82020-12-21 19:40:16 +01006782 }
Olivier Houchardd846c262018-10-19 17:24:29 +02006783
Olivier Houchard7505f942018-08-21 18:10:44 +02006784 }
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006785 /* If we're waiting for flow control, and we got a shutr on the
6786 * connection, we will never be unlocked, so add an error on
Willy Tarreau4596fe22022-05-17 19:07:51 +02006787 * the stream connector.
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006788 */
Christopher Fauletff7925d2022-10-11 19:12:40 +02006789 if ((h2s->h2c->flags & H2_CF_RCVD_SHUT) &&
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006790 !b_data(&h2s->h2c->dbuf) &&
6791 (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006792 TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Willy Tarreau35c4dd02023-01-17 16:25:29 +01006793 se_fl_set_error(h2s->sd);
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006794 }
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006795
Willy Tarreau5723f292020-01-10 15:16:57 +01006796 if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
6797 !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006798 /* Ok we managed to send something, leave the send_list if we were still there */
Willy Tarreaude4a5382023-10-17 08:25:19 +02006799 h2_remove_from_list(h2s);
6800 TRACE_DEVEL("Removed from h2s list", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
Olivier Houchardd360ac62019-03-22 17:37:16 +01006801 }
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006802
Willy Tarreau7838a792019-08-12 18:42:03 +02006803 TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreau9e5ae1d2017-10-17 19:58:20 +02006804 return total;
Willy Tarreau62f52692017-10-08 23:01:42 +02006805}
6806
Willy Tarreau90bffa22022-09-01 19:06:44 +02006807/* appends some info about stream <h2s> to buffer <msg>, or does nothing if
Willy Tarreau7051f732022-09-02 15:22:12 +02006808 * <h2s> is NULL. Returns non-zero if the stream is considered suspicious. May
6809 * emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is not
6810 * NULL, otherwise a single line is used.
Willy Tarreau90bffa22022-09-01 19:06:44 +02006811 */
Willy Tarreau7051f732022-09-02 15:22:12 +02006812static int h2_dump_h2s_info(struct buffer *msg, const struct h2s *h2s, const char *pfx)
Willy Tarreau90bffa22022-09-01 19:06:44 +02006813{
6814 int ret = 0;
6815
6816 if (!h2s)
6817 return ret;
6818
Willy Tarreau7051f732022-09-02 15:22:12 +02006819 chunk_appendf(msg, " h2s.id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u",
Willy Tarreau90bffa22022-09-01 19:06:44 +02006820 h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
6821 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
Willy Tarreau7051f732022-09-02 15:22:12 +02006822 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf));
6823
6824 if (pfx)
6825 chunk_appendf(msg, "\n%s", pfx);
6826
6827 chunk_appendf(msg, " .sc=%p", h2s_sc(h2s));
Willy Tarreau90bffa22022-09-01 19:06:44 +02006828 if (h2s_sc(h2s))
6829 chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
6830 h2s_sc(h2s)->flags, h2s_sc(h2s)->app);
6831
Willy Tarreau7051f732022-09-02 15:22:12 +02006832 chunk_appendf(msg, " .sd=%p", h2s->sd);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006833 chunk_appendf(msg, "(.flg=0x%08x)", se_fl_get(h2s->sd));
6834
Willy Tarreau7051f732022-09-02 15:22:12 +02006835 if (pfx)
6836 chunk_appendf(msg, "\n%s", pfx);
6837
Willy Tarreau90bffa22022-09-01 19:06:44 +02006838 chunk_appendf(msg, " .subs=%p", h2s->subs);
6839 if (h2s->subs) {
6840 chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6841 chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
6842 h2s->subs->tasklet->calls,
6843 h2s->subs->tasklet->context);
6844 if (h2s->subs->tasklet->calls >= 1000000)
6845 ret = 1;
6846 resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
6847 chunk_appendf(msg, ")");
6848 }
6849 return ret;
6850}
6851
/* appends some info about connection <h2c> to buffer <msg>, or does nothing if
 * <h2c> is NULL. Returns non-zero if the connection is considered suspicious.
 * May emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is
 * not NULL, otherwise a single line is used.
 */
static int h2_dump_h2c_info(struct buffer *msg, struct h2c *h2c, const char *pfx)
{
	const struct buffer *hmbuf, *tmbuf;
	const struct h2s *h2s = NULL;
	struct eb32_node *node;
	int fctl_cnt = 0;   /* streams blocked on connection flow control */
	int send_cnt = 0;   /* streams waiting in the send list */
	int tree_cnt = 0;   /* total streams indexed by ID */
	int orph_cnt = 0;   /* orphaned streams (no stream connector attached) */
	int ret = 0;

	if (!h2c)
		return ret;

	/* count streams blocked on connection flow control */
	list_for_each_entry(h2s, &h2c->fctl_list, list)
		fctl_cnt++;

	/* count streams queued for sending */
	list_for_each_entry(h2s, &h2c->send_list, list)
		send_cnt++;

	/* walk the stream tree to count all streams and the orphaned ones */
	node = eb32_first(&h2c->streams_by_id);
	while (node) {
		h2s = container_of(node, struct h2s, by_id);
		tree_cnt++;
		if (!h2s_sc(h2s))
			orph_cnt++;
		node = eb32_next(node);
	}

	/* head and tail of the mux output buffer ring */
	hmbuf = br_head(h2c->mbuf);
	tmbuf = br_tail(h2c->mbuf);
	/* connection state, error code, last/max stream IDs, flags and counters */
	chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
		      " .nbst=%u .nbsc=%u",
		      h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
		      h2c->nb_streams, h2c->nb_sc);

	if (pfx)
		chunk_appendf(msg, "\n%s", pfx);

	/* list/tree counters, subscription events, demux stream id and rx buffer */
	chunk_appendf(msg, " .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
		      " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u",
		      fctl_cnt, send_cnt, tree_cnt, orph_cnt,
		      h2c->wait_event.events, h2c->dsi,
		      (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
		      (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf));

	if (pfx)
		chunk_appendf(msg, "\n%s", pfx);

	/* mux buffer ring indexes plus head and tail buffer details */
	chunk_appendf(msg, " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
		      br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
		      (unsigned int)b_data(hmbuf), b_orig(hmbuf),
		      (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
		      (unsigned int)b_data(tmbuf), b_orig(tmbuf),
		      (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));

	/* timeout task and its expiration relative to now */
	chunk_appendf(msg, " .task=%p", h2c->task);
	if (h2c->task) {
		chunk_appendf(msg, " .exp=%s",
			      h2c->task->expire ? tick_is_expired(h2c->task->expire, now_ms) ? "<PAST>" :
			      human_time(TICKS_TO_MS(h2c->task->expire - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
	}

	return ret;
}
6922
6923/* for debugging with CLI's "show fd" command */
6924static int h2_show_fd(struct buffer *msg, struct connection *conn)
6925{
6926 struct h2c *h2c = conn->ctx;
6927 const struct h2s *h2s;
6928 struct eb32_node *node;
6929 int ret = 0;
6930
6931 if (!h2c)
6932 return ret;
6933
Willy Tarreau7051f732022-09-02 15:22:12 +02006934 ret |= h2_dump_h2c_info(msg, h2c, NULL);
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006935
6936 node = eb32_last(&h2c->streams_by_id);
6937 if (node) {
6938 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006939 chunk_appendf(msg, " last_h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006940 ret |= h2_dump_h2s_info(msg, h2s, NULL);
Willy Tarreau987c0632018-12-18 10:32:05 +01006941 }
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006942
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006943 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006944}
Willy Tarreau62f52692017-10-08 23:01:42 +02006945
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006946/* for debugging with CLI's "show sess" command. May emit multiple lines, each
6947 * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
6948 * line is used. Each field starts with a space so it's safe to print it after
6949 * existing fields.
6950 */
6951static int h2_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
6952{
6953 struct h2s *h2s = sd->se;
6954 int ret = 0;
6955
6956 if (!h2s)
6957 return ret;
6958
6959 chunk_appendf(msg, " h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006960 ret |= h2_dump_h2s_info(msg, h2s, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006961 if (pfx)
6962 chunk_appendf(msg, "\n%s", pfx);
6963 chunk_appendf(msg, " h2c=%p", h2s->h2c);
Willy Tarreau7051f732022-09-02 15:22:12 +02006964 ret |= h2_dump_h2c_info(msg, h2s->h2c, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006965 return ret;
6966}
6967
/* Migrate the connection to the current thread.
 * Return 0 if successful, non-zero otherwise.
 * Expected to be called with the old thread lock held.
 */
static int h2_takeover(struct connection *conn, int orig_tid)
{
	struct h2c *h2c = conn->ctx;
	struct task *task;
	struct task *new_task;
	struct tasklet *new_tasklet;

	/* Pre-allocate tasks so that we don't have to roll back after the xprt
	 * has been migrated.
	 */
	new_task = task_new_here();
	new_tasklet = tasklet_new();
	if (!new_task || !new_tasklet)
		goto fail;

	/* grab the file descriptor from the original thread first */
	if (fd_takeover(conn->handle.fd, conn) != 0)
		goto fail;

	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
		/* We failed to takeover the xprt, even if the connection may
		 * still be valid, flag it as error'd, as we have already
		 * taken over the fd, and wake the tasklet, so that it will
		 * destroy it.
		 */
		conn->flags |= CO_FL_ERROR;
		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
		goto fail;
	}

	/* drop any subscription still registered on the old thread's xprt */
	if (h2c->wait_event.events)
		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
		                             h2c->wait_event.events, &h2c->wait_event);

	task = h2c->task;
	if (task) {
		/* only assign a task if there was already one, otherwise
		 * the preallocated new task will be released.
		 */
		task->context = NULL;
		h2c->task = NULL;
		/* make the NULL context visible before the old task is killed
		 * so a concurrent run of it does nothing.
		 */
		__ha_barrier_store();
		task_kill(task);

		h2c->task = new_task;
		new_task = NULL; /* ownership transferred, don't free it below */
		h2c->task->process = h2_timeout_task;
		h2c->task->context = h2c;
	}

	/* To let the tasklet know it should free itself, and do nothing else,
	 * set its context to NULL.
	 */
	h2c->wait_event.tasklet->context = NULL;
	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);

	/* install the fresh tasklet on the current thread and re-subscribe */
	h2c->wait_event.tasklet = new_tasklet;
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
		                   SUB_RETRY_RECV, &h2c->wait_event);

	/* release the preallocated task if it was not needed */
	if (new_task)
		__task_free(new_task);
	return 0;
 fail:
	if (new_task)
		__task_free(new_task);
	tasklet_free(new_tasklet);
	return -1;
}
7042
Willy Tarreau62f52692017-10-08 23:01:42 +02007043/*******************************************************/
7044/* functions below are dedicated to the config parsers */
7045/*******************************************************/
7046
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02007047/* config parser for global "tune.h2.header-table-size" */
7048static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01007049 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02007050 char **err)
7051{
7052 if (too_many_args(1, args, err, NULL))
7053 return -1;
7054
7055 h2_settings_header_table_size = atoi(args[1]);
7056 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
7057 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
7058 return -1;
7059 }
7060 return 0;
7061}
Willy Tarreau62f52692017-10-08 23:01:42 +02007062
Willy Tarreau9d7abda2023-04-17 15:04:34 +02007063/* config parser for global "tune.h2.{be.,fe.,}initial-window-size" */
Willy Tarreaue6baec02017-07-27 11:45:11 +02007064static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01007065 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02007066 char **err)
7067{
Willy Tarreau9d7abda2023-04-17 15:04:34 +02007068 int *vptr;
7069
Willy Tarreaue6baec02017-07-27 11:45:11 +02007070 if (too_many_args(1, args, err, NULL))
7071 return -1;
7072
Willy Tarreau9d7abda2023-04-17 15:04:34 +02007073 /* backend/frontend/default */
7074 vptr = (args[0][8] == 'b') ? &h2_be_settings_initial_window_size :
7075 (args[0][8] == 'f') ? &h2_fe_settings_initial_window_size :
7076 &h2_settings_initial_window_size;
7077
7078 *vptr = atoi(args[1]);
7079 if (*vptr < 0) {
Willy Tarreaue6baec02017-07-27 11:45:11 +02007080 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
7081 return -1;
7082 }
7083 return 0;
7084}
7085
Willy Tarreauca1027c2023-04-18 15:57:03 +02007086/* config parser for global "tune.h2.{be.,fe.,}max-concurrent-streams" */
Willy Tarreau5242ef82017-07-27 11:47:28 +02007087static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01007088 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02007089 char **err)
7090{
Willy Tarreauca1027c2023-04-18 15:57:03 +02007091 uint *vptr;
7092
Willy Tarreau5242ef82017-07-27 11:47:28 +02007093 if (too_many_args(1, args, err, NULL))
7094 return -1;
7095
Willy Tarreauca1027c2023-04-18 15:57:03 +02007096 /* backend/frontend/default */
7097 vptr = (args[0][8] == 'b') ? &h2_be_settings_max_concurrent_streams :
7098 (args[0][8] == 'f') ? &h2_fe_settings_max_concurrent_streams :
7099 &h2_settings_max_concurrent_streams;
7100
7101 *vptr = atoi(args[1]);
7102 if ((int)*vptr < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02007103 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
7104 return -1;
7105 }
7106 return 0;
7107}
7108
Willy Tarreau4869ed52023-10-13 18:11:59 +02007109/* config parser for global "tune.h2.fe.max-total-streams" */
7110static int h2_parse_max_total_streams(char **args, int section_type, struct proxy *curpx,
7111 const struct proxy *defpx, const char *file, int line,
7112 char **err)
7113{
7114 uint *vptr;
7115
7116 if (too_many_args(1, args, err, NULL))
7117 return -1;
7118
7119 /* frontend only for now */
7120 vptr = &h2_fe_max_total_streams;
7121
7122 *vptr = atoi(args[1]);
7123 if ((int)*vptr < 0) {
7124 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
7125 return -1;
7126 }
7127 return 0;
7128}
7129
Willy Tarreaua24b35c2019-02-21 13:24:36 +01007130/* config parser for global "tune.h2.max-frame-size" */
7131static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01007132 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01007133 char **err)
7134{
7135 if (too_many_args(1, args, err, NULL))
7136 return -1;
7137
7138 h2_settings_max_frame_size = atoi(args[1]);
7139 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
7140 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
7141 return -1;
7142 }
7143 return 0;
7144}
7145
Willy Tarreau62f52692017-10-08 23:01:42 +02007146
7147/****************************************/
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05007148/* MUX initialization and instantiation */
Willy Tarreau62f52692017-10-08 23:01:42 +02007149/***************************************/
7150
7151/* The mux operations */
Willy Tarreau680b2bd2018-11-27 07:30:17 +01007152static const struct mux_ops h2_ops = {
Willy Tarreau62f52692017-10-08 23:01:42 +02007153 .init = h2_init,
Olivier Houchard21df6cc2018-09-14 23:21:44 +02007154 .wake = h2_wake,
Willy Tarreau62f52692017-10-08 23:01:42 +02007155 .snd_buf = h2_snd_buf,
Olivier Houchard511efea2018-08-16 15:30:32 +02007156 .rcv_buf = h2_rcv_buf,
Olivier Houchard6ff20392018-07-17 18:46:31 +02007157 .subscribe = h2_subscribe,
Olivier Houchard83a0cd82018-09-28 17:57:58 +02007158 .unsubscribe = h2_unsubscribe,
Willy Tarreau62f52692017-10-08 23:01:42 +02007159 .attach = h2_attach,
Willy Tarreaud1373532022-05-27 11:00:59 +02007160 .get_first_sc = h2_get_first_sc,
Willy Tarreau62f52692017-10-08 23:01:42 +02007161 .detach = h2_detach,
Olivier Houchard060ed432018-11-06 16:32:42 +01007162 .destroy = h2_destroy,
Olivier Houchardd540b362018-11-05 18:37:53 +01007163 .avail_streams = h2_avail_streams,
Willy Tarreau00f18a32019-01-26 12:19:01 +01007164 .used_streams = h2_used_streams,
Willy Tarreau62f52692017-10-08 23:01:42 +02007165 .shutr = h2_shutr,
7166 .shutw = h2_shutw,
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02007167 .ctl = h2_ctl,
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02007168 .show_fd = h2_show_fd,
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02007169 .show_sd = h2_show_sd,
Olivier Houchardcd4159f2020-03-10 18:39:42 +01007170 .takeover = h2_takeover,
Christopher Fauleta97cced2022-04-12 18:04:10 +02007171 .flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
Willy Tarreau62f52692017-10-08 23:01:42 +02007172 .name = "H2",
7173};
7174
/* registration entry binding the "h2" ALPN/proto token to h2_ops for HTTP
 * mode, usable on both the frontend and backend sides.
 */
static struct mux_proto_list mux_proto_h2 =
	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
7179
/* config keyword parsers: global tuning keywords handled by this mux. The
 * "be."/"fe." variants override the common default on their respective side.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.h2.be.initial-window-size", h2_parse_initial_window_size },
	{ CFG_GLOBAL, "tune.h2.be.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.fe.initial-window-size", h2_parse_initial_window_size },
	{ CFG_GLOBAL, "tune.h2.fe.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.fe.max-total-streams", h2_parse_max_total_streams },
	{ CFG_GLOBAL, "tune.h2.header-table-size", h2_parse_header_table_size },
	{ CFG_GLOBAL, "tune.h2.initial-window-size", h2_parse_initial_window_size },
	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.max-frame-size", h2_parse_max_frame_size },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02007195
7196/* initialize internal structs after the config is parsed.
7197 * Returns zero on success, non-zero on error.
7198 */
7199static int init_h2()
7200{
7201 pool_head_hpack_tbl = create_pool("hpack_tbl",
7202 h2_settings_header_table_size,
7203 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01007204 if (!pool_head_hpack_tbl) {
7205 ha_alert("failed to allocate hpack_tbl memory pool\n");
7206 return (ERR_ALERT | ERR_FATAL);
7207 }
7208 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02007209}
7210
7211REGISTER_POST_CHECK(init_h2);