blob: 869779968ecdadcc78ac5f297638eafb63558f52 [file] [log] [blame]
Willy Tarreau62f52692017-10-08 23:01:42 +02001/*
2 * HTTP/2 mux-demux for connections
3 *
4 * Copyright 2017 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau63617db2021-10-06 18:23:40 +020014#include <import/ebmbtree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020015#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020017#include <haproxy/connection.h>
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +020018#include <haproxy/dynbuf.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020019#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020020#include <haproxy/hpack-dec.h>
21#include <haproxy/hpack-enc.h>
22#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020023#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020024#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020025#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020026#include <haproxy/log.h>
Willy Tarreau6c0fadf2022-09-12 19:07:51 +020027#include <haproxy/mux_h2-t.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020028#include <haproxy/net_helper.h>
Frédéric Lécaille9969adb2023-01-18 11:52:21 +010029#include <haproxy/proxy.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020030#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010031#include <haproxy/stats.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020032#include <haproxy/stconn.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020033#include <haproxy/stream.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020034#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020035
36
/* dummy streams returned for closed, error, refused and idle states, used in
 * place of a real h2s when a frame refers to a stream we cannot (or must not)
 * allocate; see their definitions further below
 */
static const struct h2s *h2_closed_stream;
static const struct h2s *h2_error_stream;
static const struct h2s *h2_refused_stream;
static const struct h2s *h2_idle_stream;

Willy Tarreau5ab6b572017-09-22 08:05:00 +020043
/**** H2 connection descriptor ****/
struct h2c {
	struct connection *conn;          /* underlying transport-level connection */

	enum h2_cs st0;                   /* mux state */
	enum h2_err errcode;              /* H2 err code (H2_ERR_*) */

	/* 16 bit hole here */
	uint32_t flags;                   /* connection flags: H2_CF_* */
	uint32_t streams_limit;           /* maximum number of concurrent streams the peer supports */
	int32_t max_id;                   /* highest ID known on this connection, <0 before preface */
	uint32_t rcvd_c;                  /* newly received data to ACK for the connection */
	uint32_t rcvd_s;                  /* newly received data to ACK for the current stream (dsi) or zero */

	/* states for the demux direction */
	struct hpack_dht *ddht;           /* demux dynamic header table */
	struct buffer dbuf;               /* demux buffer */

	int32_t dsi;                      /* demux stream ID (<0 = idle) */
	int32_t dfl;                      /* demux frame length (if dsi >= 0) */
	int8_t  dft;                      /* demux frame type (if dsi >= 0) */
	int8_t  dff;                      /* demux frame flags (if dsi >= 0) */
	uint8_t dpl;                      /* demux pad length (part of dfl), init to 0 */
	/* 8 bit hole here */
	int32_t last_sid;                 /* last processed stream ID for GOAWAY, <0 before preface */

	/* states for the mux direction */
	struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
	int32_t miw;                      /* mux initial window size for all new streams */
	int32_t mws;                      /* mux window size. Can be negative. */
	int32_t mfs;                      /* mux's max frame size */

	int timeout;                      /* idle timeout duration in ticks */
	int shut_timeout;                 /* idle timeout duration in ticks after GOAWAY was sent */
	int idle_start;                   /* date of the last time the connection went idle */
	/* 32-bit hole here */
	unsigned int nb_streams;          /* number of streams in the tree */
	unsigned int nb_sc;               /* number of attached stream connectors */
	unsigned int nb_reserved;         /* number of reserved streams */
	unsigned int stream_cnt;          /* total number of streams seen */
	struct proxy *proxy;              /* the proxy this connection was created for */
	struct task *task;                /* timeout management task */
	struct h2_counters *px_counters;  /* h2 counters attached to proxy */
	struct eb_root streams_by_id;     /* all active streams by their ID */
	struct list send_list;            /* list of blocked streams requesting to send */
	struct list fctl_list;            /* list of streams blocked by connection's fctl */
	struct list blocked_list;         /* list of streams blocked for other reasons (e.g. sfctl, dep) */
	struct buffer_wait buf_wait;      /* wait list for buffer allocations */
	struct wait_event wait_event;     /* To be used if we're waiting for I/Os */
};

94
Willy Tarreau2c249eb2019-05-13 18:06:17 +020095
/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
 * it is being processed in the internal HTTP representation (HTX).
 */
struct h2s {
	struct sedesc *sd;           /* stream endpoint descriptor shared with the app layer */
	struct session *sess;        /* session this stream was initiated from */
	struct h2c *h2c;             /* parent H2 connection */
	struct eb32_node by_id;      /* place in h2c's streams_by_id */
	int32_t id;                  /* stream ID */
	uint32_t flags;              /* H2_SF_* */
	int sws;                     /* stream window size, to be added to the mux's initial window size */
	enum h2_err errcode;         /* H2 err code (H2_ERR_*) */
	enum h2_ss st;               /* H2 stream state (H2_SS_*) */
	uint16_t status;             /* HTTP response status */
	unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
	struct buffer rxbuf;         /* receive buffer, always valid (buf_empty or real buffer) */
	struct wait_event *subs;     /* recv wait_event the stream connector associated is waiting on (via h2_subscribe) */
	struct list list;            /* To be used when adding in h2c->send_list or h2c->fctl_list */
	struct tasklet *shut_tl;     /* deferred shutdown tasklet, to retry to send an RST after we failed to,
	                              * in case there's no other subscription to do it */

	char upgrade_protocol[16];   /* rfc 8441: requested protocol on Extended CONNECT */
};

Willy Tarreau5ab6b572017-09-22 08:05:00 +0200119
/* descriptor for an h2 frame header, as parsed from the 9-byte wire format */
struct h2_fh {
	uint32_t len;  /* length, host order, 24 bits */
	uint32_t sid;  /* stream id, host order, 31 bits */
	uint8_t ft;    /* frame type */
	uint8_t ff;    /* frame flags */
};

127
/* trace source and events */
static void h2_trace(enum trace_level level, uint64_t mask, \
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this :
 *   strm - application layer
 *   h2s  - internal H2 stream
 *   h2c  - internal H2 connection
 *   conn - external connection
 *
 */
141static const struct trace_event h2_trace_events[] = {
142#define H2_EV_H2C_NEW (1ULL << 0)
Willy Tarreau87951942019-08-30 07:34:36 +0200143 { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200144#define H2_EV_H2C_RECV (1ULL << 1)
Willy Tarreau87951942019-08-30 07:34:36 +0200145 { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200146#define H2_EV_H2C_SEND (1ULL << 2)
Willy Tarreau87951942019-08-30 07:34:36 +0200147 { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200148#define H2_EV_H2C_FCTL (1ULL << 3)
Willy Tarreau87951942019-08-30 07:34:36 +0200149 { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200150#define H2_EV_H2C_BLK (1ULL << 4)
Willy Tarreau87951942019-08-30 07:34:36 +0200151 { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200152#define H2_EV_H2C_WAKE (1ULL << 5)
Willy Tarreau87951942019-08-30 07:34:36 +0200153 { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200154#define H2_EV_H2C_END (1ULL << 6)
Willy Tarreau87951942019-08-30 07:34:36 +0200155 { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200156#define H2_EV_H2C_ERR (1ULL << 7)
Willy Tarreau87951942019-08-30 07:34:36 +0200157 { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200158#define H2_EV_RX_FHDR (1ULL << 8)
Willy Tarreau87951942019-08-30 07:34:36 +0200159 { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200160#define H2_EV_RX_FRAME (1ULL << 9)
Willy Tarreau87951942019-08-30 07:34:36 +0200161 { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200162#define H2_EV_RX_EOI (1ULL << 10)
Willy Tarreau87951942019-08-30 07:34:36 +0200163 { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200164#define H2_EV_RX_PREFACE (1ULL << 11)
Willy Tarreau87951942019-08-30 07:34:36 +0200165 { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200166#define H2_EV_RX_DATA (1ULL << 12)
Willy Tarreau87951942019-08-30 07:34:36 +0200167 { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200168#define H2_EV_RX_HDR (1ULL << 13)
Willy Tarreau87951942019-08-30 07:34:36 +0200169 { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200170#define H2_EV_RX_PRIO (1ULL << 14)
Willy Tarreau87951942019-08-30 07:34:36 +0200171 { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200172#define H2_EV_RX_RST (1ULL << 15)
Willy Tarreau87951942019-08-30 07:34:36 +0200173 { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200174#define H2_EV_RX_SETTINGS (1ULL << 16)
Willy Tarreau87951942019-08-30 07:34:36 +0200175 { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200176#define H2_EV_RX_PUSH (1ULL << 17)
Willy Tarreau87951942019-08-30 07:34:36 +0200177 { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200178#define H2_EV_RX_PING (1ULL << 18)
Willy Tarreau87951942019-08-30 07:34:36 +0200179 { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200180#define H2_EV_RX_GOAWAY (1ULL << 19)
Willy Tarreau87951942019-08-30 07:34:36 +0200181 { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200182#define H2_EV_RX_WU (1ULL << 20)
Willy Tarreau87951942019-08-30 07:34:36 +0200183 { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200184#define H2_EV_RX_CONT (1ULL << 21)
Willy Tarreau87951942019-08-30 07:34:36 +0200185 { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200186#define H2_EV_TX_FRAME (1ULL << 22)
Willy Tarreau87951942019-08-30 07:34:36 +0200187 { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200188#define H2_EV_TX_EOI (1ULL << 23)
Willy Tarreau87951942019-08-30 07:34:36 +0200189 { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200190#define H2_EV_TX_PREFACE (1ULL << 24)
Willy Tarreau87951942019-08-30 07:34:36 +0200191 { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200192#define H2_EV_TX_DATA (1ULL << 25)
Willy Tarreau87951942019-08-30 07:34:36 +0200193 { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200194#define H2_EV_TX_HDR (1ULL << 26)
Willy Tarreau87951942019-08-30 07:34:36 +0200195 { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200196#define H2_EV_TX_PRIO (1ULL << 27)
Willy Tarreau87951942019-08-30 07:34:36 +0200197 { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200198#define H2_EV_TX_RST (1ULL << 28)
Willy Tarreau87951942019-08-30 07:34:36 +0200199 { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200200#define H2_EV_TX_SETTINGS (1ULL << 29)
Willy Tarreau87951942019-08-30 07:34:36 +0200201 { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200202#define H2_EV_TX_PUSH (1ULL << 30)
Willy Tarreau87951942019-08-30 07:34:36 +0200203 { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200204#define H2_EV_TX_PING (1ULL << 31)
Willy Tarreau87951942019-08-30 07:34:36 +0200205 { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200206#define H2_EV_TX_GOAWAY (1ULL << 32)
Willy Tarreau87951942019-08-30 07:34:36 +0200207 { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200208#define H2_EV_TX_WU (1ULL << 33)
Willy Tarreau87951942019-08-30 07:34:36 +0200209 { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200210#define H2_EV_TX_CONT (1ULL << 34)
Willy Tarreau87951942019-08-30 07:34:36 +0200211 { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200212#define H2_EV_H2S_NEW (1ULL << 35)
Willy Tarreau87951942019-08-30 07:34:36 +0200213 { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200214#define H2_EV_H2S_RECV (1ULL << 36)
Willy Tarreau87951942019-08-30 07:34:36 +0200215 { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200216#define H2_EV_H2S_SEND (1ULL << 37)
Willy Tarreau87951942019-08-30 07:34:36 +0200217 { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200218#define H2_EV_H2S_FCTL (1ULL << 38)
Willy Tarreau87951942019-08-30 07:34:36 +0200219 { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200220#define H2_EV_H2S_BLK (1ULL << 39)
Willy Tarreau87951942019-08-30 07:34:36 +0200221 { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200222#define H2_EV_H2S_WAKE (1ULL << 40)
Willy Tarreau87951942019-08-30 07:34:36 +0200223 { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200224#define H2_EV_H2S_END (1ULL << 41)
Willy Tarreau87951942019-08-30 07:34:36 +0200225 { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200226#define H2_EV_H2S_ERR (1ULL << 42)
Willy Tarreau87951942019-08-30 07:34:36 +0200227 { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200228#define H2_EV_STRM_NEW (1ULL << 43)
Willy Tarreau87951942019-08-30 07:34:36 +0200229 { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200230#define H2_EV_STRM_RECV (1ULL << 44)
Willy Tarreau87951942019-08-30 07:34:36 +0200231 { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200232#define H2_EV_STRM_SEND (1ULL << 45)
Willy Tarreau87951942019-08-30 07:34:36 +0200233 { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200234#define H2_EV_STRM_FULL (1ULL << 46)
Willy Tarreau87951942019-08-30 07:34:36 +0200235 { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200236#define H2_EV_STRM_WAKE (1ULL << 47)
Willy Tarreau87951942019-08-30 07:34:36 +0200237 { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200238#define H2_EV_STRM_SHUT (1ULL << 48)
Willy Tarreau87951942019-08-30 07:34:36 +0200239 { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200240#define H2_EV_STRM_END (1ULL << 49)
Willy Tarreau87951942019-08-30 07:34:36 +0200241 { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200242#define H2_EV_STRM_ERR (1ULL << 50)
Willy Tarreau87951942019-08-30 07:34:36 +0200243 { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200244#define H2_EV_PROTO_ERR (1ULL << 51)
Willy Tarreau87951942019-08-30 07:34:36 +0200245 { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200246 { }
247};
248
/* description of the arguments the trace framework may lock on; arg1 is the
 * connection (handled by the framework itself), arg2 is the H2 stream
 */
static const struct name_desc h2_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the connection */ },
	/* arg2 */ { .name="h2s", .desc="H2 stream" },
	/* arg3 */ { },
	/* arg4 */ { }
};

255
/* trace verbosity levels, from least to most detailed; the H2_VERB_* macros
 * are defined next to the entry they index
 */
static const struct name_desc h2_trace_decoding[] = {
#define H2_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define H2_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report only h2c/h2s state and flags, no real decoding" },
#define H2_VERB_SIMPLE   3
	{ .name="simple",   .desc="add request/response status line or frame info when available" },
#define H2_VERB_ADVANCED 4
	{ .name="advanced", .desc="add header fields or frame decoding when available" },
#define H2_VERB_COMPLETE 5
	{ .name="complete", .desc="add full data dump when available" },
	{ /* end */ }
};

269
/* registration of the "h2" trace source with the trace framework */
static struct trace_source trace_h2 __read_mostly = {
	.name = IST("h2"),
	.desc = "HTTP/2 multiplexer",
	.arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
	.default_cb = h2_trace,
	.known_events = h2_trace_events,
	.lockon_args = h2_trace_lockon_args,
	.decoding = h2_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_h2
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

283
/* h2 stats module: indexes into the h2_stats[] array and the stats fields
 * filled by h2_fill_stats()
 */
enum {
	H2_ST_HEADERS_RCVD,
	H2_ST_DATA_RCVD,
	H2_ST_SETTINGS_RCVD,
	H2_ST_RST_STREAM_RCVD,
	H2_ST_GOAWAY_RCVD,

	H2_ST_CONN_PROTO_ERR,
	H2_ST_STRM_PROTO_ERR,
	H2_ST_RST_STREAM_RESP,
	H2_ST_GOAWAY_RESP,

	H2_ST_OPEN_CONN,
	H2_ST_OPEN_STREAM,
	H2_ST_TOTAL_CONN,
	H2_ST_TOTAL_STREAM,

	H2_STATS_COUNT /* must be the last member of the enum */
};

304
/* names and descriptions of the exported h2 stats, indexed by the H2_ST_* enum */
static struct name_desc h2_stats[] = {
	[H2_ST_HEADERS_RCVD]    = { .name = "h2_headers_rcvd",
	                            .desc = "Total number of received HEADERS frames" },
	[H2_ST_DATA_RCVD]       = { .name = "h2_data_rcvd",
	                            .desc = "Total number of received DATA frames" },
	[H2_ST_SETTINGS_RCVD]   = { .name = "h2_settings_rcvd",
	                            .desc = "Total number of received SETTINGS frames" },
	[H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
	                            .desc = "Total number of received RST_STREAM frames" },
	[H2_ST_GOAWAY_RCVD]     = { .name = "h2_goaway_rcvd",
	                            .desc = "Total number of received GOAWAY frames" },

	[H2_ST_CONN_PROTO_ERR]  = { .name = "h2_detected_conn_protocol_errors",
	                            .desc = "Total number of connection protocol errors" },
	[H2_ST_STRM_PROTO_ERR]  = { .name = "h2_detected_strm_protocol_errors",
	                            .desc = "Total number of stream protocol errors" },
	[H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
	                            .desc = "Total number of RST_STREAM sent on detected error" },
	[H2_ST_GOAWAY_RESP]     = { .name = "h2_goaway_resp",
	                            .desc = "Total number of GOAWAY sent on detected error" },

	[H2_ST_OPEN_CONN]       = { .name = "h2_open_connections",
	                            .desc = "Count of currently open connections" },
	[H2_ST_OPEN_STREAM]     = { .name = "h2_backend_open_streams",
	                            .desc = "Count of currently open streams" },
	[H2_ST_TOTAL_CONN]      = { .name = "h2_total_connections",
	                            .desc = "Total number of connections" },
	[H2_ST_TOTAL_STREAM]    = { .name = "h2_backend_total_streams",
	                            .desc = "Total number of streams" },
};

335
/* private counters backing the h2 stats module; mirrors the H2_ST_* layout */
static struct h2_counters {
	long long headers_rcvd;    /* total number of HEADERS frame received */
	long long data_rcvd;       /* total number of DATA frame received */
	long long settings_rcvd;   /* total number of SETTINGS frame received */
	long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
	long long goaway_rcvd;     /* total number of GOAWAY frame received */

	long long conn_proto_err;  /* total number of connection-level protocol errors detected */
	long long strm_proto_err;  /* total number of stream-level protocol errors detected */
	long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
	long long goaway_resp;     /* total number of GOAWAY frame sent on error */

	long long open_conns;      /* count of currently open connections */
	long long open_streams;    /* count of currently open streams */
	long long total_conns;     /* total number of connections */
	long long total_streams;   /* total number of streams */
} h2_counters;

353
354static void h2_fill_stats(void *data, struct field *stats)
355{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100356 struct h2_counters *counters = data;
357
358 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
359 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
360 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
361 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
362 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100363
364 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
365 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
366 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
367 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100368
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100369 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
370 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
371 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
372 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100373}
374
/* registration of the h2 counters with the generic stats framework; counters
 * are per-proxy (frontend and backend capable) and may be cleared
 */
static struct stats_module h2_stats_module = {
	.name          = "h2",
	.fill_stats    = h2_fill_stats,
	.stats         = h2_stats,
	.stats_count   = H2_STATS_COUNT,
	.counters      = &h2_counters,
	.counters_size = sizeof(h2_counters),
	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
	.clearable     = 1,
};

INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);

387
/* the h2c connection pool */
DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));

/* the h2s stream pool */
DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));

393
/* The default connection window size is 65535, it may only be enlarged using
 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
 * we'll pretend we already received the difference between the two to send
 * an equivalent window update to enlarge it to 2G-1.
 */
#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)

/* maximum amount of data we're OK with re-aligning for buffer optimizations */
#define MAX_DATA_REALIGN 1024

/* a few settings from the global section, adjustable via configuration */
static int h2_settings_header_table_size = 4096; /* initial value */
static int h2_settings_initial_window_size = 65536; /* initial value */
static unsigned int h2_settings_max_concurrent_streams = 100;
static int h2_settings_max_frame_size = 0; /* unset */

Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200409
/* a dummy closed endpoint, shared by all the dummy streams below */
static const struct sedesc closed_ep = {
	.sc    = NULL,
	.flags = SE_FL_DETACHED,
};

415
/* a dummy closed stream */
static const struct h2s *h2_closed_stream = &(const struct h2s){
	.sd        = (struct sedesc *)&closed_ep,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_STREAM_CLOSED,
	.flags     = H2_SF_RST_RCVD,
	.id        = 0,
};

425
/* a dummy closed stream returning a PROTOCOL_ERROR error */
static const struct h2s *h2_error_stream = &(const struct h2s){
	.sd        = (struct sedesc *)&closed_ep,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_PROTOCOL_ERROR,
	.flags     = 0,
	.id        = 0,
};

435
/* a dummy closed stream returning a REFUSED_STREAM error */
static const struct h2s *h2_refused_stream = &(const struct h2s){
	.sd        = (struct sedesc *)&closed_ep,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_REFUSED_STREAM,
	.flags     = 0,
	.id        = 0,
};

445
Willy Tarreau2a856182017-05-16 15:20:39 +0200446/* and a dummy idle stream for use with any unannounced stream */
447static const struct h2s *h2_idle_stream = &(const struct h2s){
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200448 .sd = (struct sedesc *)&closed_ep,
Willy Tarreau2a856182017-05-16 15:20:39 +0200449 .h2c = NULL,
450 .st = H2_SS_IDLE,
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +0100451 .errcode = H2_ERR_STREAM_CLOSED,
Willy Tarreau2a856182017-05-16 15:20:39 +0200452 .id = 0,
453};
454
Willy Tarreau144f84a2021-03-02 16:09:26 +0100455struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
Olivier Houchardd4dd22d2018-08-17 18:39:46 +0200456static int h2_send(struct h2c *h2c);
457static int h2_recv(struct h2c *h2c);
Olivier Houchard7505f942018-08-21 18:10:44 +0200458static int h2_process(struct h2c *h2c);
Willy Tarreau691d5032021-01-20 14:55:01 +0100459/* h2_io_cb is exported to see it resolved in "show fd" */
Willy Tarreau144f84a2021-03-02 16:09:26 +0100460struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
Willy Tarreau0b559072018-02-26 15:22:17 +0100461static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
Willy Tarreau7cfbb812023-01-26 16:02:01 +0100462static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
Willy Tarreaua56a6de2018-02-26 15:59:07 +0100463static int h2_frt_transfer_data(struct h2s *h2s);
Willy Tarreau144f84a2021-03-02 16:09:26 +0100464struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
Willy Tarreau36c22322022-05-27 10:41:24 +0200465static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess);
Willy Tarreau8b2757c2018-12-19 17:36:48 +0100466static void h2s_alert(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200467
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200468/* returns the stconn associated to the H2 stream */
469static forceinline struct stconn *h2s_sc(const struct h2s *h2s)
470{
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200471 return h2s->sd->sc;
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200472}
473
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200474/* the H2 traces always expect that arg1, if non-null, is of type connection
475 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
476 * that arg3, if non-null, is either of type htx for tx headers, or of type
477 * buffer for everything else.
478 */
479static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
480 const struct ist where, const struct ist func,
481 const void *a1, const void *a2, const void *a3, const void *a4)
482{
483 const struct connection *conn = a1;
484 const struct h2c *h2c = conn ? conn->ctx : NULL;
485 const struct h2s *h2s = a2;
486 const struct buffer *buf = a3;
487 const struct htx *htx;
488 int pos;
489
490 if (!h2c) // nothing to add
491 return;
492
Willy Tarreau17104d42019-08-30 07:12:55 +0200493 if (src->verbosity > H2_VERB_CLEAN) {
Willy Tarreau73db4342019-09-25 07:28:44 +0200494 chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
495
Willy Tarreau8e6f7492021-06-16 17:47:24 +0200496 if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
497 conn_append_debug_info(&trace_buf, conn, " : ");
498
Willy Tarreauf3ce0412019-11-24 14:57:00 +0100499 if (h2c->errcode)
500 chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);
501
Willy Tarreau0f458712022-08-18 11:19:57 +0200502 if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
Willy Tarreau73db4342019-09-25 07:28:44 +0200503 (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
Willy Tarreau8520d872020-09-18 07:39:29 +0200504 chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
Willy Tarreau73db4342019-09-25 07:28:44 +0200505 }
506
507 if (h2s) {
508 if (h2s->id <= 0)
509 chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
Willy Tarreauf9f44992023-02-20 16:57:47 +0100510 if (h2s == h2_idle_stream)
511 chunk_appendf(&trace_buf, " h2s=IDL");
512 else if (h2s != h2_closed_stream)
513 chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
Willy Tarreauf3ce0412019-11-24 14:57:00 +0100514 if (h2s->id && h2s->errcode)
515 chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
Willy Tarreau73db4342019-09-25 07:28:44 +0200516 }
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200517 }
518
519 /* Let's dump decoded requests and responses right after parsing. They
520 * are traced at level USER with a few recognizable flags.
521 */
522 if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
523 mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
524 htx = htxbuf(buf); // recv req/res
525 else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
526 htx = a3; // send req/res
527 else
528 htx = NULL;
529
Willy Tarreau94f1dcf2019-08-30 07:11:30 +0200530 if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200531 const struct htx_blk *blk = htx_get_blk(htx, pos);
532 const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
533 enum htx_blk_type type = htx_get_blk_type(blk);
534
535 if (type == HTX_BLK_REQ_SL)
536 chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
Willy Tarreauc067a3a2019-08-30 07:28:24 +0200537 h2s ? h2s->id : h2c->dsi,
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200538 HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
539 HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
540 HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
541 else if (type == HTX_BLK_RES_SL)
542 chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
Willy Tarreauc067a3a2019-08-30 07:28:24 +0200543 h2s ? h2s->id : h2c->dsi,
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200544 HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
545 HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
546 HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
547 }
548}
549
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200550
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100551/* Detect a pending read0 for a H2 connection. It happens if a read0 was
552 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
553 * to parse pending data, confirming no more progress is possible because
554 * we're facing a truncated frame. The function returns 1 to report a read0
555 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200556 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100557static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200558{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100559 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200560}
561
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200562/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreau34395832022-03-18 14:59:54 +0100563 * connection may expire when it has no attached streams. As long as streams
564 * are attached, the application layer is responsible for timeout management,
565 * and each layer will detach when it doesn't want to wait anymore. When the
566 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200567 */
568static inline int h2c_may_expire(const struct h2c *h2c)
569{
Willy Tarreau36c22322022-05-27 10:41:24 +0200570 return !h2c->nb_sc;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200571}
572
Willy Tarreau15a47332022-03-18 15:57:34 +0100573/* update h2c timeout if needed */
574static void h2c_update_timeout(struct h2c *h2c)
575{
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200576 int is_idle_conn = 0;
577
Willy Tarreau15a47332022-03-18 15:57:34 +0100578 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
579
580 if (!h2c->task)
581 goto leave;
582
583 if (h2c_may_expire(h2c)) {
584 /* no more streams attached */
585 if (h2c->last_sid >= 0) {
586 /* GOAWAY sent, closing in progress */
587 h2c->task->expire = tick_add_ifset(now_ms, h2c->shut_timeout);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200588 is_idle_conn = 1;
Willy Tarreau15a47332022-03-18 15:57:34 +0100589 } else if (br_data(h2c->mbuf)) {
590 /* pending output data: always the regular data timeout */
591 h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
Willy Tarreau6ff91e22022-04-14 11:43:35 +0200592 } else if (!(h2c->flags & H2_CF_IS_BACK) && h2c->max_id > 0 && !b_data(&h2c->dbuf)) {
Willy Tarreau15a47332022-03-18 15:57:34 +0100593 /* idle after having seen one stream => keep-alive */
Willy Tarreau86b08a32022-04-13 17:40:28 +0200594 int to;
595
596 if (tick_isset(h2c->proxy->timeout.httpka))
597 to = h2c->proxy->timeout.httpka;
598 else
599 to = h2c->proxy->timeout.httpreq;
600
601 h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200602 is_idle_conn = 1;
Willy Tarreau15a47332022-03-18 15:57:34 +0100603 } else {
604 /* before first request, or started to deserialize a
605 * new req => http-request, but only set, not refresh.
606 */
607 int exp = (h2c->flags & H2_CF_IS_BACK) ? TICK_ETERNITY : h2c->proxy->timeout.httpreq;
608 h2c->task->expire = tick_add_ifset(h2c->idle_start, exp);
609 }
610 /* if a timeout above was not set, fall back to the default one */
611 if (!tick_isset(h2c->task->expire))
612 h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200613
614 if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
615 is_idle_conn && tick_isset(global.close_spread_end)) {
616 /* If a soft-stop is in progress and a close-spread-time
617 * is set, we want to spread idle connection closing roughly
618 * evenly across the defined window. This should only
619 * act on idle frontend connections.
620 * If the window end is already in the past, we wake the
621 * timeout task up immediately so that it can be closed.
622 */
623 int remaining_window = tick_remain(now_ms, global.close_spread_end);
624 if (remaining_window) {
625 /* We don't need to reset the expire if it would
626 * already happen before the close window end.
627 */
628 if (tick_isset(h2c->task->expire) &&
629 tick_is_le(global.close_spread_end, h2c->task->expire)) {
630 /* Set an expire value shorter than the current value
631 * because the close spread window end comes earlier.
632 */
633 h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
634 }
635 }
636 else {
637 /* We are past the soft close window end, wake the timeout
638 * task up immediately.
639 */
640 task_wakeup(h2c->task, TASK_WOKEN_TIMER);
641 }
642 }
643
Willy Tarreau15a47332022-03-18 15:57:34 +0100644 } else {
645 h2c->task->expire = TICK_ETERNITY;
646 }
647 task_queue(h2c->task);
648 leave:
649 TRACE_LEAVE(H2_EV_H2C_WAKE);
650}
651
Olivier Houchard7a977432019-03-21 15:47:13 +0100652static __inline int
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200653h2c_is_dead(const struct h2c *h2c)
Olivier Houchard7a977432019-03-21 15:47:13 +0100654{
655 if (eb_is_empty(&h2c->streams_by_id) && /* don't close if streams exist */
Christopher Fauletff7925d2022-10-11 19:12:40 +0200656 ((h2c->flags & H2_CF_ERROR) || /* errors close immediately */
Olivier Houchard7a977432019-03-21 15:47:13 +0100657 (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
658 (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
Willy Tarreau662fafc2019-05-26 09:43:07 +0200659 (!br_data(h2c->mbuf) && /* mux buffer empty, also process clean events below */
Christopher Fauletff7925d2022-10-11 19:12:40 +0200660 ((h2c->flags & H2_CF_RCVD_SHUT) ||
Olivier Houchard7a977432019-03-21 15:47:13 +0100661 (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
662 return 1;
663
664 return 0;
Olivier Houchard7a977432019-03-21 15:47:13 +0100665}
666
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200667/*****************************************************/
668/* functions below are for dynamic buffer management */
669/*****************************************************/
670
Willy Tarreau315d8072017-12-10 22:17:57 +0100671/* indicates whether or not the we may call the h2_recv() function to attempt
672 * to receive data into the buffer and/or demux pending data. The condition is
673 * a bit complex due to some API limits for now. The rules are the following :
674 * - if an error or a shutdown was detected on the connection and the buffer
675 * is empty, we must not attempt to receive
676 * - if the demux buf failed to be allocated, we must not try to receive and
677 * we know there is nothing pending
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100678 * - if no flag indicates a blocking condition, we may attempt to receive,
679 * regardless of whether the demux buffer is full or not, so that only
680 * de demux part decides whether or not to block. This is needed because
681 * the connection API indeed prevents us from re-enabling receipt that is
682 * already enabled in a polled state, so we must always immediately stop
683 * as soon as the demux can't proceed so as never to hit an end of read
684 * with data pending in the buffers.
Willy Tarreau315d8072017-12-10 22:17:57 +0100685 * - otherwise must may not attempt
686 */
687static inline int h2_recv_allowed(const struct h2c *h2c)
688{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200689 if (b_data(&h2c->dbuf) == 0 &&
Christopher Fauletff7925d2022-10-11 19:12:40 +0200690 ((h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR)) || h2c->st0 >= H2_CS_ERROR))
Willy Tarreau315d8072017-12-10 22:17:57 +0100691 return 0;
692
693 if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100694 !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
Willy Tarreau315d8072017-12-10 22:17:57 +0100695 return 1;
696
697 return 0;
698}
699
Willy Tarreau47b515a2018-12-21 16:09:41 +0100700/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200701static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100702{
703 if (!h2_recv_allowed(h2c))
704 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200705 if ((!consider_buffer || !b_data(&h2c->dbuf))
706 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100707 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200708 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100709}
710
711
Willy Tarreau4596fe22022-05-17 19:07:51 +0200712/* returns true if the front connection has too many stream connectors attached */
Willy Tarreau36c22322022-05-27 10:41:24 +0200713static inline int h2_frt_has_too_many_sc(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200714{
Willy Tarreau36c22322022-05-27 10:41:24 +0200715 return h2c->nb_sc > h2_settings_max_concurrent_streams;
Willy Tarreauf2101912018-07-19 10:11:38 +0200716}
717
Willy Tarreau44e973f2018-03-01 17:49:30 +0100718/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
719 * flags are used to figure what buffer was requested. It returns 1 if the
720 * allocation succeeds, in which case the connection is woken up, or 0 if it's
721 * impossible to wake up and we prefer to be woken up later.
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200722 */
Willy Tarreau44e973f2018-03-01 17:49:30 +0100723static int h2_buf_available(void *target)
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200724{
725 struct h2c *h2c = target;
Willy Tarreau0b559072018-02-26 15:22:17 +0100726 struct h2s *h2s;
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200727
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100728 if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200729 h2c->flags &= ~H2_CF_DEM_DALLOC;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200730 h2c_restart_reading(h2c, 1);
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200731 return 1;
732 }
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200733
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100734 if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100735 h2c->flags &= ~H2_CF_MUX_MALLOC;
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200736
737 if (h2c->flags & H2_CF_DEM_MROOM) {
738 h2c->flags &= ~H2_CF_DEM_MROOM;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200739 h2c_restart_reading(h2c, 1);
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200740 }
Willy Tarreau14398122017-09-22 14:26:04 +0200741 return 1;
742 }
Willy Tarreau0b559072018-02-26 15:22:17 +0100743
744 if ((h2c->flags & H2_CF_DEM_SALLOC) &&
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200745 (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100746 b_alloc(&h2s->rxbuf)) {
Willy Tarreau0b559072018-02-26 15:22:17 +0100747 h2c->flags &= ~H2_CF_DEM_SALLOC;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200748 h2c_restart_reading(h2c, 1);
Willy Tarreau0b559072018-02-26 15:22:17 +0100749 return 1;
750 }
751
Willy Tarreau14398122017-09-22 14:26:04 +0200752 return 0;
753}
754
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200755static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200756{
757 struct buffer *buf = NULL;
758
Willy Tarreau2b718102021-04-21 07:32:39 +0200759 if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100760 unlikely((buf = b_alloc(bptr)) == NULL)) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100761 h2c->buf_wait.target = h2c;
762 h2c->buf_wait.wakeup_cb = h2_buf_available;
Willy Tarreaub4e34762021-09-30 19:02:18 +0200763 LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
Willy Tarreau14398122017-09-22 14:26:04 +0200764 }
765 return buf;
766}
767
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200768static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200769{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200770 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100771 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100772 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200773 }
774}
775
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200776static inline void h2_release_mbuf(struct h2c *h2c)
777{
778 struct buffer *buf;
779 unsigned int count = 0;
780
781 while (b_size(buf = br_head_pick(h2c->mbuf))) {
782 b_free(buf);
783 count++;
784 }
785 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100786 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200787}
788
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100789/* returns the number of allocatable outgoing streams for the connection taking
790 * the last_sid and the reserved ones into account.
791 */
792static inline int h2_streams_left(const struct h2c *h2c)
793{
794 int ret;
795
796 /* consider the number of outgoing streams we're allowed to create before
797 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
798 * nb_reserved is the number of streams which don't yet have an ID.
799 */
800 ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
801 ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
802 if (ret < 0)
803 ret = 0;
804 return ret;
805}
806
Willy Tarreau00f18a32019-01-26 12:19:01 +0100807/* returns the number of streams in use on a connection to figure if it's
Willy Tarreau36c22322022-05-27 10:41:24 +0200808 * idle or not. We check nb_sc and not nb_streams as the caller will want
Willy Tarreau00f18a32019-01-26 12:19:01 +0100809 * to know if it was the last one after a detach().
810 */
811static int h2_used_streams(struct connection *conn)
812{
813 struct h2c *h2c = conn->ctx;
814
Willy Tarreau36c22322022-05-27 10:41:24 +0200815 return h2c->nb_sc;
Willy Tarreau00f18a32019-01-26 12:19:01 +0100816}
817
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100818/* returns the number of concurrent streams available on the connection */
Olivier Houchardd540b362018-11-05 18:37:53 +0100819static int h2_avail_streams(struct connection *conn)
820{
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100821 struct server *srv = objt_server(conn->target);
Willy Tarreau3d2ee552018-12-19 14:12:10 +0100822 struct h2c *h2c = conn->ctx;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100823 int ret1, ret2;
Olivier Houchardd540b362018-11-05 18:37:53 +0100824
Willy Tarreau6afec462019-01-28 06:40:19 +0100825 /* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
826 * streams on the connection.
827 */
828 if (h2c->last_sid >= 0)
829 return 0;
830
Willy Tarreauc61966f2019-10-31 15:10:03 +0100831 if (h2c->st0 >= H2_CS_ERROR)
832 return 0;
833
Willy Tarreau86949782019-01-31 10:42:05 +0100834 /* note: may be negative if a SETTINGS frame changes the limit */
835 ret1 = h2c->streams_limit - h2c->nb_streams;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100836
837 /* we must also consider the limit imposed by stream IDs */
838 ret2 = h2_streams_left(h2c);
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100839 ret1 = MIN(ret1, ret2);
Willy Tarreau86949782019-01-31 10:42:05 +0100840 if (ret1 > 0 && srv && srv->max_reuse >= 0) {
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100841 ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
842 ret1 = MIN(ret1, ret2);
843 }
844 return ret1;
Olivier Houchardd540b362018-11-05 18:37:53 +0100845}
846
Willy Tarreau11e8a8c2023-01-24 19:43:11 +0100847/* inconditionally produce a trace of the header. Please do not call this one
848 * and use h2_trace_header() instead which first checks if traces are enabled.
849 */
850void _h2_trace_header(const struct ist hn, const struct ist hv,
851 uint64_t mask, const struct ist trc_loc, const char *func,
852 const struct h2c *h2c, const struct h2s *h2s)
853{
854 struct ist n_ist, v_ist;
855 const char *c_str, *s_str;
856
857 chunk_reset(&trash);
858 c_str = chunk_newstr(&trash);
859 if (h2c) {
860 chunk_appendf(&trash, "h2c=%p(%c,%s) ",
861 h2c, (h2c->flags & H2_CF_IS_BACK) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
862 }
863
864 s_str = chunk_newstr(&trash);
865 if (h2s) {
866 if (h2s->id <= 0)
867 chunk_appendf(&trash, "dsi=%d ", h2s->h2c->dsi);
868 chunk_appendf(&trash, "h2s=%p(%d,%s) ", h2s, h2s->id, h2s_st_to_str(h2s->st));
869 }
870 else if (h2c)
871 chunk_appendf(&trash, "dsi=%d ", h2c->dsi);
872
873 n_ist = ist2(chunk_newstr(&trash), 0);
874 istscpy(&n_ist, hn, 256);
875 trash.data += n_ist.len;
876 if (n_ist.len != hn.len)
877 chunk_appendf(&trash, " (... +%ld)", (long)(hn.len - n_ist.len));
878
879 v_ist = ist2(chunk_newstr(&trash), 0);
880 istscpy(&v_ist, hv, 1024);
881 trash.data += v_ist.len;
882 if (v_ist.len != hv.len)
883 chunk_appendf(&trash, " (... +%ld)", (long)(hv.len - v_ist.len));
884
885 TRACE_PRINTF_LOC(TRACE_LEVEL_USER, mask, trc_loc, func,
Christopher Fauletc2545162023-01-30 08:26:09 +0100886 (h2c ? h2c->conn : 0), 0, 0, 0,
Willy Tarreau11e8a8c2023-01-24 19:43:11 +0100887 "%s%s%s %s: %s", c_str, s_str,
888 (mask & H2_EV_TX_HDR) ? "sndh" : "rcvh",
889 n_ist.ptr, v_ist.ptr);
890}
891
892/* produce a trace of the header after checking that tracing is enabled */
893static inline void h2_trace_header(const struct ist hn, const struct ist hv,
894 uint64_t mask, const struct ist trc_loc, const char *func,
895 const struct h2c *h2c, const struct h2s *h2s)
896{
897 if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
898 TRACE_ENABLED(TRACE_LEVEL_USER, mask, h2c ? h2c->conn : 0, h2s, 0, 0))
899 _h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
900}
901
902/* hpack-encode header name <hn> and value <hv>, possibly emitting a trace if
903 * currently enabled. This is done on behalf of function <func> at <trc_loc>
904 * passed as ist(TRC_LOC), h2c <h2c>, and h2s <h2s>, all of which may be NULL.
905 * The trace is only emitted if the header is emitted (in which case non-zero
906 * is returned). The trash is modified. In the traces, the header's name will
907 * be truncated to 256 chars and the header's value to 1024 chars.
908 */
909static inline int h2_encode_header(struct buffer *buf, const struct ist hn, const struct ist hv,
910 uint64_t mask, const struct ist trc_loc, const char *func,
911 const struct h2c *h2c, const struct h2s *h2s)
912{
913 int ret;
914
915 ret = hpack_encode_header(buf, hn, hv);
916 if (ret)
917 h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
918
919 return ret;
920}
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200921
Willy Tarreau62f52692017-10-08 23:01:42 +0200922/*****************************************************************/
923/* functions below are dedicated to the mux setup and management */
924/*****************************************************************/
925
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200926/* Initialize the mux once it's attached. For outgoing connections, the context
927 * is already initialized before installing the mux, so we detect incoming
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200928 * connections from the fact that the context is still NULL (even during mux
929 * upgrades). <input> is always used as Input buffer and may contain data. It is
930 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200931 */
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200932static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
933 struct buffer *input)
Willy Tarreau32218eb2017-09-22 08:07:25 +0200934{
935 struct h2c *h2c;
Willy Tarreauea392822017-10-31 10:02:25 +0100936 struct task *t = NULL;
Christopher Fauletf81ef032019-10-04 15:19:43 +0200937 void *conn_ctx = conn->ctx;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200938
Christopher Fauletf81ef032019-10-04 15:19:43 +0200939 TRACE_ENTER(H2_EV_H2C_NEW);
Willy Tarreau7838a792019-08-12 18:42:03 +0200940
Willy Tarreaubafbe012017-11-24 17:34:44 +0100941 h2c = pool_alloc(pool_head_h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +0200942 if (!h2c)
mildiscd2d7de2018-10-02 16:44:18 +0200943 goto fail_no_h2c;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200944
Christopher Faulete9b70722019-04-08 10:46:02 +0200945 if (conn_is_back(conn)) {
Willy Tarreau01b44822018-10-03 14:26:37 +0200946 h2c->flags = H2_CF_IS_BACK;
947 h2c->shut_timeout = h2c->timeout = prx->timeout.server;
948 if (tick_isset(prx->timeout.serverfin))
949 h2c->shut_timeout = prx->timeout.serverfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100950
951 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
952 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200953 } else {
954 h2c->flags = H2_CF_NONE;
955 h2c->shut_timeout = h2c->timeout = prx->timeout.client;
956 if (tick_isset(prx->timeout.clientfin))
957 h2c->shut_timeout = prx->timeout.clientfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100958
959 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
960 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200961 }
Willy Tarreau3f133572017-10-31 19:21:06 +0100962
Willy Tarreau0b37d652018-10-03 10:33:02 +0200963 h2c->proxy = prx;
Willy Tarreau33400292017-11-05 11:23:40 +0100964 h2c->task = NULL;
Willy Tarreau389ab0d2023-03-20 19:16:04 +0100965 h2c->wait_event.tasklet = NULL;
Willy Tarreau15a47332022-03-18 15:57:34 +0100966 h2c->idle_start = now_ms;
Willy Tarreau3f133572017-10-31 19:21:06 +0100967 if (tick_isset(h2c->timeout)) {
Willy Tarreaubeeabf52021-10-01 18:23:30 +0200968 t = task_new_here();
Willy Tarreau3f133572017-10-31 19:21:06 +0100969 if (!t)
970 goto fail;
971
972 h2c->task = t;
973 t->process = h2_timeout_task;
974 t->context = h2c;
975 t->expire = tick_add(now_ms, h2c->timeout);
976 }
Willy Tarreauea392822017-10-31 10:02:25 +0100977
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200978 h2c->wait_event.tasklet = tasklet_new();
979 if (!h2c->wait_event.tasklet)
Olivier Houchard910b2bc2018-07-17 18:49:38 +0200980 goto fail;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200981 h2c->wait_event.tasklet->process = h2_io_cb;
982 h2c->wait_event.tasklet->context = h2c;
Willy Tarreau4f6516d2018-12-19 13:59:17 +0100983 h2c->wait_event.events = 0;
Amaury Denoyelled3a88c12021-05-03 10:47:51 +0200984 if (!conn_is_back(conn)) {
985 /* Connection might already be in the stopping_list if subject
986 * to h1->h2 upgrade.
987 */
988 if (!LIST_INLIST(&conn->stopping_list)) {
989 LIST_APPEND(&mux_stopping_data[tid].list,
990 &conn->stopping_list);
991 }
992 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +0200993
Willy Tarreau2bdcc702020-05-19 11:31:11 +0200994 h2c->ddht = hpack_dht_alloc();
Willy Tarreau32218eb2017-09-22 08:07:25 +0200995 if (!h2c->ddht)
996 goto fail;
997
998 /* Initialise the context. */
999 h2c->st0 = H2_CS_PREFACE;
1000 h2c->conn = conn;
Willy Tarreau2e2083a2019-01-31 10:34:07 +01001001 h2c->streams_limit = h2_settings_max_concurrent_streams;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001002 h2c->max_id = -1;
1003 h2c->errcode = H2_ERR_NO_ERROR;
Willy Tarreau97aaa672018-12-23 09:49:04 +01001004 h2c->rcvd_c = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001005 h2c->rcvd_s = 0;
Willy Tarreau49745612017-12-03 18:56:02 +01001006 h2c->nb_streams = 0;
Willy Tarreau36c22322022-05-27 10:41:24 +02001007 h2c->nb_sc = 0;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001008 h2c->nb_reserved = 0;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001009 h2c->stream_cnt = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001010
Christopher Faulet51f73eb2019-04-08 11:22:47 +02001011 h2c->dbuf = *input;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001012 h2c->dsi = -1;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001013
Willy Tarreau32218eb2017-09-22 08:07:25 +02001014 h2c->last_sid = -1;
1015
Willy Tarreau51330962019-05-26 09:38:07 +02001016 br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
Willy Tarreau32218eb2017-09-22 08:07:25 +02001017 h2c->miw = 65535; /* mux initial window size */
1018 h2c->mws = 65535; /* mux window size */
1019 h2c->mfs = 16384; /* initial max frame size */
Willy Tarreau751f2d02018-10-05 09:35:00 +02001020 h2c->streams_by_id = EB_ROOT;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001021 LIST_INIT(&h2c->send_list);
1022 LIST_INIT(&h2c->fctl_list);
Willy Tarreau9edf6db2019-10-02 10:49:59 +02001023 LIST_INIT(&h2c->blocked_list);
Willy Tarreau90f366b2021-02-20 11:49:49 +01001024 LIST_INIT(&h2c->buf_wait.list);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001025
Christopher Fauletf81ef032019-10-04 15:19:43 +02001026 conn->ctx = h2c;
1027
Willy Tarreau8e6f7492021-06-16 17:47:24 +02001028 TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);
1029
Willy Tarreau3f133572017-10-31 19:21:06 +01001030 if (t)
1031 task_queue(t);
Willy Tarreauea392822017-10-31 10:02:25 +01001032
Willy Tarreau01b44822018-10-03 14:26:37 +02001033 if (h2c->flags & H2_CF_IS_BACK) {
1034 /* FIXME: this is temporary, for outgoing connections we need
1035 * to immediately allocate a stream until the code is modified
Willy Tarreau36c22322022-05-27 10:41:24 +02001036 * so that the caller calls ->attach(). For now the outgoing sc
Christopher Fauletf81ef032019-10-04 15:19:43 +02001037 * is stored as conn->ctx by the caller and saved in conn_ctx.
Willy Tarreau01b44822018-10-03 14:26:37 +02001038 */
1039 struct h2s *h2s;
1040
Christopher Fauletf81ef032019-10-04 15:19:43 +02001041 h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
Willy Tarreau01b44822018-10-03 14:26:37 +02001042 if (!h2s)
1043 goto fail_stream;
1044 }
1045
Frédéric Lécaille9969adb2023-01-18 11:52:21 +01001046 proxy_inc_fe_cum_sess_ver_ctr(sess->listener, prx, 2);
Willy Tarreau4781b152021-04-06 13:53:36 +02001047 HA_ATOMIC_INC(&h2c->px_counters->open_conns);
1048 HA_ATOMIC_INC(&h2c->px_counters->total_conns);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001049
Willy Tarreau0f383582018-10-03 14:22:21 +02001050 /* prepare to read something */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02001051 h2c_restart_reading(h2c, 1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001052 TRACE_LEAVE(H2_EV_H2C_NEW, conn);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001053 return 0;
Willy Tarreau01b44822018-10-03 14:26:37 +02001054 fail_stream:
1055 hpack_dht_free(h2c->ddht);
mildiscd2d7de2018-10-02 16:44:18 +02001056 fail:
Willy Tarreauf6562792019-05-07 19:05:35 +02001057 task_destroy(t);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001058 if (h2c->wait_event.tasklet)
1059 tasklet_free(h2c->wait_event.tasklet);
Willy Tarreaubafbe012017-11-24 17:34:44 +01001060 pool_free(pool_head_h2c, h2c);
mildiscd2d7de2018-10-02 16:44:18 +02001061 fail_no_h2c:
Willy Tarreau3b990fe2022-01-12 17:24:26 +01001062 if (!conn_is_back(conn))
1063 LIST_DEL_INIT(&conn->stopping_list);
Christopher Fauletf81ef032019-10-04 15:19:43 +02001064 conn->ctx = conn_ctx; /* restore saved ctx */
1065 TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001066 return -1;
1067}
1068
Willy Tarreau751f2d02018-10-05 09:35:00 +02001069/* returns the next allocatable outgoing stream ID for the H2 connection, or
1070 * -1 if no more is allocatable.
1071 */
1072static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
1073{
1074 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +01001075
1076 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +02001077 id = -1;
1078 return id;
1079}
1080
Willy Tarreau2373acc2017-10-12 17:35:14 +02001081/* returns the stream associated with id <id> or NULL if not found */
1082static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1083{
1084 struct eb32_node *node;
1085
Willy Tarreau751f2d02018-10-05 09:35:00 +02001086 if (id == 0)
1087 return (struct h2s *)h2_closed_stream;
1088
Willy Tarreau2a856182017-05-16 15:20:39 +02001089 if (id > h2c->max_id)
1090 return (struct h2s *)h2_idle_stream;
1091
Willy Tarreau2373acc2017-10-12 17:35:14 +02001092 node = eb32_lookup(&h2c->streams_by_id, id);
1093 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001094 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001095
1096 return container_of(node, struct h2s, by_id);
1097}
1098
/* release function. This one should be called to free all resources allocated
 * to the mux. It releases the mux context first, then shuts down and frees the
 * underlying connection (if still present). The ordering below is significant
 * and must not be changed lightly.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_END);

	/* release the HPACK decoding dynamic table */
	hpack_dht_free(h2c->ddht);

	/* stop waiting for a buffer allocation if we were queued for one */
	if (LIST_INLIST(&h2c->buf_wait.list))
		LIST_DEL_INIT(&h2c->buf_wait.list);

	/* release demux buffer and all mux buffers */
	h2_release_buf(h2c, &h2c->dbuf);
	h2_release_mbuf(h2c);

	if (h2c->task) {
		/* detach the timeout task from us and wake it so it can
		 * notice its NULL context and release itself — presumably
		 * handled in the task's own callback; verify there.
		 */
		h2c->task->context = NULL;
		task_wakeup(h2c->task, TASK_WOKEN_OTHER);
		h2c->task = NULL;
	}
	if (h2c->wait_event.tasklet)
		tasklet_free(h2c->wait_event.tasklet);
	/* drop any pending transport-layer subscription before freeing h2c */
	if (conn && h2c->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
					&h2c->wait_event);

	HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

	/* from here on, h2c must not be dereferenced anymore */
	pool_free(pool_head_h2c, h2c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1160
1161
Willy Tarreau71681172017-10-23 14:39:06 +02001162/******************************************************/
1163/* functions below are for the H2 protocol processing */
1164/******************************************************/
1165
1166/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001167static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001168{
1169 return h2s ? h2s->id : 0;
1170}
1171
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001172/* returns the sum of the stream's own window size and the mux's initial
1173 * window, which together form the stream's effective window size.
1174 */
1175static inline int h2s_mws(const struct h2s *h2s)
1176{
1177 return h2s->sws + h2s->h2c->miw;
1178}
1179
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001180/* marks an error on the connection. Before settings are sent, we must not send
1181 * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
1182 * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
1183 * even try.
1184 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001185static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
Willy Tarreau741d6df2017-10-17 08:00:59 +02001186{
Willy Tarreau022e5e52020-09-10 09:33:15 +02001187 TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
Willy Tarreau741d6df2017-10-17 08:00:59 +02001188 h2c->errcode = err;
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001189 if (h2c->st0 < H2_CS_SETTINGS1)
1190 h2c->flags |= H2_CF_GOAWAY_FAILED;
Willy Tarreau741d6df2017-10-17 08:00:59 +02001191 h2c->st0 = H2_CS_ERROR;
1192}
1193
Willy Tarreau175cebb2019-01-24 10:02:24 +01001194/* marks an error on the stream. It may also update an already closed stream
1195 * (e.g. to report an error after an RST was received).
1196 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001197static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
Willy Tarreau2e43f082017-10-17 08:03:59 +02001198{
Willy Tarreau175cebb2019-01-24 10:02:24 +01001199 if (h2s->id && h2s->st != H2_SS_ERROR) {
Willy Tarreau022e5e52020-09-10 09:33:15 +02001200 TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
Willy Tarreau2e43f082017-10-17 08:03:59 +02001201 h2s->errcode = err;
Willy Tarreau175cebb2019-01-24 10:02:24 +01001202 if (h2s->st < H2_SS_ERROR)
1203 h2s->st = H2_SS_ERROR;
Willy Tarreau95acc8b2022-05-27 16:14:10 +02001204 se_fl_set_error(h2s->sd);
Willy Tarreau2e43f082017-10-17 08:03:59 +02001205 }
1206}
1207
Willy Tarreau7e094452018-12-19 18:08:52 +01001208/* attempt to notify the data layer of recv availability */
1209static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1210{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001211 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001212 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001213 tasklet_wakeup(h2s->subs->tasklet);
1214 h2s->subs->events &= ~SUB_RETRY_RECV;
1215 if (!h2s->subs->events)
1216 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001217 }
1218}
1219
1220/* attempt to notify the data layer of send availability */
1221static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1222{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001223 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001224 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001225 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001226 tasklet_wakeup(h2s->subs->tasklet);
1227 h2s->subs->events &= ~SUB_RETRY_SEND;
1228 if (!h2s->subs->events)
1229 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001230 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001231 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1232 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1233 tasklet_wakeup(h2s->shut_tl);
1234 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001235}
1236
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001237/* alerts the data layer, trying to wake it up by all means, following
1238 * this sequence :
1239 * - if the h2s' data layer is subscribed to recv, then it's woken up for recv
1240 * - if its subscribed to send, then it's woken up for send
1241 * - if it was subscribed to neither, its ->wake() callback is called
1242 * It is safe to call this function with a closed stream which doesn't have a
Willy Tarreau4596fe22022-05-17 19:07:51 +02001243 * stream connector anymore.
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001244 */
1245static void __maybe_unused h2s_alert(struct h2s *h2s)
1246{
Willy Tarreau7838a792019-08-12 18:42:03 +02001247 TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
1248
Willy Tarreauf96508a2020-01-10 11:12:48 +01001249 if (h2s->subs ||
Willy Tarreau5723f292020-01-10 15:16:57 +01001250 (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001251 h2s_notify_recv(h2s);
1252 h2s_notify_send(h2s);
1253 }
Willy Tarreau2f2318d2022-05-18 10:17:16 +02001254 else if (h2s_sc(h2s) && h2s_sc(h2s)->app_ops->wake != NULL) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001255 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau2f2318d2022-05-18 10:17:16 +02001256 h2s_sc(h2s)->app_ops->wake(h2s_sc(h2s));
Willy Tarreau7838a792019-08-12 18:42:03 +02001257 }
1258
1259 TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001260}
1261
Willy Tarreaue4820742017-07-27 13:37:23 +02001262/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001263static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001264{
1265 uint8_t *out = frame;
1266
1267 *out = len >> 16;
1268 write_n16(out + 1, len);
1269}
1270
Willy Tarreau54c15062017-10-10 17:10:03 +02001271/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
1272 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
1273 * the caller's responsibility to verify that there are at least <bytes> bytes
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001274 * available in the buffer's input prior to calling this function. The buffer
1275 * is assumed not to hold any output data.
Willy Tarreau54c15062017-10-10 17:10:03 +02001276 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001277static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
Willy Tarreau54c15062017-10-10 17:10:03 +02001278 const struct buffer *b, int o)
1279{
Willy Tarreau591d4452018-06-15 17:21:00 +02001280 readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001281}
1282
Willy Tarreau1f094672017-11-20 21:27:45 +01001283static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001284{
Willy Tarreau591d4452018-06-15 17:21:00 +02001285 return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001286}
1287
Willy Tarreau1f094672017-11-20 21:27:45 +01001288static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001289{
Willy Tarreau591d4452018-06-15 17:21:00 +02001290 return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001291}
1292
Willy Tarreau1f094672017-11-20 21:27:45 +01001293static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001294{
Willy Tarreau591d4452018-06-15 17:21:00 +02001295 return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001296}
1297
1298
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001299/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
1300 * The algorithm is not obvious. It turns out that H2 headers are neither
1301 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
1302 * may wrap so each byte read must be checked. The header is formed like this :
Willy Tarreau715d5312017-07-11 15:20:24 +02001303 *
1304 * b0 b1 b2 b3 b4 b5..b8
1305 * +----------+---------+--------+----+----+----------------------+
1306 * |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
1307 * +----------+---------+--------+----+----+----------------------+
1308 *
1309 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
1310 * we get the sid properly aligned and ordered, and 16 bits of len properly
1311 * ordered as well. The type and flags can be extracted using bit shifts from
1312 * the word, and only one extra read is needed to fetch len[16:23].
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001313 * Returns zero if some bytes are missing, otherwise non-zero on success. The
1314 * buffer is assumed not to contain any output data.
Willy Tarreau715d5312017-07-11 15:20:24 +02001315 */
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001316static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001317{
1318 uint64_t w;
1319
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001320 if (b_data(b) < o + 9)
Willy Tarreau715d5312017-07-11 15:20:24 +02001321 return 0;
1322
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001323 w = h2_get_n64(b, o + 1);
1324 h->len = *(uint8_t*)b_peek(b, o) << 16;
Willy Tarreau715d5312017-07-11 15:20:24 +02001325 h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
1326 h->ff = w >> 32;
1327 h->ft = w >> 40;
1328 h->len += w >> 48;
1329 return 1;
1330}
1331
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	/* an H2 frame header is always exactly 9 bytes (RFC7540#4.1) */
	b_del(b, 9);
}
1339
1340/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001341static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001342{
1343 int ret;
1344
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001345 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001346 if (ret > 0)
1347 h2_skip_frame_hdr(b);
1348 return ret;
1349}
1350
Willy Tarreaucb985a42019-10-07 16:56:34 +02001351
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	size_t remain = b->data - 9;                 /* payload bytes (frame minus its 9-byte header) */
	int extra_frames = (remain - 1) / mfs;       /* number of CONTINUATION frames to insert */
	size_t fsize;
	char *fptr;
	int frame;

	/* frame already fits within <mfs>: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	/* walk backwards: the last fragment takes the "odd" remainder so
	 * that all preceding fragments are exactly <mfs> bytes.
	 */
	for (frame = extra_frames; frame; frame--) {
		fsize = ((remain - 1) % mfs) + 1;    /* 1..mfs bytes for this fragment */
		remain -= fsize;

		/* move data: shift this fragment's payload right to open a
		 * 9-byte hole for its new CONTINUATION header.
		 */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; only the last fragment carries the
		 * END_HEADERS flag, and the stream ID is copied from the
		 * initial frame's header.
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial frame now holds only <remain> bytes and is continued */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1397
1398
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained. Note that
 * it does explicitly support being called with a partially initialized h2s
 * (e.g. sd==NULL).
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* streams without an assigned ID are accounted as reserved */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		if (h2s->sd && h2s_sc(h2s)) {
			/* notify the reader unless EOS was already reported
			 * or data remain pending in rxbuf — presumably the
			 * reader is woken later when consuming it; confirm
			 * against h2s_notify_recv() callers.
			 */
			if (!se_fl_test(h2s->sd, SE_FL_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	h2s->st = H2_SS_CLOSED;
}
1422
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	/* keep the conn pointer for the final trace, h2s is freed below */
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	/* ensure stream accounting is updated before unlinking */
	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		/* a buffer was released, let a waiter know one is available */
		offer_buffers(NULL, 1);
	}

	/* neutralize any remaining subscription events */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	LIST_DEL_INIT(&h2s->list);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	/* the descriptor must be orphaned (no stconn attached) at this point */
	BUG_ON(h2s->sd && !se_fl_test(h2s->sd, SE_FL_ORPHAN));
	sedesc_free(h2s->sd);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1458
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function. An <id> of 0 creates a reserved stream
 * whose ID will be assigned later (backend side).
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* dedicated tasklet handling deferred shutdowns for this stream */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->sd = NULL;
	h2s->sws = 0;      /* stream window delta, see h2s_mws() */
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	/* the tree key is the stream ID; id==0 marks a reserved stream */
	h2s->by_id.key = h2s->id = id;
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;
	h2c->stream_cnt++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1514
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it. <flags> is checked for H2_SF_EXT_CONNECT_RCVD (extended
 * CONNECT, RFC8441). Frontend side only.
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* enforce our advertised SETTINGS_MAX_CONCURRENT_STREAMS */
	if (h2c->nb_streams >= h2_settings_max_concurrent_streams) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	/* allocate and attach the stream-endpoint descriptor */
	h2s->sd = sedesc_new();
	if (!h2s->sd)
		goto out_close;
	h2s->sd->se   = h2s;
	h2s->sd->conn = h2c->conn;
	se_fl_set(h2s->sd, SE_FL_T_MUX | SE_FL_ORPHAN | SE_FL_NOT_FIRST);
	se_expect_no_data(h2s->sd);

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		se_fl_set(h2s->sd, SE_FL_WEBSOCKET);

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;

	/* create the stream connector; transfers ownership of <input> */
	if (!sc_new_from_endp(h2s->sd, sess, input))
		goto out_close;

	h2c->nb_sc++;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->tv_accept   = now;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_sc(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	/* log the failure at the session level since no stream exists yet */
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1588
/* allocates a new stream associated to stream connector <sc> on the h2c
 * connection and returns it, or NULL in case of memory allocation error or if
 * the highest possible stream ID was reached. Backend side only; the stream
 * ID itself is only assigned when the first frame is sent.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* respect the peer's advertised MAX_CONCURRENT_STREAMS */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* no allocatable ID left (exhausted space or GOAWAY received) */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	if (sc_attach_mux(sc, h2s, h2c->conn) < 0) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		h2s_destroy(h2s);
		h2s = NULL;
		goto out;
	}
	/* reuse the connector's descriptor and bind the originating session */
	h2s->sd = sc->sedesc;
	h2s->sess = sess;
	h2c->nb_sc++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
1633
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes. The frame is first assembled in a local buffer
 * so that it is copied into the mux buffer in a single operation (all or
 * nothing).
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int mfs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	/* start with a 9-byte frame header; the 3-byte length field is fixed
	 * up at the end once all settings have been appended.
	 */
	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	       "\x00\x00\x00"      /* length : 0 for now */
	       "\x04\x00"          /* type : 4 (settings), flags : 0 */
	       "\x00\x00\x00\x00", /* stream ID : 0 */
	       9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* only advertise settings that differ from the protocol defaults */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_initial_window_size != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, h2_settings_initial_window_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_max_concurrent_streams != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, h2_settings_max_concurrent_streams);
		chunk_memcat(&buf, str, 6);
	}

	/* clamp the advertised max frame size to the buffer size; 0 in the
	 * config means "use the buffer size".
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* fix up the payload length (total minus the 9-byte frame header) */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer available: report the blocking causes and retry later */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1734
/* Try to receive a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_frt_recv_preface(struct h2c *h2c)
{
	int ret1;
	int ret2;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);

	/* compare the start of the demux buffer against the expected HTTP/2
	 * connection preface (H2_CONN_PREFACE): > 0 full match, 0 not enough
	 * data yet, < 0 mismatch.
	 */
	ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));

	if (unlikely(ret1 <= 0)) {
		if (!ret1)
			h2c->flags |= H2_CF_DEM_SHORT_READ;
		if (ret1 < 0 || (h2c->flags & H2_CF_RCVD_SHUT)) {
			TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			/* don't count a protocol error when the peer sent
			 * nothing and the frontend is configured to ignore
			 * such probes (PR_O_IGNORE_PRB).
			 */
			if (b_data(&h2c->dbuf) ||
			    !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		ret2 = 0;
		goto out;
	}

	/* preface matched: answer with our SETTINGS, and only consume the
	 * preface bytes once the SETTINGS frame could be emitted.
	 */
	ret2 = h2c_send_settings(h2c);
	if (ret2 > 0)
		b_del(&h2c->dbuf, ret1);
 out:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
	return ret2;
}
1769
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer available: record the blocking causes and retry later */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	/* an empty mux buffer means nothing was emitted yet on this
	 * connection, hence the preface was not sent either.
	 */
	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* buffer full: try to append a new one to the ring */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	/* preface in place: follow up with our SETTINGS frame */
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1812
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case, nor will it send one if settings were not sent (e.g. still waiting for
 * a preface) so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	/* nothing to do if a previous attempt definitely failed, or if the
	 * settings exchange has not progressed far enough yet.
	 */
	if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
		ret = 1; // claim that it worked
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	/* payload: last stream ID, then the error code */
	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: record the blocking cause on the requester */
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1894
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be updated to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* already closed (or no stream): pretend success, nothing to emit */
	if (!h2s || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the stream until more room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* the RST is accounted for (sent or deliberately skipped): mark it
	 * and definitely close the stream.
	 */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
1961
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* payload: the demuxed stream ID, then the error code */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the demuxer until more room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* dummy streams have id 0 and must not be closed nor flagged */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2029
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	/* these states imply the ES was already sent on this stream */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the stream until more room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* buffer full: try to append a new one to the ring */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
2080
Willy Tarreau4596fe22022-05-17 19:07:51 +02002081/* wake a specific stream and assign its stream connector some SE_FL_* flags
2082 * among SE_FL_ERR_PENDING and SE_FL_ERROR if needed. The stream's state
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002083 * is automatically updated accordingly. If the stream is orphaned, it is
2084 * destroyed.
Christopher Fauletf02ca002019-03-07 16:21:34 +01002085 */
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002086static void h2s_wake_one_stream(struct h2s *h2s)
Christopher Fauletf02ca002019-03-07 16:21:34 +01002087{
Willy Tarreau7838a792019-08-12 18:42:03 +02002088 struct h2c *h2c = h2s->h2c;
2089
2090 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);
2091
Willy Tarreau7be4ee02022-05-18 07:31:41 +02002092 if (!h2s_sc(h2s)) {
Christopher Fauletf02ca002019-03-07 16:21:34 +01002093 /* this stream was already orphaned */
2094 h2s_destroy(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002095 TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002096 return;
2097 }
2098
Christopher Fauletaade4ed2020-10-08 15:38:41 +02002099 if (h2c_read0_pending(h2s->h2c)) {
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002100 if (h2s->st == H2_SS_OPEN)
2101 h2s->st = H2_SS_HREM;
2102 else if (h2s->st == H2_SS_HLOC)
2103 h2s_close(h2s);
2104 }
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002105
Christopher Fauletff7925d2022-10-11 19:12:40 +02002106 if (h2s->h2c->st0 >= H2_CS_ERROR || (h2s->h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) ||
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002107 (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
Christopher Fauletff7925d2022-10-11 19:12:40 +02002108 se_fl_set_error(h2s->sd);
Willy Tarreau23482912019-05-07 15:23:14 +02002109
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002110 if (h2s->st < H2_SS_ERROR)
2111 h2s->st = H2_SS_ERROR;
2112 }
Christopher Fauletf02ca002019-03-07 16:21:34 +01002113
2114 h2s_alert(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002115 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002116}
2117
2118/* wake the streams attached to the connection, whose id is greater than <last>
2119 * or unassigned.
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002120 */
Willy Tarreau23482912019-05-07 15:23:14 +02002121static void h2_wake_some_streams(struct h2c *h2c, int last)
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002122{
2123 struct eb32_node *node;
2124 struct h2s *h2s;
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002125
Willy Tarreau7838a792019-08-12 18:42:03 +02002126 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
2127
Christopher Fauletf02ca002019-03-07 16:21:34 +01002128 /* Wake all streams with ID > last */
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002129 node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
2130 while (node) {
2131 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002132 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002133 h2s_wake_one_stream(h2s);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002134 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01002135
Christopher Fauletf02ca002019-03-07 16:21:34 +01002136 /* Wake all streams with unassigned ID (ID == 0) */
2137 node = eb32_lookup(&h2c->streams_by_id, 0);
2138 while (node) {
2139 h2s = container_of(node, struct h2s, by_id);
2140 if (h2s->id > 0)
2141 break;
2142 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002143 h2s_wake_one_stream(h2s);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002144 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002145
2146 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002147}
2148
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002149/* Wake up all blocked streams whose window size has become positive after the
2150 * mux's initial window was adjusted. This should be done after having processed
2151 * SETTINGS frames which have updated the mux's initial window size.
Willy Tarreau3421aba2017-07-27 15:41:03 +02002152 */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002153static void h2c_unblock_sfctl(struct h2c *h2c)
Willy Tarreau3421aba2017-07-27 15:41:03 +02002154{
2155 struct h2s *h2s;
2156 struct eb32_node *node;
2157
Willy Tarreau7838a792019-08-12 18:42:03 +02002158 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
2159
Willy Tarreau3421aba2017-07-27 15:41:03 +02002160 node = eb32_first(&h2c->streams_by_id);
2161 while (node) {
2162 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002163 if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002164 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002165 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002166 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2167 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002168 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002169 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02002170 node = eb32_next(node);
2171 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002172
2173 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002174}
2175
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	/* a SETTINGS ACK must carry an empty payload (RFC7540#6.5) */
	if (h2c->dff & H2_F_SETTINGS_ACK) {
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: each setting is a 16-bit identifier followed by a
	 * 32-bit value, hence the 6-byte stride.
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t  arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* only note the update here; the new size is taken
			 * into account later.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2_settings_max_concurrent_streams)
					arg = h2_settings_max_concurrent_streams;
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* RFC8441: the peer supports extended CONNECT */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2266
/* try to send an ACK for a settings frame on the connection. Returns > 0 on
 * success or one of the h2_status values.
 */
static int h2c_ack_settings(struct h2c *h2c)
{
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	/* an ACK is an empty SETTINGS frame with the ACK flag set */
	memcpy(str,
	       "\x00\x00\x00"     /* length : 0 (no data)  */
	       "\x04" "\x01"      /* type   : 4, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer available: report the blocking causes and retry later */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
2308
Willy Tarreaucf68c782017-10-10 17:11:41 +02002309/* processes a PING frame and schedules an ACK if needed. The caller must pass
2310 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002311 * missing data. The caller must have already verified frame length
2312 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002313 */
2314static int h2c_handle_ping(struct h2c *h2c)
2315{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002316 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002317 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002318 h2c->st0 = H2_CS_FRAME_A;
2319 return 1;
2320}
2321
/* Try to send a window update for stream id <sid> and value <increment>.
 * Returns > 0 on success or zero on missing room or failure. It may return an
 * error in h2c. A <sid> of zero updates the connection-level window.
 */
static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
{
	struct buffer *res;
	char str[13];      /* 9-byte frame header + 4-byte window increment */
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);

	/* length: 4, type: 8, flags: none */
	memcpy(str, "\x00\x00\x04\x08\x00", 5);
	write_n32(str + 5, sid);
	write_n32(str + 9, increment);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the allocation need and
		 * block the demux on mux room.
		 */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the
			 * ring and retry, otherwise report the mux as full.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* negative return: the frame couldn't fit at all */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
	return ret;
}
2364
2365/* try to send pending window update for the connection. It's safe to call it
2366 * with no pending updates. Returns > 0 on success or zero on missing room or
2367 * failure. It may return an error in h2c.
2368 */
2369static int h2c_send_conn_wu(struct h2c *h2c)
2370{
2371 int ret = 1;
2372
Willy Tarreau7838a792019-08-12 18:42:03 +02002373 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2374
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002375 if (h2c->rcvd_c <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002376 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002377
Willy Tarreau97aaa672018-12-23 09:49:04 +01002378 if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
2379 /* increase the advertised connection window to 2G on
2380 * first update.
2381 */
2382 h2c->flags |= H2_CF_WINDOW_OPENED;
2383 h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
2384 }
2385
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002386 /* send WU for the connection */
2387 ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
2388 if (ret > 0)
2389 h2c->rcvd_c = 0;
2390
Willy Tarreau7838a792019-08-12 18:42:03 +02002391 out:
2392 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002393 return ret;
2394}
2395
2396/* try to send pending window update for the current dmux stream. It's safe to
2397 * call it with no pending updates. Returns > 0 on success or zero on missing
2398 * room or failure. It may return an error in h2c.
2399 */
2400static int h2c_send_strm_wu(struct h2c *h2c)
2401{
2402 int ret = 1;
2403
Willy Tarreau7838a792019-08-12 18:42:03 +02002404 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2405
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002406 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002407 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002408
2409 /* send WU for the stream */
2410 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2411 if (ret > 0)
2412 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002413 out:
2414 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002415 return ret;
2416}
2417
/* try to send an ACK for a ping frame on the connection. Returns > 0 on
 * success, 0 on missing data or one of the h2_status values. The 8-byte
 * payload of the PING being acknowledged is copied from the demux buffer.
 */
static int h2c_ack_ping(struct h2c *h2c)
{
	struct buffer *res;
	char str[17];      /* 9-byte frame header + 8-byte echoed payload */
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);

	/* the original 8-byte payload must be fully available */
	if (b_data(&h2c->dbuf) < 8)
		goto out;

	memcpy(str,
	       "\x00\x00\x08" /* length : 8 (same payload) */
	       "\x06" "\x01"  /* type : 6, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	/* copy the original payload */
	h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the allocation need and
		 * block the demux on mux room.
		 */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the
			 * ring and retry, otherwise report the mux as full.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* negative return: the frame couldn't fit at all */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
	return ret;
}
2465
Willy Tarreau26f95952017-07-27 17:18:30 +02002466/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
2467 * Returns > 0 on success or zero on missing data. It may return an error in
Willy Tarreaub860c732019-01-30 15:39:55 +01002468 * h2c or h2s. The caller must have already verified frame length and stream ID
2469 * validity. Described in RFC7540#6.9.
Willy Tarreau26f95952017-07-27 17:18:30 +02002470 */
2471static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
2472{
2473 int32_t inc;
2474 int error;
2475
Willy Tarreau7838a792019-08-12 18:42:03 +02002476 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
2477
Willy Tarreau26f95952017-07-27 17:18:30 +02002478 /* process full frame only */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002479 if (b_data(&h2c->dbuf) < h2c->dfl) {
2480 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002481 goto out0;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002482 }
Willy Tarreau26f95952017-07-27 17:18:30 +02002483
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002484 inc = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau26f95952017-07-27 17:18:30 +02002485
2486 if (h2c->dsi != 0) {
2487 /* stream window update */
Willy Tarreau26f95952017-07-27 17:18:30 +02002488
2489 /* it's not an error to receive WU on a closed stream */
2490 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau7838a792019-08-12 18:42:03 +02002491 goto done;
Willy Tarreau26f95952017-07-27 17:18:30 +02002492
2493 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002494 TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002495 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002496 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002497 goto strm_err;
2498 }
2499
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002500 if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002501 TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002502 error = H2_ERR_FLOW_CONTROL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002503 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002504 goto strm_err;
2505 }
2506
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002507 h2s->sws += inc;
2508 if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
Willy Tarreau26f95952017-07-27 17:18:30 +02002509 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002510 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002511 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2512 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002513 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreau26f95952017-07-27 17:18:30 +02002514 }
2515 }
2516 else {
2517 /* connection window update */
2518 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002519 TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002520 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002521 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002522 goto conn_err;
2523 }
2524
2525 if (h2c->mws >= 0 && h2c->mws + inc < 0) {
2526 error = H2_ERR_FLOW_CONTROL_ERROR;
2527 goto conn_err;
2528 }
2529
2530 h2c->mws += inc;
2531 }
2532
Willy Tarreau7838a792019-08-12 18:42:03 +02002533 done:
2534 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002535 return 1;
2536
2537 conn_err:
2538 h2c_error(h2c, error);
Willy Tarreau7838a792019-08-12 18:42:03 +02002539 out0:
2540 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002541 return 0;
2542
2543 strm_err:
Willy Tarreau6432dc82019-01-30 15:42:44 +01002544 h2s_error(h2s, error);
2545 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002546 TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002547 return 0;
2548}
2549
Willy Tarreaue96b0922017-10-30 00:28:29 +01002550/* processes a GOAWAY frame, and signals all streams whose ID is greater than
Willy Tarreaub860c732019-01-30 15:39:55 +01002551 * the last ID. Returns > 0 on success or zero on missing data. The caller must
2552 * have already verified frame length and stream ID validity. Described in
2553 * RFC7540#6.8.
Willy Tarreaue96b0922017-10-30 00:28:29 +01002554 */
2555static int h2c_handle_goaway(struct h2c *h2c)
2556{
Willy Tarreaue96b0922017-10-30 00:28:29 +01002557 int last;
2558
Willy Tarreau7838a792019-08-12 18:42:03 +02002559 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002560 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002561 if (b_data(&h2c->dbuf) < h2c->dfl) {
2562 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002563 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002564 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002565 }
Willy Tarreaue96b0922017-10-30 00:28:29 +01002566
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002567 last = h2_get_n32(&h2c->dbuf, 0);
2568 h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
Willy Tarreau11cc2d62017-12-03 10:27:47 +01002569 if (h2c->last_sid < 0)
2570 h2c->last_sid = last;
Willy Tarreau23482912019-05-07 15:23:14 +02002571 h2_wake_some_streams(h2c, last);
Willy Tarreau7838a792019-08-12 18:42:03 +02002572 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002573 return 1;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002574}
2575
Willy Tarreau92153fc2017-12-03 19:46:19 +01002576/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002577 * invalid. Returns > 0 on success or zero on missing data. It may return an
2578 * error in h2c. The caller must have already verified frame length and stream
2579 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002580 */
2581static int h2c_handle_priority(struct h2c *h2c)
2582{
Willy Tarreau7838a792019-08-12 18:42:03 +02002583 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2584
Willy Tarreau92153fc2017-12-03 19:46:19 +01002585 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002586 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002587 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002588 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002589 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002590 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002591
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002592 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002593 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002594 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002595 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002596 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002597 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002598 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002599 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002600 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002601 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002602}
2603
/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
 * Returns > 0 on success or zero on missing data. The caller must have already
 * verified frame length and stream ID validity. Described in RFC7540#6.4.
 */
static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		return 0;
	}

	/* late RST, already handled */
	if (h2s->st == H2_SS_CLOSED) {
		TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		return 1;
	}

	/* the error code is the first 4 bytes of the payload; record it and
	 * close the stream.
	 */
	h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
	h2s_close(h2s);

	/* if a stream connector is still attached, flag the error on it and
	 * wake it up so the upper layer can react.
	 */
	if (h2s_sc(h2s)) {
		se_fl_set_error(h2s->sd);
		h2s_alert(h2s);
	}

	h2s->flags |= H2_SF_RST_RCVD;
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
	return 1;
}
2637
/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
 * It may return an error in h2c or h2s. The caller must consider that the
 * return value is the new h2s in case one was allocated (most common case).
 * Described in RFC7540#6.2. Most of the
 * errors here are reported as connection errors since it's impossible to
 * recover from such errors after the compression context has been altered.
 */
static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer rxbuf = BUF_NULL;
	unsigned long long body_len = 0;
	uint32_t flags = 0;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out; // empty buffer
	}

	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */
	if (h2s->st != H2_SS_IDLE) {
		/* The stream exists/existed, this must be a trailers frame */
		if (h2s->st != H2_SS_CLOSED) {
			error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
			/* unrecoverable error ? */
			if (h2c->st0 >= H2_CS_ERROR) {
				TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
				sess_log(h2c->conn->owner);
				goto out;
			}

			if (error == 0) {
				/* Demux not blocked because of the stream, it is an incomplete frame */
				if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
					h2c->flags |= H2_CF_DEM_SHORT_READ;
				goto out; // missing data
			}

			if (error < 0) {
				/* Failed to decode this frame (e.g. too large request)
				 * but the HPACK decompressor is still synchronized.
				 */
				sess_log(h2c->conn->owner);
				h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
				TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
				h2c->st0 = H2_CS_FRAME_E;
				goto out;
			}
			goto done;
		}
		/* the connection was already killed by an RST, let's consume
		 * the data and send another RST.
		 */
		error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
		sess_log(h2c->conn->owner);
		h2s = (struct h2s*)h2_error_stream;
		goto send_rst;
	}
	else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
		/* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
		error = H2_ERR_PROTOCOL_ERROR;
		TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		sess_log(h2c->conn->owner);
		goto conn_err;
	}
	else if (h2c->flags & H2_CF_DEM_TOOMANY)
		goto out; // IDLE but too many sc still present

	/* brand new stream: decode the headers into a local rxbuf first so
	 * that nothing is committed to the stream on failure.
	 */
	error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR) {
		TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
		sess_log(h2c->conn->owner);
		goto out;
	}

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto out; // missing data
		}

		/* Failed to decode this stream (e.g. too large request)
		 * but the HPACK decompressor is still synchronized.
		 */
		sess_log(h2c->conn->owner);
		h2s = (struct h2s*)h2_error_stream;
		goto send_rst;
	}

	TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);

	/* Now we cannot roll back and we won't come back here anymore for this
	 * stream, this stream ID is open.
	 */
	if (h2c->dsi > h2c->max_id)
		h2c->max_id = h2c->dsi;

	/* Note: we don't emit any other logs below because if we return
	 * positively from h2c_frt_stream_new(), the stream will report the error,
	 * and if we return in error, h2c_frt_stream_new() will emit the error.
	 *
	 * Xfer the rxbuf to the stream. On success, the new stream owns the
	 * rxbuf. On error, it is released here.
	 */
	h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
	if (!h2s) {
		h2s = (struct h2s*)h2_refused_stream;
		goto send_rst;
	}

	h2s->st = H2_SS_OPEN;
	h2s->flags |= flags;
	h2s->body_len = body_len;

 done:
	if (h2c->dff & H2_F_HEADERS_END_STREAM)
		h2s->flags |= H2_SF_ES_RCVD;

	if (h2s->flags & H2_SF_ES_RCVD) {
		/* end of stream received: advance the state machine, closing
		 * the stream entirely if the local side is already done.
		 */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;

 conn_err:
	h2c_error(h2c, error);
	goto out;

 out:
	h2_release_buf(h2c, &rxbuf);
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
2798
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002799/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2800 * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
2801 * errors here are reported as connection errors since it's impossible to
2802 * recover from such errors after the compression context has been altered.
2803 */
2804static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
2805{
Christopher Faulet6884aa32019-09-23 15:28:20 +02002806 struct buffer rxbuf = BUF_NULL;
2807 unsigned long long body_len = 0;
2808 uint32_t flags = 0;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002809 int error;
2810
Willy Tarreau7838a792019-08-12 18:42:03 +02002811 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2812
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002813 if (!b_size(&h2c->dbuf)) {
2814 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002815 goto fail; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002816 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002817
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002818 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2819 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002820 goto fail; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002821 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002822
Christopher Faulet6884aa32019-09-23 15:28:20 +02002823 if (h2s->st != H2_SS_CLOSED) {
Willy Tarreau7cfbb812023-01-26 16:02:01 +01002824 error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
Christopher Faulet6884aa32019-09-23 15:28:20 +02002825 }
2826 else {
2827 /* the connection was already killed by an RST, let's consume
2828 * the data and send another RST.
2829 */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01002830 error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
Christopher Fauletea7a7782019-09-26 16:19:13 +02002831 h2s = (struct h2s*)h2_error_stream;
Christopher Faulet6884aa32019-09-23 15:28:20 +02002832 h2c->st0 = H2_CS_FRAME_E;
2833 goto send_rst;
2834 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002835
Willy Tarreau25919232019-01-03 14:48:18 +01002836 /* unrecoverable error ? */
Willy Tarreau17c630b2023-01-19 23:58:11 +01002837 if (h2c->st0 >= H2_CS_ERROR) {
2838 TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002839 goto fail;
Willy Tarreau17c630b2023-01-19 23:58:11 +01002840 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002841
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002842 if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
2843 /* RFC7540#5.1 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002844 TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002845 h2s_error(h2s, H2_ERR_STREAM_CLOSED);
2846 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau4781b152021-04-06 13:53:36 +02002847 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002848 goto fail;
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002849 }
2850
Willy Tarreau25919232019-01-03 14:48:18 +01002851 if (error <= 0) {
Christopher Faulet485da0b2021-10-08 08:56:00 +02002852 if (error == 0) {
2853 /* Demux not blocked because of the stream, it is an incomplete frame */
2854 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2855 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002856 goto fail; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002857 }
Willy Tarreau25919232019-01-03 14:48:18 +01002858
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002859 /* stream error : send RST_STREAM */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002860 TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau25919232019-01-03 14:48:18 +01002861 h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002862 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau4781b152021-04-06 13:53:36 +02002863 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002864 goto fail;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002865 }
2866
Christopher Fauletfa922f02019-05-07 10:55:17 +02002867 if (h2c->dff & H2_F_HEADERS_END_STREAM)
Willy Tarreau45ffc0c2019-01-03 09:32:20 +01002868 h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreau45ffc0c2019-01-03 09:32:20 +01002869
Willy Tarreau95acc8b2022-05-27 16:14:10 +02002870 if (se_fl_test(h2s->sd, SE_FL_ERROR) && h2s->st < H2_SS_ERROR)
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002871 h2s->st = H2_SS_ERROR;
Christopher Fauletfa922f02019-05-07 10:55:17 +02002872 else if (h2s->flags & H2_SF_ES_RCVD) {
2873 if (h2s->st == H2_SS_OPEN)
2874 h2s->st = H2_SS_HREM;
2875 else if (h2s->st == H2_SS_HLOC)
2876 h2s_close(h2s);
2877 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002878
	/* Unblock busy server h2s waiting for the response headers to validate
	 * the tunnel establishment or the end of the response of an aborted
	 * tunnel
	 */
2883 if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
2884 (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
2885 TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
2886 h2s->flags &= ~H2_SF_BLK_MBUSY;
2887 }
2888
Willy Tarreau9abb3172021-06-16 18:32:42 +02002889 TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002890 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002891 return h2s;
Willy Tarreau7838a792019-08-12 18:42:03 +02002892 fail:
2893 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2894 return NULL;
Christopher Faulet6884aa32019-09-23 15:28:20 +02002895
2896 send_rst:
2897 /* make the demux send an RST for the current stream. We may only
2898 * do this if we're certain that the HEADERS frame was properly
2899 * decompressed so that the HPACK decoder is still kept up to date.
2900 */
2901 h2_release_buf(h2c, &rxbuf);
2902 h2c->st0 = H2_CS_FRAME_E;
2903
Willy Tarreau022e5e52020-09-10 09:33:15 +02002904 TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Christopher Faulet6884aa32019-09-23 15:28:20 +02002905 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2906 return h2s;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002907}
2908
/* Processes a DATA frame whose header was already parsed into
 * h2c->{dsi,dft,dfl,dff,dpl}. Returns > 0 on success or zero on missing
 * data (the demux will be re-attempted once more data arrive) or on error.
 * It may leave an error in h2c (connection level) or h2s (stream level).
 * Described in RFC7540#6.1.
 */
static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
{
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* note that empty DATA frames are perfectly valid and sometimes used
	 * to signal an end of stream (with the ES flag).
	 */

	/* a non-empty frame with an empty demux buffer means nothing was
	 * received yet: report a short read and wait for more data.
	 */
	if (!b_size(&h2c->dbuf) && h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* the frame is not fully received and the buffer still has room:
	 * wait for the rest before processing.
	 */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#6.1: DATA is only valid in "open" or
		 * "half-closed (local)" states.
		 */
		error = H2_ERR_STREAM_CLOSED;
		goto strm_err;
	}

	/* body_len tracks the remaining advertised content-length; the frame
	 * payload (minus padding) must never exceed it.
	 */
	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
		/* RFC7540#8.1.2 */
		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	if (!(h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
		/* a tunnel attempt was aborted but the client still tries to
		 * send some raw data. Thus the stream is closed with the
		 * CANCEL error. Here we take care it is not an empty DATA
		 * frame with the ES flag. The error is only handled if ES was
		 * already sent to the client because depending on the
		 * scheduling, these data may have been sent before the server
		 * response but are not handled here.
		 */
		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	/* move the payload to the stream's rx buffer; a zero return means it
	 * could not be transferred yet (e.g. no room), so retry later.
	 */
	if (!h2_frt_transfer_data(h2s))
		goto fail;

	/* call the upper layers to process the frame, then let the upper layer
	 * notify the stream about any change.
	 */
	if (!h2s_sc(h2s)) {
		/* The upper layer has already closed, this may happen on
		 * 4xx/redirects during POST, or when receiving a response
		 * from an H2 server after the client has aborted.
		 */
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st >= H2_SS_ERROR) {
		/* stream error : send RST_STREAM */
		h2c->st0 = H2_CS_FRAME_E;
	}

	/* check for completion : the callee will change this to FRAME_A or
	 * FRAME_H once done.
	 */
	if (h2c->st0 == H2_CS_FRAME_P)
		goto fail;

	/* last frame */
	if (h2c->dff & H2_F_DATA_END_STREAM) {
		h2s->flags |= H2_SF_ES_RCVD;
		/* ES closes our receiving side: OPEN becomes half-closed
		 * (remote), otherwise the stream is fully closed.
		 */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);

		/* ES received while content-length bytes remain unconsumed:
		 * the message was truncated.
		 */
		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
			/* RFC7540#8.1.2 */
			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}
	}

	/* Unblock busy server h2s waiting for the end of the response for an
	 * aborted tunnel
	 */
	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;

 strm_err:
	/* stream-level error: arm RST_STREAM emission for this stream */
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
3026
/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
 * valid for the current stream state. This is needed only after parsing the
 * frame header but in practice it can be performed at any time during
 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 * Note: the order of the checks below matters, as it decides whether a given
 * violation is reported as a connection error or a stream error.
 */
static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);

	if (h2s->st == H2_SS_IDLE &&
	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
		 * this state MUST be treated as a connection error
		 */
		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
			/* only log if no other stream can report the error */
			sess_log(h2c->conn->owner);
		}
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* on the backend side we never initiate streams from the peer, so an
	 * IDLE stream receiving anything is a protocol violation.
	 */
	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
		/* only PUSH_PROMISE would be permitted here */
		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
		 * this state MUST be treated as a stream error.
		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
		 * PUSH_PROMISE/CONTINUATION cause connection errors.
		 */
		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* header-carrying frame: connection error */
			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		else {
			/* other frame types: only the stream is in error */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		}
		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* Below the management of frames received in closed state is a
	 * bit hackish because the spec makes strong differences between
	 * streams closed by receiving RST, sending RST, and seeing ES
	 * in both directions. In addition to this, the creation of a
	 * new stream reusing the identifier of a closed one will be
	 * detected here. Given that we cannot keep track of all closed
	 * streams forever, we consider that unknown closed streams were
	 * closed on RST received, which allows us to respond with an
	 * RST without breaking the connection (eg: to abort a transfer).
	 * Some frames have to be silently ignored as well.
	 */
	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* #5.1.1: The identifier of a newly
			 * established stream MUST be numerically
			 * greater than all streams that the initiating
			 * endpoint has opened or reserved. This
			 * governs streams that are opened using a
			 * HEADERS frame and streams that are reserved
			 * using PUSH_PROMISE. An endpoint that
			 * receives an unexpected stream identifier
			 * MUST respond with a connection error.
			 */
			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		if (h2s->flags & H2_SF_RST_RCVD &&
		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
			/* RFC7540#5.1:closed: an endpoint that
			 * receives any frame other than PRIORITY after
			 * receiving a RST_STREAM MUST treat that as a
			 * stream error of type STREAM_CLOSED.
			 *
			 * Note that old streams fall into this category
			 * and will lead to an RST being sent.
			 *
			 * However, we cannot generalize this to all frame types. Those
			 * carrying compression state must still be processed before
			 * being dropped or we'll desynchronize the decoder. This can
			 * happen with request trailers received after sending an
			 * RST_STREAM, or with header/trailers responses received after
			 * sending RST_STREAM (aborted stream).
			 *
			 * In addition, since our CLOSED streams always carry the
			 * RST_RCVD bit, we don't want to accidentally catch valid
			 * frames for a closed stream, i.e. RST/PRIO/WU.
			 */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
			h2c->st0 = H2_CS_FRAME_E;
			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		/* RFC7540#5.1:closed: if this state is reached as a
		 * result of sending a RST_STREAM frame, the peer that
		 * receives the RST_STREAM might have already sent
		 * frames on the stream that cannot be withdrawn. An
		 * endpoint MUST ignore frames that it receives on
		 * closed streams after it has sent a RST_STREAM
		 * frame. An endpoint MAY choose to limit the period
		 * over which it ignores frames and treat frames that
		 * arrive after this time as being in error.
		 */
		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
			/* RFC7540#5.1:closed: any frame other than
			 * PRIO/WU/RST in this state MUST be treated as
			 * a connection error
			 */
			if (h2c->dft != H2_FT_RST_STREAM &&
			    h2c->dft != H2_FT_PRIORITY &&
			    h2c->dft != H2_FT_WINDOW_UPDATE) {
				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
				return 0;
			}
		}
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
	return 1;
}
3163
Willy Tarreaubc933932017-10-09 16:21:43 +02003164/* process Rx frames to be demultiplexed */
3165static void h2_process_demux(struct h2c *h2c)
3166{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003167 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003168 struct h2_fh hdr;
3169 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003170 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003171
Willy Tarreau7838a792019-08-12 18:42:03 +02003172 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3173
Willy Tarreau081d4722017-05-16 21:51:05 +02003174 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003175 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003176
3177 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3178 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003179 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003180 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003181 goto out;
3182
Willy Tarreau52eed752017-09-22 15:05:09 +02003183 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3184 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003185 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003186 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003187 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauee4684f2021-06-17 08:08:48 +02003188 if (b_data(&h2c->dbuf) ||
Christopher Faulet3f35da22021-07-26 10:18:35 +02003189 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauee4684f2021-06-17 08:08:48 +02003190 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003191 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003192 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003193 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003194 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003195
3196 h2c->max_id = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02003197 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreaued2b9d92022-08-18 15:30:41 +02003198 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau52eed752017-09-22 15:05:09 +02003199 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003200
3201 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003202 /* ensure that what is pending is a valid SETTINGS frame
3203 * without an ACK.
3204 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003205 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003206 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003207 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003208 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003209 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003210 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003211 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003212 if (!(h2c->flags & H2_CF_IS_BACK))
3213 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003214 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003215 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003216 }
3217
3218 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3219 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003220 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003221 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3222 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003223 if (!(h2c->flags & H2_CF_IS_BACK))
3224 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003225 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003226 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003227 }
3228
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003229 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003230 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003231 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003232 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3233 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003234 if (!(h2c->flags & H2_CF_IS_BACK))
3235 sess_log(h2c->conn->owner);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003236 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003237 }
3238
Willy Tarreau3bf69182018-12-21 15:34:50 +01003239 /* that's OK, switch to FRAME_P to process it. This is
3240 * a SETTINGS frame whose header has already been
3241 * deleted above.
3242 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003243 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003244 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003245 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003246 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003247 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003248
3249 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003250 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003251 int ret = 0;
3252
Willy Tarreau7838a792019-08-12 18:42:03 +02003253 if (!b_data(&h2c->dbuf)) {
3254 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003255 h2c->flags |= H2_CF_DEM_SHORT_READ;
3256 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003257 }
3258
3259 if (h2c->st0 >= H2_CS_ERROR) {
3260 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003261 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003262 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003263
3264 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003265 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003266 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3267 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003268 break;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003269 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003270
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003271 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003272 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003273 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003274 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003275 /* only log if no other stream can report the error */
3276 sess_log(h2c->conn->owner);
3277 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003278 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003279 break;
3280 }
3281
Willy Tarreau617592c2022-06-08 16:32:22 +02003282 if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
3283 /* changed stream with a pending WU, need to
3284 * send it now.
3285 */
3286 TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
3287 ret = h2c_send_strm_wu(h2c);
3288 if (ret <= 0)
3289 break;
3290 }
3291
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003292 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003293 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3294 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3295 * we read the pad length and drop it from the remaining
3296 * payload (one byte + the 9 remaining ones = 10 total
3297 * removed), so we have a frame payload starting after the
3298 * pad len. Flow controlled frames (DATA) also count the
3299 * padlen in the flow control, so it must be adjusted.
3300 */
3301 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003302 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003303 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003304 if (!(h2c->flags & H2_CF_IS_BACK))
3305 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003306 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003307 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003308 }
3309 hdr.len--;
3310
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003311 if (b_data(&h2c->dbuf) < 10) {
3312 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003313 break; // missing padlen
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003314 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003315
3316 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3317
3318 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003319 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003320 /* RFC7540#6.1 : pad length = length of
3321 * frame payload or greater => error.
3322 */
3323 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003324 if (!(h2c->flags & H2_CF_IS_BACK))
3325 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003326 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003327 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003328 }
3329
3330 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3331 h2c->rcvd_c++;
3332 h2c->rcvd_s++;
3333 }
3334 b_del(&h2c->dbuf, 1);
3335 }
3336 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003337
3338 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003339 h2c->dfl = hdr.len;
3340 h2c->dsi = hdr.sid;
3341 h2c->dft = hdr.ft;
3342 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003343 h2c->dpl = padlen;
Willy Tarreau0f458712022-08-18 11:19:57 +02003344 h2c->flags |= H2_CF_DEM_IN_PROGRESS;
Willy Tarreau73db4342019-09-25 07:28:44 +02003345 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003346 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003347
3348 /* check for minimum basic frame format validity */
3349 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3350 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003351 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003352 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003353 if (!(h2c->flags & H2_CF_IS_BACK))
3354 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003355 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003356 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003357 }
Willy Tarreau15a47332022-03-18 15:57:34 +01003358
3359 /* transition to HEADERS frame ends the keep-alive idle
3360 * timer and starts the http-request idle delay.
3361 */
3362 if (hdr.ft == H2_FT_HEADERS)
3363 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003364 }
3365
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003366 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3367 * H2_CS_FRAME_P indicates an incomplete previous operation
3368 * (most often the first attempt) and requires some validity
3369 * checks for the frame and the current state. The two other
3370 * ones are set after completion (or abortion) and must skip
3371 * validity checks.
3372 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003373 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3374
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003375 if (tmp_h2s != h2s && h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003376 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003377 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003378 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003379 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003380 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003381 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003382 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003383 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003384 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003385 }
3386 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003387
Willy Tarreau63864812019-08-07 14:25:20 +02003388 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003389 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3390 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003391 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003392 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003393
Willy Tarreau7e98c052017-10-10 15:56:59 +02003394 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003395 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003396 if (h2c->st0 == H2_CS_FRAME_P) {
3397 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003398 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003399 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003400 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003401
Willy Tarreau7838a792019-08-12 18:42:03 +02003402 if (h2c->st0 == H2_CS_FRAME_A) {
3403 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003404 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003405 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003406 break;
3407
Willy Tarreaucf68c782017-10-10 17:11:41 +02003408 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003409 if (h2c->st0 == H2_CS_FRAME_P) {
3410 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003411 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003412 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003413
Willy Tarreau7838a792019-08-12 18:42:03 +02003414 if (h2c->st0 == H2_CS_FRAME_A) {
3415 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003416 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003417 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003418 break;
3419
Willy Tarreau26f95952017-07-27 17:18:30 +02003420 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003421 if (h2c->st0 == H2_CS_FRAME_P) {
3422 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003423 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003424 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003425 break;
3426
Willy Tarreau61290ec2017-10-17 08:19:21 +02003427 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003428 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003429 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3430 * frames' parsers consume all following CONTINUATION
3431 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003432 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003433 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003434 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003435 if (!(h2c->flags & H2_CF_IS_BACK))
3436 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003437 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003438 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003439
Willy Tarreau13278b42017-10-13 19:23:14 +02003440 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003441 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003442 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003443 if (h2c->flags & H2_CF_IS_BACK)
3444 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3445 else
3446 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003447 if (tmp_h2s) {
3448 h2s = tmp_h2s;
3449 ret = 1;
3450 }
3451 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003452 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003453 break;
3454
Willy Tarreau454f9052017-10-26 19:40:35 +02003455 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003456 if (h2c->st0 == H2_CS_FRAME_P) {
3457 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003458 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003459 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003460 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003461
Willy Tarreau7838a792019-08-12 18:42:03 +02003462 if (h2c->st0 == H2_CS_FRAME_A) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003463 /* rcvd_s will suffice to trigger the sending of a WU */
3464 h2c->st0 = H2_CS_FRAME_H;
Willy Tarreau7838a792019-08-12 18:42:03 +02003465 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003466 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003467
Willy Tarreau92153fc2017-12-03 19:46:19 +01003468 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003469 if (h2c->st0 == H2_CS_FRAME_P) {
3470 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003471 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003472 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003473 break;
3474
Willy Tarreaucd234e92017-08-18 10:59:39 +02003475 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003476 if (h2c->st0 == H2_CS_FRAME_P) {
3477 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003478 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003479 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003480 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003481 break;
3482
Willy Tarreaue96b0922017-10-30 00:28:29 +01003483 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003484 if (h2c->st0 == H2_CS_FRAME_P) {
3485 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003486 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003487 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003488 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003489 break;
3490
Willy Tarreau1c661982017-10-30 13:52:01 +01003491 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003492 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003493 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003494 /* drop frames that we ignore. They may be larger than
3495 * the buffer so we drain all of their contents until
3496 * we reach the end.
3497 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003498 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3499 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003500 h2c->dfl -= ret;
3501 ret = h2c->dfl == 0;
3502 }
3503
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003504 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003505 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003506 if (h2s->st == H2_SS_ERROR) {
3507 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003508 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003509 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003510
Willy Tarreau7838a792019-08-12 18:42:03 +02003511 if (h2c->st0 == H2_CS_FRAME_E) {
3512 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003513 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003514 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003515
Willy Tarreau7e98c052017-10-10 15:56:59 +02003516 /* error or missing data condition met above ? */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003517 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003518 break;
3519
3520 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003521 if (h2c->dfl)
3522 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003523 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3524 b_del(&h2c->dbuf, ret);
3525 h2c->dfl -= ret;
3526 if (!h2c->dfl) {
Willy Tarreau0f458712022-08-18 11:19:57 +02003527 h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
Christopher Faulet5112a602019-09-26 16:38:28 +02003528 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3529 h2c->st0 = H2_CS_FRAME_H;
Christopher Faulet5112a602019-09-26 16:38:28 +02003530 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003531 }
3532 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003533
Willy Tarreau617592c2022-06-08 16:32:22 +02003534 if (h2c->rcvd_s > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003535 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003536 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
3537 h2c_send_strm_wu(h2c);
3538 }
3539
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003540 if (h2c->rcvd_c > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003541 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003542 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003543 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003544 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003545
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003546 done:
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003547 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3548 if (h2c->flags & H2_CF_RCVD_SHUT)
3549 h2c->flags |= H2_CF_END_REACHED;
3550 }
3551
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003552 if (h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003553 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003554 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003555 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003556 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003557 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003558 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003559 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003560 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003561 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003562 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003563
Willy Tarreau7838a792019-08-12 18:42:03 +02003564 if (old_iw != h2c->miw) {
3565 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003566 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003567 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003568
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003569 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003570 out:
3571 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003572 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003573}
3574
Willy Tarreau989539b2020-01-10 17:01:29 +01003575/* resume each h2s eligible for sending in list head <head> */
3576static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3577{
3578 struct h2s *h2s, *h2s_back;
3579
3580 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3581
3582 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3583 if (h2c->mws <= 0 ||
3584 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3585 h2c->st0 >= H2_CS_ERROR)
3586 break;
3587
3588 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003589
Willy Tarreaud9464162020-01-10 18:25:07 +01003590 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003591 continue;
3592
Willy Tarreau5723f292020-01-10 15:16:57 +01003593 /* If the sender changed his mind and unsubscribed, let's just
3594 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003595 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003596 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3597 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003598 LIST_DEL_INIT(&h2s->list);
3599 continue;
3600 }
3601
Willy Tarreauf96508a2020-01-10 11:12:48 +01003602 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003603 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003604 tasklet_wakeup(h2s->subs->tasklet);
3605 h2s->subs->events &= ~SUB_RETRY_SEND;
3606 if (!h2s->subs->events)
3607 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003608 }
3609 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3610 tasklet_wakeup(h2s->shut_tl);
3611 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003612 }
3613
3614 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3615}
3616
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end, or 0 when blocked (mux buffers full while a GOAWAY still needs to
 * be emitted).
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		/* a backend connection must first emit its preface before
		 * anything else may be sent.
		 */
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates (stream-level
	 * first, then connection-level), unless the mux is already blocked.
	 */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			/* try to emit a GOAWAY if at least one stream ID was
			 * ever used; if the mux is blocked we'll retry later.
			 */
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3676
Willy Tarreau62f52692017-10-08 23:01:42 +02003677
/* Attempt to read data, and subscribe if none available.
 * Returns non-zero if data were received or if a read0/error condition was
 * detected on the connection, otherwise zero.
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* already subscribed for receipt: report whether data are pending */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		/* demux buffer allocation failed; we'll be woken up once a
		 * buffer becomes available again.
		 */
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	/* don't even attempt a read when the buffer is full (max == 0) */
	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	/* latch any shutdown/error reported by the transport layer */
	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}
	if (h2c->conn->flags & CO_FL_ERROR) {
		TRACE_DATA("connection error", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_ERROR;
	}

	if (!b_data(buf)) {
		/* nothing buffered: give the (possibly just allocated)
		 * buffer back to the pool.
		 */
		h2_release_buf(h2c, &h2c->dbuf);
		goto end;
	}

	if (b_data(buf) == buf->size) {
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

 end:
	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR));
}
3755
/* Try to send data if possible.
 * Returns non-zero if data were sent or if the connection is (or just got)
 * in error, in which case pending output is dropped; otherwise zero.
 */
static int h2_send(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	int done;
	int sent = 0;

	TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);

	if (h2c->flags & (H2_CF_ERROR|H2_CF_ERR_PENDING)) {
		/* output side already broken: drop whatever is pending, and
		 * turn a pending error into a definitive one if the input
		 * side is already shut.
		 */
		TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
		return 1;
	}

	if (conn->flags & CO_FL_WAIT_XPRT) {
		/* a handshake was requested */
		goto schedule;
	}

	/* This loop is quite simple : it tries to fill as much as it can from
	 * pending streams into the existing buffer until it's reportedly full
	 * or the end of send requests is reached. Then it tries to send this
	 * buffer's contents out, marks it not full if at least one byte could
	 * be sent, and tries again.
	 *
	 * The snd_buf() function normally takes a "flags" argument which may
	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
	 * data immediately comes and CO_SFL_STREAMER to indicate that the
	 * connection is streaming lots of data (used to increase TLS record
	 * size at the expense of latency). The former can be sent any time
	 * there's a buffer full flag, as it indicates at least one stream
	 * attempted to send and failed so there are pending data. An
	 * alternative would be to set it as long as there's an active stream
	 * but that would be problematic for ACKs until we have an absolute
	 * guarantee that all waiters have at least one byte to send. The
	 * latter should possibly not be set for now.
	 */

	done = 0;
	while (!done) {
		unsigned int flags = 0;
		unsigned int released = 0;
		struct buffer *buf;
		uint to_send;

		/* fill as much as we can into the current buffer */
		while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
			done = h2_process_mux(h2c);

		if (h2c->flags & H2_CF_MUX_MALLOC)
			done = 1; // we won't go further without extra buffers

		if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
		    (h2c->flags & H2_CF_GOAWAY_FAILED))
			break;

		if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))
			flags |= CO_SFL_MSG_MORE;

		to_send = br_count(h2c->mbuf);
		if (to_send > 1) {
			/* usually we want to emit small TLS records to speed
			 * up the decoding on the client. That's what is being
			 * done by default. However if there is more than one
			 * buffer being allocated, we're streaming large data
			 * so we switch to large records.
			 */
			flags |= CO_SFL_STREAMER;
		}

		/* flush every allocated mbuf in order, stopping as soon as a
		 * write fails or only partially completes.
		 */
		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf),
							      flags | (to_send > 1 ? CO_SFL_MSG_MORE : 0));
				if (!ret) {
					done = 1;
					break;
				}
				sent = 1;
				to_send--;
				TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
				b_del(buf, ret);
				if (b_data(buf)) {
					/* partial write: keep the rest for later */
					done = 1;
					break;
				}
			}
			b_free(buf);
			released++;
		}

		if (released)
			offer_buffers(NULL, released);

		/* Normally if wrote at least one byte, the buffer is not full
		 * anymore. However, if it was marked full because all of its
		 * buffers were used, we don't want to instantly wake up many
		 * streams because we'd create a thundering herd effect, notably
		 * when data are flushed in small chunks. Instead we wait for
		 * the buffer to be decongested again before allowing to send
		 * again. It also has the added benefit of not pumping more
		 * data from the other side when it's known that this one is
		 * still congested.
		 */
		if (sent && br_single(h2c->mbuf))
			h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
	}

	if (conn->flags & CO_FL_ERROR) {
		/* report the error; it becomes definitive once the input side
		 * is also shut. Pending output is dropped.
		 */
		h2c->flags |= H2_CF_ERR_PENDING;
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
	}

	/* We're not full anymore, so we can wake any tasks that are waiting
	 * for us.
	 */
	if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H)
		h2_resume_each_sending_h2s(h2c, &h2c->send_list);

	/* We're done, no more to send */
	if (!br_data(h2c->mbuf)) {
		TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
		goto end;
	}
schedule:
	/* some data are left: subscribe for a send event unless already done */
	if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
		TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
	}
	TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
end:
	return sent || (h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR));
}
3896
/* this is the tasklet referenced in h2c->wait_event.tasklet. It runs the
 * connection's I/O (h2_send()/h2_recv()) then h2_process() when some data
 * was produced or is pending in the demux buffer. When TASK_F_USR1 is set,
 * the tasklet was idling on an idle (takeover-able) connection, so all
 * accesses must first be revalidated under the idle conns lock, and the
 * connection is temporarily removed from its idle tree while we work on it.
 * Returns the task itself, or NULL when the tasklet was freed after a
 * takeover or when h2_process() destroyed the connection.
 */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
{
	struct connection *conn;
	struct tasklet *tl = (struct tasklet *)t;
	int conn_in_list;
	struct h2c *h2c = ctx;
	int ret = 0;

	if (state & TASK_F_USR1) {
		/* the tasklet was idling on an idle connection, it might have
		 * been stolen, let's be careful!
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (t->context == NULL) {
			/* The connection has been taken over by another thread,
			 * we're no longer responsible for it, so just free the
			 * tasklet, and do nothing.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			tasklet_free(tl);
			t = NULL;
			goto leave;
		}
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);

		/* Remove the connection from the list, to be sure nobody attempts
		 * to use it while we handle the I/O events
		 */
		conn_in_list = conn_get_idle_flag(conn);
		if (conn_in_list)
			conn_delete_from_tree(&conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	} else {
		/* we're certain the connection was not in an idle list */
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);
		conn_in_list = 0;
	}

	/* perform the pending I/O unless a subscription is already waiting
	 * for the transport layer to report readiness for that direction.
	 */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		ret = h2_send(h2c);
	if (!(h2c->wait_event.events & SUB_RETRY_RECV))
		ret |= h2_recv(h2c);
	if (ret || b_data(&h2c->dbuf))
		ret = h2_process(h2c);

	/* If we were in an idle list, we want to add it back into it,
	 * unless h2_process() returned -1, which means it has destroyed
	 * the connection (testing !ret is enough: if h2_process() wasn't
	 * called then ret will be 0 anyway).
	 */
	if (ret < 0)
		t = NULL;

	if (!ret && conn_in_list) {
		struct server *srv = objt_server(conn->target);

		/* re-insert into the same idle tree (safe or regular) the
		 * connection was taken from, under the idle conns lock.
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (conn_in_list == CO_FL_SAFE_LIST)
			eb64_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node);
		else
			eb64_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return t;
}
Willy Tarreaua2af5122017-10-09 11:56:46 +02003969
/* callback called on any event by the connection handler.
 * It applies changes and returns zero, or < 0 if it wants immediate
 * destruction of the connection (which normally does not happen in h2).
 * The sequence is: demux incoming data, flush outgoing data, announce a
 * graceful shutdown when the frontend proxy is stopping, wake streams
 * waiting for the TLS handshake when early data were used, then release
 * the connection and/or its buffers when nothing holds them anymore.
 */
static int h2_process(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_WAKE, conn);

	/* demux pending input unless the demuxer is blocked; on a connection
	 * error the input buffer is discarded since it cannot be parsed.
	 */
	if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
	    (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
		h2_process_demux(h2c);

		if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_ERROR))
			b_reset(&h2c->dbuf);

		if (!b_full(&h2c->dbuf))
			h2c->flags &= ~H2_CF_DEM_DFULL;
	}
	h2_send(h2c);

	if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
		int send_goaway = 1;
		/* If a close-spread-time option is set, we want to avoid
		 * closing all the active HTTP2 connections at once so we add a
		 * random factor that will spread the closing.
		 */
		if (tick_isset(global.close_spread_end)) {
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* This should increase the closing rate the
				 * further along the window we are. */
				send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
			}
		}
		else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
			send_goaway = 0; /* let the client close his connection himself */
		/* frontend is stopping, reload likely in progress, let's try
		 * to announce a graceful shutdown if not yet done. We don't
		 * care if it fails, it will be tried again later.
		 */
		if (send_goaway) {
			TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
			if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
				/* advertise the highest possible stream id so
				 * that in-flight streams may still complete.
				 */
				if (h2c->last_sid < 0)
					h2c->last_sid = (1U << 31) - 1;
				h2c_send_goaway_error(h2c, NULL);
			}
		}
	}

	/*
	 * If we received early data, and the handshake is done, wake
	 * any stream that was waiting for it.
	 */
	if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
		struct eb32_node *node;
		struct h2s *h2s;

		h2c->flags |= H2_CF_WAIT_FOR_HS;
		node = eb32_lookup_ge(&h2c->streams_by_id, 1);

		while (node) {
			h2s = container_of(node, struct h2s, by_id);
			if (se_fl_test(h2s->sd, SE_FL_WAIT_FOR_HS))
				h2s_notify_recv(h2s);
			node = eb32_next(node);
		}
	}

	/* the connection is dying (error, read0, failed GOAWAY) or finished
	 * its graceful shutdown: wake the streams up, and kill it once the
	 * last stream is gone.
	 */
	if ((h2c->flags & H2_CF_ERROR) || h2c_read0_pending(h2c) ||
	    h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
	    (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
	     h2c->max_id >= h2c->last_sid)) {
		h2_wake_some_streams(h2c, 0);

		if (eb_is_empty(&h2c->streams_by_id)) {
			/* no more stream, kill the connection now */
			h2_release(h2c);
			TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
			return -1;
		}

		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}
	else if (h2c->st0 == H2_CS_ERROR) {
		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}

	/* release the demux buffer when empty, and the mux buffer ring when
	 * nothing may need it anymore (no pending data and nothing blocked
	 * waiting for room).
	 */
	if (!b_data(&h2c->dbuf))
		h2_release_buf(h2c, &h2c->dbuf);

	if (h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
	    (h2c->st0 != H2_CS_ERROR &&
	     !br_data(h2c->mbuf) &&
	     (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
	     ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
		h2_release_mbuf(h2c);

	h2c_update_timeout(h2c);
	h2_send(h2c);
	TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
	return 0;
}
4086
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004087/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004088static int h2_wake(struct connection *conn)
4089{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004090 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02004091 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004092
Willy Tarreau7838a792019-08-12 18:42:03 +02004093 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4094 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01004095 if (ret >= 0)
4096 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02004097 TRACE_LEAVE(H2_EV_H2C_WAKE);
4098 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004099}
4100
/* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed. If the
 * MUX buffer still has lying data or is not allocatable, the connection is
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * GOAWAY frame.
 * Since the connection may be shared and taken over by another thread, all
 * checks performed before deciding to kill it are done under the idle conns
 * lock. Always returns NULL (the task is destroyed) except when the timer
 * did not really expire or the streams must still be waited for, in which
 * case the task is kept.
 */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
{
	struct h2c *h2c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);

	if (h2c) {
		/* Make sure nobody stole the connection from us */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

		/* Somebody already stole the connection from us, so we should not
		 * free it, we just have to free the task.
		 */
		if (!t->context) {
			h2c = NULL;
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			goto do_leave;
		}


		if (!expired) {
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
			return t;
		}

		if (!h2c_may_expire(h2c)) {
			/* we do still have streams but all of them are idle, waiting
			 * for the data layer, so we must not enforce the timeout here.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			t->expire = TICK_ETERNITY;
			return t;
		}

		/* We're about to destroy the connection, so make sure nobody attempts
		 * to steal it from us.
		 */
		if (h2c->conn->flags & CO_FL_LIST_MASK)
			conn_delete_from_tree(&h2c->conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

do_leave:
	task_destroy(t);

	if (!h2c) {
		/* resources were already deleted */
		TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
		return NULL;
	}

	/* detach the task and mark the whole connection in error, then wake
	 * the streams so they can notice the error.
	 */
	h2c->task = NULL;
	h2c_error(h2c, H2_ERR_NO_ERROR);
	h2_wake_some_streams(h2c, 0);

	if (br_data(h2c->mbuf)) {
		/* don't even try to send a GOAWAY, the buffer is stuck */
		h2c->flags |= H2_CF_GOAWAY_FAILED;
	}

	/* try to send but no need to insist */
	h2c->last_sid = h2c->max_id;
	if (h2c_send_goaway_error(h2c, NULL) <= 0)
		h2c->flags |= H2_CF_GOAWAY_FAILED;

	/* best-effort flush of the mux buffer ring so the GOAWAY leaves;
	 * stop on the first incomplete write and count released buffers.
	 */
	if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
		unsigned int released = 0;
		struct buffer *buf;

		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
				if (!ret)
					break;
				b_del(buf, ret);
				if (b_data(buf))
					break;
				b_free(buf);
				released++;
			}
		}

		if (released)
			offer_buffers(NULL, released);
	}

	/* in any case this connection must not be considered idle anymore */
	if (h2c->conn->flags & CO_FL_LIST_MASK) {
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(&h2c->conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

	/* either we can release everything now or it will be done later once
	 * the last stream closes.
	 */
	if (eb_is_empty(&h2c->streams_by_id))
		h2_release(h2c);

	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return NULL;
}
4212
4213
Willy Tarreau62f52692017-10-08 23:01:42 +02004214/*******************************************/
4215/* functions below are used by the streams */
4216/*******************************************/
4217
4218/*
4219 * Attach a new stream to a connection
4220 * (Used for outgoing connections)
4221 */
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004222static int h2_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
Willy Tarreau62f52692017-10-08 23:01:42 +02004223{
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004224 struct h2s *h2s;
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004225 struct h2c *h2c = conn->ctx;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004226
Willy Tarreau7838a792019-08-12 18:42:03 +02004227 TRACE_ENTER(H2_EV_H2S_NEW, conn);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004228 h2s = h2c_bck_stream_new(h2c, sd->sc, sess);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004229 if (!h2s) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004230 TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Christopher Faulete00ad352021-12-16 14:44:31 +01004231 return -1;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004232 }
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004233
4234 /* the connection is not idle anymore, let's mark this */
4235 HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004236 xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004237
Willy Tarreau7838a792019-08-12 18:42:03 +02004238 TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
Christopher Faulete00ad352021-12-16 14:44:31 +01004239 return 0;
Willy Tarreau62f52692017-10-08 23:01:42 +02004240}
4241
Willy Tarreau4596fe22022-05-17 19:07:51 +02004242/* Retrieves the first valid stream connector from this connection, or returns
4243 * NULL. We have to scan because we may have some orphan streams. It might be
Willy Tarreaufafd3982018-11-18 21:29:20 +01004244 * beneficial to scan backwards from the end to reduce the likeliness to find
4245 * orphans.
4246 */
Willy Tarreaud1373532022-05-27 11:00:59 +02004247static struct stconn *h2_get_first_sc(const struct connection *conn)
Willy Tarreaufafd3982018-11-18 21:29:20 +01004248{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004249 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004250 struct h2s *h2s;
4251 struct eb32_node *node;
4252
4253 node = eb32_first(&h2c->streams_by_id);
4254 while (node) {
4255 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau7be4ee02022-05-18 07:31:41 +02004256 if (h2s_sc(h2s))
4257 return h2s_sc(h2s);
Willy Tarreaufafd3982018-11-18 21:29:20 +01004258 node = eb32_next(node);
4259 }
4260 return NULL;
4261}
4262
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004263static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4264{
4265 int ret = 0;
4266 struct h2c *h2c = conn->ctx;
4267
4268 switch (mux_ctl) {
4269 case MUX_STATUS:
4270 /* Only consider the mux to be ready if we're done with
4271 * the preface and settings, and we had no error.
4272 */
4273 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4274 ret |= MUX_STATUS_READY;
4275 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004276 case MUX_EXIT_STATUS:
4277 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004278 default:
4279 return -1;
4280 }
4281}
4282
Willy Tarreau62f52692017-10-08 23:01:42 +02004283/*
Olivier Houchard060ed432018-11-06 16:32:42 +01004284 * Destroy the mux and the associated connection, if it is no longer used
4285 */
Christopher Faulet73c12072019-04-08 11:23:22 +02004286static void h2_destroy(void *ctx)
Olivier Houchard060ed432018-11-06 16:32:42 +01004287{
Christopher Faulet73c12072019-04-08 11:23:22 +02004288 struct h2c *h2c = ctx;
Olivier Houchard060ed432018-11-06 16:32:42 +01004289
Willy Tarreau7838a792019-08-12 18:42:03 +02004290 TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
Christopher Faulet4e610962022-04-14 11:23:50 +02004291 if (eb_is_empty(&h2c->streams_by_id)) {
4292 BUG_ON(h2c->conn->ctx != h2c);
Christopher Faulet73c12072019-04-08 11:23:22 +02004293 h2_release(h2c);
Christopher Faulet4e610962022-04-14 11:23:50 +02004294 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004295 TRACE_LEAVE(H2_EV_H2C_END);
Olivier Houchard060ed432018-11-06 16:32:42 +01004296}
4297
/*
 * Detach the stream from the connection and possibly release the connection.
 * <sd> is the stream endpoint descriptor whose ->se points to the h2s being
 * detached. On backend connections, once the last stream leaves, the
 * connection is handed back either to the session (private connections) or
 * to the server's idle/safe/avail trees for reuse, or destroyed when the
 * server refuses it. The connection's timeout is refreshed when it is kept.
 */
static void h2_detach(struct sedesc *sd)
{
	struct h2s *h2s = sd->se;
	struct h2c *h2c;
	struct session *sess;

	TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);

	if (!h2s) {
		TRACE_LEAVE(H2_EV_STRM_END);
		return;
	}

	/* there's no txbuf so we're certain not to be able to send anything */
	h2s->flags &= ~H2_SF_NOTIFIED;

	sess = h2s->sess;
	h2c = h2s->h2c;
	h2c->nb_sc--;
	if (!h2c->nb_sc)
		h2c->idle_start = now_ms;

	if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
	    !h2_frt_has_too_many_sc(h2c)) {
		/* frontend connection was blocking new streams creation */
		h2c->flags &= ~H2_CF_DEM_TOOMANY;
		h2c_restart_reading(h2c, 1);
	}

	/* this stream may be blocked waiting for some data to leave (possibly
	 * an ES or RST frame), so orphan it in this case.
	 */
	if (!(h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) &&
	    (h2c->st0 < H2_CS_ERROR) &&
	    (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
	    ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
		TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
		/* refresh the timeout if none was active, so that the last
		 * leaving stream may arm it.
		 */
		if (h2c->task && !tick_isset(h2c->task->expire))
			h2c_update_timeout(h2c);
		return;
	}

	if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi)) {
		/* unblock the connection if it was blocked on this
		 * stream.
		 */
		h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
		h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
		h2c_restart_reading(h2c, 1);
	}

	h2s_destroy(h2s);

	/* backend connection reuse: only attempted while the connection has
	 * neither seen a shutdown nor an error.
	 */
	if (h2c->flags & H2_CF_IS_BACK) {
		if (!(h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERR_PENDING|H2_CF_ERROR))) {
			if (h2c->conn->flags & CO_FL_PRIVATE) {
				/* Add the connection in the session server list, if not already done */
				if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
					h2c->conn->owner = NULL;
					if (eb_is_empty(&h2c->streams_by_id)) {
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
				}
				if (eb_is_empty(&h2c->streams_by_id)) {
					if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
						/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
						TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
						return;
					}
				}
			}
			else {
				if (eb_is_empty(&h2c->streams_by_id)) {
					/* If the connection is owned by the session, first remove it
					 * from its list
					 */
					if (h2c->conn->owner) {
						session_unown_conn(h2c->conn->owner, h2c->conn);
						h2c->conn->owner = NULL;
					}

					/* mark that the tasklet may lose its context to another thread and
					 * that the handler needs to check it under the idle conns lock.
					 */
					HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
					xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);

					if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
						/* The server doesn't want it, let's kill the connection right away */
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
					/* At this point, the connection has been added to the
					 * server idle list, so another thread may already have
					 * hijacked it, so we can't do anything with it.
					 */
					TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
					return;

				}
				else if (!h2c->conn->hash_node->node.node.leaf_p &&
					 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
					 !LIST_INLIST(&h2c->conn->session_list)) {
					/* streams remain and slots are available: make the
					 * connection available for reuse by other sessions.
					 */
					eb64_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
						    &h2c->conn->hash_node->node);
				}
			}
		}
	}

	/* We don't want to close right now unless we're removing the
	 * last stream, and either the connection is in error, or it
	 * reached the ID already specified in a GOAWAY frame received
	 * or sent (as seen by last_sid >= 0).
	 */
	if (h2c_is_dead(h2c)) {
		/* no more stream will come, kill it now */
		TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
		h2_release(h2c);
	}
	else if (h2c->task) {
		h2c_update_timeout(h2c);
		TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
	}
	else
		TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
}
4434
/* Performs a synchronous or asynchronous shutr(). Closes the read side of
 * stream <h2s>, sending an RST_STREAM whose error code depends on the
 * situation. If the frame cannot be emitted right now (mux buffer full or
 * flow-controlled), the operation is deferred: the stream is flagged with
 * H2_SF_WANT_SHUTR and queued so that h2_deferred_shut() retries later.
 */
static void h2_do_shutr(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	/* already fully closed: just clear the pending-shutr flag and leave */
	if (h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	/* a connstream may require us to immediately kill the whole connection
	 * for example because of a "tcp-request content reject" rule that is
	 * normally used to limit abuse. In this case we schedule a goaway to
	 * close the connection.
	 */
	if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
	    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
		TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
		h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
	}
	else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
		/* Nothing was ever sent for this stream, so reset with
		 * REFUSED_STREAM error to let the client retry the
		 * request.
		 */
		TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_REFUSED_STREAM);
	}
	else {
		/* a final response was already provided, we don't want this
		 * stream anymore. This may happen when the server responds
		 * before the end of an upload and closes quickly (redirect,
		 * deny, ...)
		 */
		h2s_error(h2s, H2_ERR_CANCEL);
	}

	/* try to emit the RST_STREAM now; <= 0 means it couldn't be sent and
	 * the shutdown must be deferred
	 */
	if (!(h2s->flags & H2_SF_RST_SENT) &&
	    h2s_send_rst_stream(h2c, h2s) <= 0)
		goto add_to_list;

	/* the frame is in the mux buffer: make sure the I/O tasklet flushes it */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);
	h2s_close(h2s);
 done:
	h2s->flags &= ~H2_SF_WANT_SHUTR;
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
add_to_list:
	/* Let the handler know we want to shutr, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTR;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4500
/* Performs a synchronous or asynchronous shutw(). Closes the write side of
 * stream <h2s>: either cleanly with an empty DATA frame carrying END_STREAM
 * (when headers were sent and no more data is pending), or abruptly with an
 * RST_STREAM. If the required frame cannot be emitted right now, the stream
 * is flagged H2_SF_WANT_SHUTW and queued for h2_deferred_shut() to retry.
 */
static void h2_do_shutw(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	/* local side already closed: just clear the pending-shutw flag */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->st != H2_SS_ERROR &&
	    (h2s->flags & (H2_SF_HEADERS_SENT | H2_SF_MORE_HTX_DATA)) == H2_SF_HEADERS_SENT) {
		/* we can cleanly close using an empty data frame only after headers
		 * and if no more data is expected to be sent.
		 */
		if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
		    h2_send_empty_data_es(h2s) <= 0)
			goto add_to_list;

		/* remote already closed -> fully closed, else half-closed (local) */
		if (h2s->st == H2_SS_HREM)
			h2s_close(h2s);
		else
			h2s->st = H2_SS_HLOC;
	} else {
		/* a connstream may require us to immediately kill the whole connection
		 * for example because of a "tcp-request content reject" rule that is
		 * normally used to limit abuse. In this case we schedule a goaway to
		 * close the connection.
		 */
		if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
		    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
			TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
			h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
		}
		else if (h2s->flags & H2_SF_MORE_HTX_DATA) {
			/* some unsent data were pending (e.g. abort during an upload),
			 * let's send a CANCEL.
			 */
			TRACE_STATE("shutw before end of data, sending CANCEL", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		else {
			/* Nothing was ever sent for this stream, so reset with
			 * REFUSED_STREAM error to let the client retry the
			 * request.
			 */
			TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_REFUSED_STREAM);
		}

		/* try to emit the RST_STREAM now; <= 0 means it must be deferred */
		if (!(h2s->flags & H2_SF_RST_SENT) &&
		    h2s_send_rst_stream(h2c, h2s) <= 0)
			goto add_to_list;

		h2s_close(h2s);
	}

	/* make sure the I/O tasklet flushes the frame we just queued */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);

	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);

 done:
	h2s->flags &= ~H2_SF_WANT_SHUTW;
	return;

 add_to_list:
	/* Let the handler know we want to shutw, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTW;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4584
/* This is the tasklet referenced in h2s->shut_tl, it is used for
 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
 * and prevented the last frame from being emitted.
 *
 * Returns the task <t>, or NULL when the whole connection was released here
 * (so that the scheduler does not requeue a freed task).
 */
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
{
	struct h2s *h2s = ctx;
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->flags & H2_SF_NOTIFIED) {
		/* some data processing remains to be done first */
		goto end;
	}

	/* retry whichever shutdown direction(s) were previously deferred;
	 * each h2_do_shut*() clears its WANT flag on success.
	 */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		h2_do_shutw(h2s);

	if (h2s->flags & H2_SF_WANT_SHUTR)
		h2_do_shutr(h2s);

	if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* We're done trying to send, remove ourself from the send_list */
		LIST_DEL_INIT(&h2s->list);

		if (!h2s_sc(h2s)) {
			/* no stream connector attached anymore, the stream
			 * can be destroyed
			 */
			h2s_destroy(h2s);
			if (h2c_is_dead(h2c)) {
				/* last stream of a dead connection: release it
				 * and report NULL since <t> was freed with it
				 */
				h2_release(h2c);
				t = NULL;
			}
		}
	}
 end:
	TRACE_LEAVE(H2_EV_STRM_SHUT);
	return t;
}
4623
Willy Tarreau4596fe22022-05-17 19:07:51 +02004624/* shutr() called by the stream connector (mux_ops.shutr) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004625static void h2_shutr(struct stconn *sc, enum co_shr_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004626{
Willy Tarreau36c22322022-05-27 10:41:24 +02004627 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004628
Willy Tarreau7838a792019-08-12 18:42:03 +02004629 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004630 if (mode)
4631 h2_do_shutr(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004632 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004633}
4634
Willy Tarreau4596fe22022-05-17 19:07:51 +02004635/* shutw() called by the stream connector (mux_ops.shutw) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004636static void h2_shutw(struct stconn *sc, enum co_shw_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004637{
Willy Tarreau36c22322022-05-27 10:41:24 +02004638 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004639
Willy Tarreau7838a792019-08-12 18:42:03 +02004640 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004641 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004642 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004643}
4644
/* Decode the payload of a HEADERS frame and produce the HTX request or response
 * depending on the connection's side. Returns a positive value on success, a
 * negative value on failure, or 0 if it couldn't proceed. May report connection
 * errors in h2c->errcode if the frame is non-decodable and the connection
 * unrecoverable. In absence of connection error when a failure is reported, the
 * caller must assume a stream error.
 *
 * The function may fold CONTINUATION frames into the initial HEADERS frame
 * by removing padding and next frame header, then moving the CONTINUATION
 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
 * leaving a hole between the main frame and the beginning of the next one.
 * The possibly remaining incomplete or next frame at the end may be moved
 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
 * HEADERS frames are unwrapped into a temporary buffer before decoding.
 *
 * A buffer at the beginning of processing may look like this :
 *
 *    ,---.---------.-----.--------------.--------------.------.---.
 *    |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
 *    `---^---------^-----^--------------^--------------^------^---'
 *    |   |         <----->                                    |   |
 *   area |           dpl                                      | wrap
 *        |<-------------->                                    |
 *        |       dfl                                          |
 *        |<-------------------------------------------------->|
 *      head                                                  data
 *
 * Padding is automatically overwritten when folding, participating to the
 * hole size after dfl :
 *
 *    ,---.------------------------.-----.--------------.------.---.
 *    |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
 *    `---^------------------------^-----^--------------^------^---'
 *    |   |                        <----->                     |   |
 *   area |                          hole                      | wrap
 *        |<----------------------->                           |
 *        |           dfl                                      |
 *        |<-------------------------------------------------->|
 *      head                                                  data
 *
 * Please note that the HEADERS frame is always deprived from its PADLEN byte
 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
 * bit.
 *
 * The <flags> field must point to either the stream's flags or to a copy of it
 * so that the function can update the following flags :
 *   - H2_SF_DATA_CLEN when content-length is seen
 *   - H2_SF_HEADERS_RCVD once the frame is successfully decoded
 *
 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
 * decoding, in order to detect if we're dealing with a headers or a trailers
 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
 *
 * <upgrade_protocol> is only passed through to h2_make_htx_response() on the
 * backend side (presumably for Extended CONNECT handling — verify in that
 * function).
 */
static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
{
	const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
	struct buffer *tmp = get_trash_chunk();
	struct http_hdr list[global.tune.max_http_hdr * 2];
	struct buffer *copy = NULL;
	unsigned int msgf;
	struct htx *htx = NULL;
	int flen; // header frame len
	int hole = 0;
	int ret = 0;
	int outlen;
	int wrap;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);

next_frame:
	if (b_data(&h2c->dbuf) - hole < h2c->dfl)
		goto leave; // incomplete input frame

	/* No END_HEADERS means there's one or more CONTINUATION frames. In
	 * this case, we'll try to paste it immediately after the initial
	 * HEADERS frame payload and kill any possible padding. The initial
	 * frame's length will be increased to represent the concatenation
	 * of the two frames. The next frame is read from position <tlen>
	 * and written at position <flen> (minus padding if some is present).
	 */
	if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
		struct h2_fh hdr;
		int clen; // CONTINUATION frame's payload length

		TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
		if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
			/* no more data, the buffer may be full, either due to
			 * too large a frame or because of too large a hole that
			 * we're going to compact at the end.
			 */
			goto leave;
		}

		if (hdr.ft != H2_FT_CONTINUATION) {
			/* RFC7540#6.10: frame of unexpected type */
			TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (hdr.sid != h2c->dsi) {
			/* RFC7540#6.10: frame of different stream */
			TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
			/* RFC7540#4.2: invalid frame length */
			TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		/* detect when we must stop aggregating frames */
		h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;

		/* Take as much as we can of the CONTINUATION frame's payload */
		clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
		if (clen > hdr.len)
			clen = hdr.len;

		/* Move the frame's payload over the padding, hole and frame
		 * header. At least one of hole or dpl is null (see diagrams
		 * above). The hole moves after the new aggregated frame.
		 */
		b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
		h2c->dfl += hdr.len - h2c->dpl;
		hole += h2c->dpl + 9;
		h2c->dpl = 0;
		TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
		goto next_frame;
	}

	/* the payload to decode is the frame minus its padding */
	flen = h2c->dfl - h2c->dpl;

	/* if the input buffer wraps, take a temporary copy of it (rare) */
	wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
	if (wrap < h2c->dfl) {
		copy = alloc_trash_chunk();
		if (!copy) {
			TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			goto fail;
		}
		memcpy(copy->area, b_head(&h2c->dbuf), wrap);
		memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
		hdrs = (uint8_t *) copy->area;
	}

	/* Skip StreamDep and weight for now (we don't support PRIORITY) */
	if (h2c->dff & H2_F_HEADERS_PRIORITY) {
		if (read_n32(hdrs) == h2c->dsi) {
			/* RFC7540#5.3.1 : stream dep may not depend on itself */
			TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (flen < 5) {
			TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		hdrs += 5; // stream dep = 4, weight = 1
		flen -= 5;
	}

	if (!h2_get_buf(h2c, rxbuf)) {
		TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SALLOC;
		goto leave;
	}

	/* we can't retry a failed decompression operation so we must be very
	 * careful not to take any risks. In practice the output buffer is
	 * always empty except maybe for trailers, in which case we simply have
	 * to wait for the upper layer to finish consuming what is available.
	 */
	htx = htx_from_buf(rxbuf);
	if (!htx_is_empty(htx)) {
		TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SFULL;
		goto leave;
	}

	/* past this point we cannot roll back in case of error */
	outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
	                            sizeof(list)/sizeof(list[0]), tmp);

	/* optionally dump the decoded header list when advanced tracing is on */
	if (outlen > 0 &&
	    (TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
	    TRACE_ENABLED(TRACE_LEVEL_USER, H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, 0, 0)) {
		struct ist n;
		int i;

		for (i = 0; list[i].n.len; i++) {
			n = list[i].n;

			if (!isttest(n)) {
				/* this is in fact a pseudo header whose number is in n.len */
				n = h2_phdr_to_ist(n.len);
			}

			h2_trace_header(n, list[i].v, H2_EV_RX_FRAME|H2_EV_RX_HDR,
			                ist(TRC_LOC), __FUNCTION__, h2c, NULL);
		}
	}

	if (outlen < 0) {
		TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
		goto fail;
	}

	/* The HPACK decompressor was updated, let's update the input buffer and
	 * the parser's state to commit these changes and allow us to later
	 * fail solely on the stream if needed.
	 */
	b_del(&h2c->dbuf, h2c->dfl + hole);
	h2c->dfl = hole = 0;
	h2c->st0 = H2_CS_FRAME_H;

	/* OK now we have our header list in <list> */
	msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
	msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
	/* If an Extended CONNECT has been sent on this stream, set message flag
	 * to convert 200 response to 101 htx response */
	msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;

	if (*flags & H2_SF_HEADERS_RCVD)
		goto trailers;

	/* This is the first HEADERS frame so it's a headers block */
	if (h2c->flags & H2_CF_IS_BACK)
		outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
	else
		outlen = h2_make_htx_request(list, htx, &msgf, body_len);

	if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
		/* too large headers? this is a stream error only */
		TRACE_STATE("message headers too large", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
		htx->flags |= HTX_FL_PARSING_ERROR;
		goto fail;
	}

	if (msgf & H2_MSGF_BODY) {
		/* a payload is present */
		if (msgf & H2_MSGF_BODY_CL) {
			*flags |= H2_SF_DATA_CLEN;
			htx->extra = *body_len;
		}
	}
	if (msgf & H2_MSGF_BODYLESS_RSP)
		*flags |= H2_SF_BODYLESS_RESP;

	if (msgf & H2_MSGF_BODY_TUNNEL)
		*flags |= H2_SF_BODY_TUNNEL;
	else {
		/* Abort the tunnel attempt, if any */
		if (*flags & H2_SF_BODY_TUNNEL)
			*flags |= H2_SF_TUNNEL_ABRT;
		*flags &= ~H2_SF_BODY_TUNNEL;
	}

 done:
	/* indicate that a HEADERS frame was received for this stream, except
	 * for 1xx responses. For 1xx responses, another HEADERS frame is
	 * expected.
	 */
	if (!(msgf & H2_MSGF_RSP_1XX))
		*flags |= H2_SF_HEADERS_RCVD;

	if (h2c->dff & H2_F_HEADERS_END_STREAM) {
		if (msgf & H2_MSGF_RSP_1XX) {
			/* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
			TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			goto fail;
		}
		/* no more data are expected for this message */
		htx->flags |= HTX_FL_EOM;
	}

	if (msgf & H2_MSGF_EXT_CONNECT)
		*flags |= H2_SF_EXT_CONNECT_RCVD;

	/* success */
	ret = 1;

 leave:
	/* If there is a hole left and it's not at the end, we are forced to
	 * move the remaining data over it.
	 */
	if (hole) {
		if (b_data(&h2c->dbuf) > h2c->dfl + hole)
			b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
			       b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
		b_sub(&h2c->dbuf, hole);
	}

	if (b_full(&h2c->dbuf) && h2c->dfl) {
		/* too large frames */
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = -1;
	}

	if (htx)
		htx_to_buf(htx, rxbuf);
	free_trash_chunk(copy);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
	return ret;

 fail:
	ret = -1;
	goto leave;

 trailers:
	/* This is the last HEADERS frame hence a trailer */
	if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
		/* It's a trailer but it's missing ES flag */
		TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		goto fail;
	}

	/* Trailers terminate a DATA sequence */
	if (h2_make_htx_trailers(list, htx) <= 0) {
		TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
		goto fail;
	}
	goto done;
}
4982
/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
 * parser state is automatically updated. Returns > 0 if it could completely
 * send the current frame, 0 if it couldn't complete, in which case
 * SE_FL_RCV_MORE must be checked to know if some data remain pending (an empty
 * DATA frame can return 0 as a valid result). Stream errors are reported in
 * h2s->errcode and connection errors in h2c->errcode. The caller must already
 * have checked the frame header and ensured that the frame was complete or the
 * buffer full. It changes the frame state to FRAME_A once done.
 */
static int h2_frt_transfer_data(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	int block;                 /* free room in htx, then contiguous input size */
	unsigned int flen = 0;     /* number of payload bytes to move in this pass */
	struct htx *htx = NULL;
	struct buffer *scbuf;      /* stream-connector side rx buffer */
	unsigned int sent;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	h2c->flags &= ~H2_CF_DEM_SFULL;

	/* make sure the stream's rx buffer is allocated before copying into it */
	scbuf = h2_get_buf(h2c, &h2s->rxbuf);
	if (!scbuf) {
		h2c->flags |= H2_CF_DEM_SALLOC;
		TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	htx = htx_from_buf(scbuf);

try_again:
	/* payload bytes still expected for this frame, excluding trailing padding */
	flen = h2c->dfl - h2c->dpl;
	if (!flen)
		goto end_transfer;

	/* limit the copy to what was actually received so far */
	if (flen > b_data(&h2c->dbuf)) {
		flen = b_data(&h2c->dbuf);
		if (!flen)
			goto fail;
	}

	/* limit the copy to the room left in the HTX message */
	block = htx_free_data_space(htx);
	if (!block) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	if (flen > block)
		flen = block;

	/* here, flen is the max we can copy into the output buffer */
	block = b_contig_data(&h2c->dbuf, 0);
	if (flen > block)
		flen = block;

	sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
	TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);

	/* consume the input and account the transferred bytes on both the
	 * connection-level and stream-level flow control windows.
	 */
	b_del(&h2c->dbuf, sent);
	h2c->dfl -= sent;
	h2c->rcvd_c += sent;
	h2c->rcvd_s += sent; // warning, this can also affect the closed streams!

	if (h2s->flags & H2_SF_DATA_CLEN) {
		/* content-length is known: keep the remaining body length up
		 * to date so the HTX side knows how much is still expected.
		 */
		h2s->body_len -= sent;
		htx->extra = h2s->body_len;
	}

	/* partial copy means the rx buffer filled up; retry later */
	if (sent < flen) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}

	goto try_again;

 end_transfer:
	/* here we're done with the frame, all the payload (except padding) was
	 * transferred.
	 */

	if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
		/* no more data are expected for this message. This adds the EOM
		 * flag but only on the response path or if no tunnel attempt
		 * was aborted. Otherwise (request path + tunnel aborted), the
		 * EOM was already reported.
		 */
		if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
			/* If we receive an empty DATA frame with ES flag while the HTX
			 * message is empty, we must be sure to push a block to be sure
			 * the HTX EOM flag will be handled on the other side. It is a
			 * workaround because for now it is not possible to push empty
			 * HTX DATA block. And without this block, there is no way to
			 * "commit" the end of the message.
			 */
			if (htx_is_empty(htx)) {
				if (!htx_add_endof(htx, HTX_BLK_EOT))
					goto fail;
			}
			htx->flags |= HTX_FL_EOM;
		}
	}

	/* also credit the skipped padding back to both flow control windows */
	h2c->rcvd_c += h2c->dpl;
	h2c->rcvd_s += h2c->dpl;
	h2c->dpl = 0;
	h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
	htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;
 fail:
	if (htx)
		htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
5099
/* Try to send a HEADERS frame matching HTX response present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_snd_fhdrs(struct h2s *h2s, struct htx *htx)
{
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_sl *sl;
	enum htx_blk_type type;
	int es_now = 0;            /* set if END_STREAM must be put on this frame */
	int ret = 0;               /* number of HTX bytes consumed, 0 on blocking/error */
	int hdr;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);

	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);

		if (type == HTX_BLK_UNUSED)
			continue;

		if (type == HTX_BLK_EOH)
			break;

		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}

			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else if (type == HTX_BLK_RES_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			h2s->status = sl->info.res.status;
			if (h2s->status == 204 || h2s->status == 304)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (h2s->status < 100 || h2s->status > 999) {
				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			else if (h2s->status == 101) {
				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
					h2s->status = 200;
					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
				}
				else {
					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
					goto fail;
				}
			}
			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
				/* Abort the tunnel attempt */
				h2s->flags &= ~H2_SF_BODY_TUNNEL;
				h2s->flags |= H2_SF_TUNNEL_ABRT;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}

	/* The start-line must be defined */
	BUG_ON(!sl);

	/* marker for end of headers */
	list[hdr].n = ist("");

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}

	chunk_reset(&outbuf);

	/* need at least 9 contiguous bytes for the frame header; realign the
	 * buffer if wrapping prevents that.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9)
		goto full;

	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
		 * dynamic table size update so that some clients are not
		 * confused. In practice we only need to send the DTSU when the
		 * advertised size is lower than the current one, and since we
		 * don't use it and don't care about the default 4096 bytes,
		 * we only ack it with a zero size thus we at most have to deal
		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
		 * below for the whole context and interoperability risks:
		 * https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
		 */
		if (b_room(&outbuf) < 1)
			goto full;
		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes

		/* let's not update the flags now but only once the buffer is
		 * really committed.
		 */
	}

	/* encode status, which necessarily is the first one */
	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}

	if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED) {
		char sts[4];

		h2_trace_header(ist(":status"), ist(ultoa_r(h2s->status, sts, sizeof(sts))),
				H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__,
				h2c, h2s);
	}

	/* encode all headers, stop at empty name */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(list[hdr].n, ist("connection")) ||
		    isteq(list[hdr].n, ist("proxy-connection")) ||
		    isteq(list[hdr].n, ist("keep-alive")) ||
		    isteq(list[hdr].n, ist("upgrade")) ||
		    isteq(list[hdr].n, ist("transfer-encoding")))
			continue;

		/* Skip all pseudo-headers */
		if (*(list[hdr].n.ptr) == ':')
			continue;

		if (isteq(list[hdr].n, ist("")))
			break; // end

		if (!h2_encode_header(&outbuf, list[hdr].n, list[hdr].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
		                      ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);

	/* split into HEADERS + CONTINUATION frames if larger than the max frame size */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);

	/* remove all header blocks including the EOH and compute the
	 * corresponding size.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}

	if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
		/* Response already closed: add END_STREAM */
		es_now = 1;
	}
	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
		/* EOM+empty: we may need to add END_STREAM except for 1xx
		 * responses and tunneled response.
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
			es_now = 1;
	}

	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, outbuf.data);

	/* indicates the HEADERS frame was sent, except for 1xx responses. For
	 * 1xx responses, another HEADERS frame is expected.
	 */
	if (h2s->status >= 200)
		h2s->flags |= H2_SF_HEADERS_SENT;

	if (h2c->flags & H2_CF_SHTS_UPDATED) {
		/* was sent above */
		h2c->flags |= H2_CF_DTSU_EMITTED;
		h2c->flags &= ~H2_CF_SHTS_UPDATED;
	}

	if (es_now) {
		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);
	}

	/* OK we could properly deliver the response */
 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* current mux buffer is full: try the next one in the ring, else
	 * report the blocking condition to the caller.
	 */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5363
Willy Tarreau80739692018-10-05 11:35:57 +02005364/* Try to send a HEADERS frame matching HTX request present in HTX message
5365 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5366 * must check the stream's status to detect any error which might have happened
5367 * subsequently to a successful send. The htx blocks are automatically removed
5368 * from the message. The htx message is assumed to be valid since produced from
5369 * the internal code, hence it contains a start line, an optional series of
5370 * header blocks and an end of header, otherwise an invalid frame could be
5371 * emitted and the resulting htx message could be left in an inconsistent state.
5372 */
Willy Tarreau7cfbb812023-01-26 16:02:01 +01005373static size_t h2s_snd_bhdrs(struct h2s *h2s, struct htx *htx)
Willy Tarreau80739692018-10-05 11:35:57 +02005374{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005375 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau80739692018-10-05 11:35:57 +02005376 struct h2c *h2c = h2s->h2c;
5377 struct htx_blk *blk;
Willy Tarreau80739692018-10-05 11:35:57 +02005378 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005379 struct buffer *mbuf;
Willy Tarreau80739692018-10-05 11:35:57 +02005380 struct htx_sl *sl;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005381 struct ist meth, uri, auth, host = IST_NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005382 enum htx_blk_type type;
5383 int es_now = 0;
5384 int ret = 0;
5385 int hdr;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005386 int extended_connect = 0;
Willy Tarreau80739692018-10-05 11:35:57 +02005387
Willy Tarreau7838a792019-08-12 18:42:03 +02005388 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5389
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005390 /* get the start line (we do have one) and the rest of the headers,
5391 * that we dump starting at header 0 */
5392 sl = NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005393 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005394 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005395 type = htx_get_blk_type(blk);
5396
5397 if (type == HTX_BLK_UNUSED)
5398 continue;
5399
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005400 if (type == HTX_BLK_EOH)
Willy Tarreau80739692018-10-05 11:35:57 +02005401 break;
5402
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005403 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005404 BUG_ON(!sl); /* The start-line must be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005405 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5406 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5407 goto fail;
5408 }
Willy Tarreau80739692018-10-05 11:35:57 +02005409
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005410 list[hdr].n = htx_get_blk_name(htx, blk);
5411 list[hdr].v = htx_get_blk_value(htx, blk);
Christopher Faulet67d58092019-10-02 10:51:38 +02005412
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005413 /* Skip header if same name is used to add the server name */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005414 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
5415 isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005416 continue;
Christopher Faulet67d58092019-10-02 10:51:38 +02005417
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005418 /* Convert connection: upgrade to Extended connect from rfc 8441 */
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005419 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005420 /* rfc 7230 #6.1 Connection = list of tokens */
5421 struct ist connection_ist = list[hdr].v;
5422 do {
5423 if (isteqi(iststop(connection_ist, ','),
5424 ist("upgrade"))) {
Amaury Denoyelle0df04362021-10-18 09:43:29 +02005425 if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
5426 TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5427 goto fail;
5428 }
5429
Amaury Denoyellee0c258c2021-10-18 10:05:16 +02005430 TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005431 h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
5432 sl->info.req.meth = HTTP_METH_CONNECT;
5433 meth = ist("CONNECT");
5434
5435 extended_connect = 1;
5436 break;
5437 }
5438
5439 connection_ist = istadv(istfind(connection_ist, ','), 1);
5440 } while (istlen(connection_ist));
5441 }
5442
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005443 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005444 /* rfc 7230 #6.7 Upgrade = list of protocols
5445 * rfc 8441 #4 Extended connect = :protocol is single-valued
5446 *
5447 * only first HTTP/1 protocol is preserved
5448 */
5449 const struct ist protocol = iststop(list[hdr].v, ',');
5450 /* upgrade_protocol field is 16 bytes long in h2s */
5451 istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
5452 }
5453
5454 if (isteq(list[hdr].n, ist("host")))
5455 host = list[hdr].v;
5456
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005457 hdr++;
5458 }
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005459 else if (type == HTX_BLK_REQ_SL) {
5460 BUG_ON(sl); /* Only one start-line expected */
5461 sl = htx_get_blk_ptr(htx, blk);
5462 meth = htx_sl_req_meth(sl);
5463 uri = htx_sl_req_uri(sl);
5464 if (sl->info.req.meth == HTTP_METH_HEAD)
5465 h2s->flags |= H2_SF_BODYLESS_RESP;
5466 if (unlikely(uri.len == 0)) {
5467 TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5468 goto fail;
5469 }
5470 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005471 else {
5472 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5473 goto fail;
5474 }
Willy Tarreau80739692018-10-05 11:35:57 +02005475 }
5476
Christopher Faulet56498132021-01-29 11:39:43 +01005477 /* The start-line must be defined */
5478 BUG_ON(!sl);
5479
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005480 /* Now add the server name to a header (if requested) */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005481 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005482 struct server *srv = objt_server(h2c->conn->target);
5483
5484 if (srv) {
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005485 list[hdr].n = h2c->proxy->server_id_hdr_name;
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005486 list[hdr].v = ist(srv->id);
5487 hdr++;
5488 }
5489 }
5490
Willy Tarreau80739692018-10-05 11:35:57 +02005491 /* marker for end of headers */
5492 list[hdr].n = ist("");
5493
Willy Tarreau9c218e72019-05-26 10:08:28 +02005494 mbuf = br_tail(h2c->mbuf);
5495 retry:
5496 if (!h2_get_buf(h2c, mbuf)) {
5497 h2c->flags |= H2_CF_MUX_MALLOC;
5498 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005499 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005500 return 0;
5501 }
5502
Willy Tarreau80739692018-10-05 11:35:57 +02005503 chunk_reset(&outbuf);
5504
5505 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005506 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5507 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005508 break;
5509 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005510 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau80739692018-10-05 11:35:57 +02005511 }
5512
5513 if (outbuf.size < 9)
5514 goto full;
5515
5516 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5517 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5518 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5519 outbuf.data = 9;
5520
5521 /* encode the method, which necessarily is the first one */
Willy Tarreaubdabc3a2018-12-10 18:25:11 +01005522 if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005523 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005524 goto realign_again;
5525 goto full;
5526 }
5527
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005528 h2_trace_header(ist(":method"), meth, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
5529
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005530 auth = ist(NULL);
5531
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005532 /* RFC7540 #8.3: the CONNECT method must have :
5533 * - :authority set to the URI part (host:port)
5534 * - :method set to CONNECT
5535 * - :scheme and :path omitted
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005536 *
5537 * Note that this is not applicable in case of the Extended CONNECT
5538 * protocol from rfc 8441.
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005539 */
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005540 if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005541 auth = uri;
5542
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005543 if (!h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5544 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005545 /* output full */
5546 if (b_space_wraps(mbuf))
5547 goto realign_again;
5548 goto full;
5549 }
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005550
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005551 h2s->flags |= H2_SF_BODY_TUNNEL;
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005552 } else {
5553 /* other methods need a :scheme. If an authority is known from
5554 * the request line, it must be sent, otherwise only host is
5555 * sent. Host is never sent as the authority.
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005556 *
5557 * This code is also applicable for Extended CONNECT protocol
5558 * from rfc 8441.
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005559 */
5560 struct ist scheme = { };
Christopher Faulet3b44c542019-06-14 10:46:51 +02005561
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005562 if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
5563 /* the URI seems to start with a scheme */
5564 int len = 1;
5565
5566 while (len < uri.len && uri.ptr[len] != ':')
5567 len++;
5568
5569 if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
5570 /* make the uri start at the authority now */
Tim Duesterhus9f75ed12021-03-02 18:57:26 +01005571 scheme = ist2(uri.ptr, len);
Tim Duesterhus154374c2021-03-02 18:57:27 +01005572 uri = istadv(uri, len + 3);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005573
5574 /* find the auth part of the URI */
Tim Duesterhus92c696e2021-02-28 16:11:36 +01005575 auth = ist2(uri.ptr, 0);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005576 while (auth.len < uri.len && auth.ptr[auth.len] != '/')
5577 auth.len++;
5578
Tim Duesterhus154374c2021-03-02 18:57:27 +01005579 uri = istadv(uri, auth.len);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005580 }
5581 }
5582
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005583 /* For Extended CONNECT, the :authority must be present.
5584 * Use host value for it.
5585 */
5586 if (unlikely(extended_connect) && isttest(host))
5587 auth = host;
5588
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005589 if (!scheme.len) {
5590 /* no explicit scheme, we're using an origin-form URI,
5591 * probably from an H1 request transcoded to H2 via an
5592 * external layer, then received as H2 without authority.
5593 * So we have to look up the scheme from the HTX flags.
5594 * In such a case only http and https are possible, and
5595 * https is the default (sent by browsers).
5596 */
5597 if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
5598 scheme = ist("http");
5599 else
5600 scheme = ist("https");
5601 }
Christopher Faulet3b44c542019-06-14 10:46:51 +02005602
5603 if (!hpack_encode_scheme(&outbuf, scheme)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005604 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005605 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005606 goto realign_again;
5607 goto full;
5608 }
Willy Tarreau80739692018-10-05 11:35:57 +02005609
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005610 if (auth.len &&
5611 !h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5612 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005613 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005614 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005615 goto realign_again;
5616 goto full;
5617 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005618
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005619 /* encode the path. RFC7540#8.1.2.3: if path is empty it must
5620 * be sent as '/' or '*'.
5621 */
5622 if (unlikely(!uri.len)) {
5623 if (sl->info.req.meth == HTTP_METH_OPTIONS)
5624 uri = ist("*");
5625 else
5626 uri = ist("/");
Willy Tarreau053c1572019-02-01 16:13:59 +01005627 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005628
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005629 if (!hpack_encode_path(&outbuf, uri)) {
5630 /* output full */
5631 if (b_space_wraps(mbuf))
5632 goto realign_again;
5633 goto full;
5634 }
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005635
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005636 h2_trace_header(ist(":path"), uri, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
5637
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005638 /* encode the pseudo-header protocol from rfc8441 if using
5639 * Extended CONNECT method.
5640 */
5641 if (unlikely(extended_connect)) {
5642 const struct ist protocol = ist(h2s->upgrade_protocol);
5643 if (isttest(protocol)) {
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005644 if (!h2_encode_header(&outbuf, ist(":protocol"), protocol, H2_EV_TX_FRAME|H2_EV_TX_HDR,
5645 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005646 /* output full */
5647 if (b_space_wraps(mbuf))
5648 goto realign_again;
5649 goto full;
5650 }
5651 }
5652 }
Willy Tarreau80739692018-10-05 11:35:57 +02005653 }
5654
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005655 /* encode all headers, stop at empty name. Host is only sent if we
5656 * do not provide an authority.
5657 */
Willy Tarreau80739692018-10-05 11:35:57 +02005658 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005659 struct ist n = list[hdr].n;
5660 struct ist v = list[hdr].v;
5661
Willy Tarreau80739692018-10-05 11:35:57 +02005662 /* these ones do not exist in H2 and must be dropped. */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005663 if (isteq(n, ist("connection")) ||
5664 (auth.len && isteq(n, ist("host"))) ||
5665 isteq(n, ist("proxy-connection")) ||
5666 isteq(n, ist("keep-alive")) ||
5667 isteq(n, ist("upgrade")) ||
5668 isteq(n, ist("transfer-encoding")))
Willy Tarreau80739692018-10-05 11:35:57 +02005669 continue;
5670
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005671 if (isteq(n, ist("te"))) {
5672 /* "te" may only be sent with "trailers" if this value
5673 * is present, otherwise it must be deleted.
5674 */
5675 v = istist(v, ist("trailers"));
Tim Duesterhus7b5777d2021-03-02 18:57:28 +01005676 if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005677 continue;
5678 v = ist("trailers");
5679 }
5680
Christopher Faulet86d144c2019-08-14 16:32:25 +02005681 /* Skip all pseudo-headers */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005682 if (*(n.ptr) == ':')
Christopher Faulet86d144c2019-08-14 16:32:25 +02005683 continue;
5684
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005685 if (isteq(n, ist("")))
Willy Tarreau80739692018-10-05 11:35:57 +02005686 break; // end
5687
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01005688 if (!h2_encode_header(&outbuf, n, v, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005689 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005690 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005691 goto realign_again;
5692 goto full;
5693 }
5694 }
5695
Willy Tarreaucb985a42019-10-07 16:56:34 +02005696 /* update the frame's size */
5697 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5698
5699 if (outbuf.data > h2c->mfs + 9) {
5700 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5701 /* output full */
5702 if (b_space_wraps(mbuf))
5703 goto realign_again;
5704 goto full;
5705 }
5706 }
5707
Willy Tarreau3a537072021-06-17 08:40:04 +02005708 TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5709
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005710 /* remove all header blocks including the EOH and compute the
5711 * corresponding size.
Willy Tarreau80739692018-10-05 11:35:57 +02005712 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005713 ret = 0;
5714 blk = htx_get_head_blk(htx);
5715 while (blk) {
5716 type = htx_get_blk_type(blk);
5717 ret += htx_get_blksz(blk);
5718 blk = htx_remove_blk(htx, blk);
5719 /* The removed block is the EOH */
5720 if (type == HTX_BLK_EOH)
5721 break;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005722 }
Willy Tarreau80739692018-10-05 11:35:57 +02005723
Willy Tarreau95acc8b2022-05-27 16:14:10 +02005724 if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005725 /* Request already closed: add END_STREAM */
Willy Tarreau80739692018-10-05 11:35:57 +02005726 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005727 }
5728 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
5729 /* EOM+empty: we may need to add END_STREAM (except for CONNECT
5730 * request)
5731 */
5732 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5733 es_now = 1;
5734 }
Willy Tarreau80739692018-10-05 11:35:57 +02005735
Willy Tarreau80739692018-10-05 11:35:57 +02005736 if (es_now)
5737 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5738
5739 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005740 b_add(mbuf, outbuf.data);
Willy Tarreau80739692018-10-05 11:35:57 +02005741 h2s->flags |= H2_SF_HEADERS_SENT;
5742 h2s->st = H2_SS_OPEN;
5743
Willy Tarreau80739692018-10-05 11:35:57 +02005744 if (es_now) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005745 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02005746 // trim any possibly pending data (eg: inconsistent content-length)
5747 h2s->flags |= H2_SF_ES_SENT;
5748 h2s->st = H2_SS_HLOC;
5749 }
5750
Willy Tarreau80739692018-10-05 11:35:57 +02005751 end:
5752 return ret;
5753 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005754 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5755 goto retry;
Willy Tarreau80739692018-10-05 11:35:57 +02005756 h2c->flags |= H2_CF_MUX_MFULL;
5757 h2s->flags |= H2_SF_BLK_MROOM;
5758 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005759 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005760 goto end;
5761 fail:
5762 /* unparsable HTX messages, too large ones to be produced in the local
5763 * list etc go here (unrecoverable errors).
5764 */
5765 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5766 ret = 0;
5767 goto end;
5768}
5769
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005770/* Try to send a DATA frame matching HTTP response present in HTX structure
Willy Tarreau98de12a2018-12-12 07:03:00 +01005771 * present in <buf>, for stream <h2s>. Returns the number of bytes sent. The
5772 * caller must check the stream's status to detect any error which might have
5773 * happened subsequently to a successful send. Returns the number of data bytes
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005774 * consumed, or zero if nothing done.
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005775 */
Christopher Faulet142854b2020-12-02 15:12:40 +01005776static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005777{
5778 struct h2c *h2c = h2s->h2c;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005779 struct htx *htx;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005780 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005781 struct buffer *mbuf;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005782 size_t total = 0;
5783 int es_now = 0;
5784 int bsize; /* htx block size */
5785 int fsize; /* h2 frame size */
5786 struct htx_blk *blk;
5787 enum htx_blk_type type;
Willy Tarreauc7ce4e32020-01-14 11:42:59 +01005788 int trunc_out; /* non-zero if truncated on out buf */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005789
Willy Tarreau7838a792019-08-12 18:42:03 +02005790 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
5791
Willy Tarreau98de12a2018-12-12 07:03:00 +01005792 htx = htx_from_buf(buf);
5793
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005794 /* We only come here with HTX_BLK_DATA blocks */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005795
5796 new_frame:
Willy Tarreauee573762018-12-04 15:25:57 +01005797 if (!count || htx_is_empty(htx))
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005798 goto end;
5799
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005800 if ((h2c->flags & H2_CF_IS_BACK) &&
Christopher Fauletf95f8762021-01-22 11:59:07 +01005801 (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
5802 /* The response HEADERS frame not received yet. Thus the tunnel
5803 * is not fully established yet. In this situation, we block
5804 * data sending.
5805 */
5806 h2s->flags |= H2_SF_BLK_MBUSY;
5807 TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
5808 goto end;
5809 }
Christopher Faulet91b21dc2021-01-22 12:13:15 +01005810 else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
5811 /* a tunnel attempt was aborted but the is pending raw data to xfer to the server.
5812 * Thus the stream is closed with the CANCEL error. The error will be reported to
5813 * the upper layer as aserver abort. But at this stage there is nothing more we can
5814 * do. We just wait for the end of the response to be sure to not truncate it.
5815 */
5816 if (!(h2s->flags & H2_SF_ES_RCVD)) {
5817 TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
5818 h2s->flags |= H2_SF_BLK_MBUSY;
5819 }
5820 else {
5821 TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
5822 h2s_error(h2s, H2_ERR_CANCEL);
5823 }
5824 goto end;
5825 }
Willy Tarreau98de12a2018-12-12 07:03:00 +01005826
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005827 blk = htx_get_head_blk(htx);
5828 type = htx_get_blk_type(blk);
5829 bsize = htx_get_blksz(blk);
5830 fsize = bsize;
5831 trunc_out = 0;
5832 if (type != HTX_BLK_DATA)
5833 goto end;
5834
Willy Tarreau9c218e72019-05-26 10:08:28 +02005835 mbuf = br_tail(h2c->mbuf);
5836 retry:
5837 if (!h2_get_buf(h2c, mbuf)) {
5838 h2c->flags |= H2_CF_MUX_MALLOC;
5839 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005840 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005841 goto end;
5842 }
5843
Willy Tarreau98de12a2018-12-12 07:03:00 +01005844 /* Perform some optimizations to reduce the number of buffer copies.
5845 * First, if the mux's buffer is empty and the htx area contains
5846 * exactly one data block of the same size as the requested count, and
5847 * this count fits within the frame size, the stream's window size, and
5848 * the connection's window size, then it's possible to simply swap the
5849 * caller's buffer with the mux's output buffer and adjust offsets and
5850 * length to match the entire DATA HTX block in the middle. In this
5851 * case we perform a true zero-copy operation from end-to-end. This is
5852 * the situation that happens all the time with large files. Second, if
5853 * this is not possible, but the mux's output buffer is empty, we still
5854 * have an opportunity to avoid the copy to the intermediary buffer, by
5855 * making the intermediary buffer's area point to the output buffer's
5856 * area. In this case we want to skip the HTX header to make sure that
5857 * copies remain aligned and that this operation remains possible all
5858 * the time. This goes for headers, data blocks and any data extracted
5859 * from the HTX blocks.
5860 */
5861 if (unlikely(fsize == count &&
Christopher Faulet192c6a22019-06-11 16:32:24 +02005862 htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005863 fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005864 void *old_area = mbuf->area;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005865
Willy Tarreaubcc45952019-05-26 10:05:50 +02005866 if (b_data(mbuf)) {
Willy Tarreau8ab128c2019-03-21 17:47:28 +01005867 /* Too bad there are data left there. We're willing to memcpy/memmove
5868 * up to 1/4 of the buffer, which means that it's OK to copy a large
5869 * frame into a buffer containing few data if it needs to be realigned,
5870 * and that it's also OK to copy few data without realigning. Otherwise
5871 * we'll pretend the mbuf is full and wait for it to become empty.
Willy Tarreau98de12a2018-12-12 07:03:00 +01005872 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005873 if (fsize + 9 <= b_room(mbuf) &&
5874 (b_data(mbuf) <= b_size(mbuf) / 4 ||
Willy Tarreau7838a792019-08-12 18:42:03 +02005875 (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
5876 TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005877 goto copy;
Willy Tarreau7838a792019-08-12 18:42:03 +02005878 }
Willy Tarreau8ab128c2019-03-21 17:47:28 +01005879
Willy Tarreau9c218e72019-05-26 10:08:28 +02005880 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5881 goto retry;
5882
Willy Tarreau98de12a2018-12-12 07:03:00 +01005883 h2c->flags |= H2_CF_MUX_MFULL;
5884 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005885 TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005886 goto end;
5887 }
5888
Christopher Faulet925abdf2021-04-27 22:51:07 +02005889 if (htx->flags & HTX_FL_EOM) {
5890 /* EOM+empty: we may need to add END_STREAM (except for tunneled
5891 * message)
5892 */
5893 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5894 es_now = 1;
5895 }
Willy Tarreau98de12a2018-12-12 07:03:00 +01005896 /* map an H2 frame to the HTX block so that we can put the
5897 * frame header there.
5898 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005899 *mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
5900 outbuf.area = b_head(mbuf);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005901
5902 /* prepend an H2 DATA frame header just before the DATA block */
5903 memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
5904 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
Christopher Faulet925abdf2021-04-27 22:51:07 +02005905 if (es_now)
5906 outbuf.area[4] |= H2_F_DATA_END_STREAM;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005907 h2_set_frame_size(outbuf.area, fsize);
5908
5909 /* update windows */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005910 h2s->sws -= fsize;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005911 h2c->mws -= fsize;
5912
5913 /* and exchange with our old area */
5914 buf->area = old_area;
5915 buf->data = buf->head = 0;
5916 total += fsize;
Christopher Faulet925abdf2021-04-27 22:51:07 +02005917 fsize = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005918
5919 TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Christopher Faulet925abdf2021-04-27 22:51:07 +02005920 goto out;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005921 }
Willy Tarreau2fb1d4c2018-12-04 15:28:03 +01005922
Willy Tarreau98de12a2018-12-12 07:03:00 +01005923 copy:
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005924 /* for DATA and EOM we'll have to emit a frame, even if empty */
5925
5926 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005927 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5928 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005929 break;
5930 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005931 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005932 }
5933
5934 if (outbuf.size < 9) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02005935 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5936 goto retry;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005937 h2c->flags |= H2_CF_MUX_MFULL;
5938 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005939 TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005940 goto end;
5941 }
5942
5943 /* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
5944 memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
5945 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5946 outbuf.data = 9;
5947
5948 /* we have in <fsize> the exact number of bytes we need to copy from
5949 * the HTX buffer. We need to check this against the connection's and
5950 * the stream's send windows, and to ensure that this fits in the max
5951 * frame size and in the buffer's available space minus 9 bytes (for
5952 * the frame header). The connection's flow control is applied last so
5953 * that we can use a separate list of streams which are immediately
5954 * unblocked on window opening. Note: we don't implement padding.
5955 */
5956
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005957 if (!fsize)
5958 goto send_empty;
5959
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005960 if (h2s_mws(h2s) <= 0) {
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005961 h2s->flags |= H2_SF_BLK_SFCTL;
Willy Tarreau2b718102021-04-21 07:32:39 +02005962 if (LIST_INLIST(&h2s->list))
Olivier Houchardbfe2a832019-05-10 14:02:21 +02005963 LIST_DEL_INIT(&h2s->list);
Willy Tarreau2b718102021-04-21 07:32:39 +02005964 LIST_APPEND(&h2c->blocked_list, &h2s->list);
Willy Tarreau7838a792019-08-12 18:42:03 +02005965 TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005966 goto end;
5967 }
5968
Willy Tarreauee573762018-12-04 15:25:57 +01005969 if (fsize > count)
5970 fsize = count;
5971
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005972 if (fsize > h2s_mws(h2s))
5973 fsize = h2s_mws(h2s); // >0
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005974
5975 if (h2c->mfs && fsize > h2c->mfs)
5976 fsize = h2c->mfs; // >0
5977
5978 if (fsize + 9 > outbuf.size) {
Willy Tarreau455d5682019-05-24 19:42:18 +02005979 /* It doesn't fit at once. If it at least fits once split and
5980 * the amount of data to move is low, let's defragment the
5981 * buffer now.
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005982 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005983 if (b_space_wraps(mbuf) &&
5984 (fsize + 9 <= b_room(mbuf)) &&
5985 b_data(mbuf) <= MAX_DATA_REALIGN)
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005986 goto realign_again;
5987 fsize = outbuf.size - 9;
Willy Tarreauc7ce4e32020-01-14 11:42:59 +01005988 trunc_out = 1;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005989
5990 if (fsize <= 0) {
5991 /* no need to send an empty frame here */
Willy Tarreau9c218e72019-05-26 10:08:28 +02005992 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5993 goto retry;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005994 h2c->flags |= H2_CF_MUX_MFULL;
5995 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005996 TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005997 goto end;
5998 }
5999 }
6000
6001 if (h2c->mws <= 0) {
6002 h2s->flags |= H2_SF_BLK_MFCTL;
Willy Tarreau7838a792019-08-12 18:42:03 +02006003 TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006004 goto end;
6005 }
6006
6007 if (fsize > h2c->mws)
6008 fsize = h2c->mws;
6009
6010 /* now let's copy this this into the output buffer */
6011 memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02006012 h2s->sws -= fsize;
Willy Tarreau0f799ca2018-12-04 15:20:11 +01006013 h2c->mws -= fsize;
Willy Tarreauee573762018-12-04 15:25:57 +01006014 count -= fsize;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006015
6016 send_empty:
6017 /* update the frame's size */
6018 h2_set_frame_size(outbuf.area, fsize);
6019
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006020 /* consume incoming HTX block */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006021 total += fsize;
6022 if (fsize == bsize) {
6023 htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006024 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
6025 /* EOM+empty: we may need to add END_STREAM (except for tunneled
6026 * message)
6027 */
6028 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
6029 es_now = 1;
Willy Tarreau7838a792019-08-12 18:42:03 +02006030 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006031 }
6032 else {
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006033 /* we've truncated this block */
6034 htx_cut_data_blk(htx, blk, fsize);
6035 }
6036
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006037 if (es_now)
6038 outbuf.area[4] |= H2_F_DATA_END_STREAM;
6039
6040 /* commit the H2 response */
6041 b_add(mbuf, fsize + 9);
6042
Christopher Faulet925abdf2021-04-27 22:51:07 +02006043 out:
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006044 if (es_now) {
6045 if (h2s->st == H2_SS_OPEN)
6046 h2s->st = H2_SS_HLOC;
6047 else
6048 h2s_close(h2s);
6049
6050 h2s->flags |= H2_SF_ES_SENT;
Willy Tarreau7838a792019-08-12 18:42:03 +02006051 TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006052 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006053 else if (fsize) {
6054 if (fsize == bsize) {
6055 TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6056 goto new_frame;
6057 }
6058 else if (trunc_out) {
6059 /* we've truncated this block */
6060 goto new_frame;
6061 }
6062 }
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006063
6064 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006065 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006066 return total;
6067}
6068
Christopher Faulet991febd2020-12-02 15:17:31 +01006069/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
6070 * ES flag set for stream <h2s>. This function is called for response known to
6071 * have no payload. Only DATA blocks are skipped. This means the trailers are
Ilya Shipitsinacf84592021-02-06 22:29:08 +05006072 * still emitted. The caller must check the stream's status to detect any error
Christopher Faulet991febd2020-12-02 15:17:31 +01006073 * which might have happened subsequently to a successful send. Returns the
6074 * number of data bytes consumed, or zero if nothing done.
6075 */
6076static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
6077{
6078 struct h2c *h2c = h2s->h2c;
6079 struct htx *htx;
6080 int bsize; /* htx block size */
6081 int fsize; /* h2 frame size */
6082 struct htx_blk *blk;
6083 enum htx_blk_type type;
6084 size_t total = 0;
6085
6086 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6087
Christopher Faulet991febd2020-12-02 15:17:31 +01006088 htx = htx_from_buf(buf);
6089
6090 next_data:
6091 if (!count || htx_is_empty(htx))
6092 goto end;
6093 blk = htx_get_head_blk(htx);
6094 type = htx_get_blk_type(blk);
6095 bsize = htx_get_blksz(blk);
6096 fsize = bsize;
6097 if (type != HTX_BLK_DATA)
6098 goto end;
6099
6100 if (fsize > count)
6101 fsize = count;
6102
6103 if (fsize != bsize)
6104 goto skip_data;
6105
6106 if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
6107 goto skip_data;
6108
6109 /* Here, it is the last block and it is also the end of the message. So
6110 * we can emit an empty DATA frame with the ES flag set
6111 */
6112 if (h2_send_empty_data_es(h2s) <= 0)
6113 goto end;
6114
6115 if (h2s->st == H2_SS_OPEN)
6116 h2s->st = H2_SS_HLOC;
6117 else
6118 h2s_close(h2s);
6119
6120 skip_data:
6121 /* consume incoming HTX block */
6122 total += fsize;
6123 if (fsize == bsize) {
6124 TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6125 htx_remove_blk(htx, blk);
6126 goto next_data;
6127 }
6128 else {
6129 /* we've truncated this block */
6130 htx_cut_data_blk(htx, blk, fsize);
6131 }
6132
6133 end:
6134 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6135 return total;
6136}
6137
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006138/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
6139 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
6140 * processed. The caller must check the stream's status to detect any error
6141 * which might have happened subsequently to a successful send. The htx blocks
6142 * are automatically removed from the message. The htx message is assumed to be
6143 * valid since produced from the internal code. Processing stops when meeting
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006144 * the EOT, which *is* removed. All trailers are processed at once and sent as a
6145 * single frame. The ES flag is always set.
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006146 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006147static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006148{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02006149 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006150 struct h2c *h2c = h2s->h2c;
6151 struct htx_blk *blk;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006152 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02006153 struct buffer *mbuf;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006154 enum htx_blk_type type;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006155 int ret = 0;
6156 int hdr;
6157 int idx;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006158
Willy Tarreau7838a792019-08-12 18:42:03 +02006159 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
6160
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006161 /* get trailers. */
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006162 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006163 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006164 type = htx_get_blk_type(blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006165
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006166 if (type == HTX_BLK_UNUSED)
6167 continue;
6168
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006169 if (type == HTX_BLK_EOT)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006170 break;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006171 if (type == HTX_BLK_TLR) {
6172 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
6173 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
6174 goto fail;
6175 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006176
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006177 list[hdr].n = htx_get_blk_name(htx, blk);
6178 list[hdr].v = htx_get_blk_value(htx, blk);
6179 hdr++;
6180 }
6181 else {
6182 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006183 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02006184 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006185 }
6186
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006187 /* marker for end of trailers */
6188 list[hdr].n = ist("");
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006189
Willy Tarreau9c218e72019-05-26 10:08:28 +02006190 mbuf = br_tail(h2c->mbuf);
6191 retry:
6192 if (!h2_get_buf(h2c, mbuf)) {
6193 h2c->flags |= H2_CF_MUX_MALLOC;
6194 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006195 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02006196 goto end;
6197 }
6198
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006199 chunk_reset(&outbuf);
6200
6201 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02006202 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
6203 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006204 break;
6205 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02006206 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006207 }
6208
6209 if (outbuf.size < 9)
6210 goto full;
6211
6212 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
6213 memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
6214 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
6215 outbuf.data = 9;
6216
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006217 /* encode all headers */
6218 for (idx = 0; idx < hdr; idx++) {
6219 /* these ones do not exist in H2 or must not appear in
6220 * trailers and must be dropped.
6221 */
6222 if (isteq(list[idx].n, ist("host")) ||
6223 isteq(list[idx].n, ist("content-length")) ||
6224 isteq(list[idx].n, ist("connection")) ||
6225 isteq(list[idx].n, ist("proxy-connection")) ||
6226 isteq(list[idx].n, ist("keep-alive")) ||
6227 isteq(list[idx].n, ist("upgrade")) ||
6228 isteq(list[idx].n, ist("te")) ||
6229 isteq(list[idx].n, ist("transfer-encoding")))
6230 continue;
6231
Christopher Faulet86d144c2019-08-14 16:32:25 +02006232 /* Skip all pseudo-headers */
6233 if (*(list[idx].n.ptr) == ':')
6234 continue;
6235
Willy Tarreau11e8a8c2023-01-24 19:43:11 +01006236 if (!h2_encode_header(&outbuf, list[idx].n, list[idx].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
6237 ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006238 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006239 if (b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006240 goto realign_again;
6241 goto full;
6242 }
6243 }
6244
Willy Tarreau5121e5d2019-05-06 15:13:41 +02006245 if (outbuf.data == 9) {
6246 /* here we have a problem, we have nothing to emit (either we
6247 * received an empty trailers block followed or we removed its
6248 * contents above). Because of this we can't send a HEADERS
6249 * frame, so we have to cheat and instead send an empty DATA
6250 * frame conveying the ES flag.
Willy Tarreau67b8cae2019-02-21 18:16:35 +01006251 */
6252 outbuf.area[3] = H2_FT_DATA;
6253 outbuf.area[4] = H2_F_DATA_END_STREAM;
6254 }
6255
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006256 /* update the frame's size */
6257 h2_set_frame_size(outbuf.area, outbuf.data - 9);
6258
Willy Tarreau572d9f52019-10-11 16:58:37 +02006259 if (outbuf.data > h2c->mfs + 9) {
6260 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
6261 /* output full */
6262 if (b_space_wraps(mbuf))
6263 goto realign_again;
6264 goto full;
6265 }
6266 }
6267
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006268 /* commit the H2 response */
Willy Tarreau7838a792019-08-12 18:42:03 +02006269 TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006270 b_add(mbuf, outbuf.data);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006271 h2s->flags |= H2_SF_ES_SENT;
6272
6273 if (h2s->st == H2_SS_OPEN)
6274 h2s->st = H2_SS_HLOC;
6275 else
6276 h2s_close(h2s);
6277
6278 /* OK we could properly deliver the response */
6279 done:
Willy Tarreaufb07b3f2019-05-06 11:23:29 +02006280 /* remove all header blocks till the end and compute the corresponding size. */
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006281 ret = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006282 blk = htx_get_head_blk(htx);
6283 while (blk) {
6284 type = htx_get_blk_type(blk);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006285 ret += htx_get_blksz(blk);
6286 blk = htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006287 /* The removed block is the EOT */
6288 if (type == HTX_BLK_EOT)
6289 break;
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006290 }
6291
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006292 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006293 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006294 return ret;
6295 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02006296 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
6297 goto retry;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006298 h2c->flags |= H2_CF_MUX_MFULL;
6299 h2s->flags |= H2_SF_BLK_MROOM;
6300 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006301 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006302 goto end;
6303 fail:
6304 /* unparsable HTX messages, too large ones to be produced in the local
6305 * list etc go here (unrecoverable errors).
6306 */
6307 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
6308 ret = 0;
6309 goto end;
6310}
6311
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006312/* Called from the upper layer, to subscribe <es> to events <event_type>. The
6313 * event subscriber <es> is not allowed to change from a previous call as long
6314 * as at least one event is still subscribed. The <event_type> must only be a
6315 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006316 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006317static int h2_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006318{
Willy Tarreau36c22322022-05-27 10:41:24 +02006319 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006320 struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006321
Willy Tarreau7838a792019-08-12 18:42:03 +02006322 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006323
6324 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006325 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006326
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006327 es->events |= event_type;
6328 h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006329
6330 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006331 TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006332
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006333 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006334 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
Olivier Houchardf8338152019-05-14 17:50:32 +02006335 if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006336 !LIST_INLIST(&h2s->list)) {
Olivier Houchardf8338152019-05-14 17:50:32 +02006337 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02006338 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Olivier Houchardf8338152019-05-14 17:50:32 +02006339 else
Willy Tarreau2b718102021-04-21 07:32:39 +02006340 LIST_APPEND(&h2c->send_list, &h2s->list);
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006341 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02006342 }
Willy Tarreau7838a792019-08-12 18:42:03 +02006343 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006344 return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006345}
6346
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006347/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6348 * The <es> pointer is not allowed to differ from the one passed to the
6349 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006350 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006351static int h2_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006352{
Willy Tarreau36c22322022-05-27 10:41:24 +02006353 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006354
Willy Tarreau7838a792019-08-12 18:42:03 +02006355 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006356
6357 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006358 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006359
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006360 es->events &= ~event_type;
6361 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006362 h2s->subs = NULL;
6363
6364 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006365 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006366
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006367 if (event_type & SUB_RETRY_SEND) {
Frédéric Lécaille67fda162022-06-30 12:01:54 +02006368 TRACE_DEVEL("unsubscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006369 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006370 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
6371 LIST_DEL_INIT(&h2s->list);
Olivier Houchardd846c262018-10-19 17:24:29 +02006372 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006373
Willy Tarreau7838a792019-08-12 18:42:03 +02006374 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006375 return 0;
6376}
6377
6378
/* Called from the upper layer, to receive data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 *
 * Returns the number of bytes effectively moved from the stream's rxbuf
 * into <buf>.
 */
static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __sc_mux_strm(sc);
	struct h2c *h2c = h2s->h2c;
	struct htx *h2s_htx = NULL;
	struct htx *buf_htx = NULL;
	size_t ret = 0;

	TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);

	/* transfer possibly pending data to the upper layer */
	h2s_htx = htx_from_buf(&h2s->rxbuf);
	if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
		/* Here htx_to_buf() will set buffer data to 0 because
		 * the HTX is empty.
		 */
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		goto end;
	}

	/* <ret> starts as the total amount present; the amount left in the
	 * rxbuf after the transfer is subtracted back at the bottom.
	 */
	ret = h2s_htx->data;
	buf_htx = htx_from_buf(buf);

	/* <buf> is empty and the message is small enough, swap the
	 * buffers. */
	if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
		htx_to_buf(buf_htx, buf);
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
		goto end;
	}

	/* partial path: move as many blocks as <count> allows */
	htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);

	if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
		/* propagate the parsing error; only report EOI once everything
		 * readable was consumed.
		 */
		buf_htx->flags |= HTX_FL_PARSING_ERROR;
		if (htx_is_empty(buf_htx))
			se_fl_set(h2s->sd, SE_FL_EOI);
	}
	else if (htx_is_empty(h2s_htx)) {
		/* everything was transferred: forward the end-of-message flag */
		buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);

		if (!(h2c->flags & H2_CF_IS_BACK) && (buf_htx->flags & HTX_FL_EOM)) {
			/* If request EOM is reported to the upper layer, it means the
			 * H2S now expects data from the opposite side.
			 */
			se_expect_data(h2s->sd);
		}
	}

	/* rebase <extra> on what was actually moved into <buf> */
	buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
	htx_to_buf(buf_htx, buf);
	htx_to_buf(h2s_htx, &h2s->rxbuf);
	ret -= h2s_htx->data;

  end:
	if (b_data(&h2s->rxbuf))
		se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
	else {
		/* rxbuf fully drained: update EOI/EOS/error indications */
		se_fl_clr(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		if (h2s->flags & H2_SF_ES_RCVD) {
			se_fl_set(h2s->sd, SE_FL_EOI);
			/* Add EOS flag for tunnel */
			if (h2s->flags & H2_SF_BODY_TUNNEL)
				se_fl_set(h2s->sd, SE_FL_EOS);
		}
		if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
			se_fl_set(h2s->sd, SE_FL_EOS);
		if (se_fl_test(h2s->sd, SE_FL_ERR_PENDING))
			se_fl_set(h2s->sd, SE_FL_ERROR);
		if (b_size(&h2s->rxbuf)) {
			/* release the now-empty rx buffer and wake up any
			 * waiter blocked on buffer allocation.
			 */
			b_free(&h2s->rxbuf);
			offer_buffers(NULL, 1);
		}
	}

	if (ret && h2c->dsi == h2s->id) {
		/* demux is blocking on this stream's buffer */
		h2c->flags &= ~H2_CF_DEM_SFULL;
		h2c_restart_reading(h2c, 1);
	}

	TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
	return ret;
}
6476
Olivier Houchardd846c262018-10-19 17:24:29 +02006477
/* Called from the upper layer, to send data from buffer <buf> for no more than
 * <count> bytes. Returns the number of bytes effectively sent. Some status
 * flags may be updated on the stream connector.
 */
static size_t h2_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __sc_mux_strm(sc);
	size_t total = 0;
	size_t ret;
	struct htx *htx;
	struct htx_blk *blk;
	enum htx_blk_type btype;
	uint32_t bsize;
	int32_t idx;

	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);

	/* If we were not just woken because we wanted to send but couldn't,
	 * and there's somebody else that is waiting to send, do nothing,
	 * we will subscribe later and be put at the end of the list
	 */
	if (!(h2s->flags & H2_SF_NOTIFIED) &&
	    (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
		TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}
	h2s->flags &= ~H2_SF_NOTIFIED;

	/* connection not usable yet (NOTE(review): st0 < H2_CS_FRAME_H
	 * presumably means preface/settings not completed — confirm).
	 */
	if (h2s->h2c->st0 < H2_CS_FRAME_H) {
		TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}

	if (h2s->h2c->st0 >= H2_CS_ERROR) {
		se_fl_set(h2s->sd, SE_FL_ERROR);
		TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		return 0;
	}

	htx = htx_from_buf(buf);

	if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
		h2s->flags |= H2_SF_OUTGOING_DATA;

	/* track whether more HTX payload is announced beyond this buffer */
	if (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
		h2s->flags |= H2_SF_MORE_HTX_DATA;
	else
		h2s->flags &= ~H2_SF_MORE_HTX_DATA;

	if (h2s->id == 0) {
		/* reserved stream about to send for the first time: allocate
		 * its stream ID now and move it to its definitive place in
		 * the streams tree.
		 */
		int32_t id = h2c_get_next_sid(h2s->h2c);

		if (id < 0) {
			se_fl_set(h2s->sd, SE_FL_ERROR);
			TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
			return 0;
		}

		eb32_delete(&h2s->by_id);
		h2s->by_id.key = h2s->id = id;
		h2s->h2c->max_id = id;
		h2s->h2c->nb_reserved--;
		eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
	}

	/* consume HTX blocks one by one until the stream is half-closed
	 * locally, blocked, or the budget/input is exhausted. Each handler
	 * returning less than the block size means a partial send (-> done).
	 */
	while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
	       count && !htx_is_empty(htx)) {
		idx = htx_get_head(htx);
		blk = htx_get_blk(htx, idx);
		btype = htx_get_blk_type(blk);
		bsize = htx_get_blksz(blk);

		switch (btype) {
		case HTX_BLK_REQ_SL:
			/* start-line before headers */
			ret = h2s_snd_bhdrs(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_RES_SL:
			/* start-line before headers */
			ret = h2s_snd_fhdrs(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_DATA:
			/* all these cause the emission of a DATA frame (possibly empty) */
			if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
			    (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
				ret = h2s_skip_data(h2s, buf, count);
			else
				ret = h2s_make_data(h2s, buf, count);
			if (ret > 0) {
				/* h2s_make_data()/h2s_skip_data() may have
				 * realigned <buf>, re-fetch the HTX pointer.
				 */
				htx = htx_from_buf(buf);
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_TLR:
		case HTX_BLK_EOT:
			/* This is the first trailers block, all the subsequent ones */
			ret = h2s_make_trailers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		default:
			/* unexpected block types are dropped but still
			 * accounted for as consumed input.
			 */
			htx_remove_blk(htx, blk);
			total += bsize;
			count -= bsize;
			break;
		}
	}

  done:
	if (h2s->st >= H2_SS_HLOC) {
		/* trim any possibly pending data after we close (extra CR-LF,
		 * unprocessed trailers, abnormal extra data, ...)
		 */
		total += count;
		count = 0;
	}

	/* RST are sent similarly to frame acks */
	if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
		TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		se_fl_set_error(h2s->sd);
		if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
			h2s_close(h2s);
	}

	htx_to_buf(htx, buf);

	if (total > 0) {
		/* something was queued into the mux buffer, make sure the
		 * connection-level sender runs unless it already waits on a
		 * send event.
		 */
		if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
			TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
			tasklet_wakeup(h2s->h2c->wait_event.tasklet);
		}

	}
	/* If we're waiting for flow control, and we got a shutr on the
	 * connection, we will never be unlocked, so add an error on
	 * the stream connector.
	 */
	if ((h2s->h2c->flags & H2_CF_RCVD_SHUT) &&
	    !b_data(&h2s->h2c->dbuf) &&
	    (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
		TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		se_fl_set_error(h2s->sd);
	}

	if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
	    !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* Ok we managed to send something, leave the send_list if we were still there */
		LIST_DEL_INIT(&h2s->list);
	}

	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
	return total;
}
6655
Willy Tarreau90bffa22022-09-01 19:06:44 +02006656/* appends some info about stream <h2s> to buffer <msg>, or does nothing if
Willy Tarreau7051f732022-09-02 15:22:12 +02006657 * <h2s> is NULL. Returns non-zero if the stream is considered suspicious. May
6658 * emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is not
6659 * NULL, otherwise a single line is used.
Willy Tarreau90bffa22022-09-01 19:06:44 +02006660 */
Willy Tarreau7051f732022-09-02 15:22:12 +02006661static int h2_dump_h2s_info(struct buffer *msg, const struct h2s *h2s, const char *pfx)
Willy Tarreau90bffa22022-09-01 19:06:44 +02006662{
6663 int ret = 0;
6664
6665 if (!h2s)
6666 return ret;
6667
Willy Tarreau7051f732022-09-02 15:22:12 +02006668 chunk_appendf(msg, " h2s.id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u",
Willy Tarreau90bffa22022-09-01 19:06:44 +02006669 h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
6670 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
Willy Tarreau7051f732022-09-02 15:22:12 +02006671 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf));
6672
6673 if (pfx)
6674 chunk_appendf(msg, "\n%s", pfx);
6675
6676 chunk_appendf(msg, " .sc=%p", h2s_sc(h2s));
Willy Tarreau90bffa22022-09-01 19:06:44 +02006677 if (h2s_sc(h2s))
6678 chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
6679 h2s_sc(h2s)->flags, h2s_sc(h2s)->app);
6680
Willy Tarreau7051f732022-09-02 15:22:12 +02006681 chunk_appendf(msg, " .sd=%p", h2s->sd);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006682 chunk_appendf(msg, "(.flg=0x%08x)", se_fl_get(h2s->sd));
6683
Willy Tarreau7051f732022-09-02 15:22:12 +02006684 if (pfx)
6685 chunk_appendf(msg, "\n%s", pfx);
6686
Willy Tarreau90bffa22022-09-01 19:06:44 +02006687 chunk_appendf(msg, " .subs=%p", h2s->subs);
6688 if (h2s->subs) {
6689 chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6690 chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
6691 h2s->subs->tasklet->calls,
6692 h2s->subs->tasklet->context);
6693 if (h2s->subs->tasklet->calls >= 1000000)
6694 ret = 1;
6695 resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
6696 chunk_appendf(msg, ")");
6697 }
6698 return ret;
6699}
6700
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006701/* appends some info about connection <h2c> to buffer <msg>, or does nothing if
6702 * <h2c> is NULL. Returns non-zero if the connection is considered suspicious.
Willy Tarreau7051f732022-09-02 15:22:12 +02006703 * May emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is
6704 * not NULL, otherwise a single line is used.
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006705 */
Willy Tarreau7051f732022-09-02 15:22:12 +02006706static int h2_dump_h2c_info(struct buffer *msg, struct h2c *h2c, const char *pfx)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006707{
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006708 const struct buffer *hmbuf, *tmbuf;
6709 const struct h2s *h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006710 struct eb32_node *node;
6711 int fctl_cnt = 0;
6712 int send_cnt = 0;
6713 int tree_cnt = 0;
6714 int orph_cnt = 0;
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006715 int ret = 0;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006716
6717 if (!h2c)
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006718 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006719
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006720 list_for_each_entry(h2s, &h2c->fctl_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006721 fctl_cnt++;
6722
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006723 list_for_each_entry(h2s, &h2c->send_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006724 send_cnt++;
6725
6726 node = eb32_first(&h2c->streams_by_id);
6727 while (node) {
6728 h2s = container_of(node, struct h2s, by_id);
6729 tree_cnt++;
Willy Tarreau7be4ee02022-05-18 07:31:41 +02006730 if (!h2s_sc(h2s))
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006731 orph_cnt++;
6732 node = eb32_next(node);
6733 }
6734
Willy Tarreau60f62682019-05-26 11:32:27 +02006735 hmbuf = br_head(h2c->mbuf);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006736 tmbuf = br_tail(h2c->mbuf);
Willy Tarreauab2ec452019-08-30 07:07:08 +02006737 chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
Willy Tarreau7051f732022-09-02 15:22:12 +02006738 " .nbst=%u .nbsc=%u",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006739 h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
Willy Tarreau7051f732022-09-02 15:22:12 +02006740 h2c->nb_streams, h2c->nb_sc);
6741
6742 if (pfx)
6743 chunk_appendf(msg, "\n%s", pfx);
6744
6745 chunk_appendf(msg, " .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
6746 " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u",
6747 fctl_cnt, send_cnt, tree_cnt, orph_cnt,
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006748 h2c->wait_event.events, h2c->dsi,
Willy Tarreau987c0632018-12-18 10:32:05 +01006749 (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
Willy Tarreau7051f732022-09-02 15:22:12 +02006750 (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf));
6751
6752 if (pfx)
6753 chunk_appendf(msg, "\n%s", pfx);
6754
Christopher Faulet68ee7842022-10-12 10:21:33 +02006755 chunk_appendf(msg, " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
Willy Tarreau60f62682019-05-26 11:32:27 +02006756 br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
6757 (unsigned int)b_data(hmbuf), b_orig(hmbuf),
6758 (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
Willy Tarreaubcc45952019-05-26 10:05:50 +02006759 (unsigned int)b_data(tmbuf), b_orig(tmbuf),
6760 (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
Willy Tarreau987c0632018-12-18 10:32:05 +01006761
Willy Tarreauf8c77092022-11-29 15:26:43 +01006762 chunk_appendf(msg, " .task=%p", h2c->task);
6763 if (h2c->task) {
6764 chunk_appendf(msg, " .exp=%s",
6765 h2c->task->expire ? tick_is_expired(h2c->task->expire, now_ms) ? "<PAST>" :
6766 human_time(TICKS_TO_MS(h2c->task->expire - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
6767 }
Willy Tarreau7051f732022-09-02 15:22:12 +02006768
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006769 return ret;
6770}
6771
6772/* for debugging with CLI's "show fd" command */
6773static int h2_show_fd(struct buffer *msg, struct connection *conn)
6774{
6775 struct h2c *h2c = conn->ctx;
6776 const struct h2s *h2s;
6777 struct eb32_node *node;
6778 int ret = 0;
6779
6780 if (!h2c)
6781 return ret;
6782
Willy Tarreau7051f732022-09-02 15:22:12 +02006783 ret |= h2_dump_h2c_info(msg, h2c, NULL);
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006784
6785 node = eb32_last(&h2c->streams_by_id);
6786 if (node) {
6787 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006788 chunk_appendf(msg, " last_h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006789 ret |= h2_dump_h2s_info(msg, h2s, NULL);
Willy Tarreau987c0632018-12-18 10:32:05 +01006790 }
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006791
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006792 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006793}
Willy Tarreau62f52692017-10-08 23:01:42 +02006794
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006795/* for debugging with CLI's "show sess" command. May emit multiple lines, each
6796 * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
6797 * line is used. Each field starts with a space so it's safe to print it after
6798 * existing fields.
6799 */
6800static int h2_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
6801{
6802 struct h2s *h2s = sd->se;
6803 int ret = 0;
6804
6805 if (!h2s)
6806 return ret;
6807
6808 chunk_appendf(msg, " h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006809 ret |= h2_dump_h2s_info(msg, h2s, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006810 if (pfx)
6811 chunk_appendf(msg, "\n%s", pfx);
6812 chunk_appendf(msg, " h2c=%p", h2s->h2c);
Willy Tarreau7051f732022-09-02 15:22:12 +02006813 ret |= h2_dump_h2c_info(msg, h2s->h2c, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006814 return ret;
6815}
6816
/* Migrate the connection to the current thread.
 * Return 0 if successful, non-zero otherwise.
 * Expected to be called with the old thread lock held.
 */
static int h2_takeover(struct connection *conn, int orig_tid)
{
	struct h2c *h2c = conn->ctx;
	struct task *task;

	/* first take ownership of the fd; nothing else may be migrated if
	 * this fails.
	 */
	if (fd_takeover(conn->handle.fd, conn) != 0)
		return -1;

	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
		/* We failed to takeover the xprt, even if the connection may
		 * still be valid, flag it as error'd, as we have already
		 * taken over the fd, and wake the tasklet, so that it will
		 * destroy it.
		 */
		conn->flags |= CO_FL_ERROR;
		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
		return -1;
	}

	/* drop any pending subscription registered on the old thread */
	if (h2c->wait_event.events)
		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
					     h2c->wait_event.events, &h2c->wait_event);
	/* To let the tasklet know it should free itself, and do nothing else,
	 * set its context to NULL.
	 */
	h2c->wait_event.tasklet->context = NULL;
	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);

	task = h2c->task;
	if (task) {
		/* detach the old timeout task and kill it; the store barrier
		 * makes sure the NULL context is visible before the task may
		 * run one last time on the original thread.
		 */
		task->context = NULL;
		h2c->task = NULL;
		__ha_barrier_store();
		task_kill(task);

		/* allocate a replacement timeout task bound to this thread */
		h2c->task = task_new_here();
		if (!h2c->task) {
			h2_release(h2c);
			return -1;
		}
		h2c->task->process = h2_timeout_task;
		h2c->task->context = h2c;
	}
	/* allocate a fresh I/O tasklet for this thread and re-subscribe for
	 * receive events on the transport layer.
	 */
	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet) {
		h2_release(h2c);
		return -1;
	}
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
		                   SUB_RETRY_RECV, &h2c->wait_event);

	return 0;
}
6876
Willy Tarreau62f52692017-10-08 23:01:42 +02006877/*******************************************************/
6878/* functions below are dedicated to the config parsers */
6879/*******************************************************/
6880
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006881/* config parser for global "tune.h2.header-table-size" */
6882static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006883 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006884 char **err)
6885{
6886 if (too_many_args(1, args, err, NULL))
6887 return -1;
6888
6889 h2_settings_header_table_size = atoi(args[1]);
6890 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
6891 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
6892 return -1;
6893 }
6894 return 0;
6895}
Willy Tarreau62f52692017-10-08 23:01:42 +02006896
Willy Tarreaue6baec02017-07-27 11:45:11 +02006897/* config parser for global "tune.h2.initial-window-size" */
6898static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006899 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02006900 char **err)
6901{
6902 if (too_many_args(1, args, err, NULL))
6903 return -1;
6904
6905 h2_settings_initial_window_size = atoi(args[1]);
6906 if (h2_settings_initial_window_size < 0) {
6907 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6908 return -1;
6909 }
6910 return 0;
6911}
6912
Willy Tarreau5242ef82017-07-27 11:47:28 +02006913/* config parser for global "tune.h2.max-concurrent-streams" */
6914static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006915 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02006916 char **err)
6917{
6918 if (too_many_args(1, args, err, NULL))
6919 return -1;
6920
6921 h2_settings_max_concurrent_streams = atoi(args[1]);
Willy Tarreau5a490b62019-01-31 10:39:51 +01006922 if ((int)h2_settings_max_concurrent_streams < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02006923 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6924 return -1;
6925 }
6926 return 0;
6927}
6928
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006929/* config parser for global "tune.h2.max-frame-size" */
6930static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006931 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006932 char **err)
6933{
6934 if (too_many_args(1, args, err, NULL))
6935 return -1;
6936
6937 h2_settings_max_frame_size = atoi(args[1]);
6938 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
6939 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
6940 return -1;
6941 }
6942 return 0;
6943}
6944
Willy Tarreau62f52692017-10-08 23:01:42 +02006945
6946/****************************************/
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05006947/* MUX initialization and instantiation */
Willy Tarreau62f52692017-10-08 23:01:42 +02006948/***************************************/
6949
/* The mux operations: callback table exposed to the connection layer for
 * the HTTP/2 multiplexer (registered below via mux_proto_h2).
 */
static const struct mux_ops h2_ops = {
	/* lifecycle */
	.init = h2_init,
	.wake = h2_wake,
	/* data transfer */
	.snd_buf = h2_snd_buf,
	.rcv_buf = h2_rcv_buf,
	/* event subscription */
	.subscribe = h2_subscribe,
	.unsubscribe = h2_unsubscribe,
	/* stream attachment / teardown */
	.attach = h2_attach,
	.get_first_sc = h2_get_first_sc,
	.detach = h2_detach,
	.destroy = h2_destroy,
	/* stream accounting (connection reuse) */
	.avail_streams = h2_avail_streams,
	.used_streams = h2_used_streams,
	/* shutdown and control */
	.shutr = h2_shutr,
	.shutw = h2_shutw,
	.ctl = h2_ctl,
	/* debugging helpers ("show fd" / "show sess" output) */
	.show_fd = h2_show_fd,
	.show_sd = h2_show_sd,
	/* inter-thread idle connection migration */
	.takeover = h2_takeover,
	.flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
	.name = "H2",
};
6973
Christopher Faulet32f61c02018-04-10 14:33:41 +02006974static struct mux_proto_list mux_proto_h2 =
Christopher Fauletc985f6c2019-07-15 11:42:52 +02006975 { .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };
Willy Tarreau62f52692017-10-08 23:01:42 +02006976
Willy Tarreau0108d902018-11-25 19:14:37 +01006977INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
6978
Willy Tarreau62f52692017-10-08 23:01:42 +02006979/* config keyword parsers */
6980static struct cfg_kw_list cfg_kws = {ILH, {
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006981 { CFG_GLOBAL, "tune.h2.header-table-size", h2_parse_header_table_size },
Willy Tarreaue6baec02017-07-27 11:45:11 +02006982 { CFG_GLOBAL, "tune.h2.initial-window-size", h2_parse_initial_window_size },
Willy Tarreau5242ef82017-07-27 11:47:28 +02006983 { CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006984 { CFG_GLOBAL, "tune.h2.max-frame-size", h2_parse_max_frame_size },
Willy Tarreau62f52692017-10-08 23:01:42 +02006985 { 0, NULL, NULL }
6986}};
6987
Willy Tarreau0108d902018-11-25 19:14:37 +01006988INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006989
6990/* initialize internal structs after the config is parsed.
6991 * Returns zero on success, non-zero on error.
6992 */
6993static int init_h2()
6994{
6995 pool_head_hpack_tbl = create_pool("hpack_tbl",
6996 h2_settings_header_table_size,
6997 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01006998 if (!pool_head_hpack_tbl) {
6999 ha_alert("failed to allocate hpack_tbl memory pool\n");
7000 return (ERR_ALERT | ERR_FATAL);
7001 }
7002 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02007003}
7004
7005REGISTER_POST_CHECK(init_h2);