blob: 16ed74305b765a531fe53afb32afefdbce85e944 [file] [log] [blame]
Willy Tarreau62f52692017-10-08 23:01:42 +02001/*
2 * HTTP/2 mux-demux for connections
3 *
4 * Copyright 2017 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau63617db2021-10-06 18:23:40 +020014#include <import/ebmbtree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020015#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020017#include <haproxy/connection.h>
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +020018#include <haproxy/dynbuf.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020019#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020020#include <haproxy/hpack-dec.h>
21#include <haproxy/hpack-enc.h>
22#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020023#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020024#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020025#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020026#include <haproxy/log.h>
Willy Tarreau6c0fadf2022-09-12 19:07:51 +020027#include <haproxy/mux_h2-t.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020028#include <haproxy/net_helper.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020029#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010030#include <haproxy/stats.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020031#include <haproxy/stconn.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020032#include <haproxy/stream.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020033#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020034
35
Willy Tarreauecb9dcd2019-01-03 12:00:17 +010036/* dummy streams returned for closed, error, refused, idle and states */
Willy Tarreau2a856182017-05-16 15:20:39 +020037static const struct h2s *h2_closed_stream;
Willy Tarreauecb9dcd2019-01-03 12:00:17 +010038static const struct h2s *h2_error_stream;
Willy Tarreau8d0d58b2018-12-23 18:29:12 +010039static const struct h2s *h2_refused_stream;
Willy Tarreau2a856182017-05-16 15:20:39 +020040static const struct h2s *h2_idle_stream;
41
Willy Tarreau5ab6b572017-09-22 08:05:00 +020042
Willy Tarreau6c0fadf2022-09-12 19:07:51 +020043/**** H2 connection descriptor ****/
Willy Tarreau5ab6b572017-09-22 08:05:00 +020044struct h2c {
45 struct connection *conn;
46
47 enum h2_cs st0; /* mux state */
48 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
49
50 /* 16 bit hole here */
51 uint32_t flags; /* connection flags: H2_CF_* */
Willy Tarreau2e2083a2019-01-31 10:34:07 +010052 uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020053 int32_t max_id; /* highest ID known on this connection, <0 before preface */
54 uint32_t rcvd_c; /* newly received data to ACK for the connection */
Willy Tarreau617592c2022-06-08 16:32:22 +020055 uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) or zero */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020056
57 /* states for the demux direction */
58 struct hpack_dht *ddht; /* demux dynamic header table */
Willy Tarreauc9fa0482018-07-10 17:43:27 +020059 struct buffer dbuf; /* demux buffer */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020060
61 int32_t dsi; /* demux stream ID (<0 = idle) */
62 int32_t dfl; /* demux frame length (if dsi >= 0) */
63 int8_t dft; /* demux frame type (if dsi >= 0) */
64 int8_t dff; /* demux frame flags (if dsi >= 0) */
Willy Tarreau05e5daf2017-12-11 15:17:36 +010065 uint8_t dpl; /* demux pad length (part of dfl), init to 0 */
66 /* 8 bit hole here */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020067 int32_t last_sid; /* last processed stream ID for GOAWAY, <0 before preface */
68
69 /* states for the mux direction */
Willy Tarreau51330962019-05-26 09:38:07 +020070 struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020071 int32_t miw; /* mux initial window size for all new streams */
72 int32_t mws; /* mux window size. Can be negative. */
73 int32_t mfs; /* mux's max frame size */
74
Willy Tarreauea392822017-10-31 10:02:25 +010075 int timeout; /* idle timeout duration in ticks */
Willy Tarreau599391a2017-11-24 10:16:00 +010076 int shut_timeout; /* idle timeout duration in ticks after GOAWAY was sent */
Willy Tarreau15a47332022-03-18 15:57:34 +010077 int idle_start; /* date of the last time the connection went idle */
78 /* 32-bit hole here */
Willy Tarreau49745612017-12-03 18:56:02 +010079 unsigned int nb_streams; /* number of streams in the tree */
Willy Tarreau36c22322022-05-27 10:41:24 +020080 unsigned int nb_sc; /* number of attached stream connectors */
Willy Tarreaud64a3eb2019-01-23 10:22:21 +010081 unsigned int nb_reserved; /* number of reserved streams */
Willy Tarreaue9634bd2019-01-23 10:25:10 +010082 unsigned int stream_cnt; /* total number of streams seen */
Willy Tarreau0b37d652018-10-03 10:33:02 +020083 struct proxy *proxy; /* the proxy this connection was created for */
Willy Tarreauea392822017-10-31 10:02:25 +010084 struct task *task; /* timeout management task */
Amaury Denoyellec92697d2020-10-27 17:16:01 +010085 struct h2_counters *px_counters; /* h2 counters attached to proxy */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020086 struct eb_root streams_by_id; /* all active streams by their ID */
87 struct list send_list; /* list of blocked streams requesting to send */
88 struct list fctl_list; /* list of streams blocked by connection's fctl */
Willy Tarreau9edf6db2019-10-02 10:49:59 +020089 struct list blocked_list; /* list of streams blocked for other reasons (e.g. sfctl, dep) */
Willy Tarreau44e973f2018-03-01 17:49:30 +010090 struct buffer_wait buf_wait; /* wait list for buffer allocations */
Olivier Houchardfa8aa862018-10-10 18:25:41 +020091 struct wait_event wait_event; /* To be used if we're waiting for I/Os */
Willy Tarreau5ab6b572017-09-22 08:05:00 +020092};
93
Willy Tarreau2c249eb2019-05-13 18:06:17 +020094
Willy Tarreau18312642017-10-11 07:57:07 +020095/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
Christopher Fauletfafd1b02020-11-03 18:25:52 +010096 * it is being processed in the internal HTTP representation (HTX).
Willy Tarreau18312642017-10-11 07:57:07 +020097 */
98struct h2s {
Willy Tarreau95acc8b2022-05-27 16:14:10 +020099 struct sedesc *sd;
Olivier Houchardf502aca2018-12-14 19:42:40 +0100100 struct session *sess;
Willy Tarreau18312642017-10-11 07:57:07 +0200101 struct h2c *h2c;
Willy Tarreau18312642017-10-11 07:57:07 +0200102 struct eb32_node by_id; /* place in h2c's streams_by_id */
Willy Tarreau18312642017-10-11 07:57:07 +0200103 int32_t id; /* stream ID */
104 uint32_t flags; /* H2_SF_* */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +0200105 int sws; /* stream window size, to be added to the mux's initial window size */
Willy Tarreau18312642017-10-11 07:57:07 +0200106 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
107 enum h2_ss st;
Willy Tarreau9c5e22e2018-09-11 19:22:14 +0200108 uint16_t status; /* HTTP response status */
Willy Tarreau1915ca22019-01-24 11:49:37 +0100109 unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
Olivier Houchard638b7992018-08-16 15:41:52 +0200110 struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
Willy Tarreau4596fe22022-05-17 19:07:51 +0200111 struct wait_event *subs; /* recv wait_event the stream connector associated is waiting on (via h2_subscribe) */
Olivier Houchardfa8aa862018-10-10 18:25:41 +0200112 struct list list; /* To be used when adding in h2c->send_list or h2c->fctl_lsit */
Willy Tarreau5723f292020-01-10 15:16:57 +0100113 struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to send an RST after we failed to,
114 * in case there's no other subscription to do it */
Amaury Denoyelle74162742020-12-11 17:53:05 +0100115
116 char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
Willy Tarreau18312642017-10-11 07:57:07 +0200117};
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200118
Willy Tarreauc6405142017-09-21 20:23:50 +0200119/* descriptor for an h2 frame header */
120struct h2_fh {
121 uint32_t len; /* length, host order, 24 bits */
122 uint32_t sid; /* stream id, host order, 31 bits */
123 uint8_t ft; /* frame type */
124 uint8_t ff; /* frame flags */
125};
126
Willy Tarreau12ae2122019-08-08 18:23:12 +0200127/* trace source and events */
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200128static void h2_trace(enum trace_level level, uint64_t mask, \
129 const struct trace_source *src,
130 const struct ist where, const struct ist func,
131 const void *a1, const void *a2, const void *a3, const void *a4);
Willy Tarreau12ae2122019-08-08 18:23:12 +0200132
133/* The event representation is split like this :
134 * strm - application layer
135 * h2s - internal H2 stream
136 * h2c - internal H2 connection
137 * conn - external connection
138 *
139 */
140static const struct trace_event h2_trace_events[] = {
141#define H2_EV_H2C_NEW (1ULL << 0)
Willy Tarreau87951942019-08-30 07:34:36 +0200142 { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200143#define H2_EV_H2C_RECV (1ULL << 1)
Willy Tarreau87951942019-08-30 07:34:36 +0200144 { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200145#define H2_EV_H2C_SEND (1ULL << 2)
Willy Tarreau87951942019-08-30 07:34:36 +0200146 { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200147#define H2_EV_H2C_FCTL (1ULL << 3)
Willy Tarreau87951942019-08-30 07:34:36 +0200148 { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200149#define H2_EV_H2C_BLK (1ULL << 4)
Willy Tarreau87951942019-08-30 07:34:36 +0200150 { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200151#define H2_EV_H2C_WAKE (1ULL << 5)
Willy Tarreau87951942019-08-30 07:34:36 +0200152 { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200153#define H2_EV_H2C_END (1ULL << 6)
Willy Tarreau87951942019-08-30 07:34:36 +0200154 { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200155#define H2_EV_H2C_ERR (1ULL << 7)
Willy Tarreau87951942019-08-30 07:34:36 +0200156 { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200157#define H2_EV_RX_FHDR (1ULL << 8)
Willy Tarreau87951942019-08-30 07:34:36 +0200158 { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200159#define H2_EV_RX_FRAME (1ULL << 9)
Willy Tarreau87951942019-08-30 07:34:36 +0200160 { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200161#define H2_EV_RX_EOI (1ULL << 10)
Willy Tarreau87951942019-08-30 07:34:36 +0200162 { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200163#define H2_EV_RX_PREFACE (1ULL << 11)
Willy Tarreau87951942019-08-30 07:34:36 +0200164 { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200165#define H2_EV_RX_DATA (1ULL << 12)
Willy Tarreau87951942019-08-30 07:34:36 +0200166 { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200167#define H2_EV_RX_HDR (1ULL << 13)
Willy Tarreau87951942019-08-30 07:34:36 +0200168 { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200169#define H2_EV_RX_PRIO (1ULL << 14)
Willy Tarreau87951942019-08-30 07:34:36 +0200170 { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200171#define H2_EV_RX_RST (1ULL << 15)
Willy Tarreau87951942019-08-30 07:34:36 +0200172 { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200173#define H2_EV_RX_SETTINGS (1ULL << 16)
Willy Tarreau87951942019-08-30 07:34:36 +0200174 { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200175#define H2_EV_RX_PUSH (1ULL << 17)
Willy Tarreau87951942019-08-30 07:34:36 +0200176 { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200177#define H2_EV_RX_PING (1ULL << 18)
Willy Tarreau87951942019-08-30 07:34:36 +0200178 { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200179#define H2_EV_RX_GOAWAY (1ULL << 19)
Willy Tarreau87951942019-08-30 07:34:36 +0200180 { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200181#define H2_EV_RX_WU (1ULL << 20)
Willy Tarreau87951942019-08-30 07:34:36 +0200182 { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200183#define H2_EV_RX_CONT (1ULL << 21)
Willy Tarreau87951942019-08-30 07:34:36 +0200184 { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200185#define H2_EV_TX_FRAME (1ULL << 22)
Willy Tarreau87951942019-08-30 07:34:36 +0200186 { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200187#define H2_EV_TX_EOI (1ULL << 23)
Willy Tarreau87951942019-08-30 07:34:36 +0200188 { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200189#define H2_EV_TX_PREFACE (1ULL << 24)
Willy Tarreau87951942019-08-30 07:34:36 +0200190 { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200191#define H2_EV_TX_DATA (1ULL << 25)
Willy Tarreau87951942019-08-30 07:34:36 +0200192 { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200193#define H2_EV_TX_HDR (1ULL << 26)
Willy Tarreau87951942019-08-30 07:34:36 +0200194 { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200195#define H2_EV_TX_PRIO (1ULL << 27)
Willy Tarreau87951942019-08-30 07:34:36 +0200196 { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200197#define H2_EV_TX_RST (1ULL << 28)
Willy Tarreau87951942019-08-30 07:34:36 +0200198 { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200199#define H2_EV_TX_SETTINGS (1ULL << 29)
Willy Tarreau87951942019-08-30 07:34:36 +0200200 { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200201#define H2_EV_TX_PUSH (1ULL << 30)
Willy Tarreau87951942019-08-30 07:34:36 +0200202 { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200203#define H2_EV_TX_PING (1ULL << 31)
Willy Tarreau87951942019-08-30 07:34:36 +0200204 { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200205#define H2_EV_TX_GOAWAY (1ULL << 32)
Willy Tarreau87951942019-08-30 07:34:36 +0200206 { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200207#define H2_EV_TX_WU (1ULL << 33)
Willy Tarreau87951942019-08-30 07:34:36 +0200208 { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200209#define H2_EV_TX_CONT (1ULL << 34)
Willy Tarreau87951942019-08-30 07:34:36 +0200210 { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200211#define H2_EV_H2S_NEW (1ULL << 35)
Willy Tarreau87951942019-08-30 07:34:36 +0200212 { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200213#define H2_EV_H2S_RECV (1ULL << 36)
Willy Tarreau87951942019-08-30 07:34:36 +0200214 { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200215#define H2_EV_H2S_SEND (1ULL << 37)
Willy Tarreau87951942019-08-30 07:34:36 +0200216 { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200217#define H2_EV_H2S_FCTL (1ULL << 38)
Willy Tarreau87951942019-08-30 07:34:36 +0200218 { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200219#define H2_EV_H2S_BLK (1ULL << 39)
Willy Tarreau87951942019-08-30 07:34:36 +0200220 { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200221#define H2_EV_H2S_WAKE (1ULL << 40)
Willy Tarreau87951942019-08-30 07:34:36 +0200222 { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200223#define H2_EV_H2S_END (1ULL << 41)
Willy Tarreau87951942019-08-30 07:34:36 +0200224 { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200225#define H2_EV_H2S_ERR (1ULL << 42)
Willy Tarreau87951942019-08-30 07:34:36 +0200226 { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200227#define H2_EV_STRM_NEW (1ULL << 43)
Willy Tarreau87951942019-08-30 07:34:36 +0200228 { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200229#define H2_EV_STRM_RECV (1ULL << 44)
Willy Tarreau87951942019-08-30 07:34:36 +0200230 { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200231#define H2_EV_STRM_SEND (1ULL << 45)
Willy Tarreau87951942019-08-30 07:34:36 +0200232 { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200233#define H2_EV_STRM_FULL (1ULL << 46)
Willy Tarreau87951942019-08-30 07:34:36 +0200234 { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200235#define H2_EV_STRM_WAKE (1ULL << 47)
Willy Tarreau87951942019-08-30 07:34:36 +0200236 { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200237#define H2_EV_STRM_SHUT (1ULL << 48)
Willy Tarreau87951942019-08-30 07:34:36 +0200238 { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200239#define H2_EV_STRM_END (1ULL << 49)
Willy Tarreau87951942019-08-30 07:34:36 +0200240 { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200241#define H2_EV_STRM_ERR (1ULL << 50)
Willy Tarreau87951942019-08-30 07:34:36 +0200242 { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200243#define H2_EV_PROTO_ERR (1ULL << 51)
Willy Tarreau87951942019-08-30 07:34:36 +0200244 { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200245 { }
246};
247
248static const struct name_desc h2_trace_lockon_args[4] = {
249 /* arg1 */ { /* already used by the connection */ },
250 /* arg2 */ { .name="h2s", .desc="H2 stream" },
251 /* arg3 */ { },
252 /* arg4 */ { }
253};
254
255static const struct name_desc h2_trace_decoding[] = {
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200256#define H2_VERB_CLEAN 1
257 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
258#define H2_VERB_MINIMAL 2
Willy Tarreau12ae2122019-08-08 18:23:12 +0200259 { .name="minimal", .desc="report only h2c/h2s state and flags, no real decoding" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200260#define H2_VERB_SIMPLE 3
Willy Tarreau12ae2122019-08-08 18:23:12 +0200261 { .name="simple", .desc="add request/response status line or frame info when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200262#define H2_VERB_ADVANCED 4
Willy Tarreau12ae2122019-08-08 18:23:12 +0200263 { .name="advanced", .desc="add header fields or frame decoding when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200264#define H2_VERB_COMPLETE 5
Willy Tarreau12ae2122019-08-08 18:23:12 +0200265 { .name="complete", .desc="add full data dump when available" },
266 { /* end */ }
267};
268
Willy Tarreau6eb3d372021-04-10 19:29:26 +0200269static struct trace_source trace_h2 __read_mostly = {
Willy Tarreau12ae2122019-08-08 18:23:12 +0200270 .name = IST("h2"),
271 .desc = "HTTP/2 multiplexer",
272 .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200273 .default_cb = h2_trace,
Willy Tarreau12ae2122019-08-08 18:23:12 +0200274 .known_events = h2_trace_events,
275 .lockon_args = h2_trace_lockon_args,
276 .decoding = h2_trace_decoding,
277 .report_events = ~0, // report everything by default
278};
279
280#define TRACE_SOURCE &trace_h2
281INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
282
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100283/* h2 stats module */
284enum {
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100285 H2_ST_HEADERS_RCVD,
286 H2_ST_DATA_RCVD,
287 H2_ST_SETTINGS_RCVD,
288 H2_ST_RST_STREAM_RCVD,
289 H2_ST_GOAWAY_RCVD,
290
Amaury Denoyellea8879232020-10-27 17:16:03 +0100291 H2_ST_CONN_PROTO_ERR,
292 H2_ST_STRM_PROTO_ERR,
293 H2_ST_RST_STREAM_RESP,
294 H2_ST_GOAWAY_RESP,
295
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100296 H2_ST_OPEN_CONN,
297 H2_ST_OPEN_STREAM,
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100298 H2_ST_TOTAL_CONN,
299 H2_ST_TOTAL_STREAM,
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100300
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100301 H2_STATS_COUNT /* must be the last member of the enum */
302};
303
304static struct name_desc h2_stats[] = {
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100305 [H2_ST_HEADERS_RCVD] = { .name = "h2_headers_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100306 .desc = "Total number of received HEADERS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100307 [H2_ST_DATA_RCVD] = { .name = "h2_data_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100308 .desc = "Total number of received DATA frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100309 [H2_ST_SETTINGS_RCVD] = { .name = "h2_settings_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100310 .desc = "Total number of received SETTINGS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100311 [H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100312 .desc = "Total number of received RST_STREAM frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100313 [H2_ST_GOAWAY_RCVD] = { .name = "h2_goaway_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100314 .desc = "Total number of received GOAWAY frames" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100315
316 [H2_ST_CONN_PROTO_ERR] = { .name = "h2_detected_conn_protocol_errors",
317 .desc = "Total number of connection protocol errors" },
318 [H2_ST_STRM_PROTO_ERR] = { .name = "h2_detected_strm_protocol_errors",
319 .desc = "Total number of stream protocol errors" },
320 [H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100321 .desc = "Total number of RST_STREAM sent on detected error" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100322 [H2_ST_GOAWAY_RESP] = { .name = "h2_goaway_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100323 .desc = "Total number of GOAWAY sent on detected error" },
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100324
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100325 [H2_ST_OPEN_CONN] = { .name = "h2_open_connections",
326 .desc = "Count of currently open connections" },
327 [H2_ST_OPEN_STREAM] = { .name = "h2_backend_open_streams",
328 .desc = "Count of currently open streams" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100329 [H2_ST_TOTAL_CONN] = { .name = "h2_total_connections",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100330 .desc = "Total number of connections" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100331 [H2_ST_TOTAL_STREAM] = { .name = "h2_backend_total_streams",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100332 .desc = "Total number of streams" },
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100333};
334
335static struct h2_counters {
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100336 long long headers_rcvd; /* total number of HEADERS frame received */
337 long long data_rcvd; /* total number of DATA frame received */
338 long long settings_rcvd; /* total number of SETTINGS frame received */
339 long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
340 long long goaway_rcvd; /* total number of GOAWAY frame received */
Amaury Denoyellea8879232020-10-27 17:16:03 +0100341
342 long long conn_proto_err; /* total number of protocol errors detected */
343 long long strm_proto_err; /* total number of protocol errors detected */
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100344 long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
345 long long goaway_resp; /* total number of GOAWAY frame sent on error */
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100346
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100347 long long open_conns; /* count of currently open connections */
348 long long open_streams; /* count of currently open streams */
349 long long total_conns; /* total number of connections */
350 long long total_streams; /* total number of streams */
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100351} h2_counters;
352
353static void h2_fill_stats(void *data, struct field *stats)
354{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100355 struct h2_counters *counters = data;
356
357 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
358 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
359 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
360 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
361 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100362
363 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
364 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
365 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
366 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100367
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100368 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
369 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
370 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
371 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100372}
373
374static struct stats_module h2_stats_module = {
375 .name = "h2",
376 .fill_stats = h2_fill_stats,
377 .stats = h2_stats,
378 .stats_count = H2_STATS_COUNT,
379 .counters = &h2_counters,
380 .counters_size = sizeof(h2_counters),
381 .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
382 .clearable = 1,
383};
384
385INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
386
Willy Tarreau8ceae722018-11-26 11:58:30 +0100387/* the h2c connection pool */
388DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));
389
390/* the h2s stream pool */
391DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));
392
Willy Tarreaudc572362018-12-12 08:08:05 +0100393/* The default connection window size is 65535, it may only be enlarged using
394 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
395 * we'll pretend we already received the difference between the two to send
396 * an equivalent window update to enlarge it to 2G-1.
397 */
398#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)
399
Willy Tarreau455d5682019-05-24 19:42:18 +0200400/* maximum amount of data we're OK with re-aligning for buffer optimizations */
401#define MAX_DATA_REALIGN 1024
402
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200403/* a few settings from the global section */
404static int h2_settings_header_table_size = 4096; /* initial value */
Glenn Strauss0012f892022-06-04 22:11:50 -0400405static int h2_settings_initial_window_size = 65536; /* initial value */
Willy Tarreau5a490b62019-01-31 10:39:51 +0100406static unsigned int h2_settings_max_concurrent_streams = 100;
Willy Tarreaua24b35c2019-02-21 13:24:36 +0100407static int h2_settings_max_frame_size = 0; /* unset */
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200408
Willy Tarreaub22b5f02022-05-10 14:57:16 +0200409/* a dummy closed endpoint */
Willy Tarreauea59b022022-05-17 17:53:22 +0200410static const struct sedesc closed_ep = {
Willy Tarreauc1054922022-05-18 07:43:52 +0200411 .sc = NULL,
Willy Tarreaub605c422022-05-17 17:04:55 +0200412 .flags = SE_FL_DETACHED,
Willy Tarreaub22b5f02022-05-10 14:57:16 +0200413};
414
Willy Tarreau2a856182017-05-16 15:20:39 +0200415/* a dmumy closed stream */
416static const struct h2s *h2_closed_stream = &(const struct h2s){
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200417 .sd = (struct sedesc *)&closed_ep,
Willy Tarreau2a856182017-05-16 15:20:39 +0200418 .h2c = NULL,
419 .st = H2_SS_CLOSED,
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +0100420 .errcode = H2_ERR_STREAM_CLOSED,
Willy Tarreauab837502017-12-27 15:07:30 +0100421 .flags = H2_SF_RST_RCVD,
Willy Tarreau2a856182017-05-16 15:20:39 +0200422 .id = 0,
423};
424
Willy Tarreauecb9dcd2019-01-03 12:00:17 +0100425/* a dmumy closed stream returning a PROTOCOL_ERROR error */
426static const struct h2s *h2_error_stream = &(const struct h2s){
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200427 .sd = (struct sedesc *)&closed_ep,
Willy Tarreauecb9dcd2019-01-03 12:00:17 +0100428 .h2c = NULL,
429 .st = H2_SS_CLOSED,
430 .errcode = H2_ERR_PROTOCOL_ERROR,
431 .flags = 0,
432 .id = 0,
433};
434
/* a dummy closed stream returning a REFUSED_STREAM error */
static const struct h2s *h2_refused_stream = &(const struct h2s){
	.sd        = (struct sedesc *)&closed_ep,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_REFUSED_STREAM,
	.flags     = 0,
	.id        = 0,
};
444
/* and a dummy idle stream for use with any unannounced stream */
static const struct h2s *h2_idle_stream = &(const struct h2s){
	.sd        = (struct sedesc *)&closed_ep,
	.h2c       = NULL,
	.st        = H2_SS_IDLE,
	.errcode   = H2_ERR_STREAM_CLOSED,
	.id        = 0,
};
453
/* forward declarations for functions defined later in this file */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
static int h2_send(struct h2c *h2c);
static int h2_recv(struct h2c *h2c);
static int h2_process(struct h2c *h2c);
/* h2_io_cb is exported to see it resolved in "show fd" */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
static int h2_frt_transfer_data(struct h2s *h2s);
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess);
static void h2s_alert(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200466
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200467/* returns the stconn associated to the H2 stream */
468static forceinline struct stconn *h2s_sc(const struct h2s *h2s)
469{
Willy Tarreau95acc8b2022-05-27 16:14:10 +0200470 return h2s->sd->sc;
Willy Tarreau7be4ee02022-05-18 07:31:41 +0200471}
472
/* the H2 traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
 * that arg3, if non-null, is either of type htx for tx headers, or of type
 * buffer for everything else. The function appends human-readable details to
 * the shared <trace_buf> chunk; it emits nothing when no h2c can be derived.
 */
static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct h2c *h2c = conn ? conn->ctx : NULL;
	const struct h2s *h2s = a2;
	const struct buffer *buf = a3;
	const struct htx *htx;
	int pos;

	if (!h2c) // nothing to add
		return;

	if (src->verbosity > H2_VERB_CLEAN) {
		/* connection-level state: side (back/front) and demux state */
		chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));

		if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
			conn_append_debug_info(&trace_buf, conn, " : ");

		if (h2c->errcode)
			chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);

		if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
		    (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
			chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
		}

		if (h2s) {
			/* stream-level state; dummy streams have id <= 0 so the
			 * demux stream id (dsi) is shown instead
			 */
			if (h2s->id <= 0)
				chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
			chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
			if (h2s->id && h2s->errcode)
				chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
		}
	}

	/* Let's dump decoded requests and responses right after parsing. They
	 * are traced at level USER with a few recognizable flags.
	 */
	if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
	     mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
		htx = htxbuf(buf); // recv req/res
	else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
		htx = a3; // send req/res
	else
		htx = NULL;

	if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
		/* only the start line (first block) is dumped here */
		const struct htx_blk *blk = htx_get_blk(htx, pos);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
		else if (type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}
}
545
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200546
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100547/* Detect a pending read0 for a H2 connection. It happens if a read0 was
548 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
549 * to parse pending data, confirming no more progress is possible because
550 * we're facing a truncated frame. The function returns 1 to report a read0
551 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200552 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100553static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200554{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100555 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200556}
557
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200558/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreau34395832022-03-18 14:59:54 +0100559 * connection may expire when it has no attached streams. As long as streams
560 * are attached, the application layer is responsible for timeout management,
561 * and each layer will detach when it doesn't want to wait anymore. When the
562 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200563 */
564static inline int h2c_may_expire(const struct h2c *h2c)
565{
Willy Tarreau36c22322022-05-27 10:41:24 +0200566 return !h2c->nb_sc;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200567}
568
/* Update the h2c task's expiration date if needed, and requeue the task.
 * When streams are still attached (h2c_may_expire() == 0) the expiration is
 * disabled, as timeout management then belongs to the upper layers. Otherwise
 * the applicable timeout depends on the connection's situation (GOAWAY sent,
 * pending output, idle keep-alive, or pre-request), and idle front connections
 * may additionally be spread over the soft-stop close window.
 */
static void h2c_update_timeout(struct h2c *h2c)
{
	int is_idle_conn = 0;

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	if (!h2c->task)
		goto leave;

	if (h2c_may_expire(h2c)) {
		/* no more streams attached */
		if (h2c->last_sid >= 0) {
			/* GOAWAY sent, closing in progress */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->shut_timeout);
			is_idle_conn = 1;
		} else if (br_data(h2c->mbuf)) {
			/* pending output data: always the regular data timeout */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
		} else if (!(h2c->flags & H2_CF_IS_BACK) && h2c->max_id > 0 && !b_data(&h2c->dbuf)) {
			/* idle after having seen one stream => keep-alive */
			int to;

			/* keep-alive timeout falls back to http-request when unset */
			if (tick_isset(h2c->proxy->timeout.httpka))
				to = h2c->proxy->timeout.httpka;
			else
				to = h2c->proxy->timeout.httpreq;

			h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
			is_idle_conn = 1;
		} else {
			/* before first request, or started to deserialize a
			 * new req => http-request, but only set, not refresh.
			 */
			int exp = (h2c->flags & H2_CF_IS_BACK) ? TICK_ETERNITY : h2c->proxy->timeout.httpreq;
			h2c->task->expire = tick_add_ifset(h2c->idle_start, exp);
		}
		/* if a timeout above was not set, fall back to the default one */
		if (!tick_isset(h2c->task->expire))
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);

		if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
		    is_idle_conn && tick_isset(global.close_spread_end)) {
			/* If a soft-stop is in progress and a close-spread-time
			 * is set, we want to spread idle connection closing roughly
			 * evenly across the defined window. This should only
			 * act on idle frontend connections.
			 * If the window end is already in the past, we wake the
			 * timeout task up immediately so that it can be closed.
			 */
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* We don't need to reset the expire if it would
				 * already happen before the close window end.
				 */
				if (tick_isset(h2c->task->expire) &&
				    tick_is_le(global.close_spread_end, h2c->task->expire)) {
					/* Set an expire value shorter than the current value
					 * because the close spread window end comes earlier.
					 */
					h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
				}
			}
			else {
				/* We are past the soft close window end, wake the timeout
				 * task up immediately.
				 */
				task_wakeup(h2c->task, TASK_WOKEN_TIMER);
			}
		}

	} else {
		h2c->task->expire = TICK_ETERNITY;
	}
	task_queue(h2c->task);
 leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
}
647
/* Returns non-zero when the connection may be freed: no streams remain in the
 * tree AND one of the terminal conditions below holds. The conditions are
 * evaluated in order and short-circuit, so their sequence matters.
 */
static __inline int
h2c_is_dead(const struct h2c *h2c)
{
	if (eb_is_empty(&h2c->streams_by_id) &&     /* don't close if streams exist */
	    ((h2c->flags & H2_CF_ERROR) ||          /* errors close immediately */
	     (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
	     (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
	     (!br_data(h2c->mbuf) &&  /* mux buffer empty, also process clean events below */
	      ((h2c->flags & H2_CF_RCVD_SHUT) ||
	       (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
		return 1;

	return 0;
}
662
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200663/*****************************************************/
664/* functions below are for dynamic buffer management */
665/*****************************************************/
666
/* indicates whether or not we may call the h2_recv() function to attempt
 * to receive data into the buffer and/or demux pending data. The condition is
 * a bit complex due to some API limits for now. The rules are the following :
 *   - if an error or a shutdown was detected on the connection and the buffer
 *     is empty, we must not attempt to receive
 *   - if the demux buf failed to be allocated, we must not try to receive and
 *     we know there is nothing pending
 *   - if no flag indicates a blocking condition, we may attempt to receive,
 *     regardless of whether the demux buffer is full or not, so that only
 *     the demux part decides whether or not to block. This is needed because
 *     the connection API indeed prevents us from re-enabling receipt that is
 *     already enabled in a polled state, so we must always immediately stop
 *     as soon as the demux can't proceed so as never to hit an end of read
 *     with data pending in the buffers.
 *   - otherwise we may not attempt to receive
 */
static inline int h2_recv_allowed(const struct h2c *h2c)
{
	if (b_data(&h2c->dbuf) == 0 &&
	    ((h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR)) || h2c->st0 >= H2_CS_ERROR))
		return 0;

	if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
	    !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
		return 1;

	return 0;
}
695
Willy Tarreau47b515a2018-12-21 16:09:41 +0100696/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200697static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100698{
699 if (!h2_recv_allowed(h2c))
700 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200701 if ((!consider_buffer || !b_data(&h2c->dbuf))
702 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100703 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200704 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100705}
706
707
Willy Tarreau4596fe22022-05-17 19:07:51 +0200708/* returns true if the front connection has too many stream connectors attached */
Willy Tarreau36c22322022-05-27 10:41:24 +0200709static inline int h2_frt_has_too_many_sc(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200710{
Willy Tarreau36c22322022-05-27 10:41:24 +0200711 return h2c->nb_sc > h2_settings_max_concurrent_streams;
Willy Tarreauf2101912018-07-19 10:11:38 +0200712}
713
/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
 * flags are used to figure what buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 */
static int h2_buf_available(void *target)
{
	struct h2c *h2c = target;
	struct h2s *h2s;

	/* demux buffer was awaited */
	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
		h2c->flags &= ~H2_CF_DEM_DALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	/* mux (output) buffer was awaited; this may also unblock the demux
	 * side when it was waiting for room in the mux buffer.
	 */
	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
		h2c->flags &= ~H2_CF_MUX_MALLOC;

		if (h2c->flags & H2_CF_DEM_MROOM) {
			h2c->flags &= ~H2_CF_DEM_MROOM;
			h2c_restart_reading(h2c, 1);
		}
		return 1;
	}

	/* rx buffer of the stream currently being demuxed (dsi) was awaited;
	 * only relevant if that stream still has a stream connector.
	 */
	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) &&
	    b_alloc(&h2s->rxbuf)) {
		h2c->flags &= ~H2_CF_DEM_SALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	return 0;
}
750
/* Tries to allocate buffer <bptr> for connection <h2c>. On failure, the
 * connection is registered on the thread's buffer wait queue so that
 * h2_buf_available() gets called back once a buffer is released. Returns the
 * allocated buffer or NULL if none could be obtained yet.
 */
static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
	    unlikely((buf = b_alloc(bptr)) == NULL)) {
		h2c->buf_wait.target = h2c;
		h2c->buf_wait.wakeup_cb = h2_buf_available;
		LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
	}
	return buf;
}
763
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200764static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200765{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200766 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100767 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100768 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200769 }
770}
771
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200772static inline void h2_release_mbuf(struct h2c *h2c)
773{
774 struct buffer *buf;
775 unsigned int count = 0;
776
777 while (b_size(buf = br_head_pick(h2c->mbuf))) {
778 b_free(buf);
779 count++;
780 }
781 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100782 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200783}
784
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100785/* returns the number of allocatable outgoing streams for the connection taking
786 * the last_sid and the reserved ones into account.
787 */
788static inline int h2_streams_left(const struct h2c *h2c)
789{
790 int ret;
791
792 /* consider the number of outgoing streams we're allowed to create before
793 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
794 * nb_reserved is the number of streams which don't yet have an ID.
795 */
796 ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
797 ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
798 if (ret < 0)
799 ret = 0;
800 return ret;
801}
802
Willy Tarreau00f18a32019-01-26 12:19:01 +0100803/* returns the number of streams in use on a connection to figure if it's
Willy Tarreau36c22322022-05-27 10:41:24 +0200804 * idle or not. We check nb_sc and not nb_streams as the caller will want
Willy Tarreau00f18a32019-01-26 12:19:01 +0100805 * to know if it was the last one after a detach().
806 */
807static int h2_used_streams(struct connection *conn)
808{
809 struct h2c *h2c = conn->ctx;
810
Willy Tarreau36c22322022-05-27 10:41:24 +0200811 return h2c->nb_sc;
Willy Tarreau00f18a32019-01-26 12:19:01 +0100812}
813
/* returns the number of concurrent streams available on the connection: the
 * minimum of the SETTINGS-derived limit, the remaining allocatable stream IDs,
 * and (for servers with max_reuse set) the per-connection reuse budget.
 */
static int h2_avail_streams(struct connection *conn)
{
	struct server *srv = objt_server(conn->target);
	struct h2c *h2c = conn->ctx;
	int ret1, ret2;

	/* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
	 * streams on the connection.
	 */
	if (h2c->last_sid >= 0)
		return 0;

	/* no new streams on a connection already in error */
	if (h2c->st0 >= H2_CS_ERROR)
		return 0;

	/* note: may be negative if a SETTINGS frame changes the limit */
	ret1 = h2c->streams_limit - h2c->nb_streams;

	/* we must also consider the limit imposed by stream IDs */
	ret2 = h2_streams_left(h2c);
	ret1 = MIN(ret1, ret2);
	if (ret1 > 0 && srv && srv->max_reuse >= 0) {
		/* cap by the server's remaining reuse budget */
		ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
		ret1 = MIN(ret1, ret2);
	}
	return ret1;
}
842
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200843
Willy Tarreau62f52692017-10-08 23:01:42 +0200844/*****************************************************************/
845/* functions below are dedicated to the mux setup and management */
846/*****************************************************************/
847
/* Initialize the mux once it's attached. For outgoing connections, the context
 * is already initialized before installing the mux, so we detect incoming
 * connections from the fact that the context is still NULL (even during mux
 * upgrades). <input> is always used as Input buffer and may contain data. It is
 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
 * On failure, all resources acquired so far are released via the goto-cleanup
 * labels at the bottom and conn->ctx is restored to its saved value.
 */
static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
		   struct buffer *input)
{
	struct h2c *h2c;
	struct task *t = NULL;
	void *conn_ctx = conn->ctx;  /* saved to restore on failure, and passed
	                              * as the sc of the first backend stream */

	TRACE_ENTER(H2_EV_H2C_NEW);

	h2c = pool_alloc(pool_head_h2c);
	if (!h2c)
		goto fail_no_h2c;

	/* pick the side-specific timeouts and stats counters */
	if (conn_is_back(conn)) {
		h2c->flags = H2_CF_IS_BACK;
		h2c->shut_timeout = h2c->timeout = prx->timeout.server;
		if (tick_isset(prx->timeout.serverfin))
			h2c->shut_timeout = prx->timeout.serverfin;

		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
		                                      &h2_stats_module);
	} else {
		h2c->flags = H2_CF_NONE;
		h2c->shut_timeout = h2c->timeout = prx->timeout.client;
		if (tick_isset(prx->timeout.clientfin))
			h2c->shut_timeout = prx->timeout.clientfin;

		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
		                                      &h2_stats_module);
	}

	h2c->proxy = prx;
	h2c->task = NULL;
	h2c->idle_start = now_ms;
	/* only create a timeout task when a timeout is configured */
	if (tick_isset(h2c->timeout)) {
		t = task_new_here();
		if (!t)
			goto fail;

		h2c->task = t;
		t->process = h2_timeout_task;
		t->context = h2c;
		t->expire = tick_add(now_ms, h2c->timeout);
	}

	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet)
		goto fail;
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->wait_event.events = 0;
	if (!conn_is_back(conn)) {
		/* Connection might already be in the stopping_list if subject
		 * to h1->h2 upgrade.
		 */
		if (!LIST_INLIST(&conn->stopping_list)) {
			LIST_APPEND(&mux_stopping_data[tid].list,
			            &conn->stopping_list);
		}
	}

	/* HPACK decoding dynamic table */
	h2c->ddht = hpack_dht_alloc();
	if (!h2c->ddht)
		goto fail;

	/* Initialise the context. */
	h2c->st0 = H2_CS_PREFACE;
	h2c->conn = conn;
	h2c->streams_limit = h2_settings_max_concurrent_streams;
	h2c->max_id = -1;
	h2c->errcode = H2_ERR_NO_ERROR;
	h2c->rcvd_c = 0;
	h2c->rcvd_s = 0;
	h2c->nb_streams = 0;
	h2c->nb_sc = 0;
	h2c->nb_reserved = 0;
	h2c->stream_cnt = 0;

	h2c->dbuf = *input;  /* takes ownership of the caller's input buffer */
	h2c->dsi = -1;

	h2c->last_sid = -1;

	br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
	h2c->miw = 65535; /* mux initial window size */
	h2c->mws = 65535; /* mux window size */
	h2c->mfs = 16384; /* initial max frame size */
	h2c->streams_by_id = EB_ROOT;
	LIST_INIT(&h2c->send_list);
	LIST_INIT(&h2c->fctl_list);
	LIST_INIT(&h2c->blocked_list);
	LIST_INIT(&h2c->buf_wait.list);

	conn->ctx = h2c;

	TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);

	if (t)
		task_queue(t);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* FIXME: this is temporary, for outgoing connections we need
		 * to immediately allocate a stream until the code is modified
		 * so that the caller calls ->attach(). For now the outgoing sc
		 * is stored as conn->ctx by the caller and saved in conn_ctx.
		 */
		struct h2s *h2s;

		h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
		if (!h2s)
			goto fail_stream;
	}

	HA_ATOMIC_INC(&h2c->px_counters->open_conns);
	HA_ATOMIC_INC(&h2c->px_counters->total_conns);

	/* prepare to read something */
	h2c_restart_reading(h2c, 1);
	TRACE_LEAVE(H2_EV_H2C_NEW, conn);
	return 0;
  fail_stream:
	hpack_dht_free(h2c->ddht);
  fail:
	task_destroy(t);
	if (h2c->wait_event.tasklet)
		tasklet_free(h2c->wait_event.tasklet);
	pool_free(pool_head_h2c, h2c);
  fail_no_h2c:
	if (!conn_is_back(conn))
		LIST_DEL_INIT(&conn->stopping_list);
	conn->ctx = conn_ctx; /* restore saved ctx */
	TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
	return -1;
}
988
Willy Tarreau751f2d02018-10-05 09:35:00 +0200989/* returns the next allocatable outgoing stream ID for the H2 connection, or
990 * -1 if no more is allocatable.
991 */
992static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
993{
994 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +0100995
996 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +0200997 id = -1;
998 return id;
999}
1000
Willy Tarreau2373acc2017-10-12 17:35:14 +02001001/* returns the stream associated with id <id> or NULL if not found */
1002static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1003{
1004 struct eb32_node *node;
1005
Willy Tarreau751f2d02018-10-05 09:35:00 +02001006 if (id == 0)
1007 return (struct h2s *)h2_closed_stream;
1008
Willy Tarreau2a856182017-05-16 15:20:39 +02001009 if (id > h2c->max_id)
1010 return (struct h2s *)h2_idle_stream;
1011
Willy Tarreau2373acc2017-10-12 17:35:14 +02001012 node = eb32_lookup(&h2c->streams_by_id, id);
1013 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001014 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001015
1016 return container_of(node, struct h2s, by_id);
1017}
1018
/* release function. This one should be called to free all resources allocated
 * to the mux. It releases the h2c and, when one is still attached, tears down
 * and frees the underlying connection as well.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_END);

	/* free the HPACK dynamic decoding table */
	hpack_dht_free(h2c->ddht);

	/* leave the buffer wait queue if we were still enqueued there */
	if (LIST_INLIST(&h2c->buf_wait.list))
		LIST_DEL_INIT(&h2c->buf_wait.list);

	h2_release_buf(h2c, &h2c->dbuf);
	h2_release_mbuf(h2c);

	if (h2c->task) {
		/* detach the timeout task from this h2c before waking it so
		 * it no longer references the memory being freed below
		 */
		h2c->task->context = NULL;
		task_wakeup(h2c->task, TASK_WOKEN_OTHER);
		h2c->task = NULL;
	}
	if (h2c->wait_event.tasklet)
		tasklet_free(h2c->wait_event.tasklet);
	/* drop any pending transport-layer subscription before the h2c goes away */
	if (conn && h2c->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
					&h2c->wait_event);

	HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

	/* the h2c must not be dereferenced past this point */
	pool_free(pool_head_h2c, h2c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1080
1081
Willy Tarreau71681172017-10-23 14:39:06 +02001082/******************************************************/
1083/* functions below are for the H2 protocol processing */
1084/******************************************************/
1085
1086/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001087static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001088{
1089 return h2s ? h2s->id : 0;
1090}
1091
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001092/* returns the sum of the stream's own window size and the mux's initial
1093 * window, which together form the stream's effective window size.
1094 */
1095static inline int h2s_mws(const struct h2s *h2s)
1096{
1097 return h2s->sws + h2s->h2c->miw;
1098}
1099
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001100/* marks an error on the connection. Before settings are sent, we must not send
1101 * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
1102 * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
1103 * even try.
1104 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001105static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
Willy Tarreau741d6df2017-10-17 08:00:59 +02001106{
Willy Tarreau022e5e52020-09-10 09:33:15 +02001107 TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
Willy Tarreau741d6df2017-10-17 08:00:59 +02001108 h2c->errcode = err;
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001109 if (h2c->st0 < H2_CS_SETTINGS1)
1110 h2c->flags |= H2_CF_GOAWAY_FAILED;
Willy Tarreau741d6df2017-10-17 08:00:59 +02001111 h2c->st0 = H2_CS_ERROR;
1112}
1113
Willy Tarreau175cebb2019-01-24 10:02:24 +01001114/* marks an error on the stream. It may also update an already closed stream
1115 * (e.g. to report an error after an RST was received).
1116 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001117static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
Willy Tarreau2e43f082017-10-17 08:03:59 +02001118{
Willy Tarreau175cebb2019-01-24 10:02:24 +01001119 if (h2s->id && h2s->st != H2_SS_ERROR) {
Willy Tarreau022e5e52020-09-10 09:33:15 +02001120 TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
Willy Tarreau2e43f082017-10-17 08:03:59 +02001121 h2s->errcode = err;
Willy Tarreau175cebb2019-01-24 10:02:24 +01001122 if (h2s->st < H2_SS_ERROR)
1123 h2s->st = H2_SS_ERROR;
Willy Tarreau95acc8b2022-05-27 16:14:10 +02001124 se_fl_set_error(h2s->sd);
Willy Tarreau2e43f082017-10-17 08:03:59 +02001125 }
1126}
1127
Willy Tarreau7e094452018-12-19 18:08:52 +01001128/* attempt to notify the data layer of recv availability */
1129static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1130{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001131 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001132 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001133 tasklet_wakeup(h2s->subs->tasklet);
1134 h2s->subs->events &= ~SUB_RETRY_RECV;
1135 if (!h2s->subs->events)
1136 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001137 }
1138}
1139
1140/* attempt to notify the data layer of send availability */
1141static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1142{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001143 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001144 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001145 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001146 tasklet_wakeup(h2s->subs->tasklet);
1147 h2s->subs->events &= ~SUB_RETRY_SEND;
1148 if (!h2s->subs->events)
1149 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001150 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001151 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1152 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1153 tasklet_wakeup(h2s->shut_tl);
1154 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001155}
1156
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001157/* alerts the data layer, trying to wake it up by all means, following
1158 * this sequence :
1159 * - if the h2s' data layer is subscribed to recv, then it's woken up for recv
1160 * - if its subscribed to send, then it's woken up for send
1161 * - if it was subscribed to neither, its ->wake() callback is called
1162 * It is safe to call this function with a closed stream which doesn't have a
Willy Tarreau4596fe22022-05-17 19:07:51 +02001163 * stream connector anymore.
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001164 */
1165static void __maybe_unused h2s_alert(struct h2s *h2s)
1166{
Willy Tarreau7838a792019-08-12 18:42:03 +02001167 TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
1168
Willy Tarreauf96508a2020-01-10 11:12:48 +01001169 if (h2s->subs ||
Willy Tarreau5723f292020-01-10 15:16:57 +01001170 (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001171 h2s_notify_recv(h2s);
1172 h2s_notify_send(h2s);
1173 }
Willy Tarreau2f2318d2022-05-18 10:17:16 +02001174 else if (h2s_sc(h2s) && h2s_sc(h2s)->app_ops->wake != NULL) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001175 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau2f2318d2022-05-18 10:17:16 +02001176 h2s_sc(h2s)->app_ops->wake(h2s_sc(h2s));
Willy Tarreau7838a792019-08-12 18:42:03 +02001177 }
1178
1179 TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001180}
1181
Willy Tarreaue4820742017-07-27 13:37:23 +02001182/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001183static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001184{
1185 uint8_t *out = frame;
1186
1187 *out = len >> 16;
1188 write_n16(out + 1, len);
1189}
1190
Willy Tarreau54c15062017-10-10 17:10:03 +02001191/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
1192 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
1193 * the caller's responsibility to verify that there are at least <bytes> bytes
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001194 * available in the buffer's input prior to calling this function. The buffer
1195 * is assumed not to hold any output data.
Willy Tarreau54c15062017-10-10 17:10:03 +02001196 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001197static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
Willy Tarreau54c15062017-10-10 17:10:03 +02001198 const struct buffer *b, int o)
1199{
Willy Tarreau591d4452018-06-15 17:21:00 +02001200 readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001201}
1202
Willy Tarreau1f094672017-11-20 21:27:45 +01001203static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001204{
Willy Tarreau591d4452018-06-15 17:21:00 +02001205 return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001206}
1207
Willy Tarreau1f094672017-11-20 21:27:45 +01001208static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001209{
Willy Tarreau591d4452018-06-15 17:21:00 +02001210 return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001211}
1212
Willy Tarreau1f094672017-11-20 21:27:45 +01001213static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001214{
Willy Tarreau591d4452018-06-15 17:21:00 +02001215 return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001216}
1217
1218
/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
 * The algorithm is not obvious. It turns out that H2 headers are neither
 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
 * may wrap so each byte read must be checked. The header is formed like this :
 *
 *  b0         b1        b2       b3   b4    b5..b8
 *  +----------+---------+--------+----+----+----------------------+
 *  |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
 *  +----------+---------+--------+----+----+----------------------+
 *
 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
 * we get the sid properly aligned and ordered, and 16 bits of len properly
 * ordered as well. The type and flags can be extracted using bit shifts from
 * the word, and only one extra read is needed to fetch len[16:23].
 * Returns zero if some bytes are missing, otherwise non-zero on success. The
 * buffer is assumed not to contain any output data.
 */
static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
{
	uint64_t w;

	/* need the full 9-byte header starting at offset <o> */
	if (b_data(b) < o + 9)
		return 0;

	/* w covers bytes b1..b8: len[15:0], type, flags and sid */
	w = h2_get_n64(b, o + 1);
	h->len = *(uint8_t*)b_peek(b, o) << 16; /* len[23:16] from b0 */
	h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
	h->ff = w >> 32;  /* flags (b4) */
	h->ft = w >> 40;  /* frame type (b3) */
	h->len += w >> 48; /* add len[15:0] from b1-b2 */
	return 1;
}
1251
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	/* an H2 frame header is always exactly 9 bytes */
	b_del(b, 9);
}
1259
1260/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001261static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001262{
1263 int ret;
1264
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001265 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001266 if (ret > 0)
1267 h2_skip_frame_hdr(b);
1268 return ret;
1269}
1270
Willy Tarreaucb985a42019-10-07 16:56:34 +02001271
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	size_t remain = b->data - 9;           /* payload bytes after the initial header */
	int extra_frames = (remain - 1) / mfs; /* number of CONTINUATION frames needed */
	size_t fsize;
	char *fptr;
	int frame;

	/* whole frame already fits within <mfs>: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	/* walk backwards: the last fragment may be partial, all others are
	 * exactly <mfs> bytes. Each iteration shifts one tail fragment right
	 * to open room for its 9-byte CONTINUATION header.
	 */
	for (frame = extra_frames; frame; frame--) {
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; END_HEADERS is only set on the
		 * very last CONTINUATION frame, and the stream ID is copied
		 * from the initial frame.
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial frame now only carries the first <remain> bytes and no
	 * longer ends the header block
	 */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1317
1318
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained.
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* streams without an assigned ID are accounted as reserved */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		/* notify the reader unless an EOS was already reported or
		 * data remain pending in the rx buffer
		 */
		if (h2s_sc(h2s)) {
			if (!se_fl_test(h2s->sd, SE_FL_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	/* unconditional, so calling this on an already closed stream is a no-op */
	h2s->st = H2_SS_CLOSED;
}
1340
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	/* close first so that stream accounting is updated before removal */
	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		/* a buffer was freed, let a waiter know one is available */
		offer_buffers(NULL, 1);
	}

	/* disarm any remaining subscription events */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	LIST_DEL_INIT(&h2s->list);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	/* the stream endpoint descriptor, if any, must be orphaned by now */
	BUG_ON(h2s->sd && !se_fl_test(h2s->sd, SE_FL_ORPHAN));
	sedesc_free(h2s->sd);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1376
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function. An <id> of 0 creates a reserved stream
 * (no ID assigned yet, counted in <nb_reserved>).
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* tasklet used to run the deferred shutdown handler */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->sd = NULL;
	h2s->sws = 0;                   /* per-stream window delta */
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	/* a positive ID advances the connection's highest known ID; a null
	 * one means the ID will be chosen later (reserved stream)
	 */
	h2s->by_id.key = h2s->id = id;
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;
	h2c->stream_cnt++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1432
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it. On error the session is logged via sess_log().
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse to exceed the advertised concurrency limit */
	if (h2c->nb_streams >= h2_settings_max_concurrent_streams) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	h2s->sd = sedesc_new();
	if (!h2s->sd)
		goto out_close;
	h2s->sd->se = h2s;
	h2s->sd->conn = h2c->conn;
	se_fl_set(h2s->sd, SE_FL_T_MUX | SE_FL_ORPHAN | SE_FL_NOT_FIRST);

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		se_fl_set(h2s->sd, SE_FL_WEBSOCKET);

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;

	/* on failure this falls through out_close, which destroys the stream
	 * and logs the allocation error below
	 */
	if (!sc_new_from_endp(h2s->sd, sess, input))
		goto out_close;

	h2c->nb_sc++;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->tv_accept = now;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_sc(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1505
/* allocates a new stream associated to stream connector <sc> on the h2c
 * connection and returns it, or NULL in case of memory allocation error or if
 * the highest possible stream ID was reached.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* respect the concurrency limit negotiated with the peer */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* no usable stream ID remains on this connection */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	if (sc_attach_mux(sc, h2s, h2c->conn) < 0) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		h2s_destroy(h2s);
		h2s = NULL;
		goto out;
	}
	/* reuse the connector's endpoint descriptor for this stream */
	h2s->sd = sc->sedesc;
	h2s->sess = sess;
	h2c->nb_sc++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
1550
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes. The frame is first assembled in a local buffer
 * then appended to the mux's output buffer ring.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int mfs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	             "\x00\x00\x00"      /* length : 0 for now */
	             "\x04\x00"          /* type : 4 (settings), flags : 0 */
	             "\x00\x00\x00\x00", /* stream ID : 0 */
	             9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* only advertise values differing from the protocol defaults below */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_initial_window_size != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, h2_settings_initial_window_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_max_concurrent_streams != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, h2_settings_max_concurrent_streams);
		chunk_memcat(&buf, str, 6);
	}

	/* cap the advertised max_frame_size to the buffer size; 0 in the
	 * config also means "use the buffer size"
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* patch the real payload length into the frame header built above */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: flag it and retry later */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try once more with a new tail buffer */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1651
/* Try to receive a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_frt_recv_preface(struct h2c *h2c)
{
	int ret1;
	int ret2;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);

	/* ret1 <= 0 means the preface was not fully matched: 0 when more data
	 * may still arrive, < 0 on definite mismatch.
	 */
	ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));

	if (unlikely(ret1 <= 0)) {
		if (!ret1)
			h2c->flags |= H2_CF_DEM_SHORT_READ;
		if (ret1 < 0 || (h2c->flags & H2_CF_RCVD_SHUT)) {
			TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			/* only count a protocol error if some bytes were received
			 * or if the frontend was not configured to ignore probes
			 * (PR_O_IGNORE_PRB), so that silent port scans don't
			 * inflate the counter.
			 */
			if (b_data(&h2c->dbuf) ||
			    !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		ret2 = 0;
		goto out;
	}

	ret2 = h2c_send_settings(h2c);
	if (ret2 > 0)
		/* only consume the preface once our own SETTINGS could be queued */
		b_del(&h2c->dbuf, ret1);
 out:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
	return ret2;
}
1686
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no mux buffer available: report the allocation failure and
		 * block the demuxer until more room is available.
		 */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* buffer full: try to append a new one to the
				 * ring and retry, otherwise mark the mux full.
				 */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	/* preface in place (now or previously), follow up with our SETTINGS */
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1729
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case, nor will it send one if settings were not sent (e.g. still waiting for
 * a preface) so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
		ret = 1; // claim that it worked
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the requester (stream if known, else the
		 * demuxer) until room is available again.
		 */
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output full: try to extend the mbuf ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1811
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be updated to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	if (!h2s || h2s->st == H2_SS_CLOSED) {
		/* nothing to do, already closed or no stream: report success */
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block this stream until room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output full: try to extend the mbuf ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* whether we sent it or deliberately skipped it, the stream is over */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
1878
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* the frame targets the stream currently being demuxed (h2c->dsi),
	 * which may differ from h2s->id when h2s is a dummy stream.
	 */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the demuxer until room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output full: try to extend the mbuf ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* only real streams (non-zero ID) track the RST and get closed */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
1946
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		/* ES already sent (or stream dead): nothing to do */
		ret = 1;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block this stream until room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* output full: try to extend the mbuf ring and retry */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
1997
/* wake a specific stream and assign its stream connector some SE_FL_* flags
 * among SE_FL_ERR_PENDING and SE_FL_ERROR if needed. The stream's state
 * is automatically updated accordingly. If the stream is orphaned, it is
 * destroyed.
 */
static void h2s_wake_one_stream(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);

	if (!h2s_sc(h2s)) {
		/* this stream was already orphaned */
		h2s_destroy(h2s);
		TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
		return;
	}

	/* a pending read0 on the connection advances the stream state as if
	 * the remote side had finished sending.
	 */
	if (h2c_read0_pending(h2s->h2c)) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* report an error on the stream if the connection is in error, or if
	 * a last_sid was advertised (GOAWAY) and this stream is unassigned or
	 * beyond it.
	 */
	if (h2s->h2c->st0 >= H2_CS_ERROR || (h2s->h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) ||
	    (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
		se_fl_set_error(h2s->sd);

		if (h2s->st < H2_SS_ERROR)
			h2s->st = H2_SS_ERROR;
	}

	h2s_alert(h2s);
	TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
}
2034
2035/* wake the streams attached to the connection, whose id is greater than <last>
2036 * or unassigned.
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002037 */
Willy Tarreau23482912019-05-07 15:23:14 +02002038static void h2_wake_some_streams(struct h2c *h2c, int last)
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002039{
2040 struct eb32_node *node;
2041 struct h2s *h2s;
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002042
Willy Tarreau7838a792019-08-12 18:42:03 +02002043 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
2044
Christopher Fauletf02ca002019-03-07 16:21:34 +01002045 /* Wake all streams with ID > last */
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002046 node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
2047 while (node) {
2048 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002049 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002050 h2s_wake_one_stream(h2s);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002051 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01002052
Christopher Fauletf02ca002019-03-07 16:21:34 +01002053 /* Wake all streams with unassigned ID (ID == 0) */
2054 node = eb32_lookup(&h2c->streams_by_id, 0);
2055 while (node) {
2056 h2s = container_of(node, struct h2s, by_id);
2057 if (h2s->id > 0)
2058 break;
2059 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002060 h2s_wake_one_stream(h2s);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002061 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002062
2063 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002064}
2065
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002066/* Wake up all blocked streams whose window size has become positive after the
2067 * mux's initial window was adjusted. This should be done after having processed
2068 * SETTINGS frames which have updated the mux's initial window size.
Willy Tarreau3421aba2017-07-27 15:41:03 +02002069 */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002070static void h2c_unblock_sfctl(struct h2c *h2c)
Willy Tarreau3421aba2017-07-27 15:41:03 +02002071{
2072 struct h2s *h2s;
2073 struct eb32_node *node;
2074
Willy Tarreau7838a792019-08-12 18:42:03 +02002075 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
2076
Willy Tarreau3421aba2017-07-27 15:41:03 +02002077 node = eb32_first(&h2c->streams_by_id);
2078 while (node) {
2079 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002080 if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002081 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002082 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002083 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2084 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002085 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002086 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02002087 node = eb32_next(node);
2088 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002089
2090 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002091}
2092
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	if (h2c->dff & H2_F_SETTINGS_ACK) {
		/* RFC7540#6.5: a SETTINGS ACK must carry an empty payload */
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: a sequence of 6-byte entries, each made of a
	 * 16-bit identifier and a 32-bit value. Unknown identifiers are
	 * silently ignored as required by the spec.
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t  arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* only note the change here; the HPACK encoder state is
			 * updated later based on this flag.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2_settings_max_concurrent_streams)
					arg = h2_settings_max_concurrent_streams;
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* RFC8441: the peer supports Extended CONNECT */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2183
/* try to send an ACK for a settings frame on the connection. Returns > 0 on
 * success or one of the h2_status values.
 */
static int h2c_ack_settings(struct h2c *h2c)
{
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	memcpy(str,
	       "\x00\x00\x00"     /* length : 0 (no data) */
	       "\x04" "\x01"      /* type   : 4, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the demuxer until room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output full: try to extend the mbuf ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
2225
Willy Tarreaucf68c782017-10-10 17:11:41 +02002226/* processes a PING frame and schedules an ACK if needed. The caller must pass
2227 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002228 * missing data. The caller must have already verified frame length
2229 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002230 */
2231static int h2c_handle_ping(struct h2c *h2c)
2232{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002233 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002234 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002235 h2c->st0 = H2_CS_FRAME_A;
2236 return 1;
2237}
2238
/* Try to send a window update for stream id <sid> and value <increment>.
 * Returns > 0 on success or zero on missing room or failure. It may return an
 * error in h2c.
 */
static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);

	/* length: 4, type: 8, flags: none */
	memcpy(str, "\x00\x00\x04\x08\x00", 5);
	write_n32(str + 5, sid);
	write_n32(str + 9, increment);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no buffer: block the demuxer until room is available */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output full: try to extend the mbuf ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
	return ret;
}
2281
2282/* try to send pending window update for the connection. It's safe to call it
2283 * with no pending updates. Returns > 0 on success or zero on missing room or
2284 * failure. It may return an error in h2c.
2285 */
2286static int h2c_send_conn_wu(struct h2c *h2c)
2287{
2288 int ret = 1;
2289
Willy Tarreau7838a792019-08-12 18:42:03 +02002290 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2291
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002292 if (h2c->rcvd_c <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002293 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002294
Willy Tarreau97aaa672018-12-23 09:49:04 +01002295 if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
2296 /* increase the advertised connection window to 2G on
2297 * first update.
2298 */
2299 h2c->flags |= H2_CF_WINDOW_OPENED;
2300 h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
2301 }
2302
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002303 /* send WU for the connection */
2304 ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
2305 if (ret > 0)
2306 h2c->rcvd_c = 0;
2307
Willy Tarreau7838a792019-08-12 18:42:03 +02002308 out:
2309 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002310 return ret;
2311}
2312
2313/* try to send pending window update for the current dmux stream. It's safe to
2314 * call it with no pending updates. Returns > 0 on success or zero on missing
2315 * room or failure. It may return an error in h2c.
2316 */
2317static int h2c_send_strm_wu(struct h2c *h2c)
2318{
2319 int ret = 1;
2320
Willy Tarreau7838a792019-08-12 18:42:03 +02002321 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2322
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002323 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002324 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002325
2326 /* send WU for the stream */
2327 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2328 if (ret > 0)
2329 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002330 out:
2331 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002332 return ret;
2333}
2334
Willy Tarreaucf68c782017-10-10 17:11:41 +02002335/* try to send an ACK for a ping frame on the connection. Returns > 0 on
2336 * success, 0 on missing data or one of the h2_status values.
2337 */
2338static int h2c_ack_ping(struct h2c *h2c)
2339{
2340 struct buffer *res;
2341 char str[17];
Willy Tarreau7838a792019-08-12 18:42:03 +02002342 int ret = 0;
2343
2344 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002345
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002346 if (b_data(&h2c->dbuf) < 8)
Willy Tarreau7838a792019-08-12 18:42:03 +02002347 goto out;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002348
Willy Tarreaucf68c782017-10-10 17:11:41 +02002349 memcpy(str,
2350 "\x00\x00\x08" /* length : 8 (same payload) */
2351 "\x06" "\x01" /* type : 6, flags : ACK */
2352 "\x00\x00\x00\x00" /* stream ID */, 9);
2353
2354 /* copy the original payload */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002355 h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002356
Willy Tarreau9c218e72019-05-26 10:08:28 +02002357 res = br_tail(h2c->mbuf);
2358 retry:
2359 if (!h2_get_buf(h2c, res)) {
2360 h2c->flags |= H2_CF_MUX_MALLOC;
2361 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002362 goto out;
Willy Tarreau9c218e72019-05-26 10:08:28 +02002363 }
2364
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002365 ret = b_istput(res, ist2(str, 17));
Willy Tarreaucf68c782017-10-10 17:11:41 +02002366 if (unlikely(ret <= 0)) {
2367 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002368 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2369 goto retry;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002370 h2c->flags |= H2_CF_MUX_MFULL;
2371 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002372 }
2373 else {
2374 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002375 ret = 0;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002376 }
2377 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002378 out:
2379 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002380 return ret;
2381}
2382
Willy Tarreau26f95952017-07-27 17:18:30 +02002383/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
2384 * Returns > 0 on success or zero on missing data. It may return an error in
Willy Tarreaub860c732019-01-30 15:39:55 +01002385 * h2c or h2s. The caller must have already verified frame length and stream ID
2386 * validity. Described in RFC7540#6.9.
Willy Tarreau26f95952017-07-27 17:18:30 +02002387 */
2388static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
2389{
2390 int32_t inc;
2391 int error;
2392
Willy Tarreau7838a792019-08-12 18:42:03 +02002393 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
2394
Willy Tarreau26f95952017-07-27 17:18:30 +02002395 /* process full frame only */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002396 if (b_data(&h2c->dbuf) < h2c->dfl) {
2397 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002398 goto out0;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002399 }
Willy Tarreau26f95952017-07-27 17:18:30 +02002400
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002401 inc = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau26f95952017-07-27 17:18:30 +02002402
2403 if (h2c->dsi != 0) {
2404 /* stream window update */
Willy Tarreau26f95952017-07-27 17:18:30 +02002405
2406 /* it's not an error to receive WU on a closed stream */
2407 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau7838a792019-08-12 18:42:03 +02002408 goto done;
Willy Tarreau26f95952017-07-27 17:18:30 +02002409
2410 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002411 TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002412 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002413 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002414 goto strm_err;
2415 }
2416
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002417 if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002418 TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002419 error = H2_ERR_FLOW_CONTROL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002420 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002421 goto strm_err;
2422 }
2423
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002424 h2s->sws += inc;
2425 if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
Willy Tarreau26f95952017-07-27 17:18:30 +02002426 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002427 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002428 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2429 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002430 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreau26f95952017-07-27 17:18:30 +02002431 }
2432 }
2433 else {
2434 /* connection window update */
2435 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002436 TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002437 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002438 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002439 goto conn_err;
2440 }
2441
2442 if (h2c->mws >= 0 && h2c->mws + inc < 0) {
2443 error = H2_ERR_FLOW_CONTROL_ERROR;
2444 goto conn_err;
2445 }
2446
2447 h2c->mws += inc;
2448 }
2449
Willy Tarreau7838a792019-08-12 18:42:03 +02002450 done:
2451 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002452 return 1;
2453
2454 conn_err:
2455 h2c_error(h2c, error);
Willy Tarreau7838a792019-08-12 18:42:03 +02002456 out0:
2457 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002458 return 0;
2459
2460 strm_err:
Willy Tarreau6432dc82019-01-30 15:42:44 +01002461 h2s_error(h2s, error);
2462 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002463 TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002464 return 0;
2465}
2466
Willy Tarreaue96b0922017-10-30 00:28:29 +01002467/* processes a GOAWAY frame, and signals all streams whose ID is greater than
Willy Tarreaub860c732019-01-30 15:39:55 +01002468 * the last ID. Returns > 0 on success or zero on missing data. The caller must
2469 * have already verified frame length and stream ID validity. Described in
2470 * RFC7540#6.8.
Willy Tarreaue96b0922017-10-30 00:28:29 +01002471 */
2472static int h2c_handle_goaway(struct h2c *h2c)
2473{
Willy Tarreaue96b0922017-10-30 00:28:29 +01002474 int last;
2475
Willy Tarreau7838a792019-08-12 18:42:03 +02002476 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002477 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002478 if (b_data(&h2c->dbuf) < h2c->dfl) {
2479 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002480 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002481 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002482 }
Willy Tarreaue96b0922017-10-30 00:28:29 +01002483
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002484 last = h2_get_n32(&h2c->dbuf, 0);
2485 h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
Willy Tarreau11cc2d62017-12-03 10:27:47 +01002486 if (h2c->last_sid < 0)
2487 h2c->last_sid = last;
Willy Tarreau23482912019-05-07 15:23:14 +02002488 h2_wake_some_streams(h2c, last);
Willy Tarreau7838a792019-08-12 18:42:03 +02002489 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002490 return 1;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002491}
2492
Willy Tarreau92153fc2017-12-03 19:46:19 +01002493/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002494 * invalid. Returns > 0 on success or zero on missing data. It may return an
2495 * error in h2c. The caller must have already verified frame length and stream
2496 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002497 */
2498static int h2c_handle_priority(struct h2c *h2c)
2499{
Willy Tarreau7838a792019-08-12 18:42:03 +02002500 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2501
Willy Tarreau92153fc2017-12-03 19:46:19 +01002502 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002503 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002504 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002505 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002506 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002507 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002508
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002509 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002510 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002511 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002512 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002513 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002514 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002515 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002516 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002517 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002518 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002519}
2520
Willy Tarreaucd234e92017-08-18 10:59:39 +02002521/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
Willy Tarreaub860c732019-01-30 15:39:55 +01002522 * Returns > 0 on success or zero on missing data. The caller must have already
2523 * verified frame length and stream ID validity. Described in RFC7540#6.4.
Willy Tarreaucd234e92017-08-18 10:59:39 +02002524 */
2525static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
2526{
Willy Tarreau7838a792019-08-12 18:42:03 +02002527 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
2528
Willy Tarreaucd234e92017-08-18 10:59:39 +02002529 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002530 if (b_data(&h2c->dbuf) < h2c->dfl) {
2531 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002532 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaucd234e92017-08-18 10:59:39 +02002533 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002534 }
Willy Tarreaucd234e92017-08-18 10:59:39 +02002535
2536 /* late RST, already handled */
Willy Tarreau7838a792019-08-12 18:42:03 +02002537 if (h2s->st == H2_SS_CLOSED) {
2538 TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002539 return 1;
Willy Tarreau7838a792019-08-12 18:42:03 +02002540 }
Willy Tarreaucd234e92017-08-18 10:59:39 +02002541
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002542 h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau00dd0782018-03-01 16:31:34 +01002543 h2s_close(h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002544
Willy Tarreau7be4ee02022-05-18 07:31:41 +02002545 if (h2s_sc(h2s)) {
Willy Tarreau95acc8b2022-05-27 16:14:10 +02002546 se_fl_set_error(h2s->sd);
Willy Tarreauf830f012018-12-19 17:44:55 +01002547 h2s_alert(h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002548 }
2549
2550 h2s->flags |= H2_SF_RST_RCVD;
Willy Tarreau7838a792019-08-12 18:42:03 +02002551 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002552 return 1;
Willy Tarreaucd234e92017-08-18 10:59:39 +02002553}
2554
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002555/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2556 * It may return an error in h2c or h2s. The caller must consider that the
2557 * return value is the new h2s in case one was allocated (most common case).
2558 * Described in RFC7540#6.2. Most of the
Willy Tarreau13278b42017-10-13 19:23:14 +02002559 * errors here are reported as connection errors since it's impossible to
2560 * recover from such errors after the compression context has been altered.
2561 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002562static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
Willy Tarreau13278b42017-10-13 19:23:14 +02002563{
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002564 struct buffer rxbuf = BUF_NULL;
Willy Tarreau4790f7c2019-01-24 11:33:02 +01002565 unsigned long long body_len = 0;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002566 uint32_t flags = 0;
Willy Tarreau13278b42017-10-13 19:23:14 +02002567 int error;
2568
Willy Tarreau7838a792019-08-12 18:42:03 +02002569 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2570
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002571 if (!b_size(&h2c->dbuf)) {
2572 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002573 goto out; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002574 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002575
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002576 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2577 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002578 goto out; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002579 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002580
2581 /* now either the frame is complete or the buffer is complete */
2582 if (h2s->st != H2_SS_IDLE) {
Willy Tarreau88d138e2019-01-02 19:38:14 +01002583 /* The stream exists/existed, this must be a trailers frame */
2584 if (h2s->st != H2_SS_CLOSED) {
Amaury Denoyelle74162742020-12-11 17:53:05 +01002585 error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002586 /* unrecoverable error ? */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002587 if (h2c->st0 >= H2_CS_ERROR) {
Willy Tarreau17c630b2023-01-19 23:58:11 +01002588 TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002589 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002590 goto out;
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002591 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002592
Christopher Faulet485da0b2021-10-08 08:56:00 +02002593 if (error == 0) {
2594 /* Demux not blocked because of the stream, it is an incomplete frame */
2595 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2596 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002597 goto out; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002598 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002599
2600 if (error < 0) {
2601 /* Failed to decode this frame (e.g. too large request)
2602 * but the HPACK decompressor is still synchronized.
2603 */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002604 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002605 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
Willy Tarreau17c630b2023-01-19 23:58:11 +01002606 TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002607 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau88d138e2019-01-02 19:38:14 +01002608 goto out;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002609 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01002610 goto done;
2611 }
Willy Tarreau1f035502019-01-30 11:44:07 +01002612 /* the connection was already killed by an RST, let's consume
2613 * the data and send another RST.
2614 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01002615 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002616 sess_log(h2c->conn->owner);
Willy Tarreau1f035502019-01-30 11:44:07 +01002617 h2s = (struct h2s*)h2_error_stream;
2618 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002619 }
2620 else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
2621 /* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
2622 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002623 TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau4781b152021-04-06 13:53:36 +02002624 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau22de8d32018-09-05 19:55:58 +02002625 sess_log(h2c->conn->owner);
Willy Tarreau13278b42017-10-13 19:23:14 +02002626 goto conn_err;
2627 }
Willy Tarreau415b1ee2019-01-02 13:59:43 +01002628 else if (h2c->flags & H2_CF_DEM_TOOMANY)
Willy Tarreau36c22322022-05-27 10:41:24 +02002629 goto out; // IDLE but too many sc still present
Willy Tarreau13278b42017-10-13 19:23:14 +02002630
Amaury Denoyelle74162742020-12-11 17:53:05 +01002631 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002632
Willy Tarreau25919232019-01-03 14:48:18 +01002633 /* unrecoverable error ? */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002634 if (h2c->st0 >= H2_CS_ERROR) {
Willy Tarreau17c630b2023-01-19 23:58:11 +01002635 TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002636 sess_log(h2c->conn->owner);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002637 goto out;
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002638 }
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002639
Willy Tarreau25919232019-01-03 14:48:18 +01002640 if (error <= 0) {
Christopher Faulet485da0b2021-10-08 08:56:00 +02002641 if (error == 0) {
2642 /* Demux not blocked because of the stream, it is an incomplete frame */
2643 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2644 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau25919232019-01-03 14:48:18 +01002645 goto out; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002646 }
Willy Tarreau25919232019-01-03 14:48:18 +01002647
2648 /* Failed to decode this stream (e.g. too large request)
2649 * but the HPACK decompressor is still synchronized.
2650 */
Willy Tarreauf43f36d2023-01-19 23:22:03 +01002651 sess_log(h2c->conn->owner);
Willy Tarreau25919232019-01-03 14:48:18 +01002652 h2s = (struct h2s*)h2_error_stream;
2653 goto send_rst;
2654 }
2655
Willy Tarreau29268e92021-06-17 08:29:14 +02002656 TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);
2657
Willy Tarreau198b5072022-05-12 09:08:51 +02002658 /* Now we cannot roll back and we won't come back here anymore for this
2659 * stream, this stream ID is open.
2660 */
2661 if (h2c->dsi > h2c->max_id)
2662 h2c->max_id = h2c->dsi;
2663
Willy Tarreau22de8d32018-09-05 19:55:58 +02002664 /* Note: we don't emit any other logs below because ff we return
Willy Tarreaua8e49542018-10-03 18:53:55 +02002665 * positively from h2c_frt_stream_new(), the stream will report the error,
2666 * and if we return in error, h2c_frt_stream_new() will emit the error.
Christopher Faulet7d013e72020-12-15 16:56:50 +01002667 *
2668 * Xfer the rxbuf to the stream. On success, the new stream owns the
2669 * rxbuf. On error, it is released here.
Willy Tarreau22de8d32018-09-05 19:55:58 +02002670 */
Amaury Denoyelle90ac6052021-10-18 14:45:49 +02002671 h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
Willy Tarreau13278b42017-10-13 19:23:14 +02002672 if (!h2s) {
Willy Tarreau96a10c22018-12-23 18:30:44 +01002673 h2s = (struct h2s*)h2_refused_stream;
2674 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002675 }
2676
2677 h2s->st = H2_SS_OPEN;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002678 h2s->flags |= flags;
Willy Tarreau1915ca22019-01-24 11:49:37 +01002679 h2s->body_len = body_len;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002680
Willy Tarreau88d138e2019-01-02 19:38:14 +01002681 done:
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002682 if (h2c->dff & H2_F_HEADERS_END_STREAM)
Willy Tarreau13278b42017-10-13 19:23:14 +02002683 h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002684
2685 if (h2s->flags & H2_SF_ES_RCVD) {
Willy Tarreaufc10f592019-01-30 19:28:32 +01002686 if (h2s->st == H2_SS_OPEN)
2687 h2s->st = H2_SS_HREM;
2688 else
2689 h2s_close(h2s);
Willy Tarreau13278b42017-10-13 19:23:14 +02002690 }
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002691 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002692
2693 conn_err:
2694 h2c_error(h2c, error);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002695 goto out;
Willy Tarreau13278b42017-10-13 19:23:14 +02002696
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002697 out:
2698 h2_release_buf(h2c, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002699 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002700 return NULL;
Willy Tarreau96a10c22018-12-23 18:30:44 +01002701
2702 send_rst:
2703 /* make the demux send an RST for the current stream. We may only
2704 * do this if we're certain that the HEADERS frame was properly
2705 * decompressed so that the HPACK decoder is still kept up to date.
2706 */
2707 h2_release_buf(h2c, &rxbuf);
2708 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002709
Willy Tarreau022e5e52020-09-10 09:33:15 +02002710 TRACE_USER("rejected H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002711 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau96a10c22018-12-23 18:30:44 +01002712 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002713}
2714
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002715/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2716 * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
2717 * errors here are reported as connection errors since it's impossible to
2718 * recover from such errors after the compression context has been altered.
2719 */
2720static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
2721{
Christopher Faulet6884aa32019-09-23 15:28:20 +02002722 struct buffer rxbuf = BUF_NULL;
2723 unsigned long long body_len = 0;
2724 uint32_t flags = 0;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002725 int error;
2726
Willy Tarreau7838a792019-08-12 18:42:03 +02002727 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2728
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002729 if (!b_size(&h2c->dbuf)) {
2730 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002731 goto fail; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002732 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002733
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002734 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2735 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002736 goto fail; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002737 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002738
Christopher Faulet6884aa32019-09-23 15:28:20 +02002739 if (h2s->st != H2_SS_CLOSED) {
Amaury Denoyelle74162742020-12-11 17:53:05 +01002740 error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
Christopher Faulet6884aa32019-09-23 15:28:20 +02002741 }
2742 else {
2743 /* the connection was already killed by an RST, let's consume
2744 * the data and send another RST.
2745 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01002746 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Christopher Fauletea7a7782019-09-26 16:19:13 +02002747 h2s = (struct h2s*)h2_error_stream;
Christopher Faulet6884aa32019-09-23 15:28:20 +02002748 h2c->st0 = H2_CS_FRAME_E;
2749 goto send_rst;
2750 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002751
Willy Tarreau25919232019-01-03 14:48:18 +01002752 /* unrecoverable error ? */
Willy Tarreau17c630b2023-01-19 23:58:11 +01002753 if (h2c->st0 >= H2_CS_ERROR) {
2754 TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002755 goto fail;
Willy Tarreau17c630b2023-01-19 23:58:11 +01002756 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002757
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002758 if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
2759 /* RFC7540#5.1 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002760 TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002761 h2s_error(h2s, H2_ERR_STREAM_CLOSED);
2762 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau4781b152021-04-06 13:53:36 +02002763 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002764 goto fail;
Willy Tarreau08bb1d62019-01-30 16:55:48 +01002765 }
2766
Willy Tarreau25919232019-01-03 14:48:18 +01002767 if (error <= 0) {
Christopher Faulet485da0b2021-10-08 08:56:00 +02002768 if (error == 0) {
2769 /* Demux not blocked because of the stream, it is an incomplete frame */
2770 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2771 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002772 goto fail; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002773 }
Willy Tarreau25919232019-01-03 14:48:18 +01002774
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002775 /* stream error : send RST_STREAM */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002776 TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau25919232019-01-03 14:48:18 +01002777 h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002778 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau4781b152021-04-06 13:53:36 +02002779 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002780 goto fail;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002781 }
2782
Christopher Fauletfa922f02019-05-07 10:55:17 +02002783 if (h2c->dff & H2_F_HEADERS_END_STREAM)
Willy Tarreau45ffc0c2019-01-03 09:32:20 +01002784 h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreau45ffc0c2019-01-03 09:32:20 +01002785
Willy Tarreau95acc8b2022-05-27 16:14:10 +02002786 if (se_fl_test(h2s->sd, SE_FL_ERROR) && h2s->st < H2_SS_ERROR)
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002787 h2s->st = H2_SS_ERROR;
Christopher Fauletfa922f02019-05-07 10:55:17 +02002788 else if (h2s->flags & H2_SF_ES_RCVD) {
2789 if (h2s->st == H2_SS_OPEN)
2790 h2s->st = H2_SS_HREM;
2791 else if (h2s->st == H2_SS_HLOC)
2792 h2s_close(h2s);
2793 }
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002794
Christopher Fauletf95f8762021-01-22 11:59:07 +01002795 /* Unblock busy server h2s waiting for the response headers to validate
2796 * the tunnel establishment or the end of the response of an oborted
2797 * tunnel
2798 */
2799 if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
2800 (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
2801 TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
2802 h2s->flags &= ~H2_SF_BLK_MBUSY;
2803 }
2804
Willy Tarreau9abb3172021-06-16 18:32:42 +02002805 TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002806 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002807 return h2s;
Willy Tarreau7838a792019-08-12 18:42:03 +02002808 fail:
2809 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2810 return NULL;
Christopher Faulet6884aa32019-09-23 15:28:20 +02002811
2812 send_rst:
2813 /* make the demux send an RST for the current stream. We may only
2814 * do this if we're certain that the HEADERS frame was properly
2815 * decompressed so that the HPACK decoder is still kept up to date.
2816 */
2817 h2_release_buf(h2c, &rxbuf);
2818 h2c->st0 = H2_CS_FRAME_E;
2819
Willy Tarreau022e5e52020-09-10 09:33:15 +02002820 TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Christopher Faulet6884aa32019-09-23 15:28:20 +02002821 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2822 return h2s;
Willy Tarreauc12f38f2018-10-08 14:53:27 +02002823}
2824
Willy Tarreau454f9052017-10-26 19:40:35 +02002825/* processes a DATA frame. Returns > 0 on success or zero on missing data.
2826 * It may return an error in h2c or h2s. Described in RFC7540#6.1.
 * When 1 is returned, the frame was fully processed (the callees moved
 * h2c->st0 out of H2_CS_FRAME_P); a stream error raised here switches
 * h2c->st0 to H2_CS_FRAME_E and 0 is returned.
2827 */
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01002828static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
Willy Tarreau454f9052017-10-26 19:40:35 +02002829{
2830	int error;
2831
Willy Tarreau7838a792019-08-12 18:42:03 +02002832	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
2833
Willy Tarreau454f9052017-10-26 19:40:35 +02002834	/* note that empty DATA frames are perfectly valid and sometimes used
2835	 * to signal an end of stream (with the ES flag).
2836	 */
2837
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002838	if (!b_size(&h2c->dbuf) && h2c->dfl) {
2839		h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002840		goto fail; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002841	}
Willy Tarreau454f9052017-10-26 19:40:35 +02002842
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002843	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2844		h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002845		goto fail; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002846	}
Willy Tarreau454f9052017-10-26 19:40:35 +02002847
2848	/* now either the frame is complete or the buffer is complete */
2849
2850	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
2851		/* RFC7540#6.1 */
2852		error = H2_ERR_STREAM_CLOSED;
2853		goto strm_err;
2854	}
2855
	/* frame payload (minus padding) must not exceed the announced
	 * content-length when one was advertised.
	 */
Christopher Faulet4f09ec82019-06-19 09:25:58 +02002856	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
Willy Tarreau1915ca22019-01-24 11:49:37 +01002857		/* RFC7540#8.1.2 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002858		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau1915ca22019-01-24 11:49:37 +01002859		error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002860		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau1915ca22019-01-24 11:49:37 +01002861		goto strm_err;
2862	}
Christopher Faulet91b21dc2021-01-22 12:13:15 +01002863	if (!(h2c->flags & H2_CF_IS_BACK) &&
2864	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
2865	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
2866		/* a tunnel attempt was aborted but the client still tries to send some raw data.
2867		 * Thus the stream is closed with the CANCEL error. Here we take care it is not
2868		 * an empty DATA Frame with the ES flag. The error is only handled if ES was
2869		 * already sent to the client because depending on the scheduling, these data may
Ilya Shipitsinacf84592021-02-06 22:29:08 +05002870		 * have been sent before the server response but not handled here.
Christopher Faulet91b21dc2021-01-22 12:13:15 +01002871		 */
2872		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
2873		error = H2_ERR_CANCEL;
2874		goto strm_err;
2875	}
Willy Tarreau1915ca22019-01-24 11:49:37 +01002876
	/* transfer the frame payload to the stream; stop here if it could
	 * not complete yet (missing data or room), we'll be called again.
	 */
Willy Tarreaua56a6de2018-02-26 15:59:07 +01002877	if (!h2_frt_transfer_data(h2s))
Willy Tarreau7838a792019-08-12 18:42:03 +02002878		goto fail;
Willy Tarreaua56a6de2018-02-26 15:59:07 +01002879
Willy Tarreau454f9052017-10-26 19:40:35 +02002880	/* call the upper layers to process the frame, then let the upper layer
2881	 * notify the stream about any change.
2882	 */
Willy Tarreau7be4ee02022-05-18 07:31:41 +02002883	if (!h2s_sc(h2s)) {
Willy Tarreau082c4572019-08-06 10:11:02 +02002884		/* The upper layer has already closed, this may happen on
2885		 * 4xx/redirects during POST, or when receiving a response
2886		 * from an H2 server after the client has aborted.
2887		 */
2888		error = H2_ERR_CANCEL;
Willy Tarreau454f9052017-10-26 19:40:35 +02002889		goto strm_err;
2890	}
2891
	/* a connection error may have been raised by the processing above */
Willy Tarreau8f650c32017-11-21 19:36:21 +01002892	if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02002893		goto fail;
Willy Tarreau8f650c32017-11-21 19:36:21 +01002894
Willy Tarreau721c9742017-11-07 11:05:42 +01002895	if (h2s->st >= H2_SS_ERROR) {
Willy Tarreau454f9052017-10-26 19:40:35 +02002896		/* stream error : send RST_STREAM */
Willy Tarreaua20a5192017-12-27 11:02:06 +01002897		h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau454f9052017-10-26 19:40:35 +02002898	}
2899
2900	/* check for completion : the callee will change this to FRAME_A or
2901	 * FRAME_H once done.
2902	 */
2903	if (h2c->st0 == H2_CS_FRAME_P)
Willy Tarreau7838a792019-08-12 18:42:03 +02002904		goto fail;
Willy Tarreau454f9052017-10-26 19:40:35 +02002905
Willy Tarreauc4134ba2017-12-11 18:45:08 +01002906	/* last frame */
2907	if (h2c->dff & H2_F_DATA_END_STREAM) {
Christopher Fauletfa922f02019-05-07 10:55:17 +02002908		h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreaufc10f592019-01-30 19:28:32 +01002909		if (h2s->st == H2_SS_OPEN)
2910			h2s->st = H2_SS_HREM;
2911		else
2912			h2s_close(h2s);
2913
		/* ES received while the announced content-length was not
		 * fully consumed: the message was shorter than advertised.
		 */
Willy Tarreau1915ca22019-01-24 11:49:37 +01002914		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
2915			/* RFC7540#8.1.2 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002916			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau1915ca22019-01-24 11:49:37 +01002917			error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002918			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau1915ca22019-01-24 11:49:37 +01002919			goto strm_err;
2920		}
Willy Tarreauc4134ba2017-12-11 18:45:08 +01002921	}
2922
Christopher Fauletf95f8762021-01-22 11:59:07 +01002923	/* Unblock busy server h2s waiting for the end of the response for an
2924	 * aborted tunnel
2925	 */
2926	if ((h2c->flags & H2_CF_IS_BACK) &&
2927	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
2928		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
2929		h2s->flags &= ~H2_SF_BLK_MBUSY;
2930	}
2931
Willy Tarreau7838a792019-08-12 18:42:03 +02002932	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau454f9052017-10-26 19:40:35 +02002933	return 1;
2934
Willy Tarreau454f9052017-10-26 19:40:35 +02002935 strm_err:
Willy Tarreau6432dc82019-01-30 15:42:44 +01002936	h2s_error(h2s, error);
2937	h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002938 fail:
2939	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau454f9052017-10-26 19:40:35 +02002940	return 0;
2941}
2942
Willy Tarreau63864812019-08-07 14:25:20 +02002943/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
2944 * valid for the current stream state. This is needed only after parsing the
2945 * frame header but in practice it can be performed at any time during
2946 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
2947 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 * Note: the "frames after RST_STREAM received" case additionally switches
 * h2c->st0 to H2_CS_FRAME_E so that an RST is emitted for that stream.
2948 */
2949static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
2950{
Willy Tarreau7838a792019-08-12 18:42:03 +02002951	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
2952
Willy Tarreau63864812019-08-07 14:25:20 +02002953	if (h2s->st == H2_SS_IDLE &&
2954	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
2955		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
2956		 * this state MUST be treated as a connection error
2957		 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002958		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02002959		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02002960		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau63864812019-08-07 14:25:20 +02002961			/* only log if no other stream can report the error */
2962			sess_log(h2c->conn->owner);
2963		}
Willy Tarreau4781b152021-04-06 13:53:36 +02002964		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002965		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02002966		return 0;
2967	}
2968
	/* on the backend side a server must not open new streams to us */
Willy Tarreau57a18162019-11-24 14:57:53 +01002969	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
2970		/* only PUSH_PROMISE would be permitted here */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002971		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
Willy Tarreau57a18162019-11-24 14:57:53 +01002972		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002973		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau57a18162019-11-24 14:57:53 +01002974		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
2975		return 0;
2976	}
2977
Willy Tarreau63864812019-08-07 14:25:20 +02002978	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
2979	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
2980		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
2981		 * this state MUST be treated as a stream error.
2982		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
2983		 * PUSH_PROMISE/CONTINUATION cause connection errors.
2984		 */
Amaury Denoyellea8879232020-10-27 17:16:03 +01002985		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002986			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02002987			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002988			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Amaury Denoyellea8879232020-10-27 17:16:03 +01002989		}
2990		else {
Willy Tarreau63864812019-08-07 14:25:20 +02002991			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
Amaury Denoyellea8879232020-10-27 17:16:03 +01002992		}
Willy Tarreau7838a792019-08-12 18:42:03 +02002993		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02002994		return 0;
2995	}
2996
2997	/* Below the management of frames received in closed state is a
2998	 * bit hackish because the spec makes strong differences between
2999	 * streams closed by receiving RST, sending RST, and seeing ES
3000	 * in both directions. In addition to this, the creation of a
3001	 * new stream reusing the identifier of a closed one will be
3002	 * detected here. Given that we cannot keep track of all closed
3003	 * streams forever, we consider that unknown closed streams were
3004	 * closed on RST received, which allows us to respond with an
3005	 * RST without breaking the connection (eg: to abort a transfer).
3006	 * Some frames have to be silently ignored as well.
3007	 */
	/* dsi != 0 : only for real streams, not the connection control stream */
3008	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
3009		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
3010			/* #5.1.1: The identifier of a newly
3011			 * established stream MUST be numerically
3012			 * greater than all streams that the initiating
3013			 * endpoint has opened or reserved. This
3014			 * governs streams that are opened using a
3015			 * HEADERS frame and streams that are reserved
3016			 * using PUSH_PROMISE. An endpoint that
3017			 * receives an unexpected stream identifier
3018			 * MUST respond with a connection error.
3019			 */
3020			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
Willy Tarreau7838a792019-08-12 18:42:03 +02003021			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02003022			return 0;
3023		}
3024
Willy Tarreau4c08f122019-09-26 08:47:15 +02003025		if (h2s->flags & H2_SF_RST_RCVD &&
3026		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
Willy Tarreau63864812019-08-07 14:25:20 +02003027			/* RFC7540#5.1:closed: an endpoint that
3028			 * receives any frame other than PRIORITY after
3029			 * receiving a RST_STREAM MUST treat that as a
3030			 * stream error of type STREAM_CLOSED.
3031			 *
3032			 * Note that old streams fall into this category
3033			 * and will lead to an RST being sent.
3034			 *
3035			 * However, we cannot generalize this to all frame types. Those
3036			 * carrying compression state must still be processed before
3037			 * being dropped or we'll desynchronize the decoder. This can
3038			 * happen with request trailers received after sending an
3039			 * RST_STREAM, or with header/trailers responses received after
3040			 * sending RST_STREAM (aborted stream).
Willy Tarreau4c08f122019-09-26 08:47:15 +02003041			 *
3042			 * In addition, since our CLOSED streams always carry the
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003043			 * RST_RCVD bit, we don't want to accidentally catch valid
Willy Tarreau4c08f122019-09-26 08:47:15 +02003044			 * frames for a closed stream, i.e. RST/PRIO/WU.
Willy Tarreau63864812019-08-07 14:25:20 +02003045			 */
3046			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
3047			h2c->st0 = H2_CS_FRAME_E;
Christopher Faulet6884aa32019-09-23 15:28:20 +02003048			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02003049			return 0;
3050		}
3051
3052		/* RFC7540#5.1:closed: if this state is reached as a
3053		 * result of sending a RST_STREAM frame, the peer that
3054		 * receives the RST_STREAM might have already sent
3055		 * frames on the stream that cannot be withdrawn. An
3056		 * endpoint MUST ignore frames that it receives on
3057		 * closed streams after it has sent a RST_STREAM
3058		 * frame. An endpoint MAY choose to limit the period
3059		 * over which it ignores frames and treat frames that
3060		 * arrive after this time as being in error.
3061		 */
3062		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
3063			/* RFC7540#5.1:closed: any frame other than
3064			 * PRIO/WU/RST in this state MUST be treated as
3065			 * a connection error
3066			 */
3067			if (h2c->dft != H2_FT_RST_STREAM &&
3068			    h2c->dft != H2_FT_PRIORITY &&
3069			    h2c->dft != H2_FT_WINDOW_UPDATE) {
3070				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
Willy Tarreau7838a792019-08-12 18:42:03 +02003071				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02003072				return 0;
3073			}
3074		}
3075	}
Willy Tarreau7838a792019-08-12 18:42:03 +02003076	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
Willy Tarreau63864812019-08-07 14:25:20 +02003077	return 1;
3078}
3079
Willy Tarreaubc933932017-10-09 16:21:43 +02003080/* process Rx frames to be demultiplexed */
3081static void h2_process_demux(struct h2c *h2c)
3082{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003083 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003084 struct h2_fh hdr;
3085 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003086 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003087
Willy Tarreau7838a792019-08-12 18:42:03 +02003088 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3089
Willy Tarreau081d4722017-05-16 21:51:05 +02003090 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003091 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003092
3093 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3094 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003095 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003096 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003097 goto out;
3098
Willy Tarreau52eed752017-09-22 15:05:09 +02003099 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3100 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003101 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003102 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003103 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauee4684f2021-06-17 08:08:48 +02003104 if (b_data(&h2c->dbuf) ||
Christopher Faulet3f35da22021-07-26 10:18:35 +02003105 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauee4684f2021-06-17 08:08:48 +02003106 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003107 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003108 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003109 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003110 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003111
3112 h2c->max_id = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02003113 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreaued2b9d92022-08-18 15:30:41 +02003114 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau52eed752017-09-22 15:05:09 +02003115 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003116
3117 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003118 /* ensure that what is pending is a valid SETTINGS frame
3119 * without an ACK.
3120 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003121 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003122 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003123 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003124 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003125 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003126 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003127 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003128 if (!(h2c->flags & H2_CF_IS_BACK))
3129 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003130 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003131 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003132 }
3133
3134 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3135 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003136 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003137 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3138 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003139 if (!(h2c->flags & H2_CF_IS_BACK))
3140 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003141 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003142 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003143 }
3144
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003145 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003146 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003147 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003148 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3149 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003150 if (!(h2c->flags & H2_CF_IS_BACK))
3151 sess_log(h2c->conn->owner);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003152 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003153 }
3154
Willy Tarreau3bf69182018-12-21 15:34:50 +01003155 /* that's OK, switch to FRAME_P to process it. This is
3156 * a SETTINGS frame whose header has already been
3157 * deleted above.
3158 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003159 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003160 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003161 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003162 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003163 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003164
3165 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003166 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003167 int ret = 0;
3168
Willy Tarreau7838a792019-08-12 18:42:03 +02003169 if (!b_data(&h2c->dbuf)) {
3170 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003171 h2c->flags |= H2_CF_DEM_SHORT_READ;
3172 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003173 }
3174
3175 if (h2c->st0 >= H2_CS_ERROR) {
3176 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003177 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003178 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003179
3180 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003181 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003182 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3183 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003184 break;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003185 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003186
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003187 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003188 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003189 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003190 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003191 /* only log if no other stream can report the error */
3192 sess_log(h2c->conn->owner);
3193 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003194 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003195 break;
3196 }
3197
Willy Tarreau617592c2022-06-08 16:32:22 +02003198 if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
3199 /* changed stream with a pending WU, need to
3200 * send it now.
3201 */
3202 TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
3203 ret = h2c_send_strm_wu(h2c);
3204 if (ret <= 0)
3205 break;
3206 }
3207
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003208 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003209 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3210 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3211 * we read the pad length and drop it from the remaining
3212 * payload (one byte + the 9 remaining ones = 10 total
3213 * removed), so we have a frame payload starting after the
3214 * pad len. Flow controlled frames (DATA) also count the
3215 * padlen in the flow control, so it must be adjusted.
3216 */
3217 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003218 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003219 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003220 if (!(h2c->flags & H2_CF_IS_BACK))
3221 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003222 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003223 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003224 }
3225 hdr.len--;
3226
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003227 if (b_data(&h2c->dbuf) < 10) {
3228 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003229 break; // missing padlen
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003230 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003231
3232 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3233
3234 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003235 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003236 /* RFC7540#6.1 : pad length = length of
3237 * frame payload or greater => error.
3238 */
3239 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003240 if (!(h2c->flags & H2_CF_IS_BACK))
3241 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003242 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003243 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003244 }
3245
3246 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3247 h2c->rcvd_c++;
3248 h2c->rcvd_s++;
3249 }
3250 b_del(&h2c->dbuf, 1);
3251 }
3252 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003253
3254 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003255 h2c->dfl = hdr.len;
3256 h2c->dsi = hdr.sid;
3257 h2c->dft = hdr.ft;
3258 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003259 h2c->dpl = padlen;
Willy Tarreau0f458712022-08-18 11:19:57 +02003260 h2c->flags |= H2_CF_DEM_IN_PROGRESS;
Willy Tarreau73db4342019-09-25 07:28:44 +02003261 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003262 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003263
3264 /* check for minimum basic frame format validity */
3265 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3266 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003267 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003268 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003269 if (!(h2c->flags & H2_CF_IS_BACK))
3270 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003271 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003272 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003273 }
Willy Tarreau15a47332022-03-18 15:57:34 +01003274
3275 /* transition to HEADERS frame ends the keep-alive idle
3276 * timer and starts the http-request idle delay.
3277 */
3278 if (hdr.ft == H2_FT_HEADERS)
3279 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003280 }
3281
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003282 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3283 * H2_CS_FRAME_P indicates an incomplete previous operation
3284 * (most often the first attempt) and requires some validity
3285 * checks for the frame and the current state. The two other
3286 * ones are set after completion (or abortion) and must skip
3287 * validity checks.
3288 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003289 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3290
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003291 if (tmp_h2s != h2s && h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003292 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003293 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003294 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003295 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003296 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003297 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003298 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003299 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003300 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003301 }
3302 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003303
Willy Tarreau63864812019-08-07 14:25:20 +02003304 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003305 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3306 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003307 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003308 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003309
Willy Tarreau7e98c052017-10-10 15:56:59 +02003310 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003311 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003312 if (h2c->st0 == H2_CS_FRAME_P) {
3313 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003314 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003315 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003316 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003317
Willy Tarreau7838a792019-08-12 18:42:03 +02003318 if (h2c->st0 == H2_CS_FRAME_A) {
3319 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003320 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003321 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003322 break;
3323
Willy Tarreaucf68c782017-10-10 17:11:41 +02003324 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003325 if (h2c->st0 == H2_CS_FRAME_P) {
3326 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003327 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003328 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003329
Willy Tarreau7838a792019-08-12 18:42:03 +02003330 if (h2c->st0 == H2_CS_FRAME_A) {
3331 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003332 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003333 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003334 break;
3335
Willy Tarreau26f95952017-07-27 17:18:30 +02003336 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003337 if (h2c->st0 == H2_CS_FRAME_P) {
3338 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003339 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003340 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003341 break;
3342
Willy Tarreau61290ec2017-10-17 08:19:21 +02003343 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003344 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003345 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3346 * frames' parsers consume all following CONTINUATION
3347 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003348 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003349 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003350 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003351 if (!(h2c->flags & H2_CF_IS_BACK))
3352 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003353 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003354 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003355
Willy Tarreau13278b42017-10-13 19:23:14 +02003356 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003357 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003358 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003359 if (h2c->flags & H2_CF_IS_BACK)
3360 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3361 else
3362 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003363 if (tmp_h2s) {
3364 h2s = tmp_h2s;
3365 ret = 1;
3366 }
3367 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003368 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003369 break;
3370
Willy Tarreau454f9052017-10-26 19:40:35 +02003371 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003372 if (h2c->st0 == H2_CS_FRAME_P) {
3373 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003374 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003375 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003376 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003377
Willy Tarreau7838a792019-08-12 18:42:03 +02003378 if (h2c->st0 == H2_CS_FRAME_A) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003379 /* rcvd_s will suffice to trigger the sending of a WU */
3380 h2c->st0 = H2_CS_FRAME_H;
Willy Tarreau7838a792019-08-12 18:42:03 +02003381 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003382 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003383
Willy Tarreau92153fc2017-12-03 19:46:19 +01003384 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003385 if (h2c->st0 == H2_CS_FRAME_P) {
3386 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003387 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003388 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003389 break;
3390
Willy Tarreaucd234e92017-08-18 10:59:39 +02003391 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003392 if (h2c->st0 == H2_CS_FRAME_P) {
3393 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003394 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003395 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003396 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003397 break;
3398
Willy Tarreaue96b0922017-10-30 00:28:29 +01003399 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003400 if (h2c->st0 == H2_CS_FRAME_P) {
3401 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003402 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003403 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003404 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003405 break;
3406
Willy Tarreau1c661982017-10-30 13:52:01 +01003407 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003408 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003409 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003410 /* drop frames that we ignore. They may be larger than
3411 * the buffer so we drain all of their contents until
3412 * we reach the end.
3413 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003414 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3415 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003416 h2c->dfl -= ret;
3417 ret = h2c->dfl == 0;
3418 }
3419
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003420 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003421 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003422 if (h2s->st == H2_SS_ERROR) {
3423 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003424 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003425 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003426
Willy Tarreau7838a792019-08-12 18:42:03 +02003427 if (h2c->st0 == H2_CS_FRAME_E) {
3428 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003429 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003430 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003431
Willy Tarreau7e98c052017-10-10 15:56:59 +02003432 /* error or missing data condition met above ? */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003433 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003434 break;
3435
3436 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003437 if (h2c->dfl)
3438 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003439 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3440 b_del(&h2c->dbuf, ret);
3441 h2c->dfl -= ret;
3442 if (!h2c->dfl) {
Willy Tarreau0f458712022-08-18 11:19:57 +02003443 h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
Christopher Faulet5112a602019-09-26 16:38:28 +02003444 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3445 h2c->st0 = H2_CS_FRAME_H;
Christopher Faulet5112a602019-09-26 16:38:28 +02003446 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003447 }
3448 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003449
Willy Tarreau617592c2022-06-08 16:32:22 +02003450 if (h2c->rcvd_s > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003451 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau617592c2022-06-08 16:32:22 +02003452 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
3453 h2c_send_strm_wu(h2c);
3454 }
3455
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003456 if (h2c->rcvd_c > 0 &&
Christopher Faulet68ee7842022-10-12 10:21:33 +02003457 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003458 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003459 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003460 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003461
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003462 done:
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003463 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3464 if (h2c->flags & H2_CF_RCVD_SHUT)
3465 h2c->flags |= H2_CF_END_REACHED;
3466 }
3467
Willy Tarreau7be4ee02022-05-18 07:31:41 +02003468 if (h2s && h2s_sc(h2s) &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003469 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003470 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003471 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003472 (h2s->flags & H2_SF_ES_RCVD) ||
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003473 se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003474 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003475 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02003476 se_fl_set(h2s->sd, SE_FL_RCV_MORE);
Willy Tarreau7e094452018-12-19 18:08:52 +01003477 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003478 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003479
Willy Tarreau7838a792019-08-12 18:42:03 +02003480 if (old_iw != h2c->miw) {
3481 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003482 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003483 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003484
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003485 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003486 out:
3487 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003488 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003489}
3490
Willy Tarreau989539b2020-01-10 17:01:29 +01003491/* resume each h2s eligible for sending in list head <head> */
3492static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3493{
3494 struct h2s *h2s, *h2s_back;
3495
3496 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3497
3498 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3499 if (h2c->mws <= 0 ||
3500 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3501 h2c->st0 >= H2_CS_ERROR)
3502 break;
3503
3504 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003505
Willy Tarreaud9464162020-01-10 18:25:07 +01003506 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003507 continue;
3508
Willy Tarreau5723f292020-01-10 15:16:57 +01003509 /* If the sender changed his mind and unsubscribed, let's just
3510 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003511 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003512 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3513 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003514 LIST_DEL_INIT(&h2s->list);
3515 continue;
3516 }
3517
Willy Tarreauf96508a2020-01-10 11:12:48 +01003518 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003519 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003520 tasklet_wakeup(h2s->subs->tasklet);
3521 h2s->subs->events &= ~SUB_RETRY_SEND;
3522 if (!h2s->subs->events)
3523 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003524 }
3525 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3526 tasklet_wakeup(h2s->shut_tl);
3527 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003528 }
3529
3530 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3531}
3532
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end (i.e. senders were resumed or nothing had to be done), and 0 when
 * blocked on a mux condition while trying to emit the GOAWAY (out0 path).
 * Note: the "fail" label below is also reached by normal fall-through after
 * resuming the send lists; it only acts when the connection is in error.
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	/* before FRAME_H, the connection may still be establishing: on the
	 * backend side the preface must be sent first.
	 */
	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			/* try to emit the GOAWAY once; a stream id must have
			 * been seen (max_id >= 0) for it to be meaningful.
			 */
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3592
Willy Tarreau62f52692017-10-08 23:01:42 +02003593
/* Attempt to read data, and subscribe if none available.
 * The function returns 1 if data has been received, otherwise zero.
 * More precisely (see final return): non-zero when bytes were read OR when
 * a shutdown/error was detected on the connection, zero otherwise.
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* already subscribed for receive: nothing to do until we're woken up */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	/* make sure we have a demux buffer; mark the allocation failure so
	 * that demux knows why it's blocked.
	 */
	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		/* nothing read but reads still allowed: wait for the transport
		 * layer to wake us up when data arrive.
		 */
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	/* reflect transport-level shutdown/error into the h2c flags */
	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}
	if (h2c->conn->flags & CO_FL_ERROR) {
		TRACE_DATA("connection error", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_ERROR;
	}

	/* release the demux buffer if it ended up empty */
	if (!b_data(buf)) {
		h2_release_buf(h2c, &h2c->dbuf);
		goto end;
	}

	if (b_data(buf) == buf->size) {
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

 end:
	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR));
}
3671
/* Try to send data if possible.
 * The function returns 1 if data have been sent, otherwise zero.
 * More precisely (see final return): non-zero when bytes were sent OR when
 * an error is reported/pending on the connection, zero otherwise.
 */
static int h2_send(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	int done;
	int sent = 0;

	TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);

	/* on a known error, drop whatever is pending in the mux buffer and
	 * report "progress" so the caller doesn't loop on us.
	 */
	if (h2c->flags & (H2_CF_ERROR|H2_CF_ERR_PENDING)) {
		TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
		return 1;
	}

	if (conn->flags & CO_FL_WAIT_XPRT) {
		/* a handshake was requested */
		goto schedule;
	}

	/* This loop is quite simple : it tries to fill as much as it can from
	 * pending streams into the existing buffer until it's reportedly full
	 * or the end of send requests is reached. Then it tries to send this
	 * buffer's contents out, marks it not full if at least one byte could
	 * be sent, and tries again.
	 *
	 * The snd_buf() function normally takes a "flags" argument which may
	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
	 * data immediately comes and CO_SFL_STREAMER to indicate that the
	 * connection is streaming lots of data (used to increase TLS record
	 * size at the expense of latency). The former can be sent any time
	 * there's a buffer full flag, as it indicates at least one stream
	 * attempted to send and failed so there are pending data. An
	 * alternative would be to set it as long as there's an active stream
	 * but that would be problematic for ACKs until we have an absolute
	 * guarantee that all waiters have at least one byte to send. The
	 * latter should possibly not be set for now.
	 */

	done = 0;
	while (!done) {
		unsigned int flags = 0;
		unsigned int released = 0;
		struct buffer *buf;

		/* fill as much as we can into the current buffer */
		while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
			done = h2_process_mux(h2c);

		if (h2c->flags & H2_CF_MUX_MALLOC)
			done = 1; // we won't go further without extra buffers

		if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
		    (h2c->flags & H2_CF_GOAWAY_FAILED))
			break;

		if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))
			flags |= CO_SFL_MSG_MORE;

		/* flush each mux buffer in order; stop at the first one that
		 * cannot be fully sent (partial write or write refusal).
		 */
		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), flags);
				if (!ret) {
					done = 1;
					break;
				}
				sent = 1;
				TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
				b_del(buf, ret);
				if (b_data(buf)) {
					done = 1;
					break;
				}
			}
			b_free(buf);
			released++;
		}

		if (released)
			offer_buffers(NULL, released);

		/* wrote at least one byte, the buffer is not full anymore */
		if (sent)
			h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
	}

	/* a transport error makes everything pending in mbuf unsendable;
	 * promote it to a hard error once the read side is shut too.
	 */
	if (conn->flags & CO_FL_ERROR) {
		h2c->flags |= H2_CF_ERR_PENDING;
		if (h2c->flags & H2_CF_RCVD_SHUT)
			h2c->flags |= H2_CF_ERROR;
		b_reset(br_tail(h2c->mbuf));
	}

	/* We're not full anymore, so we can wake any task that are waiting
	 * for us.
	 */
	if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H)
		h2_resume_each_sending_h2s(h2c, &h2c->send_list);

	/* We're done, no more to send */
	if (!br_data(h2c->mbuf)) {
		TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
		goto end;
	}
schedule:
	/* data remain: subscribe to be woken up when the socket is writable */
	if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
		TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
	}
	TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
end:
	return sent || (h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR));
}
3789
/* this is the tasklet referenced in h2c->wait_event.tasklet.
 * I/O callback for the H2 connection: performs pending send/recv and runs
 * h2_process() when anything happened. <t> is the tasklet cast as a task,
 * <ctx> is the h2c, <state> carries TASK_F_USR1 when the tasklet was idling
 * on an idle (stealable) connection. Returns <t>, or NULL when the tasklet
 * or the connection was destroyed and must not be requeued.
 */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
{
	struct connection *conn;
	struct tasklet *tl = (struct tasklet *)t;
	int conn_in_list;
	struct h2c *h2c = ctx;
	int ret = 0;

	if (state & TASK_F_USR1) {
		/* the tasklet was idling on an idle connection, it might have
		 * been stolen, let's be careful!
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (t->context == NULL) {
			/* The connection has been taken over by another thread,
			 * we're no longer responsible for it, so just free the
			 * tasklet, and do nothing.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			tasklet_free(tl);
			t = NULL;
			goto leave;
		}
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);

		/* remember which idle list (safe/idle) it belonged to, so it
		 * can be put back there after processing.
		 */
		conn_in_list = conn->flags & CO_FL_LIST_MASK;

		/* Remove the connection from the list, to be sure nobody attempts
		 * to use it while we handle the I/O events
		 */
		if (conn_in_list)
			conn_delete_from_tree(&conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	} else {
		/* we're certain the connection was not in an idle list */
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);
		conn_in_list = 0;
	}

	/* only attempt I/O directions we're not already subscribed for */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		ret = h2_send(h2c);
	if (!(h2c->wait_event.events & SUB_RETRY_RECV))
		ret |= h2_recv(h2c);
	if (ret || b_data(&h2c->dbuf))
		ret = h2_process(h2c);

	/* If we were in an idle list, we want to add it back into it,
	 * unless h2_process() returned -1, which mean it has destroyed
	 * the connection (testing !ret is enough, if h2_process() wasn't
	 * called then ret will be 0 anyway.
	 */
	if (ret < 0)
		t = NULL;

	if (!ret && conn_in_list) {
		struct server *srv = objt_server(conn->target);

		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (conn_in_list == CO_FL_SAFE_LIST)
			eb64_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node);
		else
			eb64_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return t;
}
Willy Tarreaua2af5122017-10-09 11:56:46 +02003863
Willy Tarreau62f52692017-10-08 23:01:42 +02003864/* callback called on any event by the connection handler.
3865 * It applies changes and returns zero, or < 0 if it wants immediate
3866 * destruction of the connection (which normally doesn not happen in h2).
3867 */
static int h2_process(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_WAKE, conn);

	/* demux pending input unless demuxing is blocked; a pending read0
	 * (H2_CF_RCVD_SHUT) must also be processed even with an empty dbuf.
	 */
	if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
	    (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
		h2_process_demux(h2c);

		/* on fatal error the remaining input is useless, drop it */
		if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_ERROR))
			b_reset(&h2c->dbuf);

		if (!b_full(&h2c->dbuf))
			h2c->flags &= ~H2_CF_DEM_DFULL;
	}
	/* try to push pending output frames to the transport layer */
	h2_send(h2c);

	if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
		int send_goaway = 1;
		/* If a close-spread-time option is set, we want to avoid
		 * closing all the active HTTP2 connections at once so we add a
		 * random factor that will spread the closing.
		 */
		if (tick_isset(global.close_spread_end)) {
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* This should increase the closing rate the
				 * further along the window we are. */
				send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
			}
		}
		else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
			send_goaway = 0; /* let the client close his connection himself */
		/* frontend is stopping, reload likely in progress, let's try
		 * to announce a graceful shutdown if not yet done. We don't
		 * care if it fails, it will be tried again later.
		 */
		if (send_goaway) {
			TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
			if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
				/* no last_sid known yet: advertise the highest
				 * possible stream id (2^31 - 1) in the GOAWAY.
				 */
				if (h2c->last_sid < 0)
					h2c->last_sid = (1U << 31) - 1;
				h2c_send_goaway_error(h2c, NULL);
			}
		}
	}

	/*
	 * If we received early data, and the handshake is done, wake
	 * any stream that was waiting for it.
	 */
	if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
		struct eb32_node *node;
		struct h2s *h2s;

		h2c->flags |= H2_CF_WAIT_FOR_HS;
		node = eb32_lookup_ge(&h2c->streams_by_id, 1);

		while (node) {
			h2s = container_of(node, struct h2s, by_id);
			if (se_fl_test(h2s->sd, SE_FL_WAIT_FOR_HS))
				h2s_notify_recv(h2s);
			node = eb32_next(node);
		}
	}

	/* terminal conditions: connection error, pending read0, fatal state,
	 * failed GOAWAY, or all streams done past the announced last_sid.
	 */
	if ((h2c->flags & H2_CF_ERROR) || h2c_read0_pending(h2c) ||
	    h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
	    (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
	     h2c->max_id >= h2c->last_sid)) {
		h2_wake_some_streams(h2c, 0);

		if (eb_is_empty(&h2c->streams_by_id)) {
			/* no more stream, kill the connection now */
			h2_release(h2c);
			TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
			return -1;
		}

		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}
	else if (h2c->st0 == H2_CS_ERROR) {
		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}

	/* release empty buffers back to their pools */
	if (!b_data(&h2c->dbuf))
		h2_release_buf(h2c, &h2c->dbuf);

	/* the mux buffer may be released when empty and nothing is expected to
	 * fill it again (no flow-control nor send candidates).
	 */
	if (h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
	    (h2c->st0 != H2_CS_ERROR &&
	     !br_data(h2c->mbuf) &&
	     (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
	     ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
		h2_release_mbuf(h2c);

	h2c_update_timeout(h2c);
	h2_send(h2c);
	TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
	return 0;
}
3980
Willy Tarreau749f5ca2019-03-21 19:19:36 +01003981/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02003982static int h2_wake(struct connection *conn)
3983{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01003984 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02003985 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02003986
Willy Tarreau7838a792019-08-12 18:42:03 +02003987 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
3988 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01003989 if (ret >= 0)
3990 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003991 TRACE_LEAVE(H2_EV_H2C_WAKE);
3992 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02003993}
3994
/* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed. If the
 * MUX buffer still has lying data or is not allocatable, the connection is
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * GOAWAY frame.
 *
 * Task handler: returns the task itself to re-arm it, or NULL once the task
 * has been destroyed.
 */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
{
	struct h2c *h2c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);

	if (h2c) {
		/* Make sure nobody stole the connection from us */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

		/* Somebody already stole the connection from us, so we should not
		 * free it, we just have to free the task.
		 */
		if (!t->context) {
			h2c = NULL;
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			goto do_leave;
		}


		if (!expired) {
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
			return t;
		}

		if (!h2c_may_expire(h2c)) {
			/* we do still have streams but all of them are idle, waiting
			 * for the data layer, so we must not enforce the timeout here.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			t->expire = TICK_ETERNITY;
			return t;
		}

		/* We're about to destroy the connection, so make sure nobody attempts
		 * to steal it from us.
		 */
		if (h2c->conn->flags & CO_FL_LIST_MASK)
			conn_delete_from_tree(&h2c->conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

do_leave:
	task_destroy(t);

	if (!h2c) {
		/* resources were already deleted */
		TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
		return NULL;
	}

	h2c->task = NULL;
	h2c_error(h2c, H2_ERR_NO_ERROR);
	h2_wake_some_streams(h2c, 0);

	if (br_data(h2c->mbuf)) {
		/* don't even try to send a GOAWAY, the buffer is stuck */
		h2c->flags |= H2_CF_GOAWAY_FAILED;
	}

	/* try to send but no need to insist */
	h2c->last_sid = h2c->max_id;
	if (h2c_send_goaway_error(h2c, NULL) <= 0)
		h2c->flags |= H2_CF_GOAWAY_FAILED;

	/* flush the GOAWAY straight to the transport layer, ring buffer by
	 * ring buffer, releasing the emptied ones; stop on a partial write.
	 */
	if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
		unsigned int released = 0;
		struct buffer *buf;

		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
				if (!ret)
					break;
				b_del(buf, ret);
				if (b_data(buf))
					break;
				b_free(buf);
				released++;
			}
		}

		if (released)
			offer_buffers(NULL, released);
	}

	/* in any case this connection must not be considered idle anymore */
	if (h2c->conn->flags & CO_FL_LIST_MASK) {
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(&h2c->conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

	/* either we can release everything now or it will be done later once
	 * the last stream closes.
	 */
	if (eb_is_empty(&h2c->streams_by_id))
		h2_release(h2c);

	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return NULL;
}
4106
4107
Willy Tarreau62f52692017-10-08 23:01:42 +02004108/*******************************************/
4109/* functions below are used by the streams */
4110/*******************************************/
4111
4112/*
4113 * Attach a new stream to a connection
4114 * (Used for outgoing connections)
4115 */
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004116static int h2_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
Willy Tarreau62f52692017-10-08 23:01:42 +02004117{
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004118 struct h2s *h2s;
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004119 struct h2c *h2c = conn->ctx;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004120
Willy Tarreau7838a792019-08-12 18:42:03 +02004121 TRACE_ENTER(H2_EV_H2S_NEW, conn);
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004122 h2s = h2c_bck_stream_new(h2c, sd->sc, sess);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004123 if (!h2s) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004124 TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Christopher Faulete00ad352021-12-16 14:44:31 +01004125 return -1;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004126 }
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004127
4128 /* the connection is not idle anymore, let's mark this */
4129 HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004130 xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004131
Willy Tarreau7838a792019-08-12 18:42:03 +02004132 TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
Christopher Faulete00ad352021-12-16 14:44:31 +01004133 return 0;
Willy Tarreau62f52692017-10-08 23:01:42 +02004134}
4135
Willy Tarreau4596fe22022-05-17 19:07:51 +02004136/* Retrieves the first valid stream connector from this connection, or returns
4137 * NULL. We have to scan because we may have some orphan streams. It might be
Willy Tarreaufafd3982018-11-18 21:29:20 +01004138 * beneficial to scan backwards from the end to reduce the likeliness to find
4139 * orphans.
4140 */
Willy Tarreaud1373532022-05-27 11:00:59 +02004141static struct stconn *h2_get_first_sc(const struct connection *conn)
Willy Tarreaufafd3982018-11-18 21:29:20 +01004142{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004143 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004144 struct h2s *h2s;
4145 struct eb32_node *node;
4146
4147 node = eb32_first(&h2c->streams_by_id);
4148 while (node) {
4149 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau7be4ee02022-05-18 07:31:41 +02004150 if (h2s_sc(h2s))
4151 return h2s_sc(h2s);
Willy Tarreaufafd3982018-11-18 21:29:20 +01004152 node = eb32_next(node);
4153 }
4154 return NULL;
4155}
4156
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004157static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4158{
4159 int ret = 0;
4160 struct h2c *h2c = conn->ctx;
4161
4162 switch (mux_ctl) {
4163 case MUX_STATUS:
4164 /* Only consider the mux to be ready if we're done with
4165 * the preface and settings, and we had no error.
4166 */
4167 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4168 ret |= MUX_STATUS_READY;
4169 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004170 case MUX_EXIT_STATUS:
4171 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004172 default:
4173 return -1;
4174 }
4175}
4176
Willy Tarreau62f52692017-10-08 23:01:42 +02004177/*
Olivier Houchard060ed432018-11-06 16:32:42 +01004178 * Destroy the mux and the associated connection, if it is no longer used
4179 */
Christopher Faulet73c12072019-04-08 11:23:22 +02004180static void h2_destroy(void *ctx)
Olivier Houchard060ed432018-11-06 16:32:42 +01004181{
Christopher Faulet73c12072019-04-08 11:23:22 +02004182 struct h2c *h2c = ctx;
Olivier Houchard060ed432018-11-06 16:32:42 +01004183
Willy Tarreau7838a792019-08-12 18:42:03 +02004184 TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
Christopher Faulet4e610962022-04-14 11:23:50 +02004185 if (eb_is_empty(&h2c->streams_by_id)) {
4186 BUG_ON(h2c->conn->ctx != h2c);
Christopher Faulet73c12072019-04-08 11:23:22 +02004187 h2_release(h2c);
Christopher Faulet4e610962022-04-14 11:23:50 +02004188 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004189 TRACE_LEAVE(H2_EV_H2C_END);
Olivier Houchard060ed432018-11-06 16:32:42 +01004190}
4191
/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void h2_detach(struct sedesc *sd)
{
	struct h2s *h2s = sd->se;
	struct h2c *h2c;
	struct session *sess;

	TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);

	if (!h2s) {
		TRACE_LEAVE(H2_EV_STRM_END);
		return;
	}

	/* there's no txbuf so we're certain not to be able to send anything */
	h2s->flags &= ~H2_SF_NOTIFIED;

	sess = h2s->sess;
	h2c = h2s->h2c;
	h2c->nb_sc--;
	/* record the moment the last stream connector left, for idle timeout
	 * computation.
	 */
	if (!h2c->nb_sc)
		h2c->idle_start = now_ms;

	if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
	    !h2_frt_has_too_many_sc(h2c)) {
		/* frontend connection was blocking new streams creation */
		h2c->flags &= ~H2_CF_DEM_TOOMANY;
		h2c_restart_reading(h2c, 1);
	}

	/* this stream may be blocked waiting for some data to leave (possibly
	 * an ES or RST frame), so orphan it in this case.
	 */
	if (!(h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) &&
	    (h2c->st0 < H2_CS_ERROR) &&
	    (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
	    ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
		TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
		/* refresh the timeout if none was active, so that the last
		 * leaving stream may arm it.
		 */
		if (!tick_isset(h2c->task->expire))
			h2c_update_timeout(h2c);
		return;
	}

	if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi)) {
		/* unblock the connection if it was blocked on this
		 * stream.
		 */
		h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
		h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
		h2c_restart_reading(h2c, 1);
	}

	h2s_destroy(h2s);

	/* backend side: decide what to do with the connection (keep it private
	 * to the session, park it in the server idle/avail lists, or kill it).
	 */
	if (h2c->flags & H2_CF_IS_BACK) {
		if (!(h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERR_PENDING|H2_CF_ERROR))) {
			if (h2c->conn->flags & CO_FL_PRIVATE) {
				/* Add the connection in the session server list, if not already done */
				if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
					h2c->conn->owner = NULL;
					if (eb_is_empty(&h2c->streams_by_id)) {
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
				}
				if (eb_is_empty(&h2c->streams_by_id)) {
					if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
						/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
						TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
						return;
					}
				}
			}
			else {
				if (eb_is_empty(&h2c->streams_by_id)) {
					/* If the connection is owned by the session, first remove it
					 * from its list
					 */
					if (h2c->conn->owner) {
						session_unown_conn(h2c->conn->owner, h2c->conn);
						h2c->conn->owner = NULL;
					}

					/* mark that the tasklet may lose its context to another thread and
					 * that the handler needs to check it under the idle conns lock.
					 */
					HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
					xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);

					if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
						/* The server doesn't want it, let's kill the connection right away */
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
					/* At this point, the connection has been added to the
					 * server idle list, so another thread may already have
					 * hijacked it, so we can't do anything with it.
					 */
					TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
					return;

				}
				else if (!h2c->conn->hash_node->node.node.leaf_p &&
					 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
					 !LIST_INLIST(&h2c->conn->session_list)) {
					/* streams are still attached and slots remain:
					 * make the connection available for reuse.
					 */
					eb64_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
						    &h2c->conn->hash_node->node);
				}
			}
		}
	}

	/* We don't want to close right now unless we're removing the
	 * last stream, and either the connection is in error, or it
	 * reached the ID already specified in a GOAWAY frame received
	 * or sent (as seen by last_sid >= 0).
	 */
	if (h2c_is_dead(h2c)) {
		/* no more stream will come, kill it now */
		TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
		h2_release(h2c);
	}
	else if (h2c->task) {
		h2c_update_timeout(h2c);
		TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
	}
	else
		TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
}
4328
/* Performs a synchronous or asynchronous shutr(). Picks an appropriate error
 * code for the RST_STREAM, sends it (or defers it to the shut_tl tasklet when
 * the mux is blocked), then closes the stream.
 */
static void h2_do_shutr(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	if (h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	/* a connstream may require us to immediately kill the whole connection
	 * for example because of a "tcp-request content reject" rule that is
	 * normally used to limit abuse. In this case we schedule a goaway to
	 * close the connection.
	 */
	if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
	    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
		TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
		h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
	}
	else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
		/* Nothing was never sent for this stream, so reset with
		 * REFUSED_STREAM error to let the client retry the
		 * request.
		 */
		TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_REFUSED_STREAM);
	}
	else {
		/* a final response was already provided, we don't want this
		 * stream anymore. This may happen when the server responds
		 * before the end of an upload and closes quickly (redirect,
		 * deny, ...)
		 */
		h2s_error(h2s, H2_ERR_CANCEL);
	}

	/* if the RST cannot be sent now, defer it via the lists below */
	if (!(h2s->flags & H2_SF_RST_SENT) &&
	    h2s_send_rst_stream(h2c, h2s) <= 0)
		goto add_to_list;

	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);
	h2s_close(h2s);
 done:
	h2s->flags &= ~H2_SF_WANT_SHUTR;
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
add_to_list:
	/* Let the handler know we want to shutr, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTR;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4394
Willy Tarreau88bdba32019-05-13 18:17:53 +02004395/* Performs a synchronous or asynchronous shutw(). */
4396static void h2_do_shutw(struct h2s *h2s)
Willy Tarreau62f52692017-10-08 23:01:42 +02004397{
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004398 struct h2c *h2c = h2s->h2c;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004399
Willy Tarreaucfba9d62019-08-06 10:30:58 +02004400 if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004401 goto done;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004402
Willy Tarreau7838a792019-08-12 18:42:03 +02004403 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4404
Willy Tarreau473e0e52022-08-18 16:12:15 +02004405 if (h2s->st != H2_SS_ERROR &&
4406 (h2s->flags & (H2_SF_HEADERS_SENT | H2_SF_MORE_HTX_DATA)) == H2_SF_HEADERS_SENT) {
4407 /* we can cleanly close using an empty data frame only after headers
4408 * and if no more data is expected to be sent.
4409 */
Willy Tarreau58e32082017-11-07 14:41:09 +01004410 if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
4411 h2_send_empty_data_es(h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004412 goto add_to_list;
Willy Tarreau58e32082017-11-07 14:41:09 +01004413
4414 if (h2s->st == H2_SS_HREM)
Willy Tarreau00dd0782018-03-01 16:31:34 +01004415 h2s_close(h2s);
Willy Tarreau58e32082017-11-07 14:41:09 +01004416 else
4417 h2s->st = H2_SS_HLOC;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004418 } else {
Willy Tarreau18059042019-01-31 19:12:48 +01004419 /* a connstream may require us to immediately kill the whole connection
4420 * for example because of a "tcp-request content reject" rule that is
4421 * normally used to limit abuse. In this case we schedule a goaway to
4422 * close the connection.
Willy Tarreaua1349f02017-10-31 07:41:55 +01004423 */
Willy Tarreau95acc8b2022-05-27 16:14:10 +02004424 if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
Willy Tarreau18059042019-01-31 19:12:48 +01004425 !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004426 TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau18059042019-01-31 19:12:48 +01004427 h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
4428 h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
4429 }
Willy Tarreau473e0e52022-08-18 16:12:15 +02004430 else if (h2s->flags & H2_SF_MORE_HTX_DATA) {
4431 /* some unsent data were pending (e.g. abort during an upload),
4432 * let's send a CANCEL.
4433 */
4434 TRACE_STATE("shutw before end of data, sending CANCEL", H2_EV_STRM_SHUT, h2c->conn, h2s);
4435 h2s_error(h2s, H2_ERR_CANCEL);
4436 }
Christopher Faulet35757d32019-03-07 15:51:33 +01004437 else {
4438 /* Nothing was never sent for this stream, so reset with
4439 * REFUSED_STREAM error to let the client retry the
4440 * request.
4441 */
Willy Tarreau7838a792019-08-12 18:42:03 +02004442 TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
Christopher Faulet35757d32019-03-07 15:51:33 +01004443 h2s_error(h2s, H2_ERR_REFUSED_STREAM);
4444 }
Willy Tarreau18059042019-01-31 19:12:48 +01004445
Willy Tarreau90c32322017-11-24 08:00:30 +01004446 if (!(h2s->flags & H2_SF_RST_SENT) &&
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004447 h2s_send_rst_stream(h2c, h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004448 goto add_to_list;
Willy Tarreau90c32322017-11-24 08:00:30 +01004449
Willy Tarreau00dd0782018-03-01 16:31:34 +01004450 h2s_close(h2s);
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004451 }
4452
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004453 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02004454 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau7838a792019-08-12 18:42:03 +02004455
4456 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
4457
Willy Tarreau88bdba32019-05-13 18:17:53 +02004458 done:
4459 h2s->flags &= ~H2_SF_WANT_SHUTW;
4460 return;
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004461
4462 add_to_list:
Willy Tarreau5723f292020-01-10 15:16:57 +01004463 /* Let the handler know we want to shutw, and add ourselves to the
4464 * most relevant list if not yet done. h2_deferred_shut() will be
4465 * automatically called via the shut_tl tasklet when there's room
4466 * again.
4467 */
4468 h2s->flags |= H2_SF_WANT_SHUTW;
Willy Tarreau2b718102021-04-21 07:32:39 +02004469 if (!LIST_INLIST(&h2s->list)) {
Willy Tarreau5723f292020-01-10 15:16:57 +01004470 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02004471 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreau5723f292020-01-10 15:16:57 +01004472 else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
Willy Tarreau2b718102021-04-21 07:32:39 +02004473 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004474 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004475 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004476 return;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004477}
4478
/* This is the tasklet referenced in h2s->shut_tl, it is used for
 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
 * and prevented the last frame from being emitted.
 *
 * <t> is the running task, <ctx> points to the h2s concerned, <state> is the
 * tasklet state (unused here). Returns <t> so the tasklet may be requeued, or
 * NULL when the whole connection was released below, in which case the task
 * was freed with it and must not be touched anymore by the caller.
 */
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
{
	struct h2s *h2s = ctx;
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->flags & H2_SF_NOTIFIED) {
		/* some data processing remains to be done first */
		goto end;
	}

	/* Write side first. h2_do_shutw() clears H2_SF_WANT_SHUTW once the
	 * closing frame could be emitted and re-arms it when blocked;
	 * h2_do_shutr() presumably behaves symmetrically for H2_SF_WANT_SHUTR
	 * (its body is not visible here) -- TODO confirm.
	 */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		h2_do_shutw(h2s);

	if (h2s->flags & H2_SF_WANT_SHUTR)
		h2_do_shutr(h2s);

	if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* We're done trying to send, remove ourself from the send_list */
		LIST_DEL_INIT(&h2s->list);

		if (!h2s_sc(h2s)) {
			/* no stream connector is attached anymore: the stream
			 * can be destroyed, and possibly the connection too.
			 */
			h2s_destroy(h2s);
			if (h2c_is_dead(h2c)) {
				h2_release(h2c);
				/* the task was freed together with the connection */
				t = NULL;
			}
		}
	}
 end:
	TRACE_LEAVE(H2_EV_STRM_SHUT);
	return t;
}
4517
Willy Tarreau4596fe22022-05-17 19:07:51 +02004518/* shutr() called by the stream connector (mux_ops.shutr) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004519static void h2_shutr(struct stconn *sc, enum co_shr_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004520{
Willy Tarreau36c22322022-05-27 10:41:24 +02004521 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004522
Willy Tarreau7838a792019-08-12 18:42:03 +02004523 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004524 if (mode)
4525 h2_do_shutr(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004526 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004527}
4528
Willy Tarreau4596fe22022-05-17 19:07:51 +02004529/* shutw() called by the stream connector (mux_ops.shutw) */
Willy Tarreau36c22322022-05-27 10:41:24 +02004530static void h2_shutw(struct stconn *sc, enum co_shw_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004531{
Willy Tarreau36c22322022-05-27 10:41:24 +02004532 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004533
Willy Tarreau7838a792019-08-12 18:42:03 +02004534 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004535 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004536 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004537}
4538
Christopher Faulet9b79a102019-07-15 11:22:56 +02004539/* Decode the payload of a HEADERS frame and produce the HTX request or response
4540 * depending on the connection's side. Returns a positive value on success, a
4541 * negative value on failure, or 0 if it couldn't proceed. May report connection
4542 * errors in h2c->errcode if the frame is non-decodable and the connection
4543 * unrecoverable. In absence of connection error when a failure is reported, the
4544 * caller must assume a stream error.
Willy Tarreauea18f862018-12-22 20:19:26 +01004545 *
4546 * The function may fold CONTINUATION frames into the initial HEADERS frame
4547 * by removing padding and next frame header, then moving the CONTINUATION
4548 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
4549 * leaving a hole between the main frame and the beginning of the next one.
4550 * The possibly remaining incomplete or next frame at the end may be moved
4551 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
4552 * HEADERS frames are unwrapped into a temporary buffer before decoding.
4553 *
4554 * A buffer at the beginning of processing may look like this :
4555 *
4556 * ,---.---------.-----.--------------.--------------.------.---.
4557 * |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
4558 * `---^---------^-----^--------------^--------------^------^---'
4559 * | | <-----> | |
4560 * area | dpl | wrap
4561 * |<--------------> |
4562 * | dfl |
4563 * |<-------------------------------------------------->|
4564 * head data
4565 *
4566 * Padding is automatically overwritten when folding, participating to the
4567 * hole size after dfl :
4568 *
4569 * ,---.------------------------.-----.--------------.------.---.
4570 * |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
4571 * `---^------------------------^-----^--------------^------^---'
4572 * | | <-----> | |
4573 * area | hole | wrap
4574 * |<-----------------------> |
4575 * | dfl |
4576 * |<-------------------------------------------------->|
4577 * head data
4578 *
4579 * Please note that the HEADERS frame is always deprived from its PADLEN byte
4580 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
4581 * bit.
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004582 *
4583 * The <flags> field must point to either the stream's flags or to a copy of it
4584 * so that the function can update the following flags :
4585 * - H2_SF_DATA_CLEN when content-length is seen
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004586 * - H2_SF_HEADERS_RCVD once the frame is successfully decoded
Willy Tarreau88d138e2019-01-02 19:38:14 +01004587 *
4588 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
4589 * decoding, in order to detect if we're dealing with a headers or a trailers
4590 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
Willy Tarreau13278b42017-10-13 19:23:14 +02004591 */
static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
{
	const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf); // start of the HEADERS payload
	struct buffer *tmp = get_trash_chunk();              // scratch storage for decoded header strings
	struct http_hdr list[global.tune.max_http_hdr * 2];  // decoded header list (name/value pairs)
	struct buffer *copy = NULL;                          // linear copy when the input buffer wraps
	unsigned int msgf;                                   // H2_MSGF_* message flags
	struct htx *htx = NULL;
	int flen; // header frame len
	int hole = 0; // bytes freed between the folded frame and the next one
	int ret = 0;  // 1=success, 0=incomplete, -1=error
	int outlen;
	int wrap;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);

next_frame:
	if (b_data(&h2c->dbuf) - hole < h2c->dfl)
		goto leave; // incomplete input frame

	/* No END_HEADERS means there's one or more CONTINUATION frames. In
	 * this case, we'll try to paste it immediately after the initial
	 * HEADERS frame payload and kill any possible padding. The initial
	 * frame's length will be increased to represent the concatenation
	 * of the two frames. The next frame is read from position <tlen>
	 * and written at position <flen> (minus padding if some is present).
	 */
	if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
		struct h2_fh hdr;
		int clen; // CONTINUATION frame's payload length

		TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
		if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
			/* no more data, the buffer may be full, either due to
			 * too large a frame or because of too large a hole that
			 * we're going to compact at the end.
			 */
			goto leave;
		}

		if (hdr.ft != H2_FT_CONTINUATION) {
			/* RFC7540#6.10: frame of unexpected type */
			TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (hdr.sid != h2c->dsi) {
			/* RFC7540#6.10: frame of different stream */
			TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
			/* RFC7540#4.2: invalid frame length */
			TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		/* detect when we must stop aggregating frames */
		h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;

		/* Take as much as we can of the CONTINUATION frame's payload */
		clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
		if (clen > hdr.len)
			clen = hdr.len;

		/* Move the frame's payload over the padding, hole and frame
		 * header. At least one of hole or dpl is null (see diagrams
		 * above). The hole moves after the new aggregated frame.
		 */
		b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
		h2c->dfl += hdr.len - h2c->dpl;
		hole += h2c->dpl + 9; // the swallowed padding plus the 9-byte frame header
		h2c->dpl = 0;
		TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
		goto next_frame;
	}

	flen = h2c->dfl - h2c->dpl;

	/* if the input buffer wraps, take a temporary copy of it (rare) */
	wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
	if (wrap < h2c->dfl) {
		copy = alloc_trash_chunk();
		if (!copy) {
			TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			goto fail;
		}
		memcpy(copy->area, b_head(&h2c->dbuf), wrap);
		memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
		hdrs = (uint8_t *) copy->area;
	}

	/* Skip StreamDep and weight for now (we don't support PRIORITY) */
	if (h2c->dff & H2_F_HEADERS_PRIORITY) {
		if (read_n32(hdrs) == h2c->dsi) {
			/* RFC7540#5.3.1 : stream dep may not depend on itself */
			TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (flen < 5) {
			/* not even enough room for the 4-byte dep + 1-byte weight */
			TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		hdrs += 5; // stream dep = 4, weight = 1
		flen -= 5;
	}

	if (!h2_get_buf(h2c, rxbuf)) {
		TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SALLOC;
		goto leave;
	}

	/* we can't retry a failed decompression operation so we must be very
	 * careful not to take any risks. In practice the output buffer is
	 * always empty except maybe for trailers, in which case we simply have
	 * to wait for the upper layer to finish consuming what is available.
	 */
	htx = htx_from_buf(rxbuf);
	if (!htx_is_empty(htx)) {
		TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SFULL;
		goto leave;
	}

	/* past this point we cannot roll back in case of error */
	outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
	                            sizeof(list)/sizeof(list[0]), tmp);
	if (outlen < 0) {
		TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
		goto fail;
	}

	/* The HPACK decompressor was updated, let's update the input buffer and
	 * the parser's state to commit these changes and allow us to later
	 * fail solely on the stream if needed.
	 */
	b_del(&h2c->dbuf, h2c->dfl + hole);
	h2c->dfl = hole = 0;
	h2c->st0 = H2_CS_FRAME_H;

	/* OK now we have our header list in <list> */
	msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
	msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
	/* If an Extended CONNECT has been sent on this stream, set message flag
	 * to convert 200 response to 101 htx response */
	msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;

	if (*flags & H2_SF_HEADERS_RCVD)
		goto trailers;

	/* This is the first HEADERS frame so it's a headers block */
	if (h2c->flags & H2_CF_IS_BACK)
		outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
	else
		outlen = h2_make_htx_request(list, htx, &msgf, body_len);

	if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
		/* too large headers? this is a stream error only */
		TRACE_STATE("message headers too large", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
		htx->flags |= HTX_FL_PARSING_ERROR;
		goto fail;
	}

	if (msgf & H2_MSGF_BODY) {
		/* a payload is present */
		if (msgf & H2_MSGF_BODY_CL) {
			*flags |= H2_SF_DATA_CLEN;
			htx->extra = *body_len;
		}
	}
	if (msgf & H2_MSGF_BODYLESS_RSP)
		*flags |= H2_SF_BODYLESS_RESP;

	if (msgf & H2_MSGF_BODY_TUNNEL)
		*flags |= H2_SF_BODY_TUNNEL;
	else {
		/* Abort the tunnel attempt, if any */
		if (*flags & H2_SF_BODY_TUNNEL)
			*flags |= H2_SF_TUNNEL_ABRT;
		*flags &= ~H2_SF_BODY_TUNNEL;
	}

 done:
	/* indicate that a HEADERS frame was received for this stream, except
	 * for 1xx responses. For 1xx responses, another HEADERS frame is
	 * expected.
	 */
	if (!(msgf & H2_MSGF_RSP_1XX))
		*flags |= H2_SF_HEADERS_RCVD;

	if (h2c->dff & H2_F_HEADERS_END_STREAM) {
		if (msgf & H2_MSGF_RSP_1XX) {
			/* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
			TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			goto fail;
		}
		/* no more data are expected for this message */
		htx->flags |= HTX_FL_EOM;
	}

	if (msgf & H2_MSGF_EXT_CONNECT)
		*flags |= H2_SF_EXT_CONNECT_RCVD;

	/* success */
	ret = 1;

 leave:
	/* If there is a hole left and it's not at the end, we are forced to
	 * move the remaining data over it.
	 */
	if (hole) {
		if (b_data(&h2c->dbuf) > h2c->dfl + hole)
			b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
			       b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
		b_sub(&h2c->dbuf, hole);
	}

	if (b_full(&h2c->dbuf) && h2c->dfl) {
		/* too large frames */
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = -1;
	}

	if (htx)
		htx_to_buf(htx, rxbuf);
	free_trash_chunk(copy); /* free(NULL)-like: safe when no copy was made */
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
	return ret;

 fail:
	ret = -1;
	goto leave;

 trailers:
	/* This is the last HEADERS frame hence a trailer */
	if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
		/* It's a trailer but it's missing ES flag.
		 * NOTE(review): the trace message below says "EH" while the
		 * missing flag is END_STREAM (ES); message text left unchanged.
		 */
		TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		goto fail;
	}

	/* Trailers terminate a DATA sequence */
	if (h2_make_htx_trailers(list, htx) <= 0) {
		TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
		goto fail;
	}
	goto done;
}
4856
/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
 * parser state is automatically updated. Returns > 0 if it could completely
 * send the current frame, 0 if it couldn't complete, in which case
 * SE_FL_RCV_MORE must be checked to know if some data remain pending (an empty
 * DATA frame can return 0 as a valid result). Stream errors are reported in
 * h2s->errcode and connection errors in h2c->errcode. The caller must already
 * have checked the frame header and ensured that the frame was complete or the
 * buffer full. It changes the frame state to FRAME_A once done.
 */
static int h2_frt_transfer_data(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	int block;                // available room (htx, then contiguous input)
	unsigned int flen = 0;    // how much frame payload we try to move
	struct htx *htx = NULL;
	struct buffer *scbuf;     // the stream's rx buffer (h2s->rxbuf)
	unsigned int sent;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	h2c->flags &= ~H2_CF_DEM_SFULL;

	scbuf = h2_get_buf(h2c, &h2s->rxbuf);
	if (!scbuf) {
		/* rx buffer not allocated yet, wait for the allocator */
		h2c->flags |= H2_CF_DEM_SALLOC;
		TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	htx = htx_from_buf(scbuf);

try_again:
	/* remaining payload, padding excluded */
	flen = h2c->dfl - h2c->dpl;
	if (!flen)
		goto end_transfer;

	if (flen > b_data(&h2c->dbuf)) {
		/* only part of the payload was received so far */
		flen = b_data(&h2c->dbuf);
		if (!flen)
			goto fail;
	}

	block = htx_free_data_space(htx);
	if (!block) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	if (flen > block)
		flen = block;

	/* here, flen is the max we can copy into the output buffer */
	block = b_contig_data(&h2c->dbuf, 0);
	if (flen > block)
		flen = block;

	sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
	TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);

	/* account the transferred bytes on both the frame parser and the
	 * connection/stream flow-control counters.
	 */
	b_del(&h2c->dbuf, sent);
	h2c->dfl    -= sent;
	h2c->rcvd_c += sent;
	h2c->rcvd_s += sent; // warning, this can also affect the closed streams!

	if (h2s->flags & H2_SF_DATA_CLEN) {
		/* keep htx->extra in sync with the announced content-length */
		h2s->body_len -= sent;
		htx->extra = h2s->body_len;
	}

	if (sent < flen) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}

	goto try_again;

 end_transfer:
	/* here we're done with the frame, all the payload (except padding) was
	 * transferred.
	 */

	if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
		/* no more data are expected for this message. This add the EOM
		 * flag but only on the response path or if no tunnel attempt
		 * was aborted. Otherwise (request path + tunnel abrted), the
		 * EOM was already reported.
		 */
		if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
			/* If we receive an empty DATA frame with ES flag while the HTX
			 * message is empty, we must be sure to push a block to be sure
			 * the HTX EOM flag will be handled on the other side. It is a
			 * workaround because for now it is not possible to push empty
			 * HTX DATA block. And without this block, there is no way to
			 * "commit" the end of the message.
			 */
			if (htx_is_empty(htx)) {
				if (!htx_add_endof(htx, HTX_BLK_EOT))
					goto fail;
			}
			htx->flags |= HTX_FL_EOM;
		}
	}

	/* padding counts against flow control too */
	h2c->rcvd_c += h2c->dpl;
	h2c->rcvd_s += h2c->dpl;
	h2c->dpl = 0;
	h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
	htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;
 fail:
	if (htx)
		htx_to_buf(htx, scbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
4973
/* Try to send a HEADERS frame matching HTX response present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_frt_make_resp_headers(struct h2s *h2s, struct htx *htx)
{
	/* VLA sized by the configured max header count; the last slot is
	 * reserved for the end-of-list marker (empty name) below.
	 */
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_sl *sl;
	enum htx_blk_type type;
	int es_now = 0;      /* non-zero once we decide to set END_STREAM on this frame */
	int ret = 0;         /* number of HTX bytes consumed, returned to the caller */
	int hdr;
	
	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	
	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);
		
		if (type == HTX_BLK_UNUSED)
			continue;
		
		if (type == HTX_BLK_EOH)
			break;
		
		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			
			/* collect name/value pairs; they only reference the HTX
			 * storage, nothing is copied at this point.
			 */
			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else if (type == HTX_BLK_RES_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			h2s->status = sl->info.res.status;
			if (h2s->status == 204 || h2s->status == 304)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (h2s->status < 100 || h2s->status > 999) {
				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			else if (h2s->status == 101) {
				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
					h2s->status = 200;
					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
				}
				else {
					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
					goto fail;
				}
			}
			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
				/* Abort the tunnel attempt */
				h2s->flags &= ~H2_SF_BODY_TUNNEL;
				h2s->flags |= H2_SF_TUNNEL_ABRT;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}
	
	/* The start-line must be defined */
	BUG_ON(!sl);
	
	/* marker for end of headers */
	list[hdr].n = ist("");
	
	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}
	
	chunk_reset(&outbuf);
	
	/* make sure we have at least the 9 bytes of the H2 frame header
	 * available contiguously, realigning the mux buffer if needed.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}
	
	if (outbuf.size < 9)
		goto full;
	
	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;
	
	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
		 * dynamic table size update so that some clients are not
		 * confused. In practice we only need to send the DTSU when the
		 * advertised size is lower than the current one, and since we
		 * don't use it and don't care about the default 4096 bytes,
		 * we only ack it with a zero size thus we at most have to deal
		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
		 * below for the whole context and interoperability risks:
		 *   https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
		 */
		if (b_room(&outbuf) < 1)
			goto full;
		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes
		
		/* let's not update the flags now but only once the buffer is
		 * really committed.
		 */
	}
	
	/* encode status, which necessarily is the first one */
	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}
	
	/* encode all headers, stop at empty name */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(list[hdr].n, ist("connection")) ||
		    isteq(list[hdr].n, ist("proxy-connection")) ||
		    isteq(list[hdr].n, ist("keep-alive")) ||
		    isteq(list[hdr].n, ist("upgrade")) ||
		    isteq(list[hdr].n, ist("transfer-encoding")))
			continue;
		
		/* Skip all pseudo-headers */
		if (*(list[hdr].n.ptr) == ':')
			continue;
		
		if (isteq(list[hdr].n, ist("")))
			break; // end
		
		if (!hpack_encode_header(&outbuf, list[hdr].n, list[hdr].v)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);
	
	/* if the encoded block exceeds the negotiated max frame size, split
	 * it into a HEADERS frame followed by CONTINUATION frames.
	 */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
	
	/* remove all header blocks including the EOH and compute the
	 * corresponding size.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}
	
	if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
		/* Response already closed: add END_STREAM */
		es_now = 1;
	}
	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
		/* EOM+empty: we may need to add END_STREAM except for 1xx
		 * responses and tunneled response.
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
			es_now = 1;
	}
	
	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
	
	/* commit the H2 response */
	b_add(mbuf, outbuf.data);
	
	/* indicates the HEADERS frame was sent, except for 1xx responses. For
	 * 1xx responses, another HEADERS frame is expected.
	 */
	if (h2s->status >= 200)
		h2s->flags |= H2_SF_HEADERS_SENT;
	
	if (h2c->flags & H2_CF_SHTS_UPDATED) {
		/* was sent above */
		h2c->flags |= H2_CF_DTSU_EMITTED;
		h2c->flags &= ~H2_CF_SHTS_UPDATED;
	}
	
	if (es_now) {
		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);
	}
	
	/* OK we could properly deliver the response */
 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* current mux buffer is full: try to append a new one to the ring,
	 * otherwise mark the mux/stream blocked and let the caller retry later.
	 */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5228
/* Try to send a HEADERS frame matching HTX request present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx)
{
	/* VLA sized by the configured max header count; the last slot is
	 * reserved for the end-of-list marker (empty name) below.
	 */
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_sl *sl;
	struct ist meth, uri, auth, host = IST_NULL;
	enum htx_blk_type type;
	int es_now = 0;           /* non-zero once we decide to set END_STREAM on this frame */
	int ret = 0;              /* number of HTX bytes consumed, returned to the caller */
	int hdr;
	int extended_connect = 0; /* set when an Upgrade is converted to RFC8441 Extended CONNECT */
	
	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	
	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);
		
		if (type == HTX_BLK_UNUSED)
			continue;
		
		if (type == HTX_BLK_EOH)
			break;
		
		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			
			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			
			/* Skip header if same name is used to add the server name */
			if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
			    isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
				continue;
			
			/* Convert connection: upgrade to Extended connect from rfc 8441 */
			if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
				/* rfc 7230 #6.1 Connection = list of tokens */
				struct ist connection_ist = list[hdr].v;
				do {
					if (isteqi(iststop(connection_ist, ','),
					           ist("upgrade"))) {
						/* the server must have advertised RFC8441
						 * support in its SETTINGS for us to send
						 * an Extended CONNECT.
						 */
						if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
							TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
							goto fail;
						}
						
						TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
						h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
						sl->info.req.meth = HTTP_METH_CONNECT;
						meth = ist("CONNECT");
						
						extended_connect = 1;
						break;
					}
					
					connection_ist = istadv(istfind(connection_ist, ','), 1);
				} while (istlen(connection_ist));
			}
			
			if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
				/* rfc 7230 #6.7 Upgrade = list of protocols
				 * rfc 8441 #4 Extended connect = :protocol is single-valued
				 *
				 * only first HTTP/1 protocol is preserved
				 */
				const struct ist protocol = iststop(list[hdr].v, ',');
				/* upgrade_protocol field is 16 bytes long in h2s */
				istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
			}
			
			/* remember the Host value; it may become :authority for
			 * Extended CONNECT below.
			 */
			if (isteq(list[hdr].n, ist("host")))
				host = list[hdr].v;
			
			hdr++;
		}
		else if (type == HTX_BLK_REQ_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			meth = htx_sl_req_meth(sl);
			uri  = htx_sl_req_uri(sl);
			if (sl->info.req.meth == HTTP_METH_HEAD)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (unlikely(uri.len == 0)) {
				TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}
	
	/* The start-line must be defined */
	BUG_ON(!sl);
	
	/* Now add the server name to a header (if requested) */
	if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
		struct server *srv = objt_server(h2c->conn->target);
		
		if (srv) {
			list[hdr].n = h2c->proxy->server_id_hdr_name;
			list[hdr].v = ist(srv->id);
			hdr++;
		}
	}
	
	/* marker for end of headers */
	list[hdr].n = ist("");
	
	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}
	
	chunk_reset(&outbuf);
	
	/* make sure we have at least the 9 bytes of the H2 frame header
	 * available contiguously, realigning the mux buffer if needed.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}
	
	if (outbuf.size < 9)
		goto full;
	
	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;
	
	/* encode the method, which necessarily is the first one */
	if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}
	
	auth = ist(NULL);
	
	/* RFC7540 #8.3: the CONNECT method must have :
	 * - :authority set to the URI part (host:port)
	 * - :method set to CONNECT
	 * - :scheme and :path omitted
	 *
	 * Note that this is not applicable in case of the Extended CONNECT
	 * protocol from rfc 8441.
	 */
	if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
		auth = uri;
		
		if (!hpack_encode_header(&outbuf, ist(":authority"), auth)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
		h2s->flags |= H2_SF_BODY_TUNNEL;
	} else {
		/* other methods need a :scheme. If an authority is known from
		 * the request line, it must be sent, otherwise only host is
		 * sent. Host is never sent as the authority.
		 *
		 * This code is also applicable for Extended CONNECT protocol
		 * from rfc 8441.
		 */
		struct ist scheme = { };
		
		if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
			/* the URI seems to start with a scheme */
			int len = 1;
			
			while (len < uri.len && uri.ptr[len] != ':')
				len++;
			
			if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
				/* make the uri start at the authority now */
				scheme = ist2(uri.ptr, len);
				uri = istadv(uri, len + 3);
				
				/* find the auth part of the URI */
				auth = ist2(uri.ptr, 0);
				while (auth.len < uri.len && auth.ptr[auth.len] != '/')
					auth.len++;
				
				uri = istadv(uri, auth.len);
			}
		}
		
		/* For Extended CONNECT, the :authority must be present.
		 * Use host value for it.
		 */
		if (unlikely(extended_connect) && isttest(host))
			auth = host;
		
		if (!scheme.len) {
			/* no explicit scheme, we're using an origin-form URI,
			 * probably from an H1 request transcoded to H2 via an
			 * external layer, then received as H2 without authority.
			 * So we have to look up the scheme from the HTX flags.
			 * In such a case only http and https are possible, and
			 * https is the default (sent by browsers).
			 */
			if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
				scheme = ist("http");
			else
				scheme = ist("https");
		}
		
		if (!hpack_encode_scheme(&outbuf, scheme)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
		
		if (auth.len && !hpack_encode_header(&outbuf, ist(":authority"), auth)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
		
		/* encode the path. RFC7540#8.1.2.3: if path is empty it must
		 * be sent as '/' or '*'.
		 */
		if (unlikely(!uri.len)) {
			if (sl->info.req.meth == HTTP_METH_OPTIONS)
				uri = ist("*");
			else
				uri = ist("/");
		}
		
		if (!hpack_encode_path(&outbuf, uri)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
		
		/* encode the pseudo-header protocol from rfc8441 if using
		 * Extended CONNECT method.
		 */
		if (unlikely(extended_connect)) {
			const struct ist protocol = ist(h2s->upgrade_protocol);
			if (isttest(protocol)) {
				if (!hpack_encode_header(&outbuf,
				                         ist(":protocol"),
				                         protocol)) {
					/* output full */
					if (b_space_wraps(mbuf))
						goto realign_again;
					goto full;
				}
			}
		}
	}
	
	/* encode all headers, stop at empty name. Host is only sent if we
	 * do not provide an authority.
	 */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		struct ist n = list[hdr].n;
		struct ist v = list[hdr].v;
		
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(n, ist("connection")) ||
		    (auth.len && isteq(n, ist("host"))) ||
		    isteq(n, ist("proxy-connection")) ||
		    isteq(n, ist("keep-alive")) ||
		    isteq(n, ist("upgrade")) ||
		    isteq(n, ist("transfer-encoding")))
			continue;
		
		if (isteq(n, ist("te"))) {
			/* "te" may only be sent with "trailers" if this value
			 * is present, otherwise it must be deleted.
			 */
			v = istist(v, ist("trailers"));
			if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
				continue;
			v = ist("trailers");
		}
		
		/* Skip all pseudo-headers */
		if (*(n.ptr) == ':')
			continue;
		
		if (isteq(n, ist("")))
			break; // end
		
		if (!hpack_encode_header(&outbuf, n, v)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);
	
	/* if the encoded block exceeds the negotiated max frame size, split
	 * it into a HEADERS frame followed by CONTINUATION frames.
	 */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
	
	/* remove all header blocks including the EOH and compute the
	 * corresponding size.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}
	
	if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
		/* Request already closed: add END_STREAM */
		es_now = 1;
	}
	if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
		/* EOM+empty: we may need to add END_STREAM (except for CONNECT
		 * request)
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL))
			es_now = 1;
	}
	
	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
	
	/* commit the H2 request */
	b_add(mbuf, outbuf.data);
	h2s->flags |= H2_SF_HEADERS_SENT;
	h2s->st = H2_SS_OPEN;
	
	if (es_now) {
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		// trim any possibly pending data (eg: inconsistent content-length)
		h2s->flags |= H2_SF_ES_SENT;
		h2s->st = H2_SS_HLOC;
	}
	
 end:
	return ret;
 full:
	/* current mux buffer is full: try to append a new one to the ring,
	 * otherwise mark the mux/stream blocked and let the caller retry later.
	 */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5627
/* Try to send a DATA frame matching HTTP response present in the HTX structure
 * present in <buf>, for stream <h2s>, limited to <count> bytes. The caller
 * must check the stream's status to detect any error which might have
 * happened subsequently to a successful send. Returns the number of data
 * bytes consumed, or zero if nothing was done.
 */
static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	struct buffer outbuf;
	struct buffer *mbuf;
	size_t total = 0;      /* total bytes consumed from the HTX message */
	int es_now = 0;        /* non-zero once END_STREAM must be set on this frame */
	int bsize; /* htx block size */
	int fsize; /* h2 frame size */
	struct htx_blk *blk;
	enum htx_blk_type type;
	int trunc_out; /* non-zero if truncated on out buf */

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	htx = htx_from_buf(buf);

	/* We only come here with HTX_BLK_DATA blocks */

 new_frame:
	/* one iteration per emitted DATA frame; stop once the budget or the
	 * message is exhausted.
	 */
	if (!count || htx_is_empty(htx))
		goto end;

	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
		/* The response HEADERS frame was not received yet. Thus the
		 * tunnel is not fully established yet. In this situation, we
		 * block data sending.
		 */
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}
	else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
		/* a tunnel attempt was aborted but there is pending raw data to xfer to the server.
		 * Thus the stream is closed with the CANCEL error. The error will be reported to
		 * the upper layer as a server abort. But at this stage there is nothing more we can
		 * do. We just wait for the end of the response to be sure to not truncate it.
		 */
		if (!(h2s->flags & H2_SF_ES_RCVD)) {
			TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			h2s->flags |= H2_SF_BLK_MBUSY;
		}
		else {
			TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		goto end;
	}

	/* pick the next block to emit; only DATA blocks are handled here */
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	trunc_out = 0;
	if (type != HTX_BLK_DATA)
		goto end;

	mbuf = br_tail(h2c->mbuf);
 retry:
	/* re-entered each time a new tail buffer is allocated in the ring */
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto end;
	}

	/* Perform some optimizations to reduce the number of buffer copies.
	 * First, if the mux's buffer is empty and the htx area contains
	 * exactly one data block of the same size as the requested count, and
	 * this count fits within the frame size, the stream's window size, and
	 * the connection's window size, then it's possible to simply swap the
	 * caller's buffer with the mux's output buffer and adjust offsets and
	 * length to match the entire DATA HTX block in the middle. In this
	 * case we perform a true zero-copy operation from end-to-end. This is
	 * the situation that happens all the time with large files. Second, if
	 * this is not possible, but the mux's output buffer is empty, we still
	 * have an opportunity to avoid the copy to the intermediary buffer, by
	 * making the intermediary buffer's area point to the output buffer's
	 * area. In this case we want to skip the HTX header to make sure that
	 * copies remain aligned and that this operation remains possible all
	 * the time. This goes for headers, data blocks and any data extracted
	 * from the HTX blocks.
	 */
	if (unlikely(fsize == count &&
	             htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
	             fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
		void *old_area = mbuf->area;

		if (b_data(mbuf)) {
			/* Too bad there are data left there. We're willing to memcpy/memmove
			 * up to 1/4 of the buffer, which means that it's OK to copy a large
			 * frame into a buffer containing few data if it needs to be realigned,
			 * and that it's also OK to copy few data without realigning. Otherwise
			 * we'll pretend the mbuf is full and wait for it to become empty.
			 */
			if (fsize + 9 <= b_room(mbuf) &&
			    (b_data(mbuf) <= b_size(mbuf) / 4 ||
			     (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
				TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
				goto copy;
			}

			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;

			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}

		if (htx->flags & HTX_FL_EOM) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
		/* map an H2 frame to the HTX block so that we can put the
		 * frame header there. The 9 bytes preceding the payload are
		 * reused to hold the H2 DATA frame header in place.
		 */
		*mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
		outbuf.area = b_head(mbuf);

		/* prepend an H2 DATA frame header just before the DATA block */
		memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
		write_n32(outbuf.area + 5, h2s->id); // 4 bytes
		if (es_now)
			outbuf.area[4] |= H2_F_DATA_END_STREAM;
		h2_set_frame_size(outbuf.area, fsize);

		/* update windows */
		h2s->sws -= fsize;
		h2c->mws -= fsize;

		/* and exchange with our old area, leaving the caller's buffer empty */
		buf->area = old_area;
		buf->data = buf->head = 0;
		total += fsize;
		fsize = 0;

		TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto out;
	}

 copy:
	/* fallback path: for DATA and EOM we'll have to emit a frame, even if
	 * empty, by copying the payload into the mux buffer.
	 */

	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9) {
		/* not even room for a frame header: try another tail buffer */
		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
	memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	/* we have in <fsize> the exact number of bytes we need to copy from
	 * the HTX buffer. We need to check this against the connection's and
	 * the stream's send windows, and to ensure that this fits in the max
	 * frame size and in the buffer's available space minus 9 bytes (for
	 * the frame header). The connection's flow control is applied last so
	 * that we can use a separate list of streams which are immediately
	 * unblocked on window opening. Note: we don't implement padding.
	 */

	if (!fsize)
		goto send_empty;

	if (h2s_mws(h2s) <= 0) {
		/* stream-level flow control exhausted: park the stream on the
		 * blocked list until a WINDOW_UPDATE reopens it.
		 */
		h2s->flags |= H2_SF_BLK_SFCTL;
		if (LIST_INLIST(&h2s->list))
			LIST_DEL_INIT(&h2s->list);
		LIST_APPEND(&h2c->blocked_list, &h2s->list);
		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
		goto end;
	}

	/* clamp the frame size to the caller's budget, the stream window and
	 * the negotiated max frame size, in that order.
	 */
	if (fsize > count)
		fsize = count;

	if (fsize > h2s_mws(h2s))
		fsize = h2s_mws(h2s); // >0

	if (h2c->mfs && fsize > h2c->mfs)
		fsize = h2c->mfs; // >0

	if (fsize + 9 > outbuf.size) {
		/* It doesn't fit at once. If it at least fits once split and
		 * the amount of data to move is low, let's defragment the
		 * buffer now.
		 */
		if (b_space_wraps(mbuf) &&
		    (fsize + 9 <= b_room(mbuf)) &&
		    b_data(mbuf) <= MAX_DATA_REALIGN)
			goto realign_again;
		fsize = outbuf.size - 9;
		trunc_out = 1;

		if (fsize <= 0) {
			/* no need to send an empty frame here */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}
	}

	if (h2c->mws <= 0) {
		/* connection-level flow control exhausted (checked last on
		 * purpose, see the comment above).
		 */
		h2s->flags |= H2_SF_BLK_MFCTL;
		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > h2c->mws)
		fsize = h2c->mws;

	/* now let's copy this into the output buffer */
	memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
	h2s->sws -= fsize;
	h2c->mws -= fsize;
	count -= fsize;

 send_empty:
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, fsize);

	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		htx_remove_blk(htx, blk);
		if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

	if (es_now)
		outbuf.area[4] |= H2_F_DATA_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, fsize + 9);

 out:
	/* joined by the zero-copy path: apply the END_STREAM state change */
	if (es_now) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);

		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	}
	else if (fsize) {
		if (fsize == bsize) {
			TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto new_frame;
		}
		else if (trunc_out) {
			/* we've truncated this block */
			goto new_frame;
		}
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
5926
/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
 * ES flag set for stream <h2s>. This function is called for response known to
 * have no payload. Only DATA blocks are skipped. This means the trailers are
 * still emitted. The caller must check the stream's status to detect any error
 * which might have happened subsequently to a successful send. Returns the
 * number of data bytes consumed, or zero if nothing done.
 */
static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	int bsize; /* htx block size */
	int fsize; /* h2 frame size */
	struct htx_blk *blk;
	enum htx_blk_type type;
	size_t total = 0;  /* bytes consumed (skipped) from the HTX message */

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	htx = htx_from_buf(buf);

 next_data:
	/* one iteration per leading DATA block.
	 * NOTE(review): <count> is not decremented across iterations, so each
	 * full block is individually compared against the original <count> and
	 * the returned total may exceed it when several blocks are skipped —
	 * confirm callers only rely on the stream's status, not on total<=count.
	 */
	if (!count || htx_is_empty(htx))
		goto end;
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	if (type != HTX_BLK_DATA)
		goto end;

	/* partial block: skip only what fits in <count> */
	if (fsize > count)
		fsize = count;

	if (fsize != bsize)
		goto skip_data;

	if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
		goto skip_data;

	/* Here, it is the last block and it is also the end of the message. So
	 * we can emit an empty DATA frame with the ES flag set
	 */
	if (h2_send_empty_data_es(h2s) <= 0)
		goto end;

	/* ES sent: move the stream state machine forward */
	if (h2s->st == H2_SS_OPEN)
		h2s->st = H2_SS_HLOC;
	else
		h2s_close(h2s);

 skip_data:
	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		htx_remove_blk(htx, blk);
		goto next_data;
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
5995
/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
 * processed. The caller must check the stream's status to detect any error
 * which might have happened subsequently to a successful send. The htx blocks
 * are automatically removed from the message. The htx message is assumed to be
 * valid since produced from the internal code. Processing stops when meeting
 * the EOT, which *is* removed. All trailers are processed at once and sent as a
 * single frame. The ES flag is always set.
 */
static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
{
	/* VLA sized from runtime config: one slot per possible header plus the
	 * end-of-list marker.
	 */
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	enum htx_blk_type type;
	int ret = 0;
	int hdr;  /* number of trailers collected in <list> */
	int idx;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);

	/* first pass: collect all TLR blocks into <list>, stopping at EOT */
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);

		if (type == HTX_BLK_UNUSED)
			continue;

		if (type == HTX_BLK_EOT)
			break;
		if (type == HTX_BLK_TLR) {
			/* keep one slot free for the end-of-list marker */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}

			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}

	/* marker for end of trailers */
	list[hdr].n = ist("");

	mbuf = br_tail(h2c->mbuf);
 retry:
	/* re-entered each time a new tail buffer is allocated in the ring */
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto end;
	}

	chunk_reset(&outbuf);

	/* reserve a contiguous area, realigning the buffer while that may
	 * still free enough room for the 9-byte frame header.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9)
		goto full;

	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	/* encode all headers */
	for (idx = 0; idx < hdr; idx++) {
		/* these ones do not exist in H2 or must not appear in
		 * trailers and must be dropped.
		 */
		if (isteq(list[idx].n, ist("host")) ||
		    isteq(list[idx].n, ist("content-length")) ||
		    isteq(list[idx].n, ist("connection")) ||
		    isteq(list[idx].n, ist("proxy-connection")) ||
		    isteq(list[idx].n, ist("keep-alive")) ||
		    isteq(list[idx].n, ist("upgrade")) ||
		    isteq(list[idx].n, ist("te")) ||
		    isteq(list[idx].n, ist("transfer-encoding")))
			continue;

		/* Skip all pseudo-headers */
		if (*(list[idx].n.ptr) == ':')
			continue;

		if (!hpack_encode_header(&outbuf, list[idx].n, list[idx].v)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	if (outbuf.data == 9) {
		/* here we have a problem, we have nothing to emit (either we
		 * received an empty trailers block followed or we removed its
		 * contents above). Because of this we can't send a HEADERS
		 * frame, so we have to cheat and instead send an empty DATA
		 * frame conveying the ES flag.
		 */
		outbuf.area[3] = H2_FT_DATA;
		outbuf.area[4] = H2_F_DATA_END_STREAM;
	}

	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);

	/* split into HEADERS + CONTINUATION frames if the encoded block
	 * exceeds the peer's max frame size.
	 */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	/* commit the H2 response */
	TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
	b_add(mbuf, outbuf.data);
	h2s->flags |= H2_SF_ES_SENT;

	/* ES sent: move the stream state machine forward */
	if (h2s->st == H2_SS_OPEN)
		h2s->st = H2_SS_HLOC;
	else
		h2s_close(h2s);

	/* OK we could properly deliver the response */
 done:
	/* remove all header blocks till the end and compute the corresponding size. */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOT */
		if (type == HTX_BLK_EOT)
			break;
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* no room in the current tail buffer: try to append a new one before
	 * reporting a full mux buffer.
	 */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
6168
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006169/* Called from the upper layer, to subscribe <es> to events <event_type>. The
6170 * event subscriber <es> is not allowed to change from a previous call as long
6171 * as at least one event is still subscribed. The <event_type> must only be a
6172 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006173 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006174static int h2_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006175{
Willy Tarreau36c22322022-05-27 10:41:24 +02006176 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006177 struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006178
Willy Tarreau7838a792019-08-12 18:42:03 +02006179 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006180
6181 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006182 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006183
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006184 es->events |= event_type;
6185 h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006186
6187 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006188 TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006189
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006190 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006191 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
Olivier Houchardf8338152019-05-14 17:50:32 +02006192 if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006193 !LIST_INLIST(&h2s->list)) {
Olivier Houchardf8338152019-05-14 17:50:32 +02006194 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02006195 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Olivier Houchardf8338152019-05-14 17:50:32 +02006196 else
Willy Tarreau2b718102021-04-21 07:32:39 +02006197 LIST_APPEND(&h2c->send_list, &h2s->list);
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006198 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02006199 }
Willy Tarreau7838a792019-08-12 18:42:03 +02006200 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006201 return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006202}
6203
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006204/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6205 * The <es> pointer is not allowed to differ from the one passed to the
6206 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006207 */
Willy Tarreau36c22322022-05-27 10:41:24 +02006208static int h2_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006209{
Willy Tarreau36c22322022-05-27 10:41:24 +02006210 struct h2s *h2s = __sc_mux_strm(sc);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006211
Willy Tarreau7838a792019-08-12 18:42:03 +02006212 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006213
6214 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006215 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006216
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006217 es->events &= ~event_type;
6218 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006219 h2s->subs = NULL;
6220
6221 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006222 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006223
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006224 if (event_type & SUB_RETRY_SEND) {
Frédéric Lécaille67fda162022-06-30 12:01:54 +02006225 TRACE_DEVEL("unsubscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006226 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006227 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
6228 LIST_DEL_INIT(&h2s->list);
Olivier Houchardd846c262018-10-19 17:24:29 +02006229 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006230
Willy Tarreau7838a792019-08-12 18:42:03 +02006231 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006232 return 0;
6233}
6234
6235
/* Called from the upper layer, to receive data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 *
 * Returns the number of bytes effectively moved from the stream's rxbuf to
 * <buf>. Also updates the SE_FL_* flags on the stream's sedesc (RCV_MORE,
 * WANT_ROOM, EOI, EOS, ERROR) to reflect the new rx state.
 */
static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __sc_mux_strm(sc);
	struct h2c *h2c = h2s->h2c;
	struct htx *h2s_htx = NULL;
	struct htx *buf_htx = NULL;
	size_t ret = 0;

	TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);

	/* transfer possibly pending data to the upper layer */
	h2s_htx = htx_from_buf(&h2s->rxbuf);
	if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
		/* Here htx_to_buf() will set buffer data to 0 because
		 * the HTX is empty.
		 */
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		goto end;
	}

	/* <ret> starts as the whole pending HTX payload; whatever is left in
	 * the rxbuf after the transfer is subtracted before returning, so we
	 * report exactly the amount moved.
	 */
	ret = h2s_htx->data;
	buf_htx = htx_from_buf(buf);

	/* <buf> is empty and the message is small enough, swap the
	 * buffers. */
	if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
		/* fast path: hand the full rxbuf over without copying blocks */
		htx_to_buf(buf_htx, buf);
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
		goto end;
	}

	/* slow path: copy at most <count> bytes of HTX blocks into <buf> */
	htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);

	if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
		/* propagate the parsing error; EOI is only reported once
		 * everything readable has been consumed.
		 */
		buf_htx->flags |= HTX_FL_PARSING_ERROR;
		if (htx_is_empty(buf_htx))
			se_fl_set(h2s->sd, SE_FL_EOI);
	}
	else if (htx_is_empty(h2s_htx))
		/* forward EOM only once the rx HTX is fully drained */
		buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);

	buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
	htx_to_buf(buf_htx, buf);
	htx_to_buf(h2s_htx, &h2s->rxbuf);
	ret -= h2s_htx->data;

  end:
	if (b_data(&h2s->rxbuf))
		/* more data pending: ask the caller to come back with room */
		se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
	else {
		se_fl_clr(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		if (h2s->flags & H2_SF_ES_RCVD) {
			se_fl_set(h2s->sd, SE_FL_EOI);
			/* Add EOS flag for tunnel */
			if (h2s->flags & H2_SF_BODY_TUNNEL)
				se_fl_set(h2s->sd, SE_FL_EOS);
		}
		if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
			se_fl_set(h2s->sd, SE_FL_EOS);
		if (se_fl_test(h2s->sd, SE_FL_ERR_PENDING))
			se_fl_set(h2s->sd, SE_FL_ERROR);
		/* rxbuf is now empty: release it and offer it to waiters */
		if (b_size(&h2s->rxbuf)) {
			b_free(&h2s->rxbuf);
			offer_buffers(NULL, 1);
		}
	}

	if (ret && h2c->dsi == h2s->id) {
		/* demux is blocking on this stream's buffer */
		h2c->flags &= ~H2_CF_DEM_SFULL;
		h2c_restart_reading(h2c, 1);
	}

	TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
	return ret;
}
6325
Olivier Houchardd846c262018-10-19 17:24:29 +02006326
/* Called from the upper layer, to send data from buffer <buf> for no more than
 * <count> bytes. Returns the number of bytes effectively sent. Some status
 * flags may be updated on the stream connector.
 *
 * The buffer is expected to contain an HTX message; its blocks are consumed
 * from the head and turned into HEADERS/DATA/trailers frames by the h2s_*
 * helpers below. Fairness between streams is preserved by refusing to send
 * when other streams are already queued in send_list/fctl_list.
 */
static size_t h2_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __sc_mux_strm(sc);
	size_t total = 0;
	size_t ret;
	struct htx *htx;
	struct htx_blk *blk;
	enum htx_blk_type btype;
	uint32_t bsize;
	int32_t idx;

	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);

	/* If we were not just woken because we wanted to send but couldn't,
	 * and there's somebody else that is waiting to send, do nothing,
	 * we will subscribe later and be put at the end of the list
	 */
	if (!(h2s->flags & H2_SF_NOTIFIED) &&
	    (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
		TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}
	h2s->flags &= ~H2_SF_NOTIFIED;

	/* connection must at least have finished its settings exchange */
	if (h2s->h2c->st0 < H2_CS_FRAME_H) {
		TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}

	if (h2s->h2c->st0 >= H2_CS_ERROR) {
		se_fl_set(h2s->sd, SE_FL_ERROR);
		TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		return 0;
	}

	htx = htx_from_buf(buf);

	if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
		h2s->flags |= H2_SF_OUTGOING_DATA;

	/* htx->extra announces payload still to come beyond this buffer
	 * (unless it carries the "unknown length" marker).
	 */
	if (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
		h2s->flags |= H2_SF_MORE_HTX_DATA;
	else
		h2s->flags &= ~H2_SF_MORE_HTX_DATA;

	if (h2s->id == 0) {
		/* stream was created without an ID: allocate the next
		 * outgoing stream ID before sending, and re-index the stream.
		 * NOTE(review): presumably only taken on the side where the
		 * mux initiates streams — confirm against h2c_get_next_sid().
		 */
		int32_t id = h2c_get_next_sid(h2s->h2c);

		if (id < 0) {
			/* stream IDs exhausted on this connection */
			se_fl_set(h2s->sd, SE_FL_ERROR);
			TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
			return 0;
		}

		eb32_delete(&h2s->by_id);
		h2s->by_id.key = h2s->id = id;
		h2s->h2c->max_id = id;
		h2s->h2c->nb_reserved--;
		eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
	}

	/* consume HTX blocks from the head while the stream may still send
	 * (not locally half-closed, not blocked) and quota remains.
	 */
	while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
	       count && !htx_is_empty(htx)) {
		idx   = htx_get_head(htx);
		blk   = htx_get_blk(htx, idx);
		btype = htx_get_blk_type(blk);
		bsize = htx_get_blksz(blk);

		switch (btype) {
		case HTX_BLK_REQ_SL:
			/* start-line before headers */
			ret = h2s_bck_make_req_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_RES_SL:
			/* start-line before headers */
			ret = h2s_frt_make_resp_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_DATA:
			/* all these cause the emission of a DATA frame (possibly empty);
			 * a bodyless response (non-tunnel) has its payload silently
			 * skipped instead of emitted.
			 */
			if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
			    (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
				ret = h2s_skip_data(h2s, buf, count);
			else
				ret = h2s_make_data(h2s, buf, count);
			if (ret > 0) {
				/* h2s_make_data()/h2s_skip_data() may have
				 * realigned the buffer, re-resolve the HTX.
				 */
				htx = htx_from_buf(buf);
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_TLR:
		case HTX_BLK_EOT:
			/* This is the first trailers block, all the subsequent ones */
			ret = h2s_make_trailers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		default:
			/* unsupported block type: drop it and account for it */
			htx_remove_blk(htx, blk);
			total += bsize;
			count -= bsize;
			break;
		}
	}

  done:
	if (h2s->st >= H2_SS_HLOC) {
		/* trim any possibly pending data after we close (extra CR-LF,
		 * unprocessed trailers, abnormal extra data, ...)
		 */
		total += count;
		count = 0;
	}

	/* RST are sent similarly to frame acks */
	if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
		TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		se_fl_set_error(h2s->sd);
		if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
			h2s_close(h2s);
	}

	htx_to_buf(htx, buf);

	if (total > 0) {
		/* wake the connection-level sender unless a send retry is
		 * already subscribed.
		 */
		if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
			TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
			tasklet_wakeup(h2s->h2c->wait_event.tasklet);
		}

	}
	/* If we're waiting for flow control, and we got a shutr on the
	 * connection, we will never be unlocked, so add an error on
	 * the stream connector.
	 */
	if ((h2s->h2c->flags & H2_CF_RCVD_SHUT) &&
	    !b_data(&h2s->h2c->dbuf) &&
	    (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
		TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		se_fl_set_error(h2s->sd);
	}

	if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
	    !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* Ok we managed to send something, leave the send_list if we were still there */
		LIST_DEL_INIT(&h2s->list);
	}

	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
	return total;
}
6504
Willy Tarreau90bffa22022-09-01 19:06:44 +02006505/* appends some info about stream <h2s> to buffer <msg>, or does nothing if
Willy Tarreau7051f732022-09-02 15:22:12 +02006506 * <h2s> is NULL. Returns non-zero if the stream is considered suspicious. May
6507 * emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is not
6508 * NULL, otherwise a single line is used.
Willy Tarreau90bffa22022-09-01 19:06:44 +02006509 */
Willy Tarreau7051f732022-09-02 15:22:12 +02006510static int h2_dump_h2s_info(struct buffer *msg, const struct h2s *h2s, const char *pfx)
Willy Tarreau90bffa22022-09-01 19:06:44 +02006511{
6512 int ret = 0;
6513
6514 if (!h2s)
6515 return ret;
6516
Willy Tarreau7051f732022-09-02 15:22:12 +02006517 chunk_appendf(msg, " h2s.id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u",
Willy Tarreau90bffa22022-09-01 19:06:44 +02006518 h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
6519 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
Willy Tarreau7051f732022-09-02 15:22:12 +02006520 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf));
6521
6522 if (pfx)
6523 chunk_appendf(msg, "\n%s", pfx);
6524
6525 chunk_appendf(msg, " .sc=%p", h2s_sc(h2s));
Willy Tarreau90bffa22022-09-01 19:06:44 +02006526 if (h2s_sc(h2s))
6527 chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
6528 h2s_sc(h2s)->flags, h2s_sc(h2s)->app);
6529
Willy Tarreau7051f732022-09-02 15:22:12 +02006530 chunk_appendf(msg, " .sd=%p", h2s->sd);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006531 chunk_appendf(msg, "(.flg=0x%08x)", se_fl_get(h2s->sd));
6532
Willy Tarreau7051f732022-09-02 15:22:12 +02006533 if (pfx)
6534 chunk_appendf(msg, "\n%s", pfx);
6535
Willy Tarreau90bffa22022-09-01 19:06:44 +02006536 chunk_appendf(msg, " .subs=%p", h2s->subs);
6537 if (h2s->subs) {
6538 chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6539 chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
6540 h2s->subs->tasklet->calls,
6541 h2s->subs->tasklet->context);
6542 if (h2s->subs->tasklet->calls >= 1000000)
6543 ret = 1;
6544 resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
6545 chunk_appendf(msg, ")");
6546 }
6547 return ret;
6548}
6549
/* appends some info about connection <h2c> to buffer <msg>, or does nothing if
 * <h2c> is NULL. Returns non-zero if the connection is considered suspicious.
 * May emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is
 * not NULL, otherwise a single line is used.
 */
static int h2_dump_h2c_info(struct buffer *msg, struct h2c *h2c, const char *pfx)
{
	const struct buffer *hmbuf, *tmbuf;      /* head/tail of the mux buffer ring */
	const struct h2s *h2s = NULL;
	struct eb32_node *node;
	int fctl_cnt = 0;                        /* streams blocked on flow control */
	int send_cnt = 0;                        /* streams waiting to send */
	int tree_cnt = 0;                        /* total streams indexed by ID */
	int orph_cnt = 0;                        /* streams without a stream connector */
	int ret = 0;

	if (!h2c)
		return ret;

	/* count the streams in each wait queue */
	list_for_each_entry(h2s, &h2c->fctl_list, list)
		fctl_cnt++;

	list_for_each_entry(h2s, &h2c->send_list, list)
		send_cnt++;

	/* walk the stream tree to count all streams and the orphaned ones */
	node = eb32_first(&h2c->streams_by_id);
	while (node) {
		h2s = container_of(node, struct h2s, by_id);
		tree_cnt++;
		if (!h2s_sc(h2s))
			orph_cnt++;
		node = eb32_next(node);
	}

	hmbuf = br_head(h2c->mbuf);
	tmbuf = br_tail(h2c->mbuf);
	chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
		      " .nbst=%u .nbsc=%u",
		      h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
		      h2c->nb_streams, h2c->nb_sc);

	if (pfx)
		chunk_appendf(msg, "\n%s", pfx);

	chunk_appendf(msg, " .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
		      " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u",
		      fctl_cnt, send_cnt, tree_cnt, orph_cnt,
		      h2c->wait_event.events, h2c->dsi,
		      (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
		      (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf));

	if (pfx)
		chunk_appendf(msg, "\n%s", pfx);

	/* mux buffer ring: indexes then head and tail buffer contents */
	chunk_appendf(msg, " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
		      br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
		      (unsigned int)b_data(hmbuf), b_orig(hmbuf),
		      (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
		      (unsigned int)b_data(tmbuf), b_orig(tmbuf),
		      (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));

	/* timeout task and its (possibly expired) expiration date */
	chunk_appendf(msg, " .task=%p", h2c->task);
	if (h2c->task) {
		chunk_appendf(msg, " .exp=%s",
			      h2c->task->expire ? tick_is_expired(h2c->task->expire, now_ms) ? "<PAST>" :
			      human_time(TICKS_TO_MS(h2c->task->expire - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
	}

	return ret;
}
6620
6621/* for debugging with CLI's "show fd" command */
6622static int h2_show_fd(struct buffer *msg, struct connection *conn)
6623{
6624 struct h2c *h2c = conn->ctx;
6625 const struct h2s *h2s;
6626 struct eb32_node *node;
6627 int ret = 0;
6628
6629 if (!h2c)
6630 return ret;
6631
Willy Tarreau7051f732022-09-02 15:22:12 +02006632 ret |= h2_dump_h2c_info(msg, h2c, NULL);
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006633
6634 node = eb32_last(&h2c->streams_by_id);
6635 if (node) {
6636 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau90bffa22022-09-01 19:06:44 +02006637 chunk_appendf(msg, " last_h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006638 ret |= h2_dump_h2s_info(msg, h2s, NULL);
Willy Tarreau987c0632018-12-18 10:32:05 +01006639 }
Willy Tarreau4e97bcc2022-09-01 19:25:57 +02006640
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006641 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006642}
Willy Tarreau62f52692017-10-08 23:01:42 +02006643
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006644/* for debugging with CLI's "show sess" command. May emit multiple lines, each
6645 * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
6646 * line is used. Each field starts with a space so it's safe to print it after
6647 * existing fields.
6648 */
6649static int h2_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
6650{
6651 struct h2s *h2s = sd->se;
6652 int ret = 0;
6653
6654 if (!h2s)
6655 return ret;
6656
6657 chunk_appendf(msg, " h2s=%p", h2s);
Willy Tarreau7051f732022-09-02 15:22:12 +02006658 ret |= h2_dump_h2s_info(msg, h2s, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006659 if (pfx)
6660 chunk_appendf(msg, "\n%s", pfx);
6661 chunk_appendf(msg, " h2c=%p", h2s->h2c);
Willy Tarreau7051f732022-09-02 15:22:12 +02006662 ret |= h2_dump_h2c_info(msg, h2s->h2c, pfx);
Willy Tarreaubf4ec6f2022-09-02 15:11:40 +02006663 return ret;
6664}
6665
/* Migrate the connection to the current thread.
 * Return 0 if successful, non-zero otherwise.
 * Expected to be called with the old thread lock held.
 *
 * The sequence matters: the fd is taken over first, then the xprt; the old
 * thread's tasklet and timeout task are detached and killed on their origin
 * thread, and fresh ones are allocated bound to the current thread.
 */
static int h2_takeover(struct connection *conn, int orig_tid)
{
	struct h2c *h2c = conn->ctx;
	struct task *task;

	if (fd_takeover(conn->handle.fd, conn) != 0)
		return -1;

	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
		/* We failed to takeover the xprt, even if the connection may
		 * still be valid, flag it as error'd, as we have already
		 * taken over the fd, and wake the tasklet, so that it will
		 * destroy it.
		 */
		conn->flags |= CO_FL_ERROR;
		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
		return -1;
	}

	if (h2c->wait_event.events)
		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
		                             h2c->wait_event.events, &h2c->wait_event);
	/* To let the tasklet know it should free itself, and do nothing else,
	 * set its context to NULL.
	 */
	h2c->wait_event.tasklet->context = NULL;
	/* wake it on its original thread so it can self-destruct there */
	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);

	task = h2c->task;
	if (task) {
		/* the old timeout task belongs to the origin thread: detach
		 * it (NULL context), kill it, and allocate a replacement
		 * bound to the current thread.
		 */
		task->context = NULL;
		h2c->task = NULL;
		/* make sure the NULL context is visible before the kill */
		__ha_barrier_store();
		task_kill(task);

		h2c->task = task_new_here();
		if (!h2c->task) {
			h2_release(h2c);
			return -1;
		}
		h2c->task->process = h2_timeout_task;
		h2c->task->context = h2c;
	}
	/* allocate a fresh I/O tasklet on the current thread and re-subscribe
	 * for receive events.
	 */
	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet) {
		h2_release(h2c);
		return -1;
	}
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
	                           SUB_RETRY_RECV, &h2c->wait_event);

	return 0;
}
6725
Willy Tarreau62f52692017-10-08 23:01:42 +02006726/*******************************************************/
6727/* functions below are dedicated to the config parsers */
6728/*******************************************************/
6729
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006730/* config parser for global "tune.h2.header-table-size" */
6731static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006732 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006733 char **err)
6734{
6735 if (too_many_args(1, args, err, NULL))
6736 return -1;
6737
6738 h2_settings_header_table_size = atoi(args[1]);
6739 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
6740 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
6741 return -1;
6742 }
6743 return 0;
6744}
Willy Tarreau62f52692017-10-08 23:01:42 +02006745
Willy Tarreaue6baec02017-07-27 11:45:11 +02006746/* config parser for global "tune.h2.initial-window-size" */
6747static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006748 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02006749 char **err)
6750{
6751 if (too_many_args(1, args, err, NULL))
6752 return -1;
6753
6754 h2_settings_initial_window_size = atoi(args[1]);
6755 if (h2_settings_initial_window_size < 0) {
6756 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6757 return -1;
6758 }
6759 return 0;
6760}
6761
Willy Tarreau5242ef82017-07-27 11:47:28 +02006762/* config parser for global "tune.h2.max-concurrent-streams" */
6763static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006764 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02006765 char **err)
6766{
6767 if (too_many_args(1, args, err, NULL))
6768 return -1;
6769
6770 h2_settings_max_concurrent_streams = atoi(args[1]);
Willy Tarreau5a490b62019-01-31 10:39:51 +01006771 if ((int)h2_settings_max_concurrent_streams < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02006772 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6773 return -1;
6774 }
6775 return 0;
6776}
6777
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006778/* config parser for global "tune.h2.max-frame-size" */
6779static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006780 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006781 char **err)
6782{
6783 if (too_many_args(1, args, err, NULL))
6784 return -1;
6785
6786 h2_settings_max_frame_size = atoi(args[1]);
6787 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
6788 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
6789 return -1;
6790 }
6791 return 0;
6792}
6793
Willy Tarreau62f52692017-10-08 23:01:42 +02006794
6795/****************************************/
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05006796/* MUX initialization and instantiation */
Willy Tarreau62f52692017-10-08 23:01:42 +02006797/***************************************/
6798
/* The mux operations: the callback table registered for the HTTP/2 mux.
 * Entries not listed here stay NULL. snd_buf/rcv_buf, the show_* dump
 * helpers and takeover are defined above in this file.
 */
static const struct mux_ops h2_ops = {
	.init = h2_init,
	.wake = h2_wake,
	.snd_buf = h2_snd_buf,           /* app layer pushes outgoing data */
	.rcv_buf = h2_rcv_buf,           /* app layer pulls incoming data */
	.subscribe = h2_subscribe,
	.unsubscribe = h2_unsubscribe,
	.attach = h2_attach,
	.get_first_sc = h2_get_first_sc,
	.detach = h2_detach,
	.destroy = h2_destroy,
	.avail_streams = h2_avail_streams,
	.used_streams = h2_used_streams,
	.shutr = h2_shutr,
	.shutw = h2_shutw,
	.ctl = h2_ctl,
	.show_fd = h2_show_fd,           /* CLI "show fd" dump */
	.show_sd = h2_show_sd,           /* CLI "show sess" dump */
	.takeover = h2_takeover,         /* inter-thread connection migration */
	.flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
	.name = "H2",
};
/* register this mux under the "h2" protocol token, for HTTP mode only,
 * usable on both the frontend and backend sides.
 */
static struct mux_proto_list mux_proto_h2 =
	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
6827
/* config keyword parsers: maps the global "tune.h2.*" keywords to the
 * parser functions defined above; the list is NULL-terminated.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.h2.header-table-size",      h2_parse_header_table_size      },
	{ CFG_GLOBAL, "tune.h2.initial-window-size",    h2_parse_initial_window_size    },
	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.max-frame-size",         h2_parse_max_frame_size         },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006838
6839/* initialize internal structs after the config is parsed.
6840 * Returns zero on success, non-zero on error.
6841 */
6842static int init_h2()
6843{
6844 pool_head_hpack_tbl = create_pool("hpack_tbl",
6845 h2_settings_header_table_size,
6846 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01006847 if (!pool_head_hpack_tbl) {
6848 ha_alert("failed to allocate hpack_tbl memory pool\n");
6849 return (ERR_ALERT | ERR_FATAL);
6850 }
6851 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006852}
6853
6854REGISTER_POST_CHECK(init_h2);