blob: 9ed272768cf96ffabee0ed13dc2538f8e1a10f68 [file] [log] [blame]
Willy Tarreau62f52692017-10-08 23:01:42 +02001/*
2 * HTTP/2 mux-demux for connections
3 *
4 * Copyright 2017 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau63617db2021-10-06 18:23:40 +020014#include <import/ebmbtree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020015#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020017#include <haproxy/connection.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010018#include <haproxy/conn_stream.h>
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +020019#include <haproxy/dynbuf.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020020#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020021#include <haproxy/hpack-dec.h>
22#include <haproxy/hpack-enc.h>
23#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020024#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020025#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020026#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020027#include <haproxy/log.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020028#include <haproxy/net_helper.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020029#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010030#include <haproxy/stats.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020031#include <haproxy/stream.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020032#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020033
34
/* dummy stream descriptors returned for the closed, error, refused and idle
 * stream states (never allocated, shared by all connections)
 */
static const struct h2s *h2_closed_stream;
static const struct h2s *h2_error_stream;
static const struct h2s *h2_refused_stream;
static const struct h2s *h2_idle_stream;
40
/* Connection flags (32 bit), in h2c->flags */
#define H2_CF_NONE              0x00000000

/* Flags indicating why writing to the mux is blocked. */
#define H2_CF_MUX_MALLOC        0x00000001  // mux blocked on lack of connection's mux buffer
#define H2_CF_MUX_MFULL         0x00000002  // mux blocked on connection's mux buffer full
#define H2_CF_MUX_BLOCK_ANY     0x00000003  // aggregate of the mux flags above

/* Flags indicating why writing to the demux is blocked.
 * The first two ones directly affect the ability for the mux to receive data
 * from the connection. The other ones affect the mux's ability to demux
 * received data.
 */
#define H2_CF_DEM_DALLOC        0x00000004  // demux blocked on lack of connection's demux buffer
#define H2_CF_DEM_DFULL         0x00000008  // demux blocked on connection's demux buffer full

#define H2_CF_DEM_MBUSY         0x00000010  // demux blocked on connection's mux side busy
#define H2_CF_DEM_MROOM         0x00000020  // demux blocked on lack of room in mux buffer
#define H2_CF_DEM_SALLOC        0x00000040  // demux blocked on lack of stream's request buffer
#define H2_CF_DEM_SFULL         0x00000080  // demux blocked on stream request buffer full
#define H2_CF_DEM_TOOMANY       0x00000100  // demux blocked waiting for some conn_streams to leave
#define H2_CF_DEM_BLOCK_ANY     0x000001F0  // aggregate of the demux flags above except DALLOC/DFULL
                                            // (SHORT_READ is also excluded)

#define H2_CF_DEM_SHORT_READ    0x00000200  // demux blocked on incomplete frame

/* other flags */
#define H2_CF_GOAWAY_SENT       0x00001000  // a GOAWAY frame was successfully sent
#define H2_CF_GOAWAY_FAILED     0x00002000  // a GOAWAY frame failed to be sent
#define H2_CF_WAIT_FOR_HS       0x00004000  // We did check that at least a stream was waiting for handshake
#define H2_CF_IS_BACK           0x00008000  // this is an outgoing connection
#define H2_CF_WINDOW_OPENED     0x00010000  // demux increased window already advertised
#define H2_CF_RCVD_SHUT         0x00020000  // a recv() attempt already failed on a shutdown
#define H2_CF_END_REACHED       0x00040000  // pending data too short with RCVD_SHUT present

#define H2_CF_RCVD_RFC8441      0x00100000  // settings from RFC8441 has been received indicating support for Extended CONNECT
#define H2_CF_SHTS_UPDATED      0x00200000  // SETTINGS_HEADER_TABLE_SIZE updated
#define H2_CF_DTSU_EMITTED      0x00400000  // HPACK Dynamic Table Size Update opcode emitted
Amaury Denoyelle0df04362021-10-18 09:43:29 +020079
/* H2 connection state, in h2c->st0. Values are made explicit so that the
 * progression through the connection life cycle is obvious at a glance.
 */
enum h2_cs {
	H2_CS_PREFACE   = 0, /* init done, waiting for connection preface */
	H2_CS_SETTINGS1 = 1, /* preface OK, waiting for first settings frame */
	H2_CS_FRAME_H   = 2, /* first settings frame ok, waiting for frame header */
	H2_CS_FRAME_P   = 3, /* frame header OK, waiting for frame payload */
	H2_CS_FRAME_A   = 4, /* frame payload OK, trying to send ACK frame */
	H2_CS_FRAME_E   = 5, /* frame payload OK, trying to send RST frame */
	H2_CS_ERROR     = 6, /* send GOAWAY(errcode) and close the connection ASAP */
	H2_CS_ERROR2    = 7, /* GOAWAY(errcode) sent, close the connection ASAP */
	H2_CS_ENTRIES   = 8  /* must be last */
} __attribute__((packed));
92
Willy Tarreau51330962019-05-26 09:38:07 +020093
/* 32 buffers: one for the ring's root, rest for the mbuf itself */
#define H2C_MBUF_CNT 32
Willy Tarreau51330962019-05-26 09:38:07 +020096
/* H2 connection descriptor: all per-connection state for the H2 mux, shared
 * by every stream carried over this connection.
 */
struct h2c {
	struct connection *conn;

	enum h2_cs st0; /* mux state */
	enum h2_err errcode; /* H2 err code (H2_ERR_*) */

	/* 16 bit hole here */
	uint32_t flags; /* connection flags: H2_CF_* */
	uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
	int32_t max_id; /* highest ID known on this connection, <0 before preface */
	uint32_t rcvd_c; /* newly received data to ACK for the connection */
	uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) */

	/* states for the demux direction */
	struct hpack_dht *ddht; /* demux dynamic header table */
	struct buffer dbuf; /* demux buffer */

	int32_t dsi; /* demux stream ID (<0 = idle) */
	int32_t dfl; /* demux frame length (if dsi >= 0) */
	int8_t dft; /* demux frame type (if dsi >= 0) */
	int8_t dff; /* demux frame flags (if dsi >= 0) */
	uint8_t dpl; /* demux pad length (part of dfl), init to 0 */
	/* 8 bit hole here */
	int32_t last_sid; /* last processed stream ID for GOAWAY, <0 before preface */

	/* states for the mux direction */
	struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
	int32_t msi; /* mux stream ID (<0 = idle) */
	int32_t mfl; /* mux frame length (if msi >= 0) */
	int8_t mft; /* mux frame type (if msi >= 0) */
	int8_t mff; /* mux frame flags (if msi >= 0) */
	/* 16 bit hole here */
	int32_t miw; /* mux initial window size for all new streams */
	int32_t mws; /* mux window size. Can be negative. */
	int32_t mfs; /* mux's max frame size */

	int timeout; /* idle timeout duration in ticks */
	int shut_timeout; /* idle timeout duration in ticks after GOAWAY was sent */
	int idle_start; /* date of the last time the connection went idle */
	/* 32-bit hole here */
	unsigned int nb_streams; /* number of streams in the tree */
	unsigned int nb_cs; /* number of attached conn_streams */
	unsigned int nb_reserved; /* number of reserved streams */
	unsigned int stream_cnt; /* total number of streams seen */
	struct proxy *proxy; /* the proxy this connection was created for */
	struct task *task; /* timeout management task */
	struct h2_counters *px_counters; /* h2 counters attached to proxy */
	struct eb_root streams_by_id; /* all active streams by their ID */
	struct list send_list; /* list of blocked streams requesting to send */
	struct list fctl_list; /* list of streams blocked by connection's fctl */
	struct list blocked_list; /* list of streams blocked for other reasons (e.g. sfctl, dep) */
	struct buffer_wait buf_wait; /* wait list for buffer allocations */
	struct wait_event wait_event; /* To be used if we're waiting for I/Os */
};
152
/* H2 stream state, in h2s->st. Values are explicit because the H2_SS_*_BIT
 * masks below are derived from them.
 */
enum h2_ss {
	H2_SS_IDLE   = 0, /* idle */
	H2_SS_RLOC   = 1, /* reserved(local) */
	H2_SS_RREM   = 2, /* reserved(remote) */
	H2_SS_OPEN   = 3, /* open */
	H2_SS_HREM   = 4, /* half-closed(remote) */
	H2_SS_HLOC   = 5, /* half-closed(local) */
	H2_SS_ERROR  = 6, /* an error needs to be sent using RST_STREAM */
	H2_SS_CLOSED = 7, /* closed */
	H2_SS_ENTRIES = 8 /* must be last */
} __attribute__((packed));
165
/* one-hot bit masks derived from the h2_ss states, used to test a stream's
 * state against a set of allowed states in a single AND operation
 */
#define H2_SS_MASK(state) (1UL << (state))
#define H2_SS_IDLE_BIT    (1UL << H2_SS_IDLE)
#define H2_SS_RLOC_BIT    (1UL << H2_SS_RLOC)
#define H2_SS_RREM_BIT    (1UL << H2_SS_RREM)
#define H2_SS_OPEN_BIT    (1UL << H2_SS_OPEN)
#define H2_SS_HREM_BIT    (1UL << H2_SS_HREM)
#define H2_SS_HLOC_BIT    (1UL << H2_SS_HLOC)
#define H2_SS_ERROR_BIT   (1UL << H2_SS_ERROR)
#define H2_SS_CLOSED_BIT  (1UL << H2_SS_CLOSED)
Willy Tarreau4c688eb2019-05-14 11:44:03 +0200175
/* HTTP/2 stream flags (32 bit), in h2s->flags */
#define H2_SF_NONE              0x00000000
#define H2_SF_ES_RCVD           0x00000001  // END_STREAM received
#define H2_SF_ES_SENT           0x00000002  // END_STREAM sent

#define H2_SF_RST_RCVD          0x00000004  // received RST_STREAM
#define H2_SF_RST_SENT          0x00000008  // sent RST_STREAM

/* stream flags indicating the reason the stream is blocked */
#define H2_SF_BLK_MBUSY         0x00000010  // blocked waiting for mux access (transient)
#define H2_SF_BLK_MROOM         0x00000020  // blocked waiting for room in the mux (must be in send list)
#define H2_SF_BLK_MFCTL         0x00000040  // blocked due to mux fctl (must be in fctl list)
#define H2_SF_BLK_SFCTL         0x00000080  // blocked due to stream fctl (must be in blocked list)
#define H2_SF_BLK_ANY           0x000000F0  // any of the reasons above

/* stream flags indicating how data is supposed to be sent */
#define H2_SF_DATA_CLEN         0x00000100  // data sent using content-length
#define H2_SF_BODYLESS_RESP     0x00000200  /* Bodyless response message */
#define H2_SF_BODY_TUNNEL       0x00000400  // Attempt to establish a Tunnelled stream (the result depends on the status code)


#define H2_SF_NOTIFIED          0x00000800  // a paused stream was notified to try to send again
#define H2_SF_HEADERS_SENT      0x00001000  // a HEADERS frame was sent for this stream
#define H2_SF_OUTGOING_DATA     0x00002000  // set whenever we've seen outgoing data

#define H2_SF_HEADERS_RCVD      0x00004000  // a HEADERS frame was received for this stream

#define H2_SF_WANT_SHUTR        0x00008000  // a stream couldn't shutr() (mux full/busy)
#define H2_SF_WANT_SHUTW        0x00010000  // a stream couldn't shutw() (mux full/busy)

#define H2_SF_EXT_CONNECT_SENT  0x00040000  // rfc 8441 an Extended CONNECT has been sent
#define H2_SF_EXT_CONNECT_RCVD  0x00080000  // rfc 8441 an Extended CONNECT has been received and parsed

#define H2_SF_TUNNEL_ABRT       0x00100000  // A tunnel attempt was aborted
Willy Tarreau2c249eb2019-05-13 18:06:17 +0200210
/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
 * it is being processed in the internal HTTP representation (HTX).
 */
struct h2s {
	struct cs_endpoint *endp;
	struct session *sess;
	struct h2c *h2c; /* parent H2 connection */
	struct eb32_node by_id; /* place in h2c's streams_by_id */
	int32_t id; /* stream ID */
	uint32_t flags; /* H2_SF_* */
	int sws; /* stream window size, to be added to the mux's initial window size */
	enum h2_err errcode; /* H2 err code (H2_ERR_*) */
	enum h2_ss st; /* stream state (H2_SS_*) */
	uint16_t status; /* HTTP response status */
	unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
	struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
	struct wait_event *subs; /* recv wait_event the conn_stream associated is waiting on (via h2_subscribe) */
	struct list list; /* To be used when adding in h2c->send_list or h2c->fctl_list */
	struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to send an RST after we failed to,
	                          * in case there's no other subscription to do it */

	char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
};
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200234
/* descriptor for an h2 frame header, as decoded from the 9-byte wire format */
struct h2_fh {
	uint32_t len; /* length, host order, 24 bits */
	uint32_t sid; /* stream id, host order, 31 bits */
	uint8_t ft;  /* frame type */
	uint8_t ff;  /* frame flags */
};
242
/* trace source and events */

/* Forward declaration of the trace callback registered in trace_h2 below.
 * Note: the original declaration carried a stray line-continuation backslash
 * after the first line; it served no purpose outside a macro and was removed.
 */
static void h2_trace(enum trace_level level, uint64_t mask,
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);
Willy Tarreau12ae2122019-08-08 18:23:12 +0200248
/* The event representation is split like this :
 *   strm - application layer
 *   h2s  - internal H2 stream
 *   h2c  - internal H2 connection
 *   conn - external connection
 *
 * Each H2_EV_* define is declared right above the table entry that uses it so
 * that the bit assignment and its description stay together.
 */
static const struct trace_event h2_trace_events[] = {
#define           H2_EV_H2C_NEW     (1ULL <<  0)
	{ .mask = H2_EV_H2C_NEW,      .name = "h2c_new",     .desc = "new H2 connection" },
#define           H2_EV_H2C_RECV    (1ULL <<  1)
	{ .mask = H2_EV_H2C_RECV,     .name = "h2c_recv",    .desc = "Rx on H2 connection" },
#define           H2_EV_H2C_SEND    (1ULL <<  2)
	{ .mask = H2_EV_H2C_SEND,     .name = "h2c_send",    .desc = "Tx on H2 connection" },
#define           H2_EV_H2C_FCTL    (1ULL <<  3)
	{ .mask = H2_EV_H2C_FCTL,     .name = "h2c_fctl",    .desc = "H2 connection flow-controlled" },
#define           H2_EV_H2C_BLK     (1ULL <<  4)
	{ .mask = H2_EV_H2C_BLK,      .name = "h2c_blk",     .desc = "H2 connection blocked" },
#define           H2_EV_H2C_WAKE    (1ULL <<  5)
	{ .mask = H2_EV_H2C_WAKE,     .name = "h2c_wake",    .desc = "H2 connection woken up" },
#define           H2_EV_H2C_END     (1ULL <<  6)
	{ .mask = H2_EV_H2C_END,      .name = "h2c_end",     .desc = "H2 connection terminated" },
#define           H2_EV_H2C_ERR     (1ULL <<  7)
	{ .mask = H2_EV_H2C_ERR,      .name = "h2c_err",     .desc = "error on H2 connection" },
#define           H2_EV_RX_FHDR     (1ULL <<  8)
	{ .mask = H2_EV_RX_FHDR,      .name = "rx_fhdr",     .desc = "H2 frame header received" },
#define           H2_EV_RX_FRAME    (1ULL <<  9)
	{ .mask = H2_EV_RX_FRAME,     .name = "rx_frame",    .desc = "receipt of any H2 frame" },
#define           H2_EV_RX_EOI      (1ULL << 10)
	{ .mask = H2_EV_RX_EOI,       .name = "rx_eoi",      .desc = "receipt of end of H2 input (ES or RST)" },
#define           H2_EV_RX_PREFACE  (1ULL << 11)
	{ .mask = H2_EV_RX_PREFACE,   .name = "rx_preface",  .desc = "receipt of H2 preface" },
#define           H2_EV_RX_DATA     (1ULL << 12)
	{ .mask = H2_EV_RX_DATA,      .name = "rx_data",     .desc = "receipt of H2 DATA frame" },
#define           H2_EV_RX_HDR      (1ULL << 13)
	{ .mask = H2_EV_RX_HDR,       .name = "rx_hdr",      .desc = "receipt of H2 HEADERS frame" },
#define           H2_EV_RX_PRIO     (1ULL << 14)
	{ .mask = H2_EV_RX_PRIO,      .name = "rx_prio",     .desc = "receipt of H2 PRIORITY frame" },
#define           H2_EV_RX_RST      (1ULL << 15)
	{ .mask = H2_EV_RX_RST,       .name = "rx_rst",      .desc = "receipt of H2 RST_STREAM frame" },
#define           H2_EV_RX_SETTINGS (1ULL << 16)
	{ .mask = H2_EV_RX_SETTINGS,  .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
#define           H2_EV_RX_PUSH     (1ULL << 17)
	{ .mask = H2_EV_RX_PUSH,      .name = "rx_push",     .desc = "receipt of H2 PUSH_PROMISE frame" },
#define           H2_EV_RX_PING     (1ULL << 18)
	{ .mask = H2_EV_RX_PING,      .name = "rx_ping",     .desc = "receipt of H2 PING frame" },
#define           H2_EV_RX_GOAWAY   (1ULL << 19)
	{ .mask = H2_EV_RX_GOAWAY,    .name = "rx_goaway",   .desc = "receipt of H2 GOAWAY frame" },
#define           H2_EV_RX_WU       (1ULL << 20)
	{ .mask = H2_EV_RX_WU,        .name = "rx_wu",       .desc = "receipt of H2 WINDOW_UPDATE frame" },
#define           H2_EV_RX_CONT     (1ULL << 21)
	{ .mask = H2_EV_RX_CONT,      .name = "rx_cont",     .desc = "receipt of H2 CONTINUATION frame" },
#define           H2_EV_TX_FRAME    (1ULL << 22)
	{ .mask = H2_EV_TX_FRAME,     .name = "tx_frame",    .desc = "transmission of any H2 frame" },
#define           H2_EV_TX_EOI      (1ULL << 23)
	{ .mask = H2_EV_TX_EOI,       .name = "tx_eoi",      .desc = "transmission of H2 end of input (ES or RST)" },
#define           H2_EV_TX_PREFACE  (1ULL << 24)
	{ .mask = H2_EV_TX_PREFACE,   .name = "tx_preface",  .desc = "transmission of H2 preface" },
#define           H2_EV_TX_DATA     (1ULL << 25)
	{ .mask = H2_EV_TX_DATA,      .name = "tx_data",     .desc = "transmission of H2 DATA frame" },
#define           H2_EV_TX_HDR      (1ULL << 26)
	{ .mask = H2_EV_TX_HDR,       .name = "tx_hdr",      .desc = "transmission of H2 HEADERS frame" },
#define           H2_EV_TX_PRIO     (1ULL << 27)
	{ .mask = H2_EV_TX_PRIO,      .name = "tx_prio",     .desc = "transmission of H2 PRIORITY frame" },
#define           H2_EV_TX_RST      (1ULL << 28)
	{ .mask = H2_EV_TX_RST,       .name = "tx_rst",      .desc = "transmission of H2 RST_STREAM frame" },
#define           H2_EV_TX_SETTINGS (1ULL << 29)
	{ .mask = H2_EV_TX_SETTINGS,  .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
#define           H2_EV_TX_PUSH     (1ULL << 30)
	{ .mask = H2_EV_TX_PUSH,      .name = "tx_push",     .desc = "transmission of H2 PUSH_PROMISE frame" },
#define           H2_EV_TX_PING     (1ULL << 31)
	{ .mask = H2_EV_TX_PING,      .name = "tx_ping",     .desc = "transmission of H2 PING frame" },
#define           H2_EV_TX_GOAWAY   (1ULL << 32)
	{ .mask = H2_EV_TX_GOAWAY,    .name = "tx_goaway",   .desc = "transmission of H2 GOAWAY frame" },
#define           H2_EV_TX_WU       (1ULL << 33)
	{ .mask = H2_EV_TX_WU,        .name = "tx_wu",       .desc = "transmission of H2 WINDOW_UPDATE frame" },
#define           H2_EV_TX_CONT     (1ULL << 34)
	{ .mask = H2_EV_TX_CONT,      .name = "tx_cont",     .desc = "transmission of H2 CONTINUATION frame" },
#define           H2_EV_H2S_NEW     (1ULL << 35)
	{ .mask = H2_EV_H2S_NEW,      .name = "h2s_new",     .desc = "new H2 stream" },
#define           H2_EV_H2S_RECV    (1ULL << 36)
	{ .mask = H2_EV_H2S_RECV,     .name = "h2s_recv",    .desc = "Rx for H2 stream" },
#define           H2_EV_H2S_SEND    (1ULL << 37)
	{ .mask = H2_EV_H2S_SEND,     .name = "h2s_send",    .desc = "Tx for H2 stream" },
#define           H2_EV_H2S_FCTL    (1ULL << 38)
	{ .mask = H2_EV_H2S_FCTL,     .name = "h2s_fctl",    .desc = "H2 stream flow-controlled" },
#define           H2_EV_H2S_BLK     (1ULL << 39)
	{ .mask = H2_EV_H2S_BLK,      .name = "h2s_blk",     .desc = "H2 stream blocked" },
#define           H2_EV_H2S_WAKE    (1ULL << 40)
	{ .mask = H2_EV_H2S_WAKE,     .name = "h2s_wake",    .desc = "H2 stream woken up" },
#define           H2_EV_H2S_END     (1ULL << 41)
	{ .mask = H2_EV_H2S_END,      .name = "h2s_end",     .desc = "H2 stream terminated" },
#define           H2_EV_H2S_ERR     (1ULL << 42)
	{ .mask = H2_EV_H2S_ERR,      .name = "h2s_err",     .desc = "error on H2 stream" },
#define           H2_EV_STRM_NEW    (1ULL << 43)
	{ .mask = H2_EV_STRM_NEW,     .name = "strm_new",    .desc = "app-layer stream creation" },
#define           H2_EV_STRM_RECV   (1ULL << 44)
	{ .mask = H2_EV_STRM_RECV,    .name = "strm_recv",   .desc = "receiving data for stream" },
#define           H2_EV_STRM_SEND   (1ULL << 45)
	{ .mask = H2_EV_STRM_SEND,    .name = "strm_send",   .desc = "sending data for stream" },
#define           H2_EV_STRM_FULL   (1ULL << 46)
	{ .mask = H2_EV_STRM_FULL,    .name = "strm_full",   .desc = "stream buffer full" },
#define           H2_EV_STRM_WAKE   (1ULL << 47)
	{ .mask = H2_EV_STRM_WAKE,    .name = "strm_wake",   .desc = "stream woken up" },
#define           H2_EV_STRM_SHUT   (1ULL << 48)
	{ .mask = H2_EV_STRM_SHUT,    .name = "strm_shut",   .desc = "stream shutdown" },
#define           H2_EV_STRM_END    (1ULL << 49)
	{ .mask = H2_EV_STRM_END,     .name = "strm_end",    .desc = "detaching app-layer stream" },
#define           H2_EV_STRM_ERR    (1ULL << 50)
	{ .mask = H2_EV_STRM_ERR,     .name = "strm_err",    .desc = "stream error" },
#define           H2_EV_PROTO_ERR   (1ULL << 51)
	{ .mask = H2_EV_PROTO_ERR,    .name = "proto_err",   .desc = "protocol error" },
	{ }
};
363
/* argument descriptors for trace lock-on: only arg2 (the h2s) may be used as
 * a lock-on criterion; arg1 is already consumed by the connection
 */
static const struct name_desc h2_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the connection */ },
	/* arg2 */ { .name="h2s", .desc="H2 stream" },
	/* arg3 */ { },
	/* arg4 */ { }
};
370
/* trace verbosity levels; each H2_VERB_* value indexes the entry right below
 * it (1-based, 0 being reserved)
 */
static const struct name_desc h2_trace_decoding[] = {
#define H2_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define H2_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report only h2c/h2s state and flags, no real decoding" },
#define H2_VERB_SIMPLE   3
	{ .name="simple",   .desc="add request/response status line or frame info when available" },
#define H2_VERB_ADVANCED 4
	{ .name="advanced", .desc="add header fields or frame decoding when available" },
#define H2_VERB_COMPLETE 5
	{ .name="complete", .desc="add full data dump when available" },
	{ /* end */ }
};
384
/* the h2 trace source, registered at startup via INITCALL below */
static struct trace_source trace_h2 __read_mostly = {
	.name = IST("h2"),
	.desc = "HTTP/2 multiplexer",
	.arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
	.default_cb = h2_trace,
	.known_events = h2_trace_events,
	.lockon_args = h2_trace_lockon_args,
	.decoding = h2_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_h2
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
398
/* h2 stats module: indexes into the h2_stats[] descriptor table and the
 * per-proxy counter array. H2_STATS_COUNT gives the table size.
 */
enum {
	/* received frame counters */
	H2_ST_HEADERS_RCVD    = 0,
	H2_ST_DATA_RCVD       = 1,
	H2_ST_SETTINGS_RCVD   = 2,
	H2_ST_RST_STREAM_RCVD = 3,
	H2_ST_GOAWAY_RCVD     = 4,

	/* detected errors and the responses they triggered */
	H2_ST_CONN_PROTO_ERR  = 5,
	H2_ST_STRM_PROTO_ERR  = 6,
	H2_ST_RST_STREAM_RESP = 7,
	H2_ST_GOAWAY_RESP     = 8,

	/* connection/stream gauges and totals */
	H2_ST_OPEN_CONN       = 9,
	H2_ST_OPEN_STREAM     = 10,
	H2_ST_TOTAL_CONN      = 11,
	H2_ST_TOTAL_STREAM    = 12,

	H2_STATS_COUNT /* must be the last member of the enum */
};
419
/* Names and descriptions of the H2 stats fields, indexed by the enum above.
 * The "backend" wording in the open/total stream names reflects how these
 * fields are labeled on the stats output.
 */
static struct name_desc h2_stats[] = {
	[H2_ST_HEADERS_RCVD]    = { .name = "h2_headers_rcvd",
	                            .desc = "Total number of received HEADERS frames" },
	[H2_ST_DATA_RCVD]       = { .name = "h2_data_rcvd",
	                            .desc = "Total number of received DATA frames" },
	[H2_ST_SETTINGS_RCVD]   = { .name = "h2_settings_rcvd",
	                            .desc = "Total number of received SETTINGS frames" },
	[H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
	                            .desc = "Total number of received RST_STREAM frames" },
	[H2_ST_GOAWAY_RCVD]     = { .name = "h2_goaway_rcvd",
	                            .desc = "Total number of received GOAWAY frames" },

	[H2_ST_CONN_PROTO_ERR]  = { .name = "h2_detected_conn_protocol_errors",
	                            .desc = "Total number of connection protocol errors" },
	[H2_ST_STRM_PROTO_ERR]  = { .name = "h2_detected_strm_protocol_errors",
	                            .desc = "Total number of stream protocol errors" },
	[H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
	                            .desc = "Total number of RST_STREAM sent on detected error" },
	[H2_ST_GOAWAY_RESP]     = { .name = "h2_goaway_resp",
	                            .desc = "Total number of GOAWAY sent on detected error" },

	[H2_ST_OPEN_CONN]       = { .name = "h2_open_connections",
	                            .desc = "Count of currently open connections" },
	[H2_ST_OPEN_STREAM]     = { .name = "h2_backend_open_streams",
	                            .desc = "Count of currently open streams" },
	[H2_ST_TOTAL_CONN]      = { .name = "h2_total_connections",
	                            .desc = "Total number of connections" },
	[H2_ST_TOTAL_STREAM]    = { .name = "h2_backend_total_streams",
	                            .desc = "Total number of streams" },
};
450
/* Private counters backing the stats fields above. A per-proxy copy is
 * obtained via EXTRA_COUNTERS_GET() at mux init time.
 */
static struct h2_counters {
	long long headers_rcvd;    /* total number of HEADERS frames received */
	long long data_rcvd;       /* total number of DATA frames received */
	long long settings_rcvd;   /* total number of SETTINGS frames received */
	long long rst_stream_rcvd; /* total number of RST_STREAM frames received */
	long long goaway_rcvd;     /* total number of GOAWAY frames received */

	long long conn_proto_err;  /* total number of connection protocol errors detected */
	long long strm_proto_err;  /* total number of stream protocol errors detected */
	long long rst_stream_resp; /* total number of RST_STREAM frames sent on error */
	long long goaway_resp;     /* total number of GOAWAY frames sent on error */

	long long open_conns;      /* count of currently open connections */
	long long open_streams;    /* count of currently open streams */
	long long total_conns;     /* total number of connections */
	long long total_streams;   /* total number of streams */
} h2_counters;
468
469static void h2_fill_stats(void *data, struct field *stats)
470{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100471 struct h2_counters *counters = data;
472
473 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
474 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
475 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
476 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
477 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100478
479 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
480 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
481 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
482 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100483
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100484 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
485 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
486 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
487 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100488}
489
/* Registration of the H2 counters with the generic stats framework. The
 * module applies to both frontend and backend proxies and its counters may
 * be cleared from the CLI (.clearable = 1).
 */
static struct stats_module h2_stats_module = {
	.name          = "h2",
	.fill_stats    = h2_fill_stats,
	.stats         = h2_stats,
	.stats_count   = H2_STATS_COUNT,
	.counters      = &h2_counters,
	.counters_size = sizeof(h2_counters),
	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
	.clearable     = 1,
};

INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
502
/* the h2c connection pool */
DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));

/* the h2s stream pool */
DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));

/* The default connection window size is 65535, it may only be enlarged using
 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
 * we'll pretend we already received the difference between the two to send
 * an equivalent window update to enlarge it to 2G-1.
 */
#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)

/* maximum amount of data we're OK with re-aligning for buffer optimizations */
#define MAX_DATA_REALIGN 1024

/* a few settings from the global section, adjustable via the configuration */
static int h2_settings_header_table_size = 4096; /* initial value */
static int h2_settings_initial_window_size = 65535; /* initial value */
static unsigned int h2_settings_max_concurrent_streams = 100;
static int h2_settings_max_frame_size = 0; /* unset */

/* a dummy closed endpoint, shared by the static placeholder streams below */
static const struct cs_endpoint closed_ep = {
	.cs = NULL,
	.flags = CS_EP_DETACHED,
};
530
/* a dummy closed stream, with a pending RST already received */
static const struct h2s *h2_closed_stream = &(const struct h2s){
	.endp = (struct cs_endpoint *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_STREAM_CLOSED,
	.flags = H2_SF_RST_RCVD,
	.id = 0,
};
540
/* a dummy closed stream returning a PROTOCOL_ERROR error */
static const struct h2s *h2_error_stream = &(const struct h2s){
	.endp = (struct cs_endpoint *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_PROTOCOL_ERROR,
	.flags = 0,
	.id = 0,
};
550
/* a dummy closed stream returning a REFUSED_STREAM error */
static const struct h2s *h2_refused_stream = &(const struct h2s){
	.endp = (struct cs_endpoint *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_CLOSED,
	.errcode = H2_ERR_REFUSED_STREAM,
	.flags = 0,
	.id = 0,
};
560
/* and a dummy idle stream for use with any unannounced stream */
static const struct h2s *h2_idle_stream = &(const struct h2s){
	.endp = (struct cs_endpoint *)&closed_ep,
	.h2c = NULL,
	.st = H2_SS_IDLE,
	.errcode = H2_ERR_STREAM_CLOSED,
	.id = 0,
};
569
/* forward declarations for functions used before their definition */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
static int h2_send(struct h2c *h2c);
static int h2_recv(struct h2c *h2c);
static int h2_process(struct h2c *h2c);
/* h2_io_cb is exported to see it resolved in "show fd" */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
static int h2_frt_transfer_data(struct h2s *h2s);
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess);
static void h2s_alert(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200582
Willy Tarreauab2ec452019-08-30 07:07:08 +0200583/* returns a h2c state as an abbreviated 3-letter string, or "???" if unknown */
584static inline const char *h2c_st_to_str(enum h2_cs st)
585{
586 switch (st) {
587 case H2_CS_PREFACE: return "PRF";
588 case H2_CS_SETTINGS1: return "STG";
589 case H2_CS_FRAME_H: return "FRH";
590 case H2_CS_FRAME_P: return "FRP";
591 case H2_CS_FRAME_A: return "FRA";
592 case H2_CS_FRAME_E: return "FRE";
593 case H2_CS_ERROR: return "ERR";
594 case H2_CS_ERROR2: return "ER2";
595 default: return "???";
596 }
597}
598
599/* returns a h2s state as an abbreviated 3-letter string, or "???" if unknown */
600static inline const char *h2s_st_to_str(enum h2_ss st)
601{
602 switch (st) {
603 case H2_SS_IDLE: return "IDL"; // idle
604 case H2_SS_RLOC: return "RSL"; // reserved local
605 case H2_SS_RREM: return "RSR"; // reserved remote
606 case H2_SS_OPEN: return "OPN"; // open
607 case H2_SS_HREM: return "HCR"; // half-closed remote
608 case H2_SS_HLOC: return "HCL"; // half-closed local
609 case H2_SS_ERROR : return "ERR"; // error
610 case H2_SS_CLOSED: return "CLO"; // closed
611 default: return "???";
612 }
613}
614
/* the H2 traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
 * that arg3, if non-null, is either of type htx for tx headers, or of type
 * buffer for everything else. arg4 (a4) is currently unused here.
 */
static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct h2c *h2c = conn ? conn->ctx : NULL;
	const struct h2s *h2s = a2;
	const struct buffer *buf = a3;
	const struct htx *htx;
	int pos;

	if (!h2c) // nothing to add
		return;

	/* everything below the CLEAN verbosity gets connection/stream details */
	if (src->verbosity > H2_VERB_CLEAN) {
		chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));

		if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
			conn_append_debug_info(&trace_buf, conn, " : ");

		if (h2c->errcode)
			chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);

		/* demux frame header details, only for RX frame-header events */
		if (h2c->dsi >= 0 &&
		    (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
			chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
		}

		if (h2s) {
			if (h2s->id <= 0)
				chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
			chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
			if (h2s->id && h2s->errcode)
				chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
		}
	}

	/* Let's dump decoded requests and responses right after parsing. They
	 * are traced at level USER with a few recognizable flags.
	 */
	if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
	     mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
		htx = htxbuf(buf); // recv req/res
	else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
		htx = a3; // send req/res
	else
		htx = NULL;

	/* dump the start line (method/uri/version or version/status/reason) */
	if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
		const struct htx_blk *blk = htx_get_blk(htx, pos);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
		else if (type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}
}
687
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200688
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100689/* Detect a pending read0 for a H2 connection. It happens if a read0 was
690 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
691 * to parse pending data, confirming no more progress is possible because
692 * we're facing a truncated frame. The function returns 1 to report a read0
693 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200694 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100695static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200696{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100697 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200698}
699
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200700/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreau34395832022-03-18 14:59:54 +0100701 * connection may expire when it has no attached streams. As long as streams
702 * are attached, the application layer is responsible for timeout management,
703 * and each layer will detach when it doesn't want to wait anymore. When the
704 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200705 */
706static inline int h2c_may_expire(const struct h2c *h2c)
707{
Willy Tarreau34395832022-03-18 14:59:54 +0100708 return !h2c->nb_cs;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200709}
710
/* Update the h2c task's expiration date if needed and requeue the task. The
 * timeout picked depends on the connection's situation, by decreasing
 * priority: shut timeout once a GOAWAY was sent, regular timeout while
 * output data remain, keep-alive timeout for an idle front connection that
 * already served a stream, and http-request otherwise. During soft-stop with
 * close-spread-time, idle connections may additionally get an earlier expire
 * so their closing is spread over the window.
 */
static void h2c_update_timeout(struct h2c *h2c)
{
	int is_idle_conn = 0;

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	if (!h2c->task)
		goto leave;

	if (h2c_may_expire(h2c)) {
		/* no more streams attached */
		if (h2c->last_sid >= 0) {
			/* GOAWAY sent, closing in progress */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->shut_timeout);
			is_idle_conn = 1;
		} else if (br_data(h2c->mbuf)) {
			/* pending output data: always the regular data timeout */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
		} else if (!(h2c->flags & H2_CF_IS_BACK) && h2c->max_id > 0 && !b_data(&h2c->dbuf)) {
			/* idle after having seen one stream => keep-alive,
			 * falling back to http-request when no keep-alive
			 * timeout is configured.
			 */
			int to;

			if (tick_isset(h2c->proxy->timeout.httpka))
				to = h2c->proxy->timeout.httpka;
			else
				to = h2c->proxy->timeout.httpreq;

			h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
			is_idle_conn = 1;
		} else {
			/* before first request, or started to deserialize a
			 * new req => http-request, but only set, not refresh.
			 */
			int exp = (h2c->flags & H2_CF_IS_BACK) ? TICK_ETERNITY : h2c->proxy->timeout.httpreq;
			h2c->task->expire = tick_add_ifset(h2c->idle_start, exp);
		}
		/* if a timeout above was not set, fall back to the default one */
		if (!tick_isset(h2c->task->expire))
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);

		if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
		    is_idle_conn && tick_isset(global.close_spread_end)) {
			/* If a soft-stop is in progress and a close-spread-time
			 * is set, we want to spread idle connection closing roughly
			 * evenly across the defined window. This should only
			 * act on idle frontend connections.
			 * If the window end is already in the past, we wake the
			 * timeout task up immediately so that it can be closed.
			 */
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* We don't need to reset the expire if it would
				 * already happen before the close window end.
				 */
				if (tick_isset(h2c->task->expire) &&
				    tick_is_le(global.close_spread_end, h2c->task->expire)) {
					/* Set an expire value shorter than the current value
					 * because the close spread window end comes earlier.
					 */
					h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
				}
			}
			else {
				/* We are past the soft close window end, wake the timeout
				 * task up immediately.
				 */
				task_wakeup(h2c->task, TASK_WOKEN_TIMER);
			}
		}

	} else {
		/* streams are still attached: the connection must not expire */
		h2c->task->expire = TICK_ETERNITY;
	}
	task_queue(h2c->task);
 leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
}
789
Olivier Houchard7a977432019-03-21 15:47:13 +0100790static __inline int
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200791h2c_is_dead(const struct h2c *h2c)
Olivier Houchard7a977432019-03-21 15:47:13 +0100792{
793 if (eb_is_empty(&h2c->streams_by_id) && /* don't close if streams exist */
794 ((h2c->conn->flags & CO_FL_ERROR) || /* errors close immediately */
795 (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
796 (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
Willy Tarreau662fafc2019-05-26 09:43:07 +0200797 (!br_data(h2c->mbuf) && /* mux buffer empty, also process clean events below */
Olivier Houchard7a977432019-03-21 15:47:13 +0100798 (conn_xprt_read0_pending(h2c->conn) ||
799 (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
800 return 1;
801
802 return 0;
Olivier Houchard7a977432019-03-21 15:47:13 +0100803}
804
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200805/*****************************************************/
806/* functions below are for dynamic buffer management */
807/*****************************************************/
808
Willy Tarreau315d8072017-12-10 22:17:57 +0100809/* indicates whether or not the we may call the h2_recv() function to attempt
810 * to receive data into the buffer and/or demux pending data. The condition is
811 * a bit complex due to some API limits for now. The rules are the following :
812 * - if an error or a shutdown was detected on the connection and the buffer
813 * is empty, we must not attempt to receive
814 * - if the demux buf failed to be allocated, we must not try to receive and
815 * we know there is nothing pending
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100816 * - if no flag indicates a blocking condition, we may attempt to receive,
817 * regardless of whether the demux buffer is full or not, so that only
818 * de demux part decides whether or not to block. This is needed because
819 * the connection API indeed prevents us from re-enabling receipt that is
820 * already enabled in a polled state, so we must always immediately stop
821 * as soon as the demux can't proceed so as never to hit an end of read
822 * with data pending in the buffers.
Willy Tarreau315d8072017-12-10 22:17:57 +0100823 * - otherwise must may not attempt
824 */
825static inline int h2_recv_allowed(const struct h2c *h2c)
826{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200827 if (b_data(&h2c->dbuf) == 0 &&
Willy Tarreau315d8072017-12-10 22:17:57 +0100828 (h2c->st0 >= H2_CS_ERROR ||
829 h2c->conn->flags & CO_FL_ERROR ||
830 conn_xprt_read0_pending(h2c->conn)))
831 return 0;
832
833 if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100834 !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
Willy Tarreau315d8072017-12-10 22:17:57 +0100835 return 1;
836
837 return 0;
838}
839
Willy Tarreau47b515a2018-12-21 16:09:41 +0100840/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200841static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100842{
843 if (!h2_recv_allowed(h2c))
844 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200845 if ((!consider_buffer || !b_data(&h2c->dbuf))
846 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100847 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200848 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100849}
850
851
Willy Tarreaufa1d3572019-01-31 10:31:51 +0100852/* returns true if the front connection has too many conn_streams attached */
853static inline int h2_frt_has_too_many_cs(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200854{
Willy Tarreaua8754662018-12-23 20:43:58 +0100855 return h2c->nb_cs > h2_settings_max_concurrent_streams;
Willy Tarreauf2101912018-07-19 10:11:38 +0200856}
857
/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
 * flags are used to figure what buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 * Buffers are retried in a fixed priority order: demux buffer first, then
 * mux (output) buffer, then the current stream's rx buffer.
 */
static int h2_buf_available(void *target)
{
	struct h2c *h2c = target;
	struct h2s *h2s;

	/* demux buffer was missing: allocate it and resume demuxing */
	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
		h2c->flags &= ~H2_CF_DEM_DALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	/* mux buffer was missing: allocate a new tail buffer; if the demux
	 * was blocked on mux room, it may now resume as well.
	 */
	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
		h2c->flags &= ~H2_CF_MUX_MALLOC;

		if (h2c->flags & H2_CF_DEM_MROOM) {
			h2c->flags &= ~H2_CF_DEM_MROOM;
			h2c_restart_reading(h2c, 1);
		}
		return 1;
	}

	/* the stream being demuxed needed an rx buffer; only retry if the
	 * stream still exists and is still attached to a conn_stream.
	 */
	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->endp->cs &&
	    b_alloc(&h2s->rxbuf)) {
		h2c->flags &= ~H2_CF_DEM_SALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	return 0;
}
894
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200895static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200896{
897 struct buffer *buf = NULL;
898
Willy Tarreau2b718102021-04-21 07:32:39 +0200899 if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100900 unlikely((buf = b_alloc(bptr)) == NULL)) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100901 h2c->buf_wait.target = h2c;
902 h2c->buf_wait.wakeup_cb = h2_buf_available;
Willy Tarreaub4e34762021-09-30 19:02:18 +0200903 LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
Willy Tarreau14398122017-09-22 14:26:04 +0200904 }
905 return buf;
906}
907
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200908static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200909{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200910 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100911 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100912 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200913 }
914}
915
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200916static inline void h2_release_mbuf(struct h2c *h2c)
917{
918 struct buffer *buf;
919 unsigned int count = 0;
920
921 while (b_size(buf = br_head_pick(h2c->mbuf))) {
922 b_free(buf);
923 count++;
924 }
925 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100926 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200927}
928
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100929/* returns the number of allocatable outgoing streams for the connection taking
930 * the last_sid and the reserved ones into account.
931 */
932static inline int h2_streams_left(const struct h2c *h2c)
933{
934 int ret;
935
936 /* consider the number of outgoing streams we're allowed to create before
937 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
938 * nb_reserved is the number of streams which don't yet have an ID.
939 */
940 ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
941 ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
942 if (ret < 0)
943 ret = 0;
944 return ret;
945}
946
Willy Tarreau00f18a32019-01-26 12:19:01 +0100947/* returns the number of streams in use on a connection to figure if it's
948 * idle or not. We check nb_cs and not nb_streams as the caller will want
949 * to know if it was the last one after a detach().
950 */
951static int h2_used_streams(struct connection *conn)
952{
953 struct h2c *h2c = conn->ctx;
954
955 return h2c->nb_cs;
956}
957
/* returns the number of concurrent streams available on the connection,
 * which is the smallest of the peer-advertised limit, the stream-ID space
 * left, and (on backend connections) the server's max-reuse setting.
 */
static int h2_avail_streams(struct connection *conn)
{
	struct server *srv = objt_server(conn->target);
	struct h2c *h2c = conn->ctx;
	int ret1, ret2;

	/* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
	 * streams on the connection.
	 */
	if (h2c->last_sid >= 0)
		return 0;

	/* no new stream may be opened once the connection is in error */
	if (h2c->st0 >= H2_CS_ERROR)
		return 0;

	/* note: may be negative if a SETTINGS frame changes the limit */
	ret1 = h2c->streams_limit - h2c->nb_streams;

	/* we must also consider the limit imposed by stream IDs */
	ret2 = h2_streams_left(h2c);
	ret1 = MIN(ret1, ret2);
	if (ret1 > 0 && srv && srv->max_reuse >= 0) {
		/* cap by the server's remaining reuse budget for this connection */
		ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
		ret1 = MIN(ret1, ret2);
	}
	return ret1;
}
986
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200987
Willy Tarreau62f52692017-10-08 23:01:42 +0200988/*****************************************************************/
989/* functions below are dedicated to the mux setup and management */
990/*****************************************************************/
991
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200992/* Initialize the mux once it's attached. For outgoing connections, the context
993 * is already initialized before installing the mux, so we detect incoming
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200994 * connections from the fact that the context is still NULL (even during mux
995 * upgrades). <input> is always used as Input buffer and may contain data. It is
996 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200997 */
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200998static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
999 struct buffer *input)
Willy Tarreau32218eb2017-09-22 08:07:25 +02001000{
1001 struct h2c *h2c;
Willy Tarreauea392822017-10-31 10:02:25 +01001002 struct task *t = NULL;
Christopher Fauletf81ef032019-10-04 15:19:43 +02001003 void *conn_ctx = conn->ctx;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001004
Christopher Fauletf81ef032019-10-04 15:19:43 +02001005 TRACE_ENTER(H2_EV_H2C_NEW);
Willy Tarreau7838a792019-08-12 18:42:03 +02001006
Willy Tarreaubafbe012017-11-24 17:34:44 +01001007 h2c = pool_alloc(pool_head_h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001008 if (!h2c)
mildiscd2d7de2018-10-02 16:44:18 +02001009 goto fail_no_h2c;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001010
Christopher Faulete9b70722019-04-08 10:46:02 +02001011 if (conn_is_back(conn)) {
Willy Tarreau01b44822018-10-03 14:26:37 +02001012 h2c->flags = H2_CF_IS_BACK;
1013 h2c->shut_timeout = h2c->timeout = prx->timeout.server;
1014 if (tick_isset(prx->timeout.serverfin))
1015 h2c->shut_timeout = prx->timeout.serverfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +01001016
1017 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
1018 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +02001019 } else {
1020 h2c->flags = H2_CF_NONE;
1021 h2c->shut_timeout = h2c->timeout = prx->timeout.client;
1022 if (tick_isset(prx->timeout.clientfin))
1023 h2c->shut_timeout = prx->timeout.clientfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +01001024
1025 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
1026 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +02001027 }
Willy Tarreau3f133572017-10-31 19:21:06 +01001028
Willy Tarreau0b37d652018-10-03 10:33:02 +02001029 h2c->proxy = prx;
Willy Tarreau33400292017-11-05 11:23:40 +01001030 h2c->task = NULL;
Willy Tarreau15a47332022-03-18 15:57:34 +01001031 h2c->idle_start = now_ms;
Willy Tarreau3f133572017-10-31 19:21:06 +01001032 if (tick_isset(h2c->timeout)) {
Willy Tarreaubeeabf52021-10-01 18:23:30 +02001033 t = task_new_here();
Willy Tarreau3f133572017-10-31 19:21:06 +01001034 if (!t)
1035 goto fail;
1036
1037 h2c->task = t;
1038 t->process = h2_timeout_task;
1039 t->context = h2c;
1040 t->expire = tick_add(now_ms, h2c->timeout);
1041 }
Willy Tarreauea392822017-10-31 10:02:25 +01001042
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001043 h2c->wait_event.tasklet = tasklet_new();
1044 if (!h2c->wait_event.tasklet)
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001045 goto fail;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001046 h2c->wait_event.tasklet->process = h2_io_cb;
1047 h2c->wait_event.tasklet->context = h2c;
Willy Tarreau4f6516d2018-12-19 13:59:17 +01001048 h2c->wait_event.events = 0;
Amaury Denoyelled3a88c12021-05-03 10:47:51 +02001049 if (!conn_is_back(conn)) {
1050 /* Connection might already be in the stopping_list if subject
1051 * to h1->h2 upgrade.
1052 */
1053 if (!LIST_INLIST(&conn->stopping_list)) {
1054 LIST_APPEND(&mux_stopping_data[tid].list,
1055 &conn->stopping_list);
1056 }
1057 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001058
Willy Tarreau2bdcc702020-05-19 11:31:11 +02001059 h2c->ddht = hpack_dht_alloc();
Willy Tarreau32218eb2017-09-22 08:07:25 +02001060 if (!h2c->ddht)
1061 goto fail;
1062
1063 /* Initialise the context. */
1064 h2c->st0 = H2_CS_PREFACE;
1065 h2c->conn = conn;
Willy Tarreau2e2083a2019-01-31 10:34:07 +01001066 h2c->streams_limit = h2_settings_max_concurrent_streams;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001067 h2c->max_id = -1;
1068 h2c->errcode = H2_ERR_NO_ERROR;
Willy Tarreau97aaa672018-12-23 09:49:04 +01001069 h2c->rcvd_c = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001070 h2c->rcvd_s = 0;
Willy Tarreau49745612017-12-03 18:56:02 +01001071 h2c->nb_streams = 0;
Willy Tarreau7ac60e82018-07-19 09:04:05 +02001072 h2c->nb_cs = 0;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001073 h2c->nb_reserved = 0;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001074 h2c->stream_cnt = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001075
Christopher Faulet51f73eb2019-04-08 11:22:47 +02001076 h2c->dbuf = *input;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001077 h2c->dsi = -1;
1078 h2c->msi = -1;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001079
Willy Tarreau32218eb2017-09-22 08:07:25 +02001080 h2c->last_sid = -1;
1081
Willy Tarreau51330962019-05-26 09:38:07 +02001082 br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
Willy Tarreau32218eb2017-09-22 08:07:25 +02001083 h2c->miw = 65535; /* mux initial window size */
1084 h2c->mws = 65535; /* mux window size */
1085 h2c->mfs = 16384; /* initial max frame size */
Willy Tarreau751f2d02018-10-05 09:35:00 +02001086 h2c->streams_by_id = EB_ROOT;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001087 LIST_INIT(&h2c->send_list);
1088 LIST_INIT(&h2c->fctl_list);
Willy Tarreau9edf6db2019-10-02 10:49:59 +02001089 LIST_INIT(&h2c->blocked_list);
Willy Tarreau90f366b2021-02-20 11:49:49 +01001090 LIST_INIT(&h2c->buf_wait.list);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001091
Christopher Fauletf81ef032019-10-04 15:19:43 +02001092 conn->ctx = h2c;
1093
Willy Tarreau8e6f7492021-06-16 17:47:24 +02001094 TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);
1095
Willy Tarreau3f133572017-10-31 19:21:06 +01001096 if (t)
1097 task_queue(t);
Willy Tarreauea392822017-10-31 10:02:25 +01001098
Willy Tarreau01b44822018-10-03 14:26:37 +02001099 if (h2c->flags & H2_CF_IS_BACK) {
1100 /* FIXME: this is temporary, for outgoing connections we need
1101 * to immediately allocate a stream until the code is modified
1102 * so that the caller calls ->attach(). For now the outgoing cs
Christopher Fauletf81ef032019-10-04 15:19:43 +02001103 * is stored as conn->ctx by the caller and saved in conn_ctx.
Willy Tarreau01b44822018-10-03 14:26:37 +02001104 */
1105 struct h2s *h2s;
1106
Christopher Fauletf81ef032019-10-04 15:19:43 +02001107 h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
Willy Tarreau01b44822018-10-03 14:26:37 +02001108 if (!h2s)
1109 goto fail_stream;
1110 }
1111
Willy Tarreau4781b152021-04-06 13:53:36 +02001112 HA_ATOMIC_INC(&h2c->px_counters->open_conns);
1113 HA_ATOMIC_INC(&h2c->px_counters->total_conns);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001114
Willy Tarreau0f383582018-10-03 14:22:21 +02001115 /* prepare to read something */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02001116 h2c_restart_reading(h2c, 1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001117 TRACE_LEAVE(H2_EV_H2C_NEW, conn);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001118 return 0;
Willy Tarreau01b44822018-10-03 14:26:37 +02001119 fail_stream:
1120 hpack_dht_free(h2c->ddht);
mildiscd2d7de2018-10-02 16:44:18 +02001121 fail:
Willy Tarreauf6562792019-05-07 19:05:35 +02001122 task_destroy(t);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001123 if (h2c->wait_event.tasklet)
1124 tasklet_free(h2c->wait_event.tasklet);
Willy Tarreaubafbe012017-11-24 17:34:44 +01001125 pool_free(pool_head_h2c, h2c);
mildiscd2d7de2018-10-02 16:44:18 +02001126 fail_no_h2c:
Willy Tarreau3b990fe2022-01-12 17:24:26 +01001127 if (!conn_is_back(conn))
1128 LIST_DEL_INIT(&conn->stopping_list);
Christopher Fauletf81ef032019-10-04 15:19:43 +02001129 conn->ctx = conn_ctx; /* restore saved ctx */
1130 TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001131 return -1;
1132}
1133
Willy Tarreau751f2d02018-10-05 09:35:00 +02001134/* returns the next allocatable outgoing stream ID for the H2 connection, or
1135 * -1 if no more is allocatable.
1136 */
1137static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
1138{
1139 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +01001140
1141 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +02001142 id = -1;
1143 return id;
1144}
1145
Willy Tarreau2373acc2017-10-12 17:35:14 +02001146/* returns the stream associated with id <id> or NULL if not found */
1147static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1148{
1149 struct eb32_node *node;
1150
Willy Tarreau751f2d02018-10-05 09:35:00 +02001151 if (id == 0)
1152 return (struct h2s *)h2_closed_stream;
1153
Willy Tarreau2a856182017-05-16 15:20:39 +02001154 if (id > h2c->max_id)
1155 return (struct h2s *)h2_idle_stream;
1156
Willy Tarreau2373acc2017-10-12 17:35:14 +02001157 node = eb32_lookup(&h2c->streams_by_id, id);
1158 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001159 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001160
1161 return container_of(node, struct h2s, by_id);
1162}
1163
/* release function. This one should be called to free all resources allocated
 * to the mux.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_END);

	hpack_dht_free(h2c->ddht);

	/* get out of the buffer wait queue if we were still there */
	if (LIST_INLIST(&h2c->buf_wait.list))
		LIST_DEL_INIT(&h2c->buf_wait.list);

	h2_release_buf(h2c, &h2c->dbuf);
	h2_release_mbuf(h2c);

	if (h2c->task) {
		/* detach the timeout task from the context and wake it so it
		 * can terminate on its own (its context is now NULL).
		 */
		h2c->task->context = NULL;
		task_wakeup(h2c->task, TASK_WOKEN_OTHER);
		h2c->task = NULL;
	}
	if (h2c->wait_event.tasklet)
		tasklet_free(h2c->wait_event.tasklet);
	/* drop any pending transport-layer subscription before freeing */
	if (conn && h2c->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
					&h2c->wait_event);

	HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

	pool_free(pool_head_h2c, h2c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1225
1226
Willy Tarreau71681172017-10-23 14:39:06 +02001227/******************************************************/
1228/* functions below are for the H2 protocol processing */
1229/******************************************************/
1230
1231/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001232static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001233{
1234 return h2s ? h2s->id : 0;
1235}
1236
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001237/* returns the sum of the stream's own window size and the mux's initial
1238 * window, which together form the stream's effective window size.
1239 */
1240static inline int h2s_mws(const struct h2s *h2s)
1241{
1242 return h2s->sws + h2s->h2c->miw;
1243}
1244
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001245/* returns true of the mux is currently busy as seen from stream <h2s> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001246static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s)
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001247{
1248 if (h2c->msi < 0)
1249 return 0;
1250
1251 if (h2c->msi == h2s_id(h2s))
1252 return 0;
1253
1254 return 1;
1255}
1256
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001257/* marks an error on the connection. Before settings are sent, we must not send
1258 * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
1259 * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
1260 * even try.
1261 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001262static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
Willy Tarreau741d6df2017-10-17 08:00:59 +02001263{
Willy Tarreau022e5e52020-09-10 09:33:15 +02001264 TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
Willy Tarreau741d6df2017-10-17 08:00:59 +02001265 h2c->errcode = err;
Willy Tarreau15dbedd2022-04-13 09:40:52 +02001266 if (h2c->st0 < H2_CS_SETTINGS1)
1267 h2c->flags |= H2_CF_GOAWAY_FAILED;
Willy Tarreau741d6df2017-10-17 08:00:59 +02001268 h2c->st0 = H2_CS_ERROR;
1269}
1270
Willy Tarreau175cebb2019-01-24 10:02:24 +01001271/* marks an error on the stream. It may also update an already closed stream
1272 * (e.g. to report an error after an RST was received).
1273 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001274static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
Willy Tarreau2e43f082017-10-17 08:03:59 +02001275{
Willy Tarreau175cebb2019-01-24 10:02:24 +01001276 if (h2s->id && h2s->st != H2_SS_ERROR) {
Willy Tarreau022e5e52020-09-10 09:33:15 +02001277 TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
Willy Tarreau2e43f082017-10-17 08:03:59 +02001278 h2s->errcode = err;
Willy Tarreau175cebb2019-01-24 10:02:24 +01001279 if (h2s->st < H2_SS_ERROR)
1280 h2s->st = H2_SS_ERROR;
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02001281 cs_ep_set_error(h2s->endp);
Willy Tarreau2e43f082017-10-17 08:03:59 +02001282 }
1283}
1284
Willy Tarreau7e094452018-12-19 18:08:52 +01001285/* attempt to notify the data layer of recv availability */
1286static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1287{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001288 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001289 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001290 tasklet_wakeup(h2s->subs->tasklet);
1291 h2s->subs->events &= ~SUB_RETRY_RECV;
1292 if (!h2s->subs->events)
1293 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001294 }
1295}
1296
1297/* attempt to notify the data layer of send availability */
1298static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1299{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001300 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001301 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001302 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001303 tasklet_wakeup(h2s->subs->tasklet);
1304 h2s->subs->events &= ~SUB_RETRY_SEND;
1305 if (!h2s->subs->events)
1306 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001307 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001308 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1309 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1310 tasklet_wakeup(h2s->shut_tl);
1311 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001312}
1313
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001314/* alerts the data layer, trying to wake it up by all means, following
1315 * this sequence :
1316 * - if the h2s' data layer is subscribed to recv, then it's woken up for recv
1317 * - if its subscribed to send, then it's woken up for send
1318 * - if it was subscribed to neither, its ->wake() callback is called
1319 * It is safe to call this function with a closed stream which doesn't have a
1320 * conn_stream anymore.
1321 */
1322static void __maybe_unused h2s_alert(struct h2s *h2s)
1323{
Willy Tarreau7838a792019-08-12 18:42:03 +02001324 TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
1325
Willy Tarreauf96508a2020-01-10 11:12:48 +01001326 if (h2s->subs ||
Willy Tarreau5723f292020-01-10 15:16:57 +01001327 (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001328 h2s_notify_recv(h2s);
1329 h2s_notify_send(h2s);
1330 }
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02001331 else if (h2s->endp->cs && h2s->endp->cs->data_cb->wake != NULL) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001332 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02001333 h2s->endp->cs->data_cb->wake(h2s->endp->cs);
Willy Tarreau7838a792019-08-12 18:42:03 +02001334 }
1335
1336 TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001337}
1338
Willy Tarreaue4820742017-07-27 13:37:23 +02001339/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001340static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001341{
1342 uint8_t *out = frame;
1343
1344 *out = len >> 16;
1345 write_n16(out + 1, len);
1346}
1347
Willy Tarreau54c15062017-10-10 17:10:03 +02001348/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
1349 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
1350 * the caller's responsibility to verify that there are at least <bytes> bytes
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001351 * available in the buffer's input prior to calling this function. The buffer
1352 * is assumed not to hold any output data.
Willy Tarreau54c15062017-10-10 17:10:03 +02001353 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001354static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
Willy Tarreau54c15062017-10-10 17:10:03 +02001355 const struct buffer *b, int o)
1356{
Willy Tarreau591d4452018-06-15 17:21:00 +02001357 readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001358}
1359
Willy Tarreau1f094672017-11-20 21:27:45 +01001360static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001361{
Willy Tarreau591d4452018-06-15 17:21:00 +02001362 return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001363}
1364
Willy Tarreau1f094672017-11-20 21:27:45 +01001365static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001366{
Willy Tarreau591d4452018-06-15 17:21:00 +02001367 return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001368}
1369
Willy Tarreau1f094672017-11-20 21:27:45 +01001370static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001371{
Willy Tarreau591d4452018-06-15 17:21:00 +02001372 return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001373}
1374
1375
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001376/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
1377 * The algorithm is not obvious. It turns out that H2 headers are neither
1378 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
1379 * may wrap so each byte read must be checked. The header is formed like this :
Willy Tarreau715d5312017-07-11 15:20:24 +02001380 *
1381 * b0 b1 b2 b3 b4 b5..b8
1382 * +----------+---------+--------+----+----+----------------------+
1383 * |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
1384 * +----------+---------+--------+----+----+----------------------+
1385 *
1386 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
1387 * we get the sid properly aligned and ordered, and 16 bits of len properly
1388 * ordered as well. The type and flags can be extracted using bit shifts from
1389 * the word, and only one extra read is needed to fetch len[16:23].
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001390 * Returns zero if some bytes are missing, otherwise non-zero on success. The
1391 * buffer is assumed not to contain any output data.
Willy Tarreau715d5312017-07-11 15:20:24 +02001392 */
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001393static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001394{
1395 uint64_t w;
1396
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001397 if (b_data(b) < o + 9)
Willy Tarreau715d5312017-07-11 15:20:24 +02001398 return 0;
1399
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001400 w = h2_get_n64(b, o + 1);
1401 h->len = *(uint8_t*)b_peek(b, o) << 16;
Willy Tarreau715d5312017-07-11 15:20:24 +02001402 h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
1403 h->ff = w >> 32;
1404 h->ft = w >> 40;
1405 h->len += w >> 48;
1406 return 1;
1407}
1408
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above. The caller must have verified that at least 9
 * bytes are present in the buffer.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	b_del(b, 9);
}
1416
1417/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001418static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001419{
1420 int ret;
1421
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001422 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001423 if (ret > 0)
1424 h2_skip_frame_hdr(b);
1425 return ret;
1426}
1427
Willy Tarreaucb985a42019-10-07 16:56:34 +02001428
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	size_t remain = b->data - 9;              /* payload bytes past the 9-byte header */
	int extra_frames = (remain - 1) / mfs;    /* CONTINUATION frames to insert */
	size_t fsize;
	char *fptr;
	int frame;

	/* payload already fits in a single frame: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	/* walk backwards so each memmove shifts a tail fragment into its
	 * final place, leaving room for its new 9-byte frame header.
	 */
	for (frame = extra_frames; frame; frame--) {
		/* last fragment gets the remainder (1..mfs bytes), others are full */
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; only the last CONTINUATION carries
		 * the END_HEADERS flag.
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5)); /* copy stream ID */
	}

	/* the initial HEADERS frame no longer ends the header block and now
	 * only carries the first <remain> bytes.
	 */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1474
1475
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained.
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* streams with id 0 are reserved ones, accounted separately */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		if (h2s->endp->cs) {
			/* wake the app layer when nothing is left to read so
			 * it can notice the end of the stream.
			 */
			if (!(h2s->endp->flags & CS_EP_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	h2s->st = H2_SS_CLOSED;
}
1497
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		/* the freed rx buffer may be offered to entities waiting for one */
		b_free(&h2s->rxbuf);
		offer_buffers(NULL, 1);
	}

	/* clear any remaining subscription events so nobody is woken up later */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	LIST_DEL_INIT(&h2s->list);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	/* the endpoint, if still present, must be orphaned at this point */
	BUG_ON(h2s->endp && !(h2s->endp->flags & CS_EP_ORPHAN));
	cs_endpoint_free(h2s->endp);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1533
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function.
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* per-stream tasklet used for deferred shutdowns */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->endp = NULL;
	h2s->sws = 0;         /* stream window delta, see h2s_mws() */
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	/* id > 0: a real stream, becomes the connection's highest known id;
	 * id <= 0: a reserved stream slot, accounted separately.
	 */
	h2s->by_id.key = h2s->id = id;
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;
	h2c->stream_cnt++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1589
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it.
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse the stream if the configured concurrency limit is already
	 * reached; this is reported to the peer as a stream-level refusal by
	 * the caller, not here.
	 */
	if (h2c->nb_streams >= h2_settings_max_concurrent_streams) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	/* allocate the conn_stream endpoint and bind it to this mux stream
	 * and its connection; the stream starts orphaned (no conn_stream
	 * attached yet) and is flagged as not being the first request on
	 * the connection.
	 */
	h2s->endp = cs_endpoint_new();
	if (!h2s->endp)
		goto out_close;
	h2s->endp->target = h2s;
	h2s->endp->ctx = h2c->conn;
	h2s->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN|CS_EP_NOT_FIRST);

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		h2s->endp->flags |= CS_EP_WEBSOCKET;

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;

	/* on success <input> is taken over by the new conn_stream; on failure
	 * it is left untouched for the caller, as documented above.
	 * NOTE(review): the endpoint allocated above is presumably released by
	 * h2s_destroy() on the out_close path — confirm against h2s_destroy().
	 */
	if (!cs_new_from_endp(h2s->endp, sess, input))
		goto out_close;

	h2c->nb_cs++;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->tv_accept = now;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_cs(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	/* all error paths log the failure against the owning session */
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1662
/* allocates a new stream associated to conn_stream <cs> on the h2c connection
 * and returns it, or NULL in case of memory allocation error or if the highest
 * possible stream ID was reached.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* respect the stream concurrency limit advertised by the server */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* no usable stream IDs left on this connection */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* attach the conn_stream to this mux stream; on failure the freshly
	 * allocated h2s must be destroyed and NULL returned.
	 */
	if (cs_attach_mux(cs, h2s, h2c->conn) < 0) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		h2s_destroy(h2s);
		h2s = NULL;
		goto out;
	}
	h2s->endp = cs->endp;
	h2s->sess = sess;
	h2c->nb_cs++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
1707
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int mfs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* 9-byte frame header; the 3-byte length field is patched at the end
	 * once all settings have been appended (h2_set_frame_size() below).
	 */
	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	             "\x00\x00\x00"      /* length : 0 for now */
	             "\x04\x00"          /* type   : 4 (settings), flags : 0 */
	             "\x00\x00\x00\x00", /* stream ID : 0 */
	             9);

	/* each setting below is 6 bytes: 2-byte identifier + 4-byte value */
	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* only emit settings that differ from the protocol defaults
	 * (4096 / 65535 / unlimited / 16384 respectively).
	 */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_initial_window_size != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, h2_settings_initial_window_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_max_concurrent_streams != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, h2_settings_max_concurrent_streams);
		chunk_memcat(&buf, str, 6);
	}

	/* cap the advertised max frame size to the buffer size; 0 in the
	 * config means "use bufsize".
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* patch the payload length now that it is known */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	/* b_istput() returns 0 when the buffer is full (try the next ring
	 * slot) and < 0 on hard failure (frame larger than the buffer).
	 */
	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1813
/* Try to receive a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_frt_recv_preface(struct h2c *h2c)
{
	int ret1;
	int ret2;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);

	/* compare the start of the demux buffer against the expected H2
	 * client preface string.
	 */
	ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));

	if (unlikely(ret1 <= 0)) {
		/* ret1 == 0: not enough data yet, just wait for more */
		if (!ret1)
			h2c->flags |= H2_CF_DEM_SHORT_READ;
		/* ret1 < 0 (mismatch) or the peer closed before sending a
		 * full preface: this is a protocol error.
		 */
		if (ret1 < 0 || conn_xprt_read0_pending(h2c->conn)) {
			TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			/* don't count silent pre-handshake closes as protocol
			 * errors when the frontend asks to ignore probes.
			 */
			if (b_data(&h2c->dbuf) ||
			    !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		ret2 = 0;
		goto out;
	}

	/* preface OK: answer with our SETTINGS, and only consume the preface
	 * bytes once the SETTINGS frame could be emitted.
	 */
	ret2 = h2c_send_settings(h2c);
	if (ret2 > 0)
		b_del(&h2c->dbuf, ret1);
 out:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
	return ret2;
}
1848
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	/* an empty mux buffer means the preface was not emitted yet; a
	 * non-empty one means it was already written, so only the SETTINGS
	 * frame remains to be sent.
	 */
	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* buffer full: move to the next ring slot and retry */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1896
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case, nor will it send one if settings were not sent (e.g. still waiting for
 * a preface) so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	/* already failed before, or SETTINGS not sent yet: pretend success so
	 * callers don't loop on this.
	 */
	if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
		ret = 1; // claim that it worked
		goto out;
	}

	/* block either the stream (if provided) or the demuxer on mux busy */
	if (h2c_mux_busy(h2c, h2s)) {
		if (h2s)
			h2s->flags |= H2_SF_BLK_MBUSY;
		else
			h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	/* payload: last stream ID + error code (8 bytes total) */
	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: move to the next ring slot and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1986
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be update to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* nothing to do for a missing or already-closed stream */
	if (!h2s || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: move to the next ring slot and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* whether actually emitted or deliberately skipped, the stream is now
	 * considered reset and closed on our side.
	 */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2058
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* note: the demuxed stream ID is used here, not h2s->id, since <h2s>
	 * may be one of the dummy streams (see the function comment).
	 */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: move to the next ring slot and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* only real streams (non-zero ID) are marked reset and closed; dummy
	 * streams have no state to update.
	 */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2131
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	/* ES already sent (or stream beyond that point): report success */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* buffer full: move to the next ring slot and retry */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
2187
/* wake a specific stream and assign its conn_stream some CS_EP_* flags among
 * CS_EP_ERR_PENDING and CS_EP_ERROR if needed. The stream's state
 * is automatically updated accordingly. If the stream is orphaned, it is
 * destroyed.
 */
static void h2s_wake_one_stream(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);

	if (!h2s->endp->cs) {
		/* this stream was already orphaned */
		h2s_destroy(h2s);
		TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
		return;
	}

	/* a pending read0 on the connection closes the read side of the
	 * stream: OPEN -> HREM, and HLOC -> fully closed.
	 */
	if (h2c_read0_pending(h2s->h2c)) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* report an error when the connection failed, or when a GOAWAY was
	 * received whose last_sid excludes this stream (unassigned or higher
	 * ID streams will never be processed by the peer).
	 */
	if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) ||
	    (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
		h2s->endp->flags |= CS_EP_ERR_PENDING;
		/* upgrade to a definitive error once EOS was also seen */
		if (h2s->endp->flags & CS_EP_EOS)
			h2s->endp->flags |= CS_EP_ERROR;

		if (h2s->st < H2_SS_ERROR)
			h2s->st = H2_SS_ERROR;
	}

	h2s_alert(h2s);
	TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
}
2226
/* wake the streams attached to the connection, whose id is greater than <last>
 * or unassigned.
 */
static void h2_wake_some_streams(struct h2c *h2c, int last)
{
	struct eb32_node *node;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);

	/* Wake all streams with ID > last */
	node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
	while (node) {
		h2s = container_of(node, struct h2s, by_id);
		/* fetch the next node first: waking may destroy the stream
		 * and remove it from the tree.
		 */
		node = eb32_next(node);
		h2s_wake_one_stream(h2s);
	}

	/* Wake all streams with unassigned ID (ID == 0) */
	node = eb32_lookup(&h2c->streams_by_id, 0);
	while (node) {
		h2s = container_of(node, struct h2s, by_id);
		if (h2s->id > 0)
			break;
		node = eb32_next(node);
		h2s_wake_one_stream(h2s);
	}

	TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
}
2257
/* Wake up all blocked streams whose window size has become positive after the
 * mux's initial window was adjusted. This should be done after having processed
 * SETTINGS frames which have updated the mux's initial window size.
 */
static void h2c_unblock_sfctl(struct h2c *h2c)
{
	struct h2s *h2s;
	struct eb32_node *node;

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	node = eb32_first(&h2c->streams_by_id);
	while (node) {
		h2s = container_of(node, struct h2s, by_id);
		/* only streams blocked on stream flow control whose window is
		 * now positive are unblocked.
		 */
		if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
			h2s->flags &= ~H2_SF_BLK_SFCTL;
			LIST_DEL_INIT(&h2s->list);
			/* re-queue for sending if the stream asked to send or
			 * has a pending shutdown to emit.
			 */
			if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
			    h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
				LIST_APPEND(&h2c->send_list, &h2s->list);
		}
		node = eb32_next(node);
	}

	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
}
2284
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	/* a SETTINGS ACK must carry no payload (RFC7540#6.5), and needs no
	 * further processing nor any ACK in return.
	 */
	if (h2c->dff & H2_F_SETTINGS_ACK) {
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: each setting is a 16-bit identifier followed by a
	 * 32-bit value, hence the 6-byte stride. Unknown identifiers fall
	 * through the switch and are silently ignored as required by the spec.
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t  arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* only note the change here; the HPACK encoder state is
			 * adjusted later when the flag is seen.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2_settings_max_concurrent_streams)
					arg = h2_settings_max_concurrent_streams;
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* peer supports RFC8441 extended CONNECT */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	/* connection-level error: log it on the frontend side then switch the
	 * whole connection to the error state.
	 */
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2375
2376/* try to send an ACK for a settings frame on the connection. Returns > 0 on
2377 * success or one of the h2_status values.
2378 */
2379static int h2c_ack_settings(struct h2c *h2c)
2380{
2381 struct buffer *res;
2382 char str[9];
Willy Tarreau7838a792019-08-12 18:42:03 +02002383 int ret = 0;
2384
2385 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002386
2387 if (h2c_mux_busy(h2c, NULL)) {
2388 h2c->flags |= H2_CF_DEM_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02002389 goto out;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002390 }
2391
Willy Tarreau9c218e72019-05-26 10:08:28 +02002392 memcpy(str,
2393 "\x00\x00\x00" /* length : 0 (no data) */
2394 "\x04" "\x01" /* type : 4, flags : ACK */
2395 "\x00\x00\x00\x00" /* stream ID */, 9);
2396
Willy Tarreaubcc45952019-05-26 10:05:50 +02002397 res = br_tail(h2c->mbuf);
Willy Tarreau9c218e72019-05-26 10:08:28 +02002398 retry:
Willy Tarreaubcc45952019-05-26 10:05:50 +02002399 if (!h2_get_buf(h2c, res)) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02002400 h2c->flags |= H2_CF_MUX_MALLOC;
2401 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002402 goto out;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002403 }
2404
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002405 ret = b_istput(res, ist2(str, 9));
Willy Tarreau3421aba2017-07-27 15:41:03 +02002406 if (unlikely(ret <= 0)) {
2407 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002408 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2409 goto retry;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002410 h2c->flags |= H2_CF_MUX_MFULL;
2411 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002412 }
2413 else {
2414 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002415 ret = 0;
Willy Tarreau3421aba2017-07-27 15:41:03 +02002416 }
2417 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002418 out:
2419 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002420 return ret;
2421}
2422
Willy Tarreaucf68c782017-10-10 17:11:41 +02002423/* processes a PING frame and schedules an ACK if needed. The caller must pass
2424 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002425 * missing data. The caller must have already verified frame length
2426 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002427 */
2428static int h2c_handle_ping(struct h2c *h2c)
2429{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002430 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002431 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002432 h2c->st0 = H2_CS_FRAME_A;
2433 return 1;
2434}
2435
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002436/* Try to send a window update for stream id <sid> and value <increment>.
2437 * Returns > 0 on success or zero on missing room or failure. It may return an
2438 * error in h2c.
2439 */
2440static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
2441{
2442 struct buffer *res;
2443 char str[13];
Willy Tarreau7838a792019-08-12 18:42:03 +02002444 int ret = 0;
2445
2446 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002447
2448 if (h2c_mux_busy(h2c, NULL)) {
2449 h2c->flags |= H2_CF_DEM_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02002450 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002451 }
2452
Willy Tarreau9c218e72019-05-26 10:08:28 +02002453 /* length: 4, type: 8, flags: none */
2454 memcpy(str, "\x00\x00\x04\x08\x00", 5);
2455 write_n32(str + 5, sid);
2456 write_n32(str + 9, increment);
2457
Willy Tarreaubcc45952019-05-26 10:05:50 +02002458 res = br_tail(h2c->mbuf);
Willy Tarreau9c218e72019-05-26 10:08:28 +02002459 retry:
Willy Tarreaubcc45952019-05-26 10:05:50 +02002460 if (!h2_get_buf(h2c, res)) {
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002461 h2c->flags |= H2_CF_MUX_MALLOC;
2462 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002463 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002464 }
2465
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002466 ret = b_istput(res, ist2(str, 13));
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002467 if (unlikely(ret <= 0)) {
2468 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002469 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2470 goto retry;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002471 h2c->flags |= H2_CF_MUX_MFULL;
2472 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002473 }
2474 else {
2475 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002476 ret = 0;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002477 }
2478 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002479 out:
2480 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002481 return ret;
2482}
2483
2484/* try to send pending window update for the connection. It's safe to call it
2485 * with no pending updates. Returns > 0 on success or zero on missing room or
2486 * failure. It may return an error in h2c.
2487 */
2488static int h2c_send_conn_wu(struct h2c *h2c)
2489{
2490 int ret = 1;
2491
Willy Tarreau7838a792019-08-12 18:42:03 +02002492 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2493
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002494 if (h2c->rcvd_c <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002495 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002496
Willy Tarreau97aaa672018-12-23 09:49:04 +01002497 if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
2498 /* increase the advertised connection window to 2G on
2499 * first update.
2500 */
2501 h2c->flags |= H2_CF_WINDOW_OPENED;
2502 h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
2503 }
2504
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002505 /* send WU for the connection */
2506 ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
2507 if (ret > 0)
2508 h2c->rcvd_c = 0;
2509
Willy Tarreau7838a792019-08-12 18:42:03 +02002510 out:
2511 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002512 return ret;
2513}
2514
2515/* try to send pending window update for the current dmux stream. It's safe to
2516 * call it with no pending updates. Returns > 0 on success or zero on missing
2517 * room or failure. It may return an error in h2c.
2518 */
2519static int h2c_send_strm_wu(struct h2c *h2c)
2520{
2521 int ret = 1;
2522
Willy Tarreau7838a792019-08-12 18:42:03 +02002523 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2524
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002525 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002526 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002527
2528 /* send WU for the stream */
2529 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2530 if (ret > 0)
2531 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002532 out:
2533 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002534 return ret;
2535}
2536
Willy Tarreaucf68c782017-10-10 17:11:41 +02002537/* try to send an ACK for a ping frame on the connection. Returns > 0 on
2538 * success, 0 on missing data or one of the h2_status values.
2539 */
2540static int h2c_ack_ping(struct h2c *h2c)
2541{
2542 struct buffer *res;
2543 char str[17];
Willy Tarreau7838a792019-08-12 18:42:03 +02002544 int ret = 0;
2545
2546 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002547
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002548 if (b_data(&h2c->dbuf) < 8)
Willy Tarreau7838a792019-08-12 18:42:03 +02002549 goto out;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002550
2551 if (h2c_mux_busy(h2c, NULL)) {
2552 h2c->flags |= H2_CF_DEM_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02002553 goto out;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002554 }
2555
Willy Tarreaucf68c782017-10-10 17:11:41 +02002556 memcpy(str,
2557 "\x00\x00\x08" /* length : 8 (same payload) */
2558 "\x06" "\x01" /* type : 6, flags : ACK */
2559 "\x00\x00\x00\x00" /* stream ID */, 9);
2560
2561 /* copy the original payload */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002562 h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002563
Willy Tarreau9c218e72019-05-26 10:08:28 +02002564 res = br_tail(h2c->mbuf);
2565 retry:
2566 if (!h2_get_buf(h2c, res)) {
2567 h2c->flags |= H2_CF_MUX_MALLOC;
2568 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02002569 goto out;
Willy Tarreau9c218e72019-05-26 10:08:28 +02002570 }
2571
Willy Tarreauea1b06d2018-07-12 09:02:47 +02002572 ret = b_istput(res, ist2(str, 17));
Willy Tarreaucf68c782017-10-10 17:11:41 +02002573 if (unlikely(ret <= 0)) {
2574 if (!ret) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02002575 if ((res = br_tail_add(h2c->mbuf)) != NULL)
2576 goto retry;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002577 h2c->flags |= H2_CF_MUX_MFULL;
2578 h2c->flags |= H2_CF_DEM_MROOM;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002579 }
2580 else {
2581 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau7838a792019-08-12 18:42:03 +02002582 ret = 0;
Willy Tarreaucf68c782017-10-10 17:11:41 +02002583 }
2584 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002585 out:
2586 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
Willy Tarreaucf68c782017-10-10 17:11:41 +02002587 return ret;
2588}
2589
/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
 * Returns > 0 on success or zero on missing data. It may return an error in
 * h2c or h2s. The caller must have already verified frame length and stream ID
 * validity. Described in RFC7540#6.9.
 */
static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
{
	int32_t inc;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	inc = h2_get_n32(&h2c->dbuf, 0);

	if (h2c->dsi != 0) {
		/* stream window update */

		/* it's not an error to receive WU on a closed stream */
		if (h2s->st == H2_SS_CLOSED)
			goto done;

		/* RFC7540#6.9: a zero increment is a stream protocol error */
		if (!inc) {
			TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}

		/* RFC7540#6.9.1: reject increments overflowing the 31-bit
		 * stream window (checked only when the window is not already
		 * negative from a previous SETTINGS shrink).
		 */
		if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
			TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
			error = H2_ERR_FLOW_CONTROL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}

		h2s->sws += inc;
		/* the stream window became positive again: unblock the stream
		 * and requeue it for sending if it still wants to send.
		 */
		if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
			h2s->flags &= ~H2_SF_BLK_SFCTL;
			LIST_DEL_INIT(&h2s->list);
			if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
			    h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
				LIST_APPEND(&h2c->send_list, &h2s->list);
		}
	}
	else {
		/* connection window update */
		if (!inc) {
			TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto conn_err;
		}

		/* connection window overflow is a connection-level error */
		if (h2c->mws >= 0 && h2c->mws + inc < 0) {
			error = H2_ERR_FLOW_CONTROL_ERROR;
			goto conn_err;
		}

		h2c->mws += inc;
	}

 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 1;

 conn_err:
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 0;

 strm_err:
	/* stream-level error: only reset this stream, the connection lives on */
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
	TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
	return 0;
}
2673
Willy Tarreaue96b0922017-10-30 00:28:29 +01002674/* processes a GOAWAY frame, and signals all streams whose ID is greater than
Willy Tarreaub860c732019-01-30 15:39:55 +01002675 * the last ID. Returns > 0 on success or zero on missing data. The caller must
2676 * have already verified frame length and stream ID validity. Described in
2677 * RFC7540#6.8.
Willy Tarreaue96b0922017-10-30 00:28:29 +01002678 */
2679static int h2c_handle_goaway(struct h2c *h2c)
2680{
Willy Tarreaue96b0922017-10-30 00:28:29 +01002681 int last;
2682
Willy Tarreau7838a792019-08-12 18:42:03 +02002683 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002684 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002685 if (b_data(&h2c->dbuf) < h2c->dfl) {
2686 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002687 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002688 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002689 }
Willy Tarreaue96b0922017-10-30 00:28:29 +01002690
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002691 last = h2_get_n32(&h2c->dbuf, 0);
2692 h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
Willy Tarreau11cc2d62017-12-03 10:27:47 +01002693 if (h2c->last_sid < 0)
2694 h2c->last_sid = last;
Willy Tarreau23482912019-05-07 15:23:14 +02002695 h2_wake_some_streams(h2c, last);
Willy Tarreau7838a792019-08-12 18:42:03 +02002696 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002697 return 1;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002698}
2699
Willy Tarreau92153fc2017-12-03 19:46:19 +01002700/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002701 * invalid. Returns > 0 on success or zero on missing data. It may return an
2702 * error in h2c. The caller must have already verified frame length and stream
2703 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002704 */
2705static int h2c_handle_priority(struct h2c *h2c)
2706{
Willy Tarreau7838a792019-08-12 18:42:03 +02002707 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2708
Willy Tarreau92153fc2017-12-03 19:46:19 +01002709 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002710 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002711 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002712 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002713 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002714 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002715
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002716 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002717 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002718 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002719 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002720 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002721 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002722 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002723 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002724 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002725 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002726}
2727
/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
 * Returns > 0 on success or zero on missing data. The caller must have already
 * verified frame length and stream ID validity. Described in RFC7540#6.4.
 */
static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		return 0;
	}

	/* late RST, already handled */
	if (h2s->st == H2_SS_CLOSED) {
		TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		return 1;
	}

	/* record the peer's error code then close the stream */
	h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
	h2s_close(h2s);

	/* if a conn_stream is still attached, flag it in error and wake the
	 * upper layer so it can notice the abort.
	 */
	if (h2s->endp->cs) {
		cs_ep_set_error(h2s->endp);
		h2s_alert(h2s);
	}

	h2s->flags |= H2_SF_RST_RCVD;
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
	return 1;
}
2761
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002762/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2763 * It may return an error in h2c or h2s. The caller must consider that the
2764 * return value is the new h2s in case one was allocated (most common case).
2765 * Described in RFC7540#6.2. Most of the
Willy Tarreau13278b42017-10-13 19:23:14 +02002766 * errors here are reported as connection errors since it's impossible to
2767 * recover from such errors after the compression context has been altered.
2768 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002769static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
Willy Tarreau13278b42017-10-13 19:23:14 +02002770{
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002771 struct buffer rxbuf = BUF_NULL;
Willy Tarreau4790f7c2019-01-24 11:33:02 +01002772 unsigned long long body_len = 0;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002773 uint32_t flags = 0;
Willy Tarreau13278b42017-10-13 19:23:14 +02002774 int error;
2775
Willy Tarreau7838a792019-08-12 18:42:03 +02002776 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2777
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002778 if (!b_size(&h2c->dbuf)) {
2779 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002780 goto out; // empty buffer
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002781 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002782
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002783 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2784 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002785 goto out; // incomplete frame
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002786 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002787
2788 /* now either the frame is complete or the buffer is complete */
2789 if (h2s->st != H2_SS_IDLE) {
Willy Tarreau88d138e2019-01-02 19:38:14 +01002790 /* The stream exists/existed, this must be a trailers frame */
2791 if (h2s->st != H2_SS_CLOSED) {
Amaury Denoyelle74162742020-12-11 17:53:05 +01002792 error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002793 /* unrecoverable error ? */
2794 if (h2c->st0 >= H2_CS_ERROR)
2795 goto out;
2796
Christopher Faulet485da0b2021-10-08 08:56:00 +02002797 if (error == 0) {
2798 /* Demux not blocked because of the stream, it is an incomplete frame */
2799 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2800 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002801 goto out; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002802 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002803
2804 if (error < 0) {
2805 /* Failed to decode this frame (e.g. too large request)
2806 * but the HPACK decompressor is still synchronized.
2807 */
2808 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
2809 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau88d138e2019-01-02 19:38:14 +01002810 goto out;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002811 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01002812 goto done;
2813 }
Willy Tarreau1f035502019-01-30 11:44:07 +01002814 /* the connection was already killed by an RST, let's consume
2815 * the data and send another RST.
2816 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01002817 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau1f035502019-01-30 11:44:07 +01002818 h2s = (struct h2s*)h2_error_stream;
2819 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002820 }
2821 else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
2822 /* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
2823 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002824 TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau4781b152021-04-06 13:53:36 +02002825 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau22de8d32018-09-05 19:55:58 +02002826 sess_log(h2c->conn->owner);
Willy Tarreau13278b42017-10-13 19:23:14 +02002827 goto conn_err;
2828 }
Willy Tarreau415b1ee2019-01-02 13:59:43 +01002829 else if (h2c->flags & H2_CF_DEM_TOOMANY)
2830 goto out; // IDLE but too many cs still present
Willy Tarreau13278b42017-10-13 19:23:14 +02002831
Amaury Denoyelle74162742020-12-11 17:53:05 +01002832 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002833
Willy Tarreau25919232019-01-03 14:48:18 +01002834 /* unrecoverable error ? */
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002835 if (h2c->st0 >= H2_CS_ERROR)
2836 goto out;
2837
Willy Tarreau25919232019-01-03 14:48:18 +01002838 if (error <= 0) {
Christopher Faulet485da0b2021-10-08 08:56:00 +02002839 if (error == 0) {
2840 /* Demux not blocked because of the stream, it is an incomplete frame */
2841 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2842 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau25919232019-01-03 14:48:18 +01002843 goto out; // missing data
Christopher Faulet485da0b2021-10-08 08:56:00 +02002844 }
Willy Tarreau25919232019-01-03 14:48:18 +01002845
2846 /* Failed to decode this stream (e.g. too large request)
2847 * but the HPACK decompressor is still synchronized.
2848 */
2849 h2s = (struct h2s*)h2_error_stream;
2850 goto send_rst;
2851 }
2852
Willy Tarreau29268e92021-06-17 08:29:14 +02002853 TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);
2854
Willy Tarreau198b5072022-05-12 09:08:51 +02002855 /* Now we cannot roll back and we won't come back here anymore for this
2856 * stream, this stream ID is open.
2857 */
2858 if (h2c->dsi > h2c->max_id)
2859 h2c->max_id = h2c->dsi;
2860
Willy Tarreau22de8d32018-09-05 19:55:58 +02002861 /* Note: we don't emit any other logs below because ff we return
Willy Tarreaua8e49542018-10-03 18:53:55 +02002862 * positively from h2c_frt_stream_new(), the stream will report the error,
2863 * and if we return in error, h2c_frt_stream_new() will emit the error.
Christopher Faulet7d013e72020-12-15 16:56:50 +01002864 *
2865 * Xfer the rxbuf to the stream. On success, the new stream owns the
2866 * rxbuf. On error, it is released here.
Willy Tarreau22de8d32018-09-05 19:55:58 +02002867 */
Amaury Denoyelle90ac6052021-10-18 14:45:49 +02002868 h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
Willy Tarreau13278b42017-10-13 19:23:14 +02002869 if (!h2s) {
Willy Tarreau96a10c22018-12-23 18:30:44 +01002870 h2s = (struct h2s*)h2_refused_stream;
2871 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002872 }
2873
2874 h2s->st = H2_SS_OPEN;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002875 h2s->flags |= flags;
Willy Tarreau1915ca22019-01-24 11:49:37 +01002876 h2s->body_len = body_len;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002877
Willy Tarreau88d138e2019-01-02 19:38:14 +01002878 done:
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002879 if (h2c->dff & H2_F_HEADERS_END_STREAM)
Willy Tarreau13278b42017-10-13 19:23:14 +02002880 h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002881
2882 if (h2s->flags & H2_SF_ES_RCVD) {
Willy Tarreaufc10f592019-01-30 19:28:32 +01002883 if (h2s->st == H2_SS_OPEN)
2884 h2s->st = H2_SS_HREM;
2885 else
2886 h2s_close(h2s);
Willy Tarreau13278b42017-10-13 19:23:14 +02002887 }
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002888 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002889
2890 conn_err:
2891 h2c_error(h2c, error);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002892 goto out;
Willy Tarreau13278b42017-10-13 19:23:14 +02002893
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002894 out:
2895 h2_release_buf(h2c, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002896 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002897 return NULL;
Willy Tarreau96a10c22018-12-23 18:30:44 +01002898
2899 send_rst:
2900 /* make the demux send an RST for the current stream. We may only
2901 * do this if we're certain that the HEADERS frame was properly
2902 * decompressed so that the HPACK decoder is still kept up to date.
2903 */
2904 h2_release_buf(h2c, &rxbuf);
2905 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002906
Willy Tarreau022e5e52020-09-10 09:33:15 +02002907 TRACE_USER("rejected H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002908 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau96a10c22018-12-23 18:30:44 +01002909 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002910}
2911
/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
 * errors here are reported as connection errors since it's impossible to
 * recover from such errors after the compression context has been altered.
 * This is the backend-side variant: the frame carries a response, so the
 * stream already exists. When the stream was already closed by an RST, the
 * headers are still decoded into local dummies (<rxbuf>, <flags>, <body_len>)
 * to keep the HPACK decoder synchronized, then an RST is sent back.
 */
static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer rxbuf = BUF_NULL;       /* scratch buffer, only used on the closed-stream path */
	unsigned long long body_len = 0;      /* dummy sink for content-length on that path */
	uint32_t flags = 0;                   /* dummy sink for decoded stream flags on that path */
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	if (h2s->st != H2_SS_CLOSED) {
		error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
	}
	else {
		/* the connection was already killed by an RST, let's consume
		 * the data and send another RST.
		 */
		error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
		h2s = (struct h2s*)h2_error_stream;
		h2c->st0 = H2_CS_FRAME_E;
		goto send_rst;
	}

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#5.1 */
		TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto fail; // missing data
		}

		/* stream error : send RST_STREAM */
		TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (h2c->dff & H2_F_HEADERS_END_STREAM)
		h2s->flags |= H2_SF_ES_RCVD;

	/* an endpoint error takes precedence over normal ES-driven transitions */
	if (h2s->endp->flags & CS_EP_ERROR && h2s->st < H2_SS_ERROR)
		h2s->st = H2_SS_ERROR;
	else if (h2s->flags & H2_SF_ES_RCVD) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* Unblock busy server h2s waiting for the response headers to validate
	 * the tunnel establishment or the end of the response of an aborted
	 * tunnel
	 */
	if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
3019
/* processes a DATA frame. Returns > 0 on success or zero on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.1.
 * The frame to process is described by h2c->{dsi,dfl,dff,dpl} (stream id,
 * frame length, flags, pad length), already parsed by the demux loop.
 */
static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
{
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* note that empty DATA frames are perfectly valid and sometimes used
	 * to signal an end of stream (with the ES flag).
	 */

	if (!b_size(&h2c->dbuf) && h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#6.1 */
		error = H2_ERR_STREAM_CLOSED;
		goto strm_err;
	}

	/* payload (frame length minus padding) must never exceed the announced
	 * content-length that remains to be received on this stream.
	 */
	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
		/* RFC7540#8.1.2 */
		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	if (!(h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
		/* a tunnel attempt was aborted but the client still tries to send some raw data.
		 * Thus the stream is closed with the CANCEL error. Here we take care it is not
		 * an empty DATA frame with the ES flag. The error is only handled if ES was
		 * already sent to the client because depending on the scheduling, these data may
		 * have been sent before the server response but not handled here.
		 */
		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (!h2_frt_transfer_data(h2s))
		goto fail;

	/* call the upper layers to process the frame, then let the upper layer
	 * notify the stream about any change.
	 */
	if (!h2s->endp->cs) {
		/* The upper layer has already closed, this may happen on
		 * 4xx/redirects during POST, or when receiving a response
		 * from an H2 server after the client has aborted.
		 */
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st >= H2_SS_ERROR) {
		/* stream error : send RST_STREAM */
		h2c->st0 = H2_CS_FRAME_E;
	}

	/* check for completion : the callee will change this to FRAME_A or
	 * FRAME_H once done.
	 */
	if (h2c->st0 == H2_CS_FRAME_P)
		goto fail;

	/* last frame */
	if (h2c->dff & H2_F_DATA_END_STREAM) {
		h2s->flags |= H2_SF_ES_RCVD;
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);

		/* ES received while body_len still announces pending bytes */
		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
			/* RFC7540#8.1.2 */
			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}
	}

	/* Unblock busy server h2s waiting for the end of the response for an
	 * aborted tunnel
	 */
	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;

 strm_err:
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
3137
/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
 * valid for the current stream state. This is needed only after parsing the
 * frame header but in practice it can be performed at any time during
 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 * Each branch below encodes one RFC7540 stream-state rule; deviations from
 * the letter of the spec are documented inline.
 */
static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);

	if (h2s->st == H2_SS_IDLE &&
	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
		 * this state MUST be treated as a connection error
		 */
		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
			/* only log if no other stream can report the error */
			sess_log(h2c->conn->owner);
		}
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
		/* only PUSH_PROMISE would be permitted here */
		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
		 * this state MUST be treated as a stream error.
		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
		 * PUSH_PROMISE/CONTINUATION cause connection errors.
		 */
		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* header-bearing frame type: connection error */
			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		else {
			/* any other frame type: stream error only */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		}
		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* Below the management of frames received in closed state is a
	 * bit hackish because the spec makes strong differences between
	 * streams closed by receiving RST, sending RST, and seeing ES
	 * in both directions. In addition to this, the creation of a
	 * new stream reusing the identifier of a closed one will be
	 * detected here. Given that we cannot keep track of all closed
	 * streams forever, we consider that unknown closed streams were
	 * closed on RST received, which allows us to respond with an
	 * RST without breaking the connection (eg: to abort a transfer).
	 * Some frames have to be silently ignored as well.
	 */
	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* #5.1.1: The identifier of a newly
			 * established stream MUST be numerically
			 * greater than all streams that the initiating
			 * endpoint has opened or reserved. This
			 * governs streams that are opened using a
			 * HEADERS frame and streams that are reserved
			 * using PUSH_PROMISE. An endpoint that
			 * receives an unexpected stream identifier
			 * MUST respond with a connection error.
			 */
			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		if (h2s->flags & H2_SF_RST_RCVD &&
		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
			/* RFC7540#5.1:closed: an endpoint that
			 * receives any frame other than PRIORITY after
			 * receiving a RST_STREAM MUST treat that as a
			 * stream error of type STREAM_CLOSED.
			 *
			 * Note that old streams fall into this category
			 * and will lead to an RST being sent.
			 *
			 * However, we cannot generalize this to all frame types. Those
			 * carrying compression state must still be processed before
			 * being dropped or we'll desynchronize the decoder. This can
			 * happen with request trailers received after sending an
			 * RST_STREAM, or with header/trailers responses received after
			 * sending RST_STREAM (aborted stream).
			 *
			 * In addition, since our CLOSED streams always carry the
			 * RST_RCVD bit, we don't want to accidentally catch valid
			 * frames for a closed stream, i.e. RST/PRIO/WU.
			 */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
			h2c->st0 = H2_CS_FRAME_E;
			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		/* RFC7540#5.1:closed: if this state is reached as a
		 * result of sending a RST_STREAM frame, the peer that
		 * receives the RST_STREAM might have already sent
		 * frames on the stream that cannot be withdrawn. An
		 * endpoint MUST ignore frames that it receives on
		 * closed streams after it has sent a RST_STREAM
		 * frame. An endpoint MAY choose to limit the period
		 * over which it ignores frames and treat frames that
		 * arrive after this time as being in error.
		 */
		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
			/* RFC7540#5.1:closed: any frame other than
			 * PRIO/WU/RST in this state MUST be treated as
			 * a connection error
			 */
			if (h2c->dft != H2_FT_RST_STREAM &&
			    h2c->dft != H2_FT_PRIORITY &&
			    h2c->dft != H2_FT_WINDOW_UPDATE) {
				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
				return 0;
			}
		}
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
	return 1;
}
3274
Willy Tarreaubc933932017-10-09 16:21:43 +02003275/* process Rx frames to be demultiplexed */
3276static void h2_process_demux(struct h2c *h2c)
3277{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003278 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003279 struct h2_fh hdr;
3280 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003281 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003282
Willy Tarreau7838a792019-08-12 18:42:03 +02003283 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3284
Willy Tarreau081d4722017-05-16 21:51:05 +02003285 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003286 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003287
3288 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3289 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003290 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003291 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003292 goto out;
3293
Willy Tarreau52eed752017-09-22 15:05:09 +02003294 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3295 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003296 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003297 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003298 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauee4684f2021-06-17 08:08:48 +02003299 if (b_data(&h2c->dbuf) ||
Christopher Faulet3f35da22021-07-26 10:18:35 +02003300 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauee4684f2021-06-17 08:08:48 +02003301 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003302 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003303 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003304 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003305 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003306
3307 h2c->max_id = 0;
3308 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau7838a792019-08-12 18:42:03 +02003309 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003310 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003311
3312 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003313 /* ensure that what is pending is a valid SETTINGS frame
3314 * without an ACK.
3315 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003316 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003317 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003318 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003319 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003320 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003321 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003322 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003323 if (!(h2c->flags & H2_CF_IS_BACK))
3324 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003325 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003326 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003327 }
3328
3329 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3330 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003331 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003332 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3333 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003334 if (!(h2c->flags & H2_CF_IS_BACK))
3335 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003336 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003337 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003338 }
3339
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003340 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003341 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003342 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003343 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3344 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003345 if (!(h2c->flags & H2_CF_IS_BACK))
3346 sess_log(h2c->conn->owner);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003347 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003348 }
3349
Willy Tarreau3bf69182018-12-21 15:34:50 +01003350 /* that's OK, switch to FRAME_P to process it. This is
3351 * a SETTINGS frame whose header has already been
3352 * deleted above.
3353 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003354 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003355 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003356 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003357 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003358 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003359
3360 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003361 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003362 int ret = 0;
3363
Willy Tarreau7838a792019-08-12 18:42:03 +02003364 if (!b_data(&h2c->dbuf)) {
3365 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003366 h2c->flags |= H2_CF_DEM_SHORT_READ;
3367 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003368 }
3369
3370 if (h2c->st0 >= H2_CS_ERROR) {
3371 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003372 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003373 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003374
3375 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau30d05f32019-08-06 15:49:51 +02003376 h2c->rcvd_s = 0;
3377
Willy Tarreau7838a792019-08-12 18:42:03 +02003378 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003379 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3380 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003381 break;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003382 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003383
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003384 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003385 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003386 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003387 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003388 /* only log if no other stream can report the error */
3389 sess_log(h2c->conn->owner);
3390 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003391 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003392 break;
3393 }
3394
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003395 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003396 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3397 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3398 * we read the pad length and drop it from the remaining
3399 * payload (one byte + the 9 remaining ones = 10 total
3400 * removed), so we have a frame payload starting after the
3401 * pad len. Flow controlled frames (DATA) also count the
3402 * padlen in the flow control, so it must be adjusted.
3403 */
3404 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003405 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003406 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003407 if (!(h2c->flags & H2_CF_IS_BACK))
3408 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003409 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003410 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003411 }
3412 hdr.len--;
3413
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003414 if (b_data(&h2c->dbuf) < 10) {
3415 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003416 break; // missing padlen
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003417 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003418
3419 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3420
3421 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003422 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003423 /* RFC7540#6.1 : pad length = length of
3424 * frame payload or greater => error.
3425 */
3426 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003427 if (!(h2c->flags & H2_CF_IS_BACK))
3428 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003429 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003430 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003431 }
3432
3433 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3434 h2c->rcvd_c++;
3435 h2c->rcvd_s++;
3436 }
3437 b_del(&h2c->dbuf, 1);
3438 }
3439 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003440
3441 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003442 h2c->dfl = hdr.len;
3443 h2c->dsi = hdr.sid;
3444 h2c->dft = hdr.ft;
3445 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003446 h2c->dpl = padlen;
Willy Tarreau73db4342019-09-25 07:28:44 +02003447 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003448 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003449
3450 /* check for minimum basic frame format validity */
3451 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3452 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003453 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003454 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003455 if (!(h2c->flags & H2_CF_IS_BACK))
3456 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003457 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003458 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003459 }
Willy Tarreau15a47332022-03-18 15:57:34 +01003460
3461 /* transition to HEADERS frame ends the keep-alive idle
3462 * timer and starts the http-request idle delay.
3463 */
3464 if (hdr.ft == H2_FT_HEADERS)
3465 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003466 }
3467
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003468 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3469 * H2_CS_FRAME_P indicates an incomplete previous operation
3470 * (most often the first attempt) and requires some validity
3471 * checks for the frame and the current state. The two other
3472 * ones are set after completion (or abortion) and must skip
3473 * validity checks.
3474 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003475 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3476
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02003477 if (tmp_h2s != h2s && h2s && h2s->endp->cs &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003478 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003479 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003480 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003481 (h2s->flags & H2_SF_ES_RCVD) ||
Christopher Fauletb041b232022-03-24 10:27:02 +01003482 (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003483 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003484 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Christopher Fauletb041b232022-03-24 10:27:02 +01003485 h2s->endp->flags |= CS_EP_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003486 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003487 }
3488 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003489
Willy Tarreau63864812019-08-07 14:25:20 +02003490 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003491 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3492 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003493 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003494 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003495
Willy Tarreau7e98c052017-10-10 15:56:59 +02003496 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003497 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003498 if (h2c->st0 == H2_CS_FRAME_P) {
3499 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003500 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003501 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003502 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003503
Willy Tarreau7838a792019-08-12 18:42:03 +02003504 if (h2c->st0 == H2_CS_FRAME_A) {
3505 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003506 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003507 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003508 break;
3509
Willy Tarreaucf68c782017-10-10 17:11:41 +02003510 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003511 if (h2c->st0 == H2_CS_FRAME_P) {
3512 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003513 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003514 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003515
Willy Tarreau7838a792019-08-12 18:42:03 +02003516 if (h2c->st0 == H2_CS_FRAME_A) {
3517 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003518 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003519 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003520 break;
3521
Willy Tarreau26f95952017-07-27 17:18:30 +02003522 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003523 if (h2c->st0 == H2_CS_FRAME_P) {
3524 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003525 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003526 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003527 break;
3528
Willy Tarreau61290ec2017-10-17 08:19:21 +02003529 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003530 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003531 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3532 * frames' parsers consume all following CONTINUATION
3533 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003534 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003535 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003536 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003537 if (!(h2c->flags & H2_CF_IS_BACK))
3538 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003539 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003540 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003541
Willy Tarreau13278b42017-10-13 19:23:14 +02003542 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003543 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003544 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003545 if (h2c->flags & H2_CF_IS_BACK)
3546 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3547 else
3548 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003549 if (tmp_h2s) {
3550 h2s = tmp_h2s;
3551 ret = 1;
3552 }
3553 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003554 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003555 break;
3556
Willy Tarreau454f9052017-10-26 19:40:35 +02003557 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003558 if (h2c->st0 == H2_CS_FRAME_P) {
3559 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003560 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003561 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003562 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003563
Willy Tarreau7838a792019-08-12 18:42:03 +02003564 if (h2c->st0 == H2_CS_FRAME_A) {
3565 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
Willy Tarreau454f9052017-10-26 19:40:35 +02003566 ret = h2c_send_strm_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003567 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003568 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003569
Willy Tarreau92153fc2017-12-03 19:46:19 +01003570 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003571 if (h2c->st0 == H2_CS_FRAME_P) {
3572 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003573 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003574 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003575 break;
3576
Willy Tarreaucd234e92017-08-18 10:59:39 +02003577 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003578 if (h2c->st0 == H2_CS_FRAME_P) {
3579 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003580 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003581 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003582 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003583 break;
3584
Willy Tarreaue96b0922017-10-30 00:28:29 +01003585 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003586 if (h2c->st0 == H2_CS_FRAME_P) {
3587 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003588 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003589 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003590 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003591 break;
3592
Willy Tarreau1c661982017-10-30 13:52:01 +01003593 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003594 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003595 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003596 /* drop frames that we ignore. They may be larger than
3597 * the buffer so we drain all of their contents until
3598 * we reach the end.
3599 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003600 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3601 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003602 h2c->dfl -= ret;
3603 ret = h2c->dfl == 0;
3604 }
3605
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003606 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003607 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003608 if (h2s->st == H2_SS_ERROR) {
3609 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003610 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003611 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003612
Willy Tarreau7838a792019-08-12 18:42:03 +02003613 if (h2c->st0 == H2_CS_FRAME_E) {
3614 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003615 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003616 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003617
Willy Tarreau7e98c052017-10-10 15:56:59 +02003618 /* error or missing data condition met above ? */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003619 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003620 break;
3621
3622 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003623 if (h2c->dfl)
3624 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003625 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3626 b_del(&h2c->dbuf, ret);
3627 h2c->dfl -= ret;
3628 if (!h2c->dfl) {
3629 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3630 h2c->st0 = H2_CS_FRAME_H;
3631 h2c->dsi = -1;
3632 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003633 }
3634 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003635
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003636 if (h2c->rcvd_c > 0 &&
Willy Tarreau7838a792019-08-12 18:42:03 +02003637 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
3638 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003639 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003640 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003641
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003642 done:
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003643 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3644 if (h2c->flags & H2_CF_RCVD_SHUT)
3645 h2c->flags |= H2_CF_END_REACHED;
3646 }
3647
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02003648 if (h2s && h2s->endp->cs &&
Willy Tarreau567beb82018-12-18 16:52:44 +01003649 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003650 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003651 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003652 (h2s->flags & H2_SF_ES_RCVD) ||
Christopher Fauletb041b232022-03-24 10:27:02 +01003653 (h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003654 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003655 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Christopher Fauletb041b232022-03-24 10:27:02 +01003656 h2s->endp->flags |= CS_EP_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003657 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003658 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003659
Willy Tarreau7838a792019-08-12 18:42:03 +02003660 if (old_iw != h2c->miw) {
3661 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003662 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003663 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003664
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003665 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003666 out:
3667 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003668 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003669}
3670
Willy Tarreau989539b2020-01-10 17:01:29 +01003671/* resume each h2s eligible for sending in list head <head> */
3672static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3673{
3674 struct h2s *h2s, *h2s_back;
3675
3676 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3677
3678 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3679 if (h2c->mws <= 0 ||
3680 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3681 h2c->st0 >= H2_CS_ERROR)
3682 break;
3683
3684 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003685
Willy Tarreaud9464162020-01-10 18:25:07 +01003686 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003687 continue;
3688
Willy Tarreau5723f292020-01-10 15:16:57 +01003689 /* If the sender changed his mind and unsubscribed, let's just
3690 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003691 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003692 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3693 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003694 LIST_DEL_INIT(&h2s->list);
3695 continue;
3696 }
3697
Willy Tarreauf96508a2020-01-10 11:12:48 +01003698 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003699 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003700 tasklet_wakeup(h2s->subs->tasklet);
3701 h2s->subs->events &= ~SUB_RETRY_SEND;
3702 if (!h2s->subs->events)
3703 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003704 }
3705 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3706 tasklet_wakeup(h2s->shut_tl);
3707 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003708 }
3709
3710 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3711}
3712
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end.
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	/* before the first frame header is reached, a backend connection may
	 * still have to emit its preface; frontends just wait for the peer.
	 */
	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates; stream WU first,
	 * then connection WU, each only when the mux output is not blocked.
	 */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			/* only try to emit a GOAWAY if at least one stream ID
			 * was ever used; retry later if the mux is blocked.
			 */
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3772
Willy Tarreau62f52692017-10-08 23:01:42 +02003773
/* Attempt to read data, and subscribe if none available.
 * The function returns 1 if data has been received, otherwise zero.
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* already subscribed for receive: report whether data is pending */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		/* demux buffer allocation failed; remember it to retry later */
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	/* a shutdown was already seen on input, nothing more will come */
	if (h2c->flags & H2_CF_RCVD_SHUT) {
		TRACE_DEVEL("leaving on rcvd_shut", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		/* nothing came in: subscribe for the next receive event */
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		/* progress was made, clear the short-read condition */
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}

	if (!b_data(buf)) {
		/* still empty: give the buffer back to the pool */
		h2_release_buf(h2c, &h2c->dbuf);
		TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
		return (conn->flags & CO_FL_ERROR || conn_xprt_read0_pending(conn));
	}

	if (b_data(buf) == buf->size) {
		/* demux buffer is full; demuxing must drain it first */
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn);
}
3852
/* Try to send data if possible.
 * The function returns 1 if data have been sent, otherwise zero.
 */
static int h2_send(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	int done;
	int sent = 0;

	TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);

	if (conn->flags & CO_FL_ERROR) {
		TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
		return 1;
	}

	if (conn->flags & CO_FL_WAIT_XPRT) {
		/* a handshake was requested */
		goto schedule;
	}

	/* This loop is quite simple : it tries to fill as much as it can from
	 * pending streams into the existing buffer until it's reportedly full
	 * or the end of send requests is reached. Then it tries to send this
	 * buffer's contents out, marks it not full if at least one byte could
	 * be sent, and tries again.
	 *
	 * The snd_buf() function normally takes a "flags" argument which may
	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
	 * data immediately comes and CO_SFL_STREAMER to indicate that the
	 * connection is streaming lots of data (used to increase TLS record
	 * size at the expense of latency). The former can be sent any time
	 * there's a buffer full flag, as it indicates at least one stream
	 * attempted to send and failed so there are pending data. An
	 * alternative would be to set it as long as there's an active stream
	 * but that would be problematic for ACKs until we have an absolute
	 * guarantee that all waiters have at least one byte to send. The
	 * latter should possibly not be set for now.
	 */

	done = 0;
	while (!done) {
		unsigned int flags = 0;
		unsigned int released = 0;
		struct buffer *buf;

		/* fill as much as we can into the current buffer */
		while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
			done = h2_process_mux(h2c);

		if (h2c->flags & H2_CF_MUX_MALLOC)
			done = 1; // we won't go further without extra buffers

		if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
		    (h2c->flags & H2_CF_GOAWAY_FAILED))
			break;

		if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))
			flags |= CO_SFL_MSG_MORE;

		/* flush the mbuf ring head-first, freeing fully-sent buffers */
		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), flags);
				if (!ret) {
					/* transport accepted nothing: stop for now */
					done = 1;
					break;
				}
				sent = 1;
				TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
				b_del(buf, ret);
				if (b_data(buf)) {
					/* partial write: keep the remainder for later */
					done = 1;
					break;
				}
			}
			b_free(buf);
			released++;
		}

		if (released)
			offer_buffers(NULL, released);

		/* wrote at least one byte, the buffer is not full anymore */
		if (sent)
			h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
	}

	if (conn->flags & CO_FL_SOCK_WR_SH) {
		/* output closed, nothing to send, clear the buffer to release it */
		b_reset(br_tail(h2c->mbuf));
	}
	/* We're not full anymore, so we can wake any task that are waiting
	 * for us.
	 */
	if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H)
		h2_resume_each_sending_h2s(h2c, &h2c->send_list);

	/* We're done, no more to send */
	if (!br_data(h2c->mbuf)) {
		TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
		return sent;
	}
schedule:
	/* data still pending: subscribe for the next send event */
	if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
		TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
	}

	TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
	return sent;
}
3964
/* this is the tasklet referenced in h2c->wait_event.tasklet. It runs the
 * subscribed I/O (send and/or recv) then the h2 state machine when needed.
 * When TASK_F_USR1 is set, the tasklet was idling on an idle connection
 * which another thread may have stolen, so ownership must be re-checked
 * under the idle conns lock before touching anything. Returns the task to
 * requeue, or NULL when the task was freed (takeover) or the connection
 * was destroyed by h2_process().
 */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
{
	struct connection *conn;
	struct tasklet *tl = (struct tasklet *)t;
	int conn_in_list;
	struct h2c *h2c = ctx;
	int ret = 0;

	if (state & TASK_F_USR1) {
		/* the tasklet was idling on an idle connection, it might have
		 * been stolen, let's be careful!
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (t->context == NULL) {
			/* The connection has been taken over by another thread,
			 * we're no longer responsible for it, so just free the
			 * tasklet, and do nothing.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			tasklet_free(tl);
			t = NULL;
			goto leave;
		}
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);

		/* remember which idle list (safe/idle) the connection was in */
		conn_in_list = conn->flags & CO_FL_LIST_MASK;

		/* Remove the connection from the list, to be sure nobody attempts
		 * to use it while we handle the I/O events
		 */
		if (conn_in_list)
			conn_delete_from_tree(&conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	} else {
		/* we're certain the connection was not in an idle list */
		conn = h2c->conn;
		TRACE_ENTER(H2_EV_H2C_WAKE, conn);
		conn_in_list = 0;
	}

	/* only perform each direction's I/O if no retry subscription is pending */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		ret = h2_send(h2c);
	if (!(h2c->wait_event.events & SUB_RETRY_RECV))
		ret |= h2_recv(h2c);
	if (ret || b_data(&h2c->dbuf))
		ret = h2_process(h2c);

	/* If we were in an idle list, we want to add it back into it,
	 * unless h2_process() returned -1, which mean it has destroyed
	 * the connection (testing !ret is enough, if h2_process() wasn't
	 * called then ret will be 0 anyway.
	 */
	if (ret < 0)
		t = NULL;

	if (!ret && conn_in_list) {
		struct server *srv = objt_server(conn->target);

		/* re-insert into the same idle tree (safe or idle) it came from */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (conn_in_list == CO_FL_SAFE_LIST)
			ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
		else
			ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return t;
}
Willy Tarreaua2af5122017-10-09 11:56:46 +02004038
/* callback called on any event by the connection handler.
 * It applies changes and returns zero, or < 0 if it wants immediate
 * destruction of the connection (which normally does not happen in h2).
 */
static int h2_process(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_WAKE, conn);

	/* demux pending input unless demuxing is blocked; on connection
	 * error the input buffer is simply discarded.
	 */
	if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
	    (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
		h2_process_demux(h2c);

		if (h2c->st0 >= H2_CS_ERROR || conn->flags & CO_FL_ERROR)
			b_reset(&h2c->dbuf);

		if (!b_full(&h2c->dbuf))
			h2c->flags &= ~H2_CF_DEM_DFULL;
	}
	h2_send(h2c);

	/* frontend connection on a disabled/stopped proxy: consider sending
	 * a GOAWAY to initiate a graceful shutdown.
	 */
	if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
		int send_goaway = 1;
		/* If a close-spread-time option is set, we want to avoid
		 * closing all the active HTTP2 connections at once so we add a
		 * random factor that will spread the closing.
		 */
		if (tick_isset(global.close_spread_end)) {
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* This should increase the closing rate the
				 * further along the window we are. */
				send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
			}
		}
		else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
			send_goaway = 0; /* let the client close his connection himself */
		/* frontend is stopping, reload likely in progress, let's try
		 * to announce a graceful shutdown if not yet done. We don't
		 * care if it fails, it will be tried again later.
		 */
		if (send_goaway) {
			TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
			if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
				if (h2c->last_sid < 0)
					h2c->last_sid = (1U << 31) - 1;
				h2c_send_goaway_error(h2c, NULL);
			}
		}
	}

	/*
	 * If we received early data, and the handshake is done, wake
	 * any stream that was waiting for it.
	 */
	if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
		struct eb32_node *node;
		struct h2s *h2s;

		h2c->flags |= H2_CF_WAIT_FOR_HS;
		node = eb32_lookup_ge(&h2c->streams_by_id, 1);

		while (node) {
			h2s = container_of(node, struct h2s, by_id);
			if (h2s->endp->flags & CS_EP_WAIT_FOR_HS)
				h2s_notify_recv(h2s);
			node = eb32_next(node);
		}
	}

	/* terminal conditions: connection error, read0 pending, fatal h2
	 * error, failed GOAWAY, or all streams past the announced last_sid.
	 */
	if (conn->flags & CO_FL_ERROR || h2c_read0_pending(h2c) ||
	    h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
	    (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
	     h2c->max_id >= h2c->last_sid)) {
		h2_wake_some_streams(h2c, 0);

		if (eb_is_empty(&h2c->streams_by_id)) {
			/* no more stream, kill the connection now */
			h2_release(h2c);
			TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
			return -1;
		}

		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}
	else if (h2c->st0 == H2_CS_ERROR) {
		/* connections in error must be removed from the idle lists */
		if (conn->flags & CO_FL_LIST_MASK) {
			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			conn_delete_from_tree(&conn->hash_node->node);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}
	}

	/* release the demux buffer if it's empty */
	if (!b_data(&h2c->dbuf))
		h2_release_buf(h2c, &h2c->dbuf);

	/* release the mux buffers when the output side is closed/broken, or
	 * when nothing remains to be sent and nothing is blocked on them.
	 */
	if ((conn->flags & CO_FL_SOCK_WR_SH) ||
	    h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
	    (h2c->st0 != H2_CS_ERROR &&
	     !br_data(h2c->mbuf) &&
	     (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
	     ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
		h2_release_mbuf(h2c);

	h2c_update_timeout(h2c);
	h2_send(h2c);
	TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
	return 0;
}
4156
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004157/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004158static int h2_wake(struct connection *conn)
4159{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004160 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02004161 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004162
Willy Tarreau7838a792019-08-12 18:42:03 +02004163 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4164 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01004165 if (ret >= 0)
4166 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02004167 TRACE_LEAVE(H2_EV_H2C_WAKE);
4168 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004169}
4170
/* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed. If the
 * MUX buffer still has lying data or is not allocatable, the connection is
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * GOAWAY frame.
 * Returns the task when it must be requeued (not expired or idle streams),
 * NULL once the task has been destroyed.
 */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
{
	struct h2c *h2c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);

	if (h2c) {
		/* Make sure nobody stole the connection from us */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

		/* Somebody already stole the connection from us, so we should not
		 * free it, we just have to free the task.
		 */
		if (!t->context) {
			h2c = NULL;
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			goto do_leave;
		}


		if (!expired) {
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
			return t;
		}

		if (!h2c_may_expire(h2c)) {
			/* we do still have streams but all of them are idle, waiting
			 * for the data layer, so we must not enforce the timeout here.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			t->expire = TICK_ETERNITY;
			return t;
		}

		/* We're about to destroy the connection, so make sure nobody attempts
		 * to steal it from us.
		 */
		if (h2c->conn->flags & CO_FL_LIST_MASK)
			conn_delete_from_tree(&h2c->conn->hash_node->node);

		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

do_leave:
	task_destroy(t);

	if (!h2c) {
		/* resources were already deleted */
		TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
		return NULL;
	}

	h2c->task = NULL;
	h2c_error(h2c, H2_ERR_NO_ERROR);
	h2_wake_some_streams(h2c, 0);

	if (br_data(h2c->mbuf)) {
		/* don't even try to send a GOAWAY, the buffer is stuck */
		h2c->flags |= H2_CF_GOAWAY_FAILED;
	}

	/* try to send but no need to insist */
	h2c->last_sid = h2c->max_id;
	if (h2c_send_goaway_error(h2c, NULL) <= 0)
		h2c->flags |= H2_CF_GOAWAY_FAILED;

	/* best-effort flush of the pending output buffers, stopping on the
	 * first incomplete write.
	 */
	if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
		unsigned int released = 0;
		struct buffer *buf;

		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
				if (!ret)
					break;
				b_del(buf, ret);
				if (b_data(buf))
					break;
				b_free(buf);
				released++;
			}
		}

		if (released)
			offer_buffers(NULL, released);
	}

	/* in any case this connection must not be considered idle anymore */
	if (h2c->conn->flags & CO_FL_LIST_MASK) {
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(&h2c->conn->hash_node->node);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}

	/* either we can release everything now or it will be done later once
	 * the last stream closes.
	 */
	if (eb_is_empty(&h2c->streams_by_id))
		h2_release(h2c);

	TRACE_LEAVE(H2_EV_H2C_WAKE);
	return NULL;
}
4282
4283
Willy Tarreau62f52692017-10-08 23:01:42 +02004284/*******************************************/
4285/* functions below are used by the streams */
4286/*******************************************/
4287
/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 * Creates a new backend h2s bound to <endp>'s conn_stream and <sess>, and
 * marks the connection as no longer idle. Returns 0 on success, -1 if the
 * stream could not be created.
 */
static int h2_attach(struct connection *conn, struct cs_endpoint *endp, struct session *sess)
{
	struct h2s *h2s;
	struct h2c *h2c = conn->ctx;

	TRACE_ENTER(H2_EV_H2S_NEW, conn);
	h2s = h2c_bck_stream_new(h2c, endp->cs, sess);
	if (!h2s) {
		TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
		return -1;
	}

	/* the connection is not idle anymore, let's mark this */
	HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
	xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);

	TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
	return 0;
}
4311
Willy Tarreaufafd3982018-11-18 21:29:20 +01004312/* Retrieves the first valid conn_stream from this connection, or returns NULL.
4313 * We have to scan because we may have some orphan streams. It might be
4314 * beneficial to scan backwards from the end to reduce the likeliness to find
4315 * orphans.
4316 */
Christopher Faulet64b8d332022-04-01 13:21:41 +02004317static struct conn_stream *h2_get_first_cs(const struct connection *conn)
Willy Tarreaufafd3982018-11-18 21:29:20 +01004318{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004319 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004320 struct h2s *h2s;
4321 struct eb32_node *node;
4322
4323 node = eb32_first(&h2c->streams_by_id);
4324 while (node) {
4325 h2s = container_of(node, struct h2s, by_id);
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02004326 if (h2s->endp->cs)
4327 return h2s->endp->cs;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004328 node = eb32_next(node);
4329 }
4330 return NULL;
4331}
4332
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004333static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4334{
4335 int ret = 0;
4336 struct h2c *h2c = conn->ctx;
4337
4338 switch (mux_ctl) {
4339 case MUX_STATUS:
4340 /* Only consider the mux to be ready if we're done with
4341 * the preface and settings, and we had no error.
4342 */
4343 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4344 ret |= MUX_STATUS_READY;
4345 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004346 case MUX_EXIT_STATUS:
4347 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004348 default:
4349 return -1;
4350 }
4351}
4352
/*
 * Destroy the mux and the associated connection, if it is no longer used.
 * Only effective once no stream remains attached; otherwise the release is
 * deferred to the last detaching stream.
 */
static void h2_destroy(void *ctx)
{
	struct h2c *h2c = ctx;

	TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
	if (eb_is_empty(&h2c->streams_by_id)) {
		/* sanity check: the connection must still point to this mux */
		BUG_ON(h2c->conn->ctx != h2c);
		h2_release(h2c);
	}
	TRACE_LEAVE(H2_EV_H2C_END);
}
4367
/*
 * Detach the stream from the connection and possibly release the connection.
 * For backend connections, this is also where the connection is handed over
 * to the session's private list or the server's idle/avail trees for reuse.
 */
static void h2_detach(struct cs_endpoint *endp)
{
	struct h2s *h2s = endp->target;
	struct h2c *h2c;
	struct session *sess;

	TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);

	if (!h2s) {
		TRACE_LEAVE(H2_EV_STRM_END);
		return;
	}

	/* there's no txbuf so we're certain not to be able to send anything */
	h2s->flags &= ~H2_SF_NOTIFIED;

	sess = h2s->sess;
	h2c = h2s->h2c;
	h2c->nb_cs--;
	/* record when the connection became idle (last conn_stream gone) */
	if (!h2c->nb_cs)
		h2c->idle_start = now_ms;

	if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
	    !h2_frt_has_too_many_cs(h2c)) {
		/* frontend connection was blocking new streams creation */
		h2c->flags &= ~H2_CF_DEM_TOOMANY;
		h2c_restart_reading(h2c, 1);
	}

	/* this stream may be blocked waiting for some data to leave (possibly
	 * an ES or RST frame), so orphan it in this case.
	 */
	if (!(h2c->conn->flags & CO_FL_ERROR) &&
	    (h2c->st0 < H2_CS_ERROR) &&
	    (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
	    ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
		TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
		/* refresh the timeout if none was active, so that the last
		 * leaving stream may arm it.
		 */
		if (!tick_isset(h2c->task->expire))
			h2c_update_timeout(h2c);
		return;
	}

	if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) ||
	    (h2c->flags & H2_CF_MUX_BLOCK_ANY && h2s->id == h2c->msi)) {
		/* unblock the connection if it was blocked on this
		 * stream.
		 */
		h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
		h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
		h2c_restart_reading(h2c, 1);
	}

	h2s_destroy(h2s);

	/* backend connection reuse: decide where this (possibly now idle)
	 * connection goes, as long as it is still healthy.
	 */
	if (h2c->flags & H2_CF_IS_BACK) {
		if (!(h2c->conn->flags &
		    (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
			if (h2c->conn->flags & CO_FL_PRIVATE) {
				/* Add the connection in the session server list, if not already done */
				if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
					h2c->conn->owner = NULL;
					if (eb_is_empty(&h2c->streams_by_id)) {
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
				}
				if (eb_is_empty(&h2c->streams_by_id)) {
					if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
						/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
						TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
						return;
					}
				}
			}
			else {
				if (eb_is_empty(&h2c->streams_by_id)) {
					/* If the connection is owned by the session, first remove it
					 * from its list
					 */
					if (h2c->conn->owner) {
						session_unown_conn(h2c->conn->owner, h2c->conn);
						h2c->conn->owner = NULL;
					}

					/* mark that the tasklet may lose its context to another thread and
					 * that the handler needs to check it under the idle conns lock.
					 */
					HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
					xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);

					if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
						/* The server doesn't want it, let's kill the connection right away */
						h2c->conn->mux->destroy(h2c);
						TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
						return;
					}
					/* At this point, the connection has been added to the
					 * server idle list, so another thread may already have
					 * hijacked it, so we can't do anything with it.
					 */
					TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
					return;

				}
				else if (!h2c->conn->hash_node->node.node.leaf_p &&
					 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
					 !LIST_INLIST(&h2c->conn->session_list)) {
					/* still has streams and room for more: make it
					 * available for reuse by other streams.
					 */
					ebmb_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
						    &h2c->conn->hash_node->node,
						    sizeof(h2c->conn->hash_node->hash));
				}
			}
		}
	}

	/* We don't want to close right now unless we're removing the
	 * last stream, and either the connection is in error, or it
	 * reached the ID already specified in a GOAWAY frame received
	 * or sent (as seen by last_sid >= 0).
	 */
	if (h2c_is_dead(h2c)) {
		/* no more stream will come, kill it now */
		TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
		h2_release(h2c);
	}
	else if (h2c->task) {
		h2c_update_timeout(h2c);
		TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
	}
	else
		TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
}
4507
Willy Tarreau88bdba32019-05-13 18:17:53 +02004508/* Performs a synchronous or asynchronous shutr(). */
4509static void h2_do_shutr(struct h2s *h2s)
Willy Tarreau62f52692017-10-08 23:01:42 +02004510{
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004511 struct h2c *h2c = h2s->h2c;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004512
Willy Tarreauf983d002019-05-14 10:40:21 +02004513 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004514 goto done;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004515
Willy Tarreau7838a792019-08-12 18:42:03 +02004516 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4517
Willy Tarreau18059042019-01-31 19:12:48 +01004518 /* a connstream may require us to immediately kill the whole connection
4519 * for example because of a "tcp-request content reject" rule that is
4520 * normally used to limit abuse. In this case we schedule a goaway to
4521 * close the connection.
Willy Tarreau926fa4c2017-11-07 14:42:12 +01004522 */
Christopher Fauletca2b5272022-03-30 14:48:10 +02004523 if ((h2s->endp->flags & CS_EP_KILL_CONN) &&
Willy Tarreau18059042019-01-31 19:12:48 +01004524 !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004525 TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau18059042019-01-31 19:12:48 +01004526 h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
4527 h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
4528 }
Christopher Faulet35757d32019-03-07 15:51:33 +01004529 else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
4530 /* Nothing was never sent for this stream, so reset with
4531 * REFUSED_STREAM error to let the client retry the
4532 * request.
4533 */
Willy Tarreau7838a792019-08-12 18:42:03 +02004534 TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
Christopher Faulet35757d32019-03-07 15:51:33 +01004535 h2s_error(h2s, H2_ERR_REFUSED_STREAM);
4536 }
Willy Tarreaucfba9d62019-08-06 10:30:58 +02004537 else {
4538 /* a final response was already provided, we don't want this
4539 * stream anymore. This may happen when the server responds
4540 * before the end of an upload and closes quickly (redirect,
4541 * deny, ...)
4542 */
4543 h2s_error(h2s, H2_ERR_CANCEL);
4544 }
Willy Tarreau18059042019-01-31 19:12:48 +01004545
Willy Tarreau90c32322017-11-24 08:00:30 +01004546 if (!(h2s->flags & H2_SF_RST_SENT) &&
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004547 h2s_send_rst_stream(h2c, h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004548 goto add_to_list;
Willy Tarreau90c32322017-11-24 08:00:30 +01004549
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004550 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02004551 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau00dd0782018-03-01 16:31:34 +01004552 h2s_close(h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004553 done:
4554 h2s->flags &= ~H2_SF_WANT_SHUTR;
Willy Tarreau7838a792019-08-12 18:42:03 +02004555 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004556 return;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004557add_to_list:
Willy Tarreau5723f292020-01-10 15:16:57 +01004558 /* Let the handler know we want to shutr, and add ourselves to the
4559 * most relevant list if not yet done. h2_deferred_shut() will be
4560 * automatically called via the shut_tl tasklet when there's room
4561 * again.
4562 */
4563 h2s->flags |= H2_SF_WANT_SHUTR;
Willy Tarreau2b718102021-04-21 07:32:39 +02004564 if (!LIST_INLIST(&h2s->list)) {
Willy Tarreau5723f292020-01-10 15:16:57 +01004565 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02004566 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreau5723f292020-01-10 15:16:57 +01004567 else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
Willy Tarreau2b718102021-04-21 07:32:39 +02004568 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004569 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004570 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004571 return;
Willy Tarreau62f52692017-10-08 23:01:42 +02004572}
4573
Willy Tarreau88bdba32019-05-13 18:17:53 +02004574/* Performs a synchronous or asynchronous shutw(). */
4575static void h2_do_shutw(struct h2s *h2s)
Willy Tarreau62f52692017-10-08 23:01:42 +02004576{
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004577 struct h2c *h2c = h2s->h2c;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004578
Willy Tarreaucfba9d62019-08-06 10:30:58 +02004579 if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004580 goto done;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004581
Willy Tarreau7838a792019-08-12 18:42:03 +02004582 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4583
Willy Tarreaucfba9d62019-08-06 10:30:58 +02004584 if (h2s->st != H2_SS_ERROR && (h2s->flags & H2_SF_HEADERS_SENT)) {
Willy Tarreau58e32082017-11-07 14:41:09 +01004585 /* we can cleanly close using an empty data frame only after headers */
4586
4587 if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
4588 h2_send_empty_data_es(h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004589 goto add_to_list;
Willy Tarreau58e32082017-11-07 14:41:09 +01004590
4591 if (h2s->st == H2_SS_HREM)
Willy Tarreau00dd0782018-03-01 16:31:34 +01004592 h2s_close(h2s);
Willy Tarreau58e32082017-11-07 14:41:09 +01004593 else
4594 h2s->st = H2_SS_HLOC;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004595 } else {
Willy Tarreau18059042019-01-31 19:12:48 +01004596 /* a connstream may require us to immediately kill the whole connection
4597 * for example because of a "tcp-request content reject" rule that is
4598 * normally used to limit abuse. In this case we schedule a goaway to
4599 * close the connection.
Willy Tarreaua1349f02017-10-31 07:41:55 +01004600 */
Christopher Fauletca2b5272022-03-30 14:48:10 +02004601 if ((h2s->endp->flags & CS_EP_KILL_CONN) &&
Willy Tarreau18059042019-01-31 19:12:48 +01004602 !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004603 TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau18059042019-01-31 19:12:48 +01004604 h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
4605 h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
4606 }
Christopher Faulet35757d32019-03-07 15:51:33 +01004607 else {
4608 /* Nothing was never sent for this stream, so reset with
4609 * REFUSED_STREAM error to let the client retry the
4610 * request.
4611 */
Willy Tarreau7838a792019-08-12 18:42:03 +02004612 TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
Christopher Faulet35757d32019-03-07 15:51:33 +01004613 h2s_error(h2s, H2_ERR_REFUSED_STREAM);
4614 }
Willy Tarreau18059042019-01-31 19:12:48 +01004615
Willy Tarreau90c32322017-11-24 08:00:30 +01004616 if (!(h2s->flags & H2_SF_RST_SENT) &&
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004617 h2s_send_rst_stream(h2c, h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004618 goto add_to_list;
Willy Tarreau90c32322017-11-24 08:00:30 +01004619
Willy Tarreau00dd0782018-03-01 16:31:34 +01004620 h2s_close(h2s);
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004621 }
4622
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004623 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02004624 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau7838a792019-08-12 18:42:03 +02004625
4626 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
4627
Willy Tarreau88bdba32019-05-13 18:17:53 +02004628 done:
4629 h2s->flags &= ~H2_SF_WANT_SHUTW;
4630 return;
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004631
4632 add_to_list:
Willy Tarreau5723f292020-01-10 15:16:57 +01004633 /* Let the handler know we want to shutw, and add ourselves to the
4634 * most relevant list if not yet done. h2_deferred_shut() will be
4635 * automatically called via the shut_tl tasklet when there's room
4636 * again.
4637 */
4638 h2s->flags |= H2_SF_WANT_SHUTW;
Willy Tarreau2b718102021-04-21 07:32:39 +02004639 if (!LIST_INLIST(&h2s->list)) {
Willy Tarreau5723f292020-01-10 15:16:57 +01004640 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02004641 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreau5723f292020-01-10 15:16:57 +01004642 else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
Willy Tarreau2b718102021-04-21 07:32:39 +02004643 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004644 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004645 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004646 return;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004647}
4648
Willy Tarreau5723f292020-01-10 15:16:57 +01004649/* This is the tasklet referenced in h2s->shut_tl, it is used for
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004650 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
4651 * and prevented the last frame from being emitted.
4652 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01004653struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004654{
4655 struct h2s *h2s = ctx;
Willy Tarreau88bdba32019-05-13 18:17:53 +02004656 struct h2c *h2c = h2s->h2c;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004657
Willy Tarreau7838a792019-08-12 18:42:03 +02004658 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4659
Willy Tarreau5723f292020-01-10 15:16:57 +01004660 if (h2s->flags & H2_SF_NOTIFIED) {
4661 /* some data processing remains to be done first */
4662 goto end;
4663 }
4664
Willy Tarreau2c249eb2019-05-13 18:06:17 +02004665 if (h2s->flags & H2_SF_WANT_SHUTW)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004666 h2_do_shutw(h2s);
4667
Willy Tarreau2c249eb2019-05-13 18:06:17 +02004668 if (h2s->flags & H2_SF_WANT_SHUTR)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004669 h2_do_shutr(h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004670
Willy Tarreau88bdba32019-05-13 18:17:53 +02004671 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
Olivier Houchardafc7cb82019-03-25 14:08:01 +01004672 /* We're done trying to send, remove ourself from the send_list */
Olivier Houchardafc7cb82019-03-25 14:08:01 +01004673 LIST_DEL_INIT(&h2s->list);
Olivier Houchard7a977432019-03-21 15:47:13 +01004674
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02004675 if (!h2s->endp->cs) {
Willy Tarreau88bdba32019-05-13 18:17:53 +02004676 h2s_destroy(h2s);
Willy Tarreau74163142021-03-13 11:30:19 +01004677 if (h2c_is_dead(h2c)) {
Willy Tarreau88bdba32019-05-13 18:17:53 +02004678 h2_release(h2c);
Willy Tarreau74163142021-03-13 11:30:19 +01004679 t = NULL;
4680 }
Willy Tarreau88bdba32019-05-13 18:17:53 +02004681 }
Olivier Houchard7a977432019-03-21 15:47:13 +01004682 }
Willy Tarreau5723f292020-01-10 15:16:57 +01004683 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02004684 TRACE_LEAVE(H2_EV_STRM_SHUT);
Willy Tarreau74163142021-03-13 11:30:19 +01004685 return t;
Willy Tarreau62f52692017-10-08 23:01:42 +02004686}
4687
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004688/* shutr() called by the conn_stream (mux_ops.shutr) */
Christopher Faulet07976562022-03-31 11:05:05 +02004689static void h2_shutr(struct conn_stream *cs, enum co_shr_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004690{
Christopher Fauletdb90f2a2022-03-22 16:06:25 +01004691 struct h2s *h2s = __cs_mux(cs);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004692
Willy Tarreau7838a792019-08-12 18:42:03 +02004693 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004694 if (mode)
4695 h2_do_shutr(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004696 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004697}
4698
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004699/* shutw() called by the conn_stream (mux_ops.shutw) */
Christopher Faulet07976562022-03-31 11:05:05 +02004700static void h2_shutw(struct conn_stream *cs, enum co_shw_mode mode)
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004701{
Christopher Fauletdb90f2a2022-03-22 16:06:25 +01004702 struct h2s *h2s = __cs_mux(cs);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004703
Willy Tarreau7838a792019-08-12 18:42:03 +02004704 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004705 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004706 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004707}
4708
Christopher Faulet9b79a102019-07-15 11:22:56 +02004709/* Decode the payload of a HEADERS frame and produce the HTX request or response
4710 * depending on the connection's side. Returns a positive value on success, a
4711 * negative value on failure, or 0 if it couldn't proceed. May report connection
4712 * errors in h2c->errcode if the frame is non-decodable and the connection
4713 * unrecoverable. In absence of connection error when a failure is reported, the
4714 * caller must assume a stream error.
Willy Tarreauea18f862018-12-22 20:19:26 +01004715 *
4716 * The function may fold CONTINUATION frames into the initial HEADERS frame
4717 * by removing padding and next frame header, then moving the CONTINUATION
4718 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
4719 * leaving a hole between the main frame and the beginning of the next one.
4720 * The possibly remaining incomplete or next frame at the end may be moved
4721 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
4722 * HEADERS frames are unwrapped into a temporary buffer before decoding.
4723 *
4724 * A buffer at the beginning of processing may look like this :
4725 *
4726 * ,---.---------.-----.--------------.--------------.------.---.
4727 * |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
4728 * `---^---------^-----^--------------^--------------^------^---'
4729 * | | <-----> | |
4730 * area | dpl | wrap
4731 * |<--------------> |
4732 * | dfl |
4733 * |<-------------------------------------------------->|
4734 * head data
4735 *
4736 * Padding is automatically overwritten when folding, participating to the
4737 * hole size after dfl :
4738 *
4739 * ,---.------------------------.-----.--------------.------.---.
4740 * |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
4741 * `---^------------------------^-----^--------------^------^---'
4742 * | | <-----> | |
4743 * area | hole | wrap
4744 * |<-----------------------> |
4745 * | dfl |
4746 * |<-------------------------------------------------->|
4747 * head data
4748 *
4749 * Please note that the HEADERS frame is always deprived from its PADLEN byte
4750 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
4751 * bit.
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004752 *
4753 * The <flags> field must point to either the stream's flags or to a copy of it
4754 * so that the function can update the following flags :
4755 * - H2_SF_DATA_CLEN when content-length is seen
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004756 * - H2_SF_HEADERS_RCVD once the frame is successfully decoded
Willy Tarreau88d138e2019-01-02 19:38:14 +01004757 *
4758 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
4759 * decoding, in order to detect if we're dealing with a headers or a trailers
4760 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
Willy Tarreau13278b42017-10-13 19:23:14 +02004761 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01004762static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
Willy Tarreau13278b42017-10-13 19:23:14 +02004763{
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004764 const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
Willy Tarreau83061a82018-07-13 11:56:34 +02004765 struct buffer *tmp = get_trash_chunk();
Christopher Faulete4ab11b2019-06-11 15:05:37 +02004766 struct http_hdr list[global.tune.max_http_hdr * 2];
Willy Tarreau83061a82018-07-13 11:56:34 +02004767 struct buffer *copy = NULL;
Willy Tarreau174b06a2018-04-25 18:13:58 +02004768 unsigned int msgf;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004769 struct htx *htx = NULL;
Willy Tarreauea18f862018-12-22 20:19:26 +01004770 int flen; // header frame len
4771 int hole = 0;
Willy Tarreau86277d42019-01-02 15:36:11 +01004772 int ret = 0;
4773 int outlen;
Willy Tarreau13278b42017-10-13 19:23:14 +02004774 int wrap;
Willy Tarreau13278b42017-10-13 19:23:14 +02004775
Willy Tarreau7838a792019-08-12 18:42:03 +02004776 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
4777
Willy Tarreauea18f862018-12-22 20:19:26 +01004778next_frame:
4779 if (b_data(&h2c->dbuf) - hole < h2c->dfl)
4780 goto leave; // incomplete input frame
4781
4782 /* No END_HEADERS means there's one or more CONTINUATION frames. In
4783 * this case, we'll try to paste it immediately after the initial
4784 * HEADERS frame payload and kill any possible padding. The initial
4785 * frame's length will be increased to represent the concatenation
4786 * of the two frames. The next frame is read from position <tlen>
4787 * and written at position <flen> (minus padding if some is present).
4788 */
4789 if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
4790 struct h2_fh hdr;
4791 int clen; // CONTINUATION frame's payload length
4792
Willy Tarreau7838a792019-08-12 18:42:03 +02004793 TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004794 if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
4795 /* no more data, the buffer may be full, either due to
4796 * too large a frame or because of too large a hole that
4797 * we're going to compact at the end.
4798 */
4799 goto leave;
4800 }
4801
4802 if (hdr.ft != H2_FT_CONTINUATION) {
4803 /* RFC7540#6.10: frame of unexpected type */
Willy Tarreau7838a792019-08-12 18:42:03 +02004804 TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004805 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004806 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004807 goto fail;
4808 }
4809
4810 if (hdr.sid != h2c->dsi) {
4811 /* RFC7540#6.10: frame of different stream */
Willy Tarreau7838a792019-08-12 18:42:03 +02004812 TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004813 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004814 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004815 goto fail;
4816 }
4817
4818 if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
4819 /* RFC7540#4.2: invalid frame length */
Willy Tarreau7838a792019-08-12 18:42:03 +02004820 TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004821 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4822 goto fail;
4823 }
4824
4825 /* detect when we must stop aggragating frames */
4826 h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;
4827
4828 /* Take as much as we can of the CONTINUATION frame's payload */
4829 clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
4830 if (clen > hdr.len)
4831 clen = hdr.len;
4832
4833 /* Move the frame's payload over the padding, hole and frame
4834 * header. At least one of hole or dpl is null (see diagrams
4835 * above). The hole moves after the new aggragated frame.
4836 */
4837 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
Christopher Fauletcb1847c2021-04-21 11:11:21 +02004838 h2c->dfl += hdr.len - h2c->dpl;
Willy Tarreauea18f862018-12-22 20:19:26 +01004839 hole += h2c->dpl + 9;
4840 h2c->dpl = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02004841 TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004842 goto next_frame;
4843 }
4844
4845 flen = h2c->dfl - h2c->dpl;
Willy Tarreau68472622017-12-11 18:36:37 +01004846
Willy Tarreau13278b42017-10-13 19:23:14 +02004847 /* if the input buffer wraps, take a temporary copy of it (rare) */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004848 wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
Willy Tarreau13278b42017-10-13 19:23:14 +02004849 if (wrap < h2c->dfl) {
Willy Tarreau68dd9852017-07-03 14:44:26 +02004850 copy = alloc_trash_chunk();
4851 if (!copy) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004852 TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
Willy Tarreau68dd9852017-07-03 14:44:26 +02004853 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
4854 goto fail;
4855 }
Willy Tarreau843b7cb2018-07-13 10:54:26 +02004856 memcpy(copy->area, b_head(&h2c->dbuf), wrap);
4857 memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
4858 hdrs = (uint8_t *) copy->area;
Willy Tarreau13278b42017-10-13 19:23:14 +02004859 }
4860
Willy Tarreau13278b42017-10-13 19:23:14 +02004861 /* Skip StreamDep and weight for now (we don't support PRIORITY) */
4862 if (h2c->dff & H2_F_HEADERS_PRIORITY) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004863 if (read_n32(hdrs) == h2c->dsi) {
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004864 /* RFC7540#5.3.1 : stream dep may not depend on itself */
Willy Tarreau7838a792019-08-12 18:42:03 +02004865 TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004866 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004867 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreaua0d11b62018-09-05 18:30:05 +02004868 goto fail;
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004869 }
4870
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004871 if (flen < 5) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004872 TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004873 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4874 goto fail;
4875 }
4876
Willy Tarreau13278b42017-10-13 19:23:14 +02004877 hdrs += 5; // stream dep = 4, weight = 1
4878 flen -= 5;
4879 }
4880
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004881 if (!h2_get_buf(h2c, rxbuf)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004882 TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Willy Tarreau937f7602018-02-26 15:22:17 +01004883 h2c->flags |= H2_CF_DEM_SALLOC;
Willy Tarreau86277d42019-01-02 15:36:11 +01004884 goto leave;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004885 }
Willy Tarreau13278b42017-10-13 19:23:14 +02004886
Willy Tarreau937f7602018-02-26 15:22:17 +01004887 /* we can't retry a failed decompression operation so we must be very
4888 * careful not to take any risks. In practice the output buffer is
4889 * always empty except maybe for trailers, in which case we simply have
4890 * to wait for the upper layer to finish consuming what is available.
4891 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02004892 htx = htx_from_buf(rxbuf);
4893 if (!htx_is_empty(htx)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004894 TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02004895 h2c->flags |= H2_CF_DEM_SFULL;
4896 goto leave;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004897 }
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004898
Willy Tarreau25919232019-01-03 14:48:18 +01004899 /* past this point we cannot roll back in case of error */
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004900 outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
4901 sizeof(list)/sizeof(list[0]), tmp);
4902 if (outlen < 0) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004903 TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004904 h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
4905 goto fail;
4906 }
4907
Willy Tarreau25919232019-01-03 14:48:18 +01004908 /* The PACK decompressor was updated, let's update the input buffer and
4909 * the parser's state to commit these changes and allow us to later
4910 * fail solely on the stream if needed.
4911 */
4912 b_del(&h2c->dbuf, h2c->dfl + hole);
4913 h2c->dfl = hole = 0;
4914 h2c->st0 = H2_CS_FRAME_H;
4915
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004916 /* OK now we have our header list in <list> */
Willy Tarreau880f5802019-01-03 08:10:14 +01004917 msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
Christopher Fauletd0db4232021-01-22 11:46:30 +01004918 msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
Amaury Denoyelle74162742020-12-11 17:53:05 +01004919 /* If an Extended CONNECT has been sent on this stream, set message flag
Ilya Shipitsinacf84592021-02-06 22:29:08 +05004920 * to convert 200 response to 101 htx response */
Amaury Denoyelle74162742020-12-11 17:53:05 +01004921 msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004922
Willy Tarreau88d138e2019-01-02 19:38:14 +01004923 if (*flags & H2_SF_HEADERS_RCVD)
4924 goto trailers;
4925
4926 /* This is the first HEADERS frame so it's a headers block */
Christopher Faulet9b79a102019-07-15 11:22:56 +02004927 if (h2c->flags & H2_CF_IS_BACK)
Amaury Denoyelle74162742020-12-11 17:53:05 +01004928 outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
Christopher Faulet9b79a102019-07-15 11:22:56 +02004929 else
4930 outlen = h2_make_htx_request(list, htx, &msgf, body_len);
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004931
Christopher Faulet3d875582021-04-26 17:46:13 +02004932 if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
Willy Tarreau25919232019-01-03 14:48:18 +01004933 /* too large headers? this is a stream error only */
Christopher Faulet3d875582021-04-26 17:46:13 +02004934 TRACE_STATE("message headers too large", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
4935 htx->flags |= HTX_FL_PARSING_ERROR;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004936 goto fail;
4937 }
Willy Tarreau13278b42017-10-13 19:23:14 +02004938
Willy Tarreau174b06a2018-04-25 18:13:58 +02004939 if (msgf & H2_MSGF_BODY) {
4940 /* a payload is present */
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01004941 if (msgf & H2_MSGF_BODY_CL) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004942 *flags |= H2_SF_DATA_CLEN;
Christopher Faulet9b79a102019-07-15 11:22:56 +02004943 htx->extra = *body_len;
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01004944 }
Willy Tarreau174b06a2018-04-25 18:13:58 +02004945 }
Christopher Faulet7d247f02020-12-02 14:26:36 +01004946 if (msgf & H2_MSGF_BODYLESS_RSP)
4947 *flags |= H2_SF_BODYLESS_RESP;
Willy Tarreau174b06a2018-04-25 18:13:58 +02004948
Christopher Fauletd0db4232021-01-22 11:46:30 +01004949 if (msgf & H2_MSGF_BODY_TUNNEL)
4950 *flags |= H2_SF_BODY_TUNNEL;
4951 else {
4952 /* Abort the tunnel attempt, if any */
4953 if (*flags & H2_SF_BODY_TUNNEL)
4954 *flags |= H2_SF_TUNNEL_ABRT;
4955 *flags &= ~H2_SF_BODY_TUNNEL;
4956 }
4957
Willy Tarreau88d138e2019-01-02 19:38:14 +01004958 done:
Christopher Faulet0b465482019-02-19 15:14:23 +01004959 /* indicate that a HEADERS frame was received for this stream, except
4960 * for 1xx responses. For 1xx responses, another HEADERS frame is
4961 * expected.
4962 */
4963 if (!(msgf & H2_MSGF_RSP_1XX))
4964 *flags |= H2_SF_HEADERS_RCVD;
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004965
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01004966 if (h2c->dff & H2_F_HEADERS_END_STREAM) {
4967 /* no more data are expected for this message */
4968 htx->flags |= HTX_FL_EOM;
Willy Tarreau88d138e2019-01-02 19:38:14 +01004969 }
Willy Tarreau937f7602018-02-26 15:22:17 +01004970
Amaury Denoyelleefe22762020-12-11 17:53:08 +01004971 if (msgf & H2_MSGF_EXT_CONNECT)
4972 *flags |= H2_SF_EXT_CONNECT_RCVD;
4973
Willy Tarreau86277d42019-01-02 15:36:11 +01004974 /* success */
4975 ret = 1;
4976
Willy Tarreau68dd9852017-07-03 14:44:26 +02004977 leave:
Willy Tarreau86277d42019-01-02 15:36:11 +01004978 /* If there is a hole left and it's not at the end, we are forced to
Willy Tarreauea18f862018-12-22 20:19:26 +01004979 * move the remaining data over it.
4980 */
4981 if (hole) {
4982 if (b_data(&h2c->dbuf) > h2c->dfl + hole)
4983 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
4984 b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
4985 b_sub(&h2c->dbuf, hole);
4986 }
4987
Christopher Faulet07f88d72021-04-21 10:39:53 +02004988 if (b_full(&h2c->dbuf) && h2c->dfl) {
Willy Tarreauea18f862018-12-22 20:19:26 +01004989 /* too large frames */
4990 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau86277d42019-01-02 15:36:11 +01004991 ret = -1;
Willy Tarreauea18f862018-12-22 20:19:26 +01004992 }
4993
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01004994 if (htx)
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004995 htx_to_buf(htx, rxbuf);
Willy Tarreau68dd9852017-07-03 14:44:26 +02004996 free_trash_chunk(copy);
Willy Tarreau7838a792019-08-12 18:42:03 +02004997 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau86277d42019-01-02 15:36:11 +01004998 return ret;
4999
Willy Tarreau68dd9852017-07-03 14:44:26 +02005000 fail:
Willy Tarreau86277d42019-01-02 15:36:11 +01005001 ret = -1;
Willy Tarreau68dd9852017-07-03 14:44:26 +02005002 goto leave;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005003
5004 trailers:
5005 /* This is the last HEADERS frame hence a trailer */
Willy Tarreau88d138e2019-01-02 19:38:14 +01005006 if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
5007 /* It's a trailer but it's missing ES flag */
Willy Tarreau7838a792019-08-12 18:42:03 +02005008 TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005009 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02005010 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005011 goto fail;
5012 }
5013
Christopher Faulet9b79a102019-07-15 11:22:56 +02005014 /* Trailers terminate a DATA sequence */
Willy Tarreau7838a792019-08-12 18:42:03 +02005015 if (h2_make_htx_trailers(list, htx) <= 0) {
5016 TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005017 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02005018 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01005019 goto done;
Willy Tarreau13278b42017-10-13 19:23:14 +02005020}
5021
Christopher Faulet9b79a102019-07-15 11:22:56 +02005022/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005023 * parser state is automatically updated. Returns > 0 if it could completely
5024 * send the current frame, 0 if it couldn't complete, in which case
Christopher Fauletb041b232022-03-24 10:27:02 +01005025 * CS_EP_RCV_MORE must be checked to know if some data remain pending (an empty
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005026 * DATA frame can return 0 as a valid result). Stream errors are reported in
5027 * h2s->errcode and connection errors in h2c->errcode. The caller must already
5028 * have checked the frame header and ensured that the frame was complete or the
5029 * buffer full. It changes the frame state to FRAME_A once done.
Willy Tarreau454f9052017-10-26 19:40:35 +02005030 */
Willy Tarreau454b57b2018-02-26 15:50:05 +01005031static int h2_frt_transfer_data(struct h2s *h2s)
Willy Tarreau454f9052017-10-26 19:40:35 +02005032{
5033 struct h2c *h2c = h2s->h2c;
Christopher Faulet9b79a102019-07-15 11:22:56 +02005034 int block;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005035 unsigned int flen = 0;
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005036 struct htx *htx = NULL;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005037 struct buffer *csbuf;
Christopher Faulet9b79a102019-07-15 11:22:56 +02005038 unsigned int sent;
Willy Tarreau454f9052017-10-26 19:40:35 +02005039
Willy Tarreau7838a792019-08-12 18:42:03 +02005040 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
5041
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005042 h2c->flags &= ~H2_CF_DEM_SFULL;
Willy Tarreau454f9052017-10-26 19:40:35 +02005043
Olivier Houchard638b7992018-08-16 15:41:52 +02005044 csbuf = h2_get_buf(h2c, &h2s->rxbuf);
Willy Tarreaud755ea62018-02-26 15:44:54 +01005045 if (!csbuf) {
5046 h2c->flags |= H2_CF_DEM_SALLOC;
Willy Tarreau7838a792019-08-12 18:42:03 +02005047 TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005048 goto fail;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005049 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02005050 htx = htx_from_buf(csbuf);
Willy Tarreaud755ea62018-02-26 15:44:54 +01005051
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005052try_again:
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005053 flen = h2c->dfl - h2c->dpl;
5054 if (!flen)
Willy Tarreau4a28da12018-01-04 14:41:00 +01005055 goto end_transfer;
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005056
Willy Tarreauc9fa0482018-07-10 17:43:27 +02005057 if (flen > b_data(&h2c->dbuf)) {
5058 flen = b_data(&h2c->dbuf);
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005059 if (!flen)
Willy Tarreau454b57b2018-02-26 15:50:05 +01005060 goto fail;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005061 }
5062
Christopher Faulet9b79a102019-07-15 11:22:56 +02005063 block = htx_free_data_space(htx);
5064 if (!block) {
5065 h2c->flags |= H2_CF_DEM_SFULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02005066 TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005067 goto fail;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005068 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02005069 if (flen > block)
5070 flen = block;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005071
Christopher Faulet9b79a102019-07-15 11:22:56 +02005072 /* here, flen is the max we can copy into the output buffer */
5073 block = b_contig_data(&h2c->dbuf, 0);
5074 if (flen > block)
5075 flen = block;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005076
Christopher Faulet9b79a102019-07-15 11:22:56 +02005077 sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
Willy Tarreau022e5e52020-09-10 09:33:15 +02005078 TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);
Willy Tarreau454f9052017-10-26 19:40:35 +02005079
Christopher Faulet9b79a102019-07-15 11:22:56 +02005080 b_del(&h2c->dbuf, sent);
5081 h2c->dfl -= sent;
5082 h2c->rcvd_c += sent;
5083 h2c->rcvd_s += sent; // warning, this can also affect the closed streams!
Willy Tarreau454f9052017-10-26 19:40:35 +02005084
Christopher Faulet9b79a102019-07-15 11:22:56 +02005085 if (h2s->flags & H2_SF_DATA_CLEN) {
5086 h2s->body_len -= sent;
5087 htx->extra = h2s->body_len;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005088 }
5089
Christopher Faulet9b79a102019-07-15 11:22:56 +02005090 if (sent < flen) {
Willy Tarreaud755ea62018-02-26 15:44:54 +01005091 h2c->flags |= H2_CF_DEM_SFULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02005092 TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005093 goto fail;
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005094 }
5095
Christopher Faulet9b79a102019-07-15 11:22:56 +02005096 goto try_again;
5097
Willy Tarreau4a28da12018-01-04 14:41:00 +01005098 end_transfer:
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005099 /* here we're done with the frame, all the payload (except padding) was
5100 * transferred.
5101 */
Willy Tarreaueba10f22018-04-25 20:44:22 +02005102
Christopher Faulet5be651d2021-01-22 15:28:03 +01005103 if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
5104 /* no more data are expected for this message. This add the EOM
5105 * flag but only on the response path or if no tunnel attempt
5106 * was aborted. Otherwise (request path + tunnel abrted), the
5107 * EOM was already reported.
5108 */
Christopher Faulet33724322021-02-10 09:04:59 +01005109 if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
5110 /* If we receive an empty DATA frame with ES flag while the HTX
5111 * message is empty, we must be sure to push a block to be sure
5112 * the HTX EOM flag will be handled on the other side. It is a
5113 * workaround because for now it is not possible to push empty
5114 * HTX DATA block. And without this block, there is no way to
5115 * "commit" the end of the message.
5116 */
5117 if (htx_is_empty(htx)) {
5118 if (!htx_add_endof(htx, HTX_BLK_EOT))
5119 goto fail;
5120 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005121 htx->flags |= HTX_FL_EOM;
Christopher Faulet33724322021-02-10 09:04:59 +01005122 }
Willy Tarreaueba10f22018-04-25 20:44:22 +02005123 }
5124
Willy Tarreaud1023bb2018-03-22 16:53:12 +01005125 h2c->rcvd_c += h2c->dpl;
5126 h2c->rcvd_s += h2c->dpl;
5127 h2c->dpl = 0;
Willy Tarreau454f9052017-10-26 19:40:35 +02005128 h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
Christopher Faulet9b79a102019-07-15 11:22:56 +02005129 htx_to_buf(htx, csbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02005130 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005131 return 1;
Willy Tarreau454b57b2018-02-26 15:50:05 +01005132 fail:
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01005133 if (htx)
5134 htx_to_buf(htx, csbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02005135 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005136 return 0;
Willy Tarreau454f9052017-10-26 19:40:35 +02005137}
5138
Willy Tarreau115e83b2018-12-01 19:17:53 +01005139/* Try to send a HEADERS frame matching HTX response present in HTX message
5140 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5141 * must check the stream's status to detect any error which might have happened
5142 * subsequently to a successful send. The htx blocks are automatically removed
5143 * from the message. The htx message is assumed to be valid since produced from
5144 * the internal code, hence it contains a start line, an optional series of
5145 * header blocks and an end of header, otherwise an invalid frame could be
5146 * emitted and the resulting htx message could be left in an inconsistent state.
5147 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02005148static size_t h2s_frt_make_resp_headers(struct h2s *h2s, struct htx *htx)
Willy Tarreau115e83b2018-12-01 19:17:53 +01005149{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005150 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau115e83b2018-12-01 19:17:53 +01005151 struct h2c *h2c = h2s->h2c;
5152 struct htx_blk *blk;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005153 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005154 struct buffer *mbuf;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005155 struct htx_sl *sl;
5156 enum htx_blk_type type;
5157 int es_now = 0;
5158 int ret = 0;
5159 int hdr;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005160
Willy Tarreau7838a792019-08-12 18:42:03 +02005161 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5162
Willy Tarreau115e83b2018-12-01 19:17:53 +01005163 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005164 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005165 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02005166 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005167 return 0;
5168 }
5169
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005170 /* get the start line (we do have one) and the rest of the headers,
5171 * that we dump starting at header 0 */
5172 sl = NULL;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005173 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005174 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau115e83b2018-12-01 19:17:53 +01005175 type = htx_get_blk_type(blk);
5176
5177 if (type == HTX_BLK_UNUSED)
5178 continue;
5179
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005180 if (type == HTX_BLK_EOH)
Willy Tarreau115e83b2018-12-01 19:17:53 +01005181 break;
5182
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005183 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005184 BUG_ON(!sl); /* The start-line mut be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005185 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5186 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5187 goto fail;
5188 }
5189
5190 list[hdr].n = htx_get_blk_name(htx, blk);
5191 list[hdr].v = htx_get_blk_value(htx, blk);
5192 hdr++;
5193 }
5194 else if (type == HTX_BLK_RES_SL) {
Christopher Faulet56498132021-01-29 11:39:43 +01005195 BUG_ON(sl); /* Only one start-line expected */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005196 sl = htx_get_blk_ptr(htx, blk);
5197 h2s->status = sl->info.res.status;
Christopher Faulet7d247f02020-12-02 14:26:36 +01005198 if (h2s->status == 204 || h2s->status == 304)
5199 h2s->flags |= H2_SF_BODYLESS_RESP;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005200 if (h2s->status < 100 || h2s->status > 999) {
5201 TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5202 goto fail;
5203 }
5204 else if (h2s->status == 101) {
Amaury Denoyelleefe22762020-12-11 17:53:08 +01005205 if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
5206 /* If an Extended CONNECT has been received, we need to convert 101 to 200 */
5207 h2s->status = 200;
5208 h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
5209 }
5210 else {
5211 /* Otherwise, 101 responses are not supported in H2, so return a error (RFC7540#8.1.1) */
5212 TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5213 goto fail;
5214 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005215 }
5216 else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
5217 /* Abort the tunnel attempt */
5218 h2s->flags &= ~H2_SF_BODY_TUNNEL;
5219 h2s->flags |= H2_SF_TUNNEL_ABRT;
5220 }
5221 }
5222 else {
5223 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005224 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02005225 }
Willy Tarreau115e83b2018-12-01 19:17:53 +01005226 }
5227
Christopher Faulet56498132021-01-29 11:39:43 +01005228 /* The start-line me be defined */
5229 BUG_ON(!sl);
5230
Willy Tarreau115e83b2018-12-01 19:17:53 +01005231 /* marker for end of headers */
5232 list[hdr].n = ist("");
5233
Willy Tarreau9c218e72019-05-26 10:08:28 +02005234 mbuf = br_tail(h2c->mbuf);
5235 retry:
5236 if (!h2_get_buf(h2c, mbuf)) {
5237 h2c->flags |= H2_CF_MUX_MALLOC;
5238 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005239 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005240 return 0;
5241 }
5242
Willy Tarreau115e83b2018-12-01 19:17:53 +01005243 chunk_reset(&outbuf);
5244
5245 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005246 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5247 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau115e83b2018-12-01 19:17:53 +01005248 break;
5249 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005250 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau115e83b2018-12-01 19:17:53 +01005251 }
5252
5253 if (outbuf.size < 9)
5254 goto full;
5255
5256 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5257 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5258 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5259 outbuf.data = 9;
5260
Willy Tarreau39a0a1e2022-01-13 16:00:12 +01005261 if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
5262 /* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
5263 * dynamic table size update so that some clients are not
5264 * confused. In practice we only need to send the DTSU when the
5265 * advertised size is lower than the current one, and since we
5266 * don't use it and don't care about the default 4096 bytes,
5267 * we only ack it with a zero size thus we at most have to deal
5268 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
5269 * below for the whole context and interoperability risks:
5270 * https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
5271 */
5272 if (b_room(&outbuf) < 1)
5273 goto full;
5274 outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes
5275
5276 /* let's not update the flags now but only once the buffer is
5277 * really committed.
5278 */
5279 }
5280
Willy Tarreau115e83b2018-12-01 19:17:53 +01005281 /* encode status, which necessarily is the first one */
Willy Tarreauaafdf582018-12-10 18:06:40 +01005282 if (!hpack_encode_int_status(&outbuf, h2s->status)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005283 if (b_space_wraps(mbuf))
Willy Tarreau115e83b2018-12-01 19:17:53 +01005284 goto realign_again;
5285 goto full;
5286 }
5287
5288 /* encode all headers, stop at empty name */
5289 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
5290 /* these ones do not exist in H2 and must be dropped. */
5291 if (isteq(list[hdr].n, ist("connection")) ||
5292 isteq(list[hdr].n, ist("proxy-connection")) ||
5293 isteq(list[hdr].n, ist("keep-alive")) ||
5294 isteq(list[hdr].n, ist("upgrade")) ||
5295 isteq(list[hdr].n, ist("transfer-encoding")))
5296 continue;
5297
Christopher Faulet86d144c2019-08-14 16:32:25 +02005298 /* Skip all pseudo-headers */
5299 if (*(list[hdr].n.ptr) == ':')
5300 continue;
5301
Willy Tarreau115e83b2018-12-01 19:17:53 +01005302 if (isteq(list[hdr].n, ist("")))
5303 break; // end
5304
5305 if (!hpack_encode_header(&outbuf, list[hdr].n, list[hdr].v)) {
5306 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005307 if (b_space_wraps(mbuf))
Willy Tarreau115e83b2018-12-01 19:17:53 +01005308 goto realign_again;
5309 goto full;
5310 }
5311 }
5312
Willy Tarreaucb985a42019-10-07 16:56:34 +02005313 /* update the frame's size */
5314 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5315
5316 if (outbuf.data > h2c->mfs + 9) {
5317 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5318 /* output full */
5319 if (b_space_wraps(mbuf))
5320 goto realign_again;
5321 goto full;
5322 }
5323 }
5324
Willy Tarreau3a537072021-06-17 08:40:04 +02005325 TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5326
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005327 /* remove all header blocks including the EOH and compute the
5328 * corresponding size.
Willy Tarreau115e83b2018-12-01 19:17:53 +01005329 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005330 ret = 0;
5331 blk = htx_get_head_blk(htx);
5332 while (blk) {
5333 type = htx_get_blk_type(blk);
5334 ret += htx_get_blksz(blk);
5335 blk = htx_remove_blk(htx, blk);
5336 /* The removed block is the EOH */
5337 if (type == HTX_BLK_EOH)
5338 break;
Christopher Faulet5be651d2021-01-22 15:28:03 +01005339 }
Willy Tarreau115e83b2018-12-01 19:17:53 +01005340
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02005341 if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) {
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005342 /* Response already closed: add END_STREAM */
5343 es_now = 1;
5344 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005345 else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
5346 /* EOM+empty: we may need to add END_STREAM except for 1xx
Christopher Faulet991febd2020-12-02 15:17:31 +01005347 * responses and tunneled response.
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005348 */
Christopher Faulet991febd2020-12-02 15:17:31 +01005349 if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
5350 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005351 }
Willy Tarreau115e83b2018-12-01 19:17:53 +01005352
Willy Tarreau115e83b2018-12-01 19:17:53 +01005353 if (es_now)
5354 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5355
5356 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005357 b_add(mbuf, outbuf.data);
Christopher Faulet0b465482019-02-19 15:14:23 +01005358
5359 /* indicates the HEADERS frame was sent, except for 1xx responses. For
5360 * 1xx responses, another HEADERS frame is expected.
5361 */
Christopher Faulet89899422020-12-07 18:24:43 +01005362 if (h2s->status >= 200)
Christopher Faulet0b465482019-02-19 15:14:23 +01005363 h2s->flags |= H2_SF_HEADERS_SENT;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005364
Willy Tarreau39a0a1e2022-01-13 16:00:12 +01005365 if (h2c->flags & H2_CF_SHTS_UPDATED) {
5366 /* was sent above */
5367 h2c->flags |= H2_CF_DTSU_EMITTED;
Willy Tarreauc7d85482022-02-16 14:28:14 +01005368 h2c->flags &= ~H2_CF_SHTS_UPDATED;
Willy Tarreau39a0a1e2022-01-13 16:00:12 +01005369 }
5370
Willy Tarreau115e83b2018-12-01 19:17:53 +01005371 if (es_now) {
5372 h2s->flags |= H2_SF_ES_SENT;
Willy Tarreau7838a792019-08-12 18:42:03 +02005373 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005374 if (h2s->st == H2_SS_OPEN)
5375 h2s->st = H2_SS_HLOC;
5376 else
5377 h2s_close(h2s);
5378 }
5379
5380 /* OK we could properly deliver the response */
Willy Tarreau115e83b2018-12-01 19:17:53 +01005381 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02005382 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005383 return ret;
5384 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005385 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5386 goto retry;
Willy Tarreau115e83b2018-12-01 19:17:53 +01005387 h2c->flags |= H2_CF_MUX_MFULL;
5388 h2s->flags |= H2_SF_BLK_MROOM;
5389 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005390 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau115e83b2018-12-01 19:17:53 +01005391 goto end;
5392 fail:
5393 /* unparsable HTX messages, too large ones to be produced in the local
5394 * list etc go here (unrecoverable errors).
5395 */
5396 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5397 ret = 0;
5398 goto end;
5399}
5400
Willy Tarreau80739692018-10-05 11:35:57 +02005401/* Try to send a HEADERS frame matching HTX request present in HTX message
5402 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5403 * must check the stream's status to detect any error which might have happened
5404 * subsequently to a successful send. The htx blocks are automatically removed
5405 * from the message. The htx message is assumed to be valid since produced from
5406 * the internal code, hence it contains a start line, an optional series of
5407 * header blocks and an end of header, otherwise an invalid frame could be
5408 * emitted and the resulting htx message could be left in an inconsistent state.
5409 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02005410static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx)
Willy Tarreau80739692018-10-05 11:35:57 +02005411{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005412 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau80739692018-10-05 11:35:57 +02005413 struct h2c *h2c = h2s->h2c;
5414 struct htx_blk *blk;
Willy Tarreau80739692018-10-05 11:35:57 +02005415 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005416 struct buffer *mbuf;
Willy Tarreau80739692018-10-05 11:35:57 +02005417 struct htx_sl *sl;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005418 struct ist meth, uri, auth, host = IST_NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005419 enum htx_blk_type type;
5420 int es_now = 0;
5421 int ret = 0;
5422 int hdr;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005423 int extended_connect = 0;
Willy Tarreau80739692018-10-05 11:35:57 +02005424
Willy Tarreau7838a792019-08-12 18:42:03 +02005425 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5426
Willy Tarreau80739692018-10-05 11:35:57 +02005427 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005428 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005429 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02005430 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005431 return 0;
5432 }
5433
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005434 /* get the start line (we do have one) and the rest of the headers,
5435 * that we dump starting at header 0 */
5436 sl = NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005437 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005438 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005439 type = htx_get_blk_type(blk);
5440
5441 if (type == HTX_BLK_UNUSED)
5442 continue;
5443
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005444 if (type == HTX_BLK_EOH)
Willy Tarreau80739692018-10-05 11:35:57 +02005445 break;
5446
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005447 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005448 BUG_ON(!sl); /* The start-line mut be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005449 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5450 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5451 goto fail;
5452 }
Willy Tarreau80739692018-10-05 11:35:57 +02005453
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005454 list[hdr].n = htx_get_blk_name(htx, blk);
5455 list[hdr].v = htx_get_blk_value(htx, blk);
Christopher Faulet67d58092019-10-02 10:51:38 +02005456
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005457 /* Skip header if same name is used to add the server name */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005458 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
5459 isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005460 continue;
Christopher Faulet67d58092019-10-02 10:51:38 +02005461
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005462 /* Convert connection: upgrade to Extended connect from rfc 8441 */
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005463 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005464 /* rfc 7230 #6.1 Connection = list of tokens */
5465 struct ist connection_ist = list[hdr].v;
5466 do {
5467 if (isteqi(iststop(connection_ist, ','),
5468 ist("upgrade"))) {
Amaury Denoyelle0df04362021-10-18 09:43:29 +02005469 if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
5470 TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5471 goto fail;
5472 }
5473
Amaury Denoyellee0c258c2021-10-18 10:05:16 +02005474 TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005475 h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
5476 sl->info.req.meth = HTTP_METH_CONNECT;
5477 meth = ist("CONNECT");
5478
5479 extended_connect = 1;
5480 break;
5481 }
5482
5483 connection_ist = istadv(istfind(connection_ist, ','), 1);
5484 } while (istlen(connection_ist));
5485 }
5486
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005487 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005488 /* rfc 7230 #6.7 Upgrade = list of protocols
5489 * rfc 8441 #4 Extended connect = :protocol is single-valued
5490 *
5491 * only first HTTP/1 protocol is preserved
5492 */
5493 const struct ist protocol = iststop(list[hdr].v, ',');
5494 /* upgrade_protocol field is 16 bytes long in h2s */
5495 istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
5496 }
5497
5498 if (isteq(list[hdr].n, ist("host")))
5499 host = list[hdr].v;
5500
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005501 hdr++;
5502 }
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005503 else if (type == HTX_BLK_REQ_SL) {
5504 BUG_ON(sl); /* Only one start-line expected */
5505 sl = htx_get_blk_ptr(htx, blk);
5506 meth = htx_sl_req_meth(sl);
5507 uri = htx_sl_req_uri(sl);
5508 if (sl->info.req.meth == HTTP_METH_HEAD)
5509 h2s->flags |= H2_SF_BODYLESS_RESP;
5510 if (unlikely(uri.len == 0)) {
5511 TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5512 goto fail;
5513 }
5514 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005515 else {
5516 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5517 goto fail;
5518 }
Willy Tarreau80739692018-10-05 11:35:57 +02005519 }
5520
Christopher Faulet56498132021-01-29 11:39:43 +01005521 /* The start-line me be defined */
5522 BUG_ON(!sl);
5523
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005524 /* Now add the server name to a header (if requested) */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005525 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005526 struct server *srv = objt_server(h2c->conn->target);
5527
5528 if (srv) {
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005529 list[hdr].n = h2c->proxy->server_id_hdr_name;
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005530 list[hdr].v = ist(srv->id);
5531 hdr++;
5532 }
5533 }
5534
Willy Tarreau80739692018-10-05 11:35:57 +02005535 /* marker for end of headers */
5536 list[hdr].n = ist("");
5537
Willy Tarreau9c218e72019-05-26 10:08:28 +02005538 mbuf = br_tail(h2c->mbuf);
5539 retry:
5540 if (!h2_get_buf(h2c, mbuf)) {
5541 h2c->flags |= H2_CF_MUX_MALLOC;
5542 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005543 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005544 return 0;
5545 }
5546
Willy Tarreau80739692018-10-05 11:35:57 +02005547 chunk_reset(&outbuf);
5548
5549 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005550 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5551 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005552 break;
5553 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005554 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau80739692018-10-05 11:35:57 +02005555 }
5556
5557 if (outbuf.size < 9)
5558 goto full;
5559
5560 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5561 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5562 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5563 outbuf.data = 9;
5564
5565 /* encode the method, which necessarily is the first one */
Willy Tarreaubdabc3a2018-12-10 18:25:11 +01005566 if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005567 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005568 goto realign_again;
5569 goto full;
5570 }
5571
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005572 auth = ist(NULL);
5573
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005574 /* RFC7540 #8.3: the CONNECT method must have :
5575 * - :authority set to the URI part (host:port)
5576 * - :method set to CONNECT
5577 * - :scheme and :path omitted
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005578 *
5579 * Note that this is not applicable in case of the Extended CONNECT
5580 * protocol from rfc 8441.
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005581 */
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005582 if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005583 auth = uri;
5584
5585 if (!hpack_encode_header(&outbuf, ist(":authority"), auth)) {
5586 /* output full */
5587 if (b_space_wraps(mbuf))
5588 goto realign_again;
5589 goto full;
5590 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005591 h2s->flags |= H2_SF_BODY_TUNNEL;
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005592 } else {
5593 /* other methods need a :scheme. If an authority is known from
5594 * the request line, it must be sent, otherwise only host is
5595 * sent. Host is never sent as the authority.
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005596 *
5597 * This code is also applicable for Extended CONNECT protocol
5598 * from rfc 8441.
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005599 */
5600 struct ist scheme = { };
Christopher Faulet3b44c542019-06-14 10:46:51 +02005601
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005602 if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
5603 /* the URI seems to start with a scheme */
5604 int len = 1;
5605
5606 while (len < uri.len && uri.ptr[len] != ':')
5607 len++;
5608
5609 if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
5610 /* make the uri start at the authority now */
Tim Duesterhus9f75ed12021-03-02 18:57:26 +01005611 scheme = ist2(uri.ptr, len);
Tim Duesterhus154374c2021-03-02 18:57:27 +01005612 uri = istadv(uri, len + 3);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005613
5614 /* find the auth part of the URI */
Tim Duesterhus92c696e2021-02-28 16:11:36 +01005615 auth = ist2(uri.ptr, 0);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005616 while (auth.len < uri.len && auth.ptr[auth.len] != '/')
5617 auth.len++;
5618
Tim Duesterhus154374c2021-03-02 18:57:27 +01005619 uri = istadv(uri, auth.len);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005620 }
5621 }
5622
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005623 /* For Extended CONNECT, the :authority must be present.
5624 * Use host value for it.
5625 */
5626 if (unlikely(extended_connect) && isttest(host))
5627 auth = host;
5628
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005629 if (!scheme.len) {
5630 /* no explicit scheme, we're using an origin-form URI,
5631 * probably from an H1 request transcoded to H2 via an
5632 * external layer, then received as H2 without authority.
5633 * So we have to look up the scheme from the HTX flags.
5634 * In such a case only http and https are possible, and
5635 * https is the default (sent by browsers).
5636 */
5637 if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
5638 scheme = ist("http");
5639 else
5640 scheme = ist("https");
5641 }
Christopher Faulet3b44c542019-06-14 10:46:51 +02005642
5643 if (!hpack_encode_scheme(&outbuf, scheme)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005644 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005645 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005646 goto realign_again;
5647 goto full;
5648 }
Willy Tarreau80739692018-10-05 11:35:57 +02005649
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005650 if (auth.len && !hpack_encode_header(&outbuf, ist(":authority"), auth)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005651 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005652 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005653 goto realign_again;
5654 goto full;
5655 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005656
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005657 /* encode the path. RFC7540#8.1.2.3: if path is empty it must
5658 * be sent as '/' or '*'.
5659 */
5660 if (unlikely(!uri.len)) {
5661 if (sl->info.req.meth == HTTP_METH_OPTIONS)
5662 uri = ist("*");
5663 else
5664 uri = ist("/");
Willy Tarreau053c1572019-02-01 16:13:59 +01005665 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005666
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005667 if (!hpack_encode_path(&outbuf, uri)) {
5668 /* output full */
5669 if (b_space_wraps(mbuf))
5670 goto realign_again;
5671 goto full;
5672 }
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005673
5674 /* encode the pseudo-header protocol from rfc8441 if using
5675 * Extended CONNECT method.
5676 */
5677 if (unlikely(extended_connect)) {
5678 const struct ist protocol = ist(h2s->upgrade_protocol);
5679 if (isttest(protocol)) {
5680 if (!hpack_encode_header(&outbuf,
5681 ist(":protocol"),
5682 protocol)) {
5683 /* output full */
5684 if (b_space_wraps(mbuf))
5685 goto realign_again;
5686 goto full;
5687 }
5688 }
5689 }
Willy Tarreau80739692018-10-05 11:35:57 +02005690 }
5691
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005692 /* encode all headers, stop at empty name. Host is only sent if we
5693 * do not provide an authority.
5694 */
Willy Tarreau80739692018-10-05 11:35:57 +02005695 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005696 struct ist n = list[hdr].n;
5697 struct ist v = list[hdr].v;
5698
Willy Tarreau80739692018-10-05 11:35:57 +02005699 /* these ones do not exist in H2 and must be dropped. */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005700 if (isteq(n, ist("connection")) ||
5701 (auth.len && isteq(n, ist("host"))) ||
5702 isteq(n, ist("proxy-connection")) ||
5703 isteq(n, ist("keep-alive")) ||
5704 isteq(n, ist("upgrade")) ||
5705 isteq(n, ist("transfer-encoding")))
Willy Tarreau80739692018-10-05 11:35:57 +02005706 continue;
5707
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005708 if (isteq(n, ist("te"))) {
5709 /* "te" may only be sent with "trailers" if this value
5710 * is present, otherwise it must be deleted.
5711 */
5712 v = istist(v, ist("trailers"));
Tim Duesterhus7b5777d2021-03-02 18:57:28 +01005713 if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005714 continue;
5715 v = ist("trailers");
5716 }
5717
Christopher Faulet86d144c2019-08-14 16:32:25 +02005718 /* Skip all pseudo-headers */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005719 if (*(n.ptr) == ':')
Christopher Faulet86d144c2019-08-14 16:32:25 +02005720 continue;
5721
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005722 if (isteq(n, ist("")))
Willy Tarreau80739692018-10-05 11:35:57 +02005723 break; // end
5724
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005725 if (!hpack_encode_header(&outbuf, n, v)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005726 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005727 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005728 goto realign_again;
5729 goto full;
5730 }
5731 }
5732
Willy Tarreaucb985a42019-10-07 16:56:34 +02005733 /* update the frame's size */
5734 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5735
5736 if (outbuf.data > h2c->mfs + 9) {
5737 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5738 /* output full */
5739 if (b_space_wraps(mbuf))
5740 goto realign_again;
5741 goto full;
5742 }
5743 }
5744
Willy Tarreau3a537072021-06-17 08:40:04 +02005745 TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5746
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005747 /* remove all header blocks including the EOH and compute the
5748 * corresponding size.
Willy Tarreau80739692018-10-05 11:35:57 +02005749 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005750 ret = 0;
5751 blk = htx_get_head_blk(htx);
5752 while (blk) {
5753 type = htx_get_blk_type(blk);
5754 ret += htx_get_blksz(blk);
5755 blk = htx_remove_blk(htx, blk);
5756 /* The removed block is the EOH */
5757 if (type == HTX_BLK_EOH)
5758 break;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005759 }
Willy Tarreau80739692018-10-05 11:35:57 +02005760
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02005761 if (!h2s->endp->cs || h2s->endp->flags & CS_EP_SHW) {
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005762 /* Request already closed: add END_STREAM */
Willy Tarreau80739692018-10-05 11:35:57 +02005763 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005764 }
5765 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
5766 /* EOM+empty: we may need to add END_STREAM (except for CONNECT
5767 * request)
5768 */
5769 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5770 es_now = 1;
5771 }
Willy Tarreau80739692018-10-05 11:35:57 +02005772
Willy Tarreau80739692018-10-05 11:35:57 +02005773 if (es_now)
5774 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5775
5776 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005777 b_add(mbuf, outbuf.data);
Willy Tarreau80739692018-10-05 11:35:57 +02005778 h2s->flags |= H2_SF_HEADERS_SENT;
5779 h2s->st = H2_SS_OPEN;
5780
Willy Tarreau80739692018-10-05 11:35:57 +02005781 if (es_now) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005782 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02005783 // trim any possibly pending data (eg: inconsistent content-length)
5784 h2s->flags |= H2_SF_ES_SENT;
5785 h2s->st = H2_SS_HLOC;
5786 }
5787
Willy Tarreau80739692018-10-05 11:35:57 +02005788 end:
5789 return ret;
5790 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005791 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5792 goto retry;
Willy Tarreau80739692018-10-05 11:35:57 +02005793 h2c->flags |= H2_CF_MUX_MFULL;
5794 h2s->flags |= H2_SF_BLK_MROOM;
5795 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005796 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005797 goto end;
5798 fail:
5799 /* unparsable HTX messages, too large ones to be produced in the local
5800 * list etc go here (unrecoverable errors).
5801 */
5802 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5803 ret = 0;
5804 goto end;
5805}
5806
/* Try to send a DATA frame matching the HTTP message payload present in the
 * HTX structure found in <buf>, for stream <h2s>, limited to <count> bytes.
 * Despite the historical wording below it is used on both sides: the
 * H2_CF_IS_BACK checks show it also handles request payloads sent to a
 * server. The caller must check the stream's status to detect any error
 * which might have happened subsequently to a successful send. Returns the
 * number of data bytes consumed, or zero if nothing done.
 */
static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	struct buffer outbuf;
	struct buffer *mbuf;
	size_t total = 0;
	int es_now = 0;
	int bsize; /* htx block size */
	int fsize; /* h2 frame size  */
	struct htx_blk *blk;
	enum htx_blk_type type;
	int trunc_out; /* non-zero if truncated on out buf */

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	/* the mux is already busy emitting another frame: block and retry later */
	if (h2c_mux_busy(h2c, h2s)) {
		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	htx = htx_from_buf(buf);

	/* We only come here with HTX_BLK_DATA blocks */

 new_frame:
	if (!count || htx_is_empty(htx))
		goto end;

	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
		/* The response HEADERS frame was not received yet. Thus the
		 * tunnel is not fully established yet. In this situation, we
		 * block data sending.
		 */
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}
	else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
		/* a tunnel attempt was aborted but there is pending raw data to xfer to the server.
		 * Thus the stream is closed with the CANCEL error. The error will be reported to
		 * the upper layer as a server abort. But at this stage there is nothing more we can
		 * do. We just wait for the end of the response to be sure to not truncate it.
		 */
		if (!(h2s->flags & H2_SF_ES_RCVD)) {
			TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			h2s->flags |= H2_SF_BLK_MBUSY;
		}
		else {
			TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		goto end;
	}

	/* examine the first HTX block; only DATA is handled here */
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	trunc_out = 0;
	if (type != HTX_BLK_DATA)
		goto end;

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto end;
	}

	/* Perform some optimizations to reduce the number of buffer copies.
	 * First, if the mux's buffer is empty and the htx area contains
	 * exactly one data block of the same size as the requested count, and
	 * this count fits within the frame size, the stream's window size, and
	 * the connection's window size, then it's possible to simply swap the
	 * caller's buffer with the mux's output buffer and adjust offsets and
	 * length to match the entire DATA HTX block in the middle. In this
	 * case we perform a true zero-copy operation from end-to-end. This is
	 * the situation that happens all the time with large files. Second, if
	 * this is not possible, but the mux's output buffer is empty, we still
	 * have an opportunity to avoid the copy to the intermediary buffer, by
	 * making the intermediary buffer's area point to the output buffer's
	 * area. In this case we want to skip the HTX header to make sure that
	 * copies remain aligned and that this operation remains possible all
	 * the time. This goes for headers, data blocks and any data extracted
	 * from the HTX blocks.
	 */
	if (unlikely(fsize == count &&
		     htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
		     fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
		void *old_area = mbuf->area;

		if (b_data(mbuf)) {
			/* Too bad there are data left there. We're willing to memcpy/memmove
			 * up to 1/4 of the buffer, which means that it's OK to copy a large
			 * frame into a buffer containing few data if it needs to be realigned,
			 * and that it's also OK to copy few data without realigning. Otherwise
			 * we'll pretend the mbuf is full and wait for it to become empty.
			 */
			if (fsize + 9 <= b_room(mbuf) &&
			    (b_data(mbuf) <= b_size(mbuf) / 4 ||
			     (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
				TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
				goto copy;
			}

			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;

			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}

		if (htx->flags & HTX_FL_EOM) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
		/* map an H2 frame to the HTX block so that we can put the
		 * frame header there.
		 */
		*mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
		outbuf.area    = b_head(mbuf);

		/* prepend an H2 DATA frame header just before the DATA block */
		memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
		write_n32(outbuf.area + 5, h2s->id); // 4 bytes
		if (es_now)
			outbuf.area[4] |= H2_F_DATA_END_STREAM;
		h2_set_frame_size(outbuf.area, fsize);

		/* update windows */
		h2s->sws -= fsize;
		h2c->mws -= fsize;

		/* and exchange with our old area */
		buf->area = old_area;
		buf->data = buf->head = 0;
		total += fsize;
		fsize = 0; /* everything consumed, prevents a new_frame loop below */

		TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto out;
	}

 copy:
	/* for DATA and EOM we'll have to emit a frame, even if empty */

	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	/* need at least 9 bytes for the frame header; try the next ring buffer */
	if (outbuf.size < 9) {
		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
	memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	/* we have in <fsize> the exact number of bytes we need to copy from
	 * the HTX buffer. We need to check this against the connection's and
	 * the stream's send windows, and to ensure that this fits in the max
	 * frame size and in the buffer's available space minus 9 bytes (for
	 * the frame header). The connection's flow control is applied last so
	 * that we can use a separate list of streams which are immediately
	 * unblocked on window opening. Note: we don't implement padding.
	 */

	if (!fsize)
		goto send_empty;

	/* stream send window exhausted: park the stream on the blocked list */
	if (h2s_mws(h2s) <= 0) {
		h2s->flags |= H2_SF_BLK_SFCTL;
		if (LIST_INLIST(&h2s->list))
			LIST_DEL_INIT(&h2s->list);
		LIST_APPEND(&h2c->blocked_list, &h2s->list);
		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > count)
		fsize = count;

	if (fsize > h2s_mws(h2s))
		fsize = h2s_mws(h2s); // >0

	if (h2c->mfs && fsize > h2c->mfs)
		fsize = h2c->mfs; // >0

	if (fsize + 9 > outbuf.size) {
		/* It doesn't fit at once. If it at least fits once split and
		 * the amount of data to move is low, let's defragment the
		 * buffer now.
		 */
		if (b_space_wraps(mbuf) &&
		    (fsize + 9 <= b_room(mbuf)) &&
		    b_data(mbuf) <= MAX_DATA_REALIGN)
			goto realign_again;
		fsize = outbuf.size - 9;
		trunc_out = 1;

		if (fsize <= 0) {
			/* no need to send an empty frame here */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}
	}

	/* connection send window exhausted (checked last, see comment above) */
	if (h2c->mws <= 0) {
		h2s->flags |= H2_SF_BLK_MFCTL;
		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > h2c->mws)
		fsize = h2c->mws;

	/* now let's copy this into the output buffer */
	memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
	h2s->sws -= fsize;
	h2c->mws -= fsize;
	count    -= fsize;

 send_empty:
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, fsize);

	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		htx_remove_blk(htx, blk);
		if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

	if (es_now)
		outbuf.area[4] |= H2_F_DATA_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, fsize + 9);

 out:
	if (es_now) {
		/* ES sent: the stream becomes half-closed (local) or fully closed */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);

		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	}
	else if (fsize) {
		if (fsize == bsize) {
			TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto new_frame;
		}
		else if (trunc_out) {
			/* we've truncated this block */
			goto new_frame;
		}
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
6112
/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
 * ES flag set for stream <h2s>. This function is called for responses known to
 * have no payload. Only DATA blocks are skipped. This means the trailers are
 * still emitted. The caller must check the stream's status to detect any error
 * which might have happened subsequently to a successful send. Returns the
 * number of data bytes consumed, or zero if nothing done.
 */
static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	int bsize; /* htx block size */
	int fsize; /* h2 frame size  */
	struct htx_blk *blk;
	enum htx_blk_type type;
	size_t total = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	/* the mux is already busy emitting another frame: block and retry later */
	if (h2c_mux_busy(h2c, h2s)) {
		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	htx = htx_from_buf(buf);

 next_data:
	if (!count || htx_is_empty(htx))
		goto end;
	/* only consume DATA blocks; anything else (e.g. trailers) stops us here */
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	if (type != HTX_BLK_DATA)
		goto end;

	if (fsize > count)
		fsize = count;

	/* partially consumed block: just drop the part we may take */
	if (fsize != bsize)
		goto skip_data;

	/* not yet the end of the message, or other blocks remain: keep skipping */
	if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
		goto skip_data;

	/* Here, it is the last block and it is also the end of the message. So
	 * we can emit an empty DATA frame with the ES flag set
	 */
	if (h2_send_empty_data_es(h2s) <= 0)
		goto end;

	/* ES sent: the stream becomes half-closed (local) or fully closed */
	if (h2s->st == H2_SS_OPEN)
		h2s->st = H2_SS_HLOC;
	else
		h2s_close(h2s);

 skip_data:
	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		htx_remove_blk(htx, blk);
		goto next_data;
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
6188
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006189/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
6190 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
6191 * processed. The caller must check the stream's status to detect any error
6192 * which might have happened subsequently to a successful send. The htx blocks
6193 * are automatically removed from the message. The htx message is assumed to be
6194 * valid since produced from the internal code. Processing stops when meeting
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006195 * the EOT, which *is* removed. All trailers are processed at once and sent as a
6196 * single frame. The ES flag is always set.
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006197 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006198static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006199{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02006200 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006201 struct h2c *h2c = h2s->h2c;
6202 struct htx_blk *blk;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006203 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02006204 struct buffer *mbuf;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006205 enum htx_blk_type type;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006206 int ret = 0;
6207 int hdr;
6208 int idx;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006209
Willy Tarreau7838a792019-08-12 18:42:03 +02006210 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
6211
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006212 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006213 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006214 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02006215 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006216 goto end;
6217 }
6218
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006219 /* get trailers. */
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006220 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006221 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006222 type = htx_get_blk_type(blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006223
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006224 if (type == HTX_BLK_UNUSED)
6225 continue;
6226
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006227 if (type == HTX_BLK_EOT)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006228 break;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006229 if (type == HTX_BLK_TLR) {
6230 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
6231 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
6232 goto fail;
6233 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006234
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006235 list[hdr].n = htx_get_blk_name(htx, blk);
6236 list[hdr].v = htx_get_blk_value(htx, blk);
6237 hdr++;
6238 }
6239 else {
6240 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006241 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02006242 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006243 }
6244
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006245 /* marker for end of trailers */
6246 list[hdr].n = ist("");
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006247
Willy Tarreau9c218e72019-05-26 10:08:28 +02006248 mbuf = br_tail(h2c->mbuf);
6249 retry:
6250 if (!h2_get_buf(h2c, mbuf)) {
6251 h2c->flags |= H2_CF_MUX_MALLOC;
6252 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006253 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02006254 goto end;
6255 }
6256
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006257 chunk_reset(&outbuf);
6258
6259 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02006260 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
6261 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006262 break;
6263 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02006264 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006265 }
6266
6267 if (outbuf.size < 9)
6268 goto full;
6269
6270 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
6271 memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
6272 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
6273 outbuf.data = 9;
6274
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006275 /* encode all headers */
6276 for (idx = 0; idx < hdr; idx++) {
6277 /* these ones do not exist in H2 or must not appear in
6278 * trailers and must be dropped.
6279 */
6280 if (isteq(list[idx].n, ist("host")) ||
6281 isteq(list[idx].n, ist("content-length")) ||
6282 isteq(list[idx].n, ist("connection")) ||
6283 isteq(list[idx].n, ist("proxy-connection")) ||
6284 isteq(list[idx].n, ist("keep-alive")) ||
6285 isteq(list[idx].n, ist("upgrade")) ||
6286 isteq(list[idx].n, ist("te")) ||
6287 isteq(list[idx].n, ist("transfer-encoding")))
6288 continue;
6289
Christopher Faulet86d144c2019-08-14 16:32:25 +02006290 /* Skip all pseudo-headers */
6291 if (*(list[idx].n.ptr) == ':')
6292 continue;
6293
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006294 if (!hpack_encode_header(&outbuf, list[idx].n, list[idx].v)) {
6295 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006296 if (b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006297 goto realign_again;
6298 goto full;
6299 }
6300 }
6301
Willy Tarreau5121e5d2019-05-06 15:13:41 +02006302 if (outbuf.data == 9) {
6303 /* here we have a problem, we have nothing to emit (either we
6304 * received an empty trailers block followed or we removed its
6305 * contents above). Because of this we can't send a HEADERS
6306 * frame, so we have to cheat and instead send an empty DATA
6307 * frame conveying the ES flag.
Willy Tarreau67b8cae2019-02-21 18:16:35 +01006308 */
6309 outbuf.area[3] = H2_FT_DATA;
6310 outbuf.area[4] = H2_F_DATA_END_STREAM;
6311 }
6312
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006313 /* update the frame's size */
6314 h2_set_frame_size(outbuf.area, outbuf.data - 9);
6315
Willy Tarreau572d9f52019-10-11 16:58:37 +02006316 if (outbuf.data > h2c->mfs + 9) {
6317 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
6318 /* output full */
6319 if (b_space_wraps(mbuf))
6320 goto realign_again;
6321 goto full;
6322 }
6323 }
6324
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006325 /* commit the H2 response */
Willy Tarreau7838a792019-08-12 18:42:03 +02006326 TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006327 b_add(mbuf, outbuf.data);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006328 h2s->flags |= H2_SF_ES_SENT;
6329
6330 if (h2s->st == H2_SS_OPEN)
6331 h2s->st = H2_SS_HLOC;
6332 else
6333 h2s_close(h2s);
6334
6335 /* OK we could properly deliver the response */
6336 done:
Willy Tarreaufb07b3f2019-05-06 11:23:29 +02006337 /* remove all header blocks till the end and compute the corresponding size. */
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006338 ret = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006339 blk = htx_get_head_blk(htx);
6340 while (blk) {
6341 type = htx_get_blk_type(blk);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006342 ret += htx_get_blksz(blk);
6343 blk = htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006344 /* The removed block is the EOT */
6345 if (type == HTX_BLK_EOT)
6346 break;
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006347 }
6348
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006349 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006350 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006351 return ret;
6352 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02006353 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
6354 goto retry;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006355 h2c->flags |= H2_CF_MUX_MFULL;
6356 h2s->flags |= H2_SF_BLK_MROOM;
6357 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006358 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006359 goto end;
6360 fail:
6361 /* unparsable HTX messages, too large ones to be produced in the local
6362 * list etc go here (unrecoverable errors).
6363 */
6364 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
6365 ret = 0;
6366 goto end;
6367}
6368
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006369/* Called from the upper layer, to subscribe <es> to events <event_type>. The
6370 * event subscriber <es> is not allowed to change from a previous call as long
6371 * as at least one event is still subscribed. The <event_type> must only be a
6372 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006373 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006374static int h2_subscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006375{
Christopher Fauletdb90f2a2022-03-22 16:06:25 +01006376 struct h2s *h2s = __cs_mux(cs);
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006377 struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006378
Willy Tarreau7838a792019-08-12 18:42:03 +02006379 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006380
6381 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006382 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006383
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006384 es->events |= event_type;
6385 h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006386
6387 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006388 TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006389
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006390 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006391 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
Olivier Houchardf8338152019-05-14 17:50:32 +02006392 if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006393 !LIST_INLIST(&h2s->list)) {
Olivier Houchardf8338152019-05-14 17:50:32 +02006394 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02006395 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Olivier Houchardf8338152019-05-14 17:50:32 +02006396 else
Willy Tarreau2b718102021-04-21 07:32:39 +02006397 LIST_APPEND(&h2c->send_list, &h2s->list);
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006398 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02006399 }
Willy Tarreau7838a792019-08-12 18:42:03 +02006400 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006401 return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006402}
6403
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006404/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6405 * The <es> pointer is not allowed to differ from the one passed to the
6406 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006407 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006408static int h2_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006409{
Christopher Fauletdb90f2a2022-03-22 16:06:25 +01006410 struct h2s *h2s = __cs_mux(cs);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006411
Willy Tarreau7838a792019-08-12 18:42:03 +02006412 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006413
6414 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006415 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006416
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006417 es->events &= ~event_type;
6418 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006419 h2s->subs = NULL;
6420
6421 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006422 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006423
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006424 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006425 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006426 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006427 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
6428 LIST_DEL_INIT(&h2s->list);
Olivier Houchardd846c262018-10-19 17:24:29 +02006429 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006430
Willy Tarreau7838a792019-08-12 18:42:03 +02006431 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006432 return 0;
6433}
6434
6435
/* Called from the upper layer, to receive data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 *
 * Returns the number of bytes effectively moved from the stream's rxbuf to
 * <buf>, and updates the endpoint flags (EOI/EOS/ERROR/RCV_MORE) accordingly.
 */
static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __cs_mux(cs);
	struct h2c *h2c = h2s->h2c;
	struct htx *h2s_htx = NULL;
	struct htx *buf_htx = NULL;
	size_t ret = 0;

	TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);

	/* transfer possibly pending data to the upper layer */
	h2s_htx = htx_from_buf(&h2s->rxbuf);
	if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
		/* Here htx_to_buf() will set buffer data to 0 because
		 * the HTX is empty.
		 */
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		goto end;
	}

	/* <ret> starts at the available payload; what remains in rxbuf after
	 * the transfer is subtracted below so it ends up as the moved amount.
	 */
	ret = h2s_htx->data;
	buf_htx = htx_from_buf(buf);

	/* <buf> is empty and the message is small enough, swap the
	 * buffers. */
	if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
		htx_to_buf(buf_htx, buf);
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
		goto end;
	}

	/* partial transfer: move up to <count> bytes of blocks */
	htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);

	if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
		/* propagate the parsing error; only mark end-of-input once
		 * everything readable was consumed.
		 */
		buf_htx->flags |= HTX_FL_PARSING_ERROR;
		if (htx_is_empty(buf_htx))
			h2s->endp->flags |= CS_EP_EOI;
	}
	else if (htx_is_empty(h2s_htx))
		/* rxbuf fully drained: forward the end-of-message flag */
		buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);

	buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
	htx_to_buf(buf_htx, buf);
	htx_to_buf(h2s_htx, &h2s->rxbuf);
	ret -= h2s_htx->data;

  end:
	if (b_data(&h2s->rxbuf))
		/* data left: caller must come back, and may need more room */
		h2s->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
	else {
		h2s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
		if (h2s->flags & H2_SF_ES_RCVD) {
			h2s->endp->flags |= CS_EP_EOI;
			/* Add EOS flag for tunnel */
			if (h2s->flags & H2_SF_BODY_TUNNEL)
				h2s->endp->flags |= CS_EP_EOS;
		}
		if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
			h2s->endp->flags |= CS_EP_EOS;
		if (h2s->endp->flags & CS_EP_ERR_PENDING)
			h2s->endp->flags |= CS_EP_ERROR;
		/* rxbuf is empty: release it and offer it back to the pool */
		if (b_size(&h2s->rxbuf)) {
			b_free(&h2s->rxbuf);
			offer_buffers(NULL, 1);
		}
	}

	if (ret && h2c->dsi == h2s->id) {
		/* demux is blocking on this stream's buffer */
		h2c->flags &= ~H2_CF_DEM_SFULL;
		h2c_restart_reading(h2c, 1);
	}

	TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
	return ret;
}
6525
Olivier Houchardd846c262018-10-19 17:24:29 +02006526
/* Called from the upper layer, to send data from buffer <buf> for no more than
 * <count> bytes. Returns the number of bytes effectively sent. Some status
 * flags may be updated on the conn_stream.
 *
 * The HTX message in <buf> is consumed block by block: start lines become
 * HEADERS frames, DATA blocks become DATA frames, trailers become a trailing
 * HEADERS frame. A partial emission (ret < bsize) stops the loop.
 */
static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = __cs_mux(cs);
	size_t total = 0;
	size_t ret;
	struct htx *htx;
	struct htx_blk *blk;
	enum htx_blk_type btype;
	uint32_t bsize;
	int32_t idx;

	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);

	/* If we were not just woken because we wanted to send but couldn't,
	 * and there's somebody else that is waiting to send, do nothing,
	 * we will subscribe later and be put at the end of the list
	 */
	if (!(h2s->flags & H2_SF_NOTIFIED) &&
	    (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
		TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}
	/* the wakeup notification was consumed, clear it */
	h2s->flags &= ~H2_SF_NOTIFIED;

	/* connection not yet past the preface/settings exchange */
	if (h2s->h2c->st0 < H2_CS_FRAME_H) {
		TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}

	if (h2s->h2c->st0 >= H2_CS_ERROR) {
		h2s->endp->flags |= CS_EP_ERROR;
		TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		return 0;
	}

	htx = htx_from_buf(buf);

	if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
		h2s->flags |= H2_SF_OUTGOING_DATA;

	/* id 0 means the stream was reserved but not yet assigned a real
	 * stream ID (outgoing/backend side): allocate one now and index the
	 * stream by it.
	 */
	if (h2s->id == 0) {
		int32_t id = h2c_get_next_sid(h2s->h2c);

		if (id < 0) {
			h2s->endp->flags |= CS_EP_ERROR;
			TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
			return 0;
		}

		eb32_delete(&h2s->by_id);
		h2s->by_id.key = h2s->id = id;
		h2s->h2c->max_id = id;
		h2s->h2c->nb_reserved--;
		eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
	}

	/* consume HTX blocks while the stream may still send and nothing
	 * blocks it (flow control, mux buffer room, ...).
	 */
	while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
	       count && !htx_is_empty(htx)) {
		idx = htx_get_head(htx);
		blk = htx_get_blk(htx, idx);
		btype = htx_get_blk_type(blk);
		bsize = htx_get_blksz(blk);

		switch (btype) {
		case HTX_BLK_REQ_SL:
			/* start-line before headers */
			ret = h2s_bck_make_req_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_RES_SL:
			/* start-line before headers */
			ret = h2s_frt_make_resp_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_DATA:
			/* all these cause the emission of a DATA frame (possibly empty).
			 * A bodyless response (and not a tunnel) has its payload
			 * silently skipped instead of being emitted.
			 */
			if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
			    (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
				ret = h2s_skip_data(h2s, buf, count);
			else
				ret = h2s_make_data(h2s, buf, count);
			if (ret > 0) {
				/* h2s_make_data() may realign <buf>, re-fetch the HTX */
				htx = htx_from_buf(buf);
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_TLR:
		case HTX_BLK_EOT:
			/* This is the first trailers block, all the subsequent ones
			 * (up to and including the EOT) are consumed at once by
			 * h2s_make_trailers().
			 */
			ret = h2s_make_trailers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		default:
			/* unknown/unused block types are simply dropped */
			htx_remove_blk(htx, blk);
			total += bsize;
			count -= bsize;
			break;
		}
	}

  done:
	if (h2s->st >= H2_SS_HLOC) {
		/* trim any possibly pending data after we close (extra CR-LF,
		 * unprocessed trailers, abnormal extra data, ...)
		 */
		total += count;
		count = 0;
	}

	/* RST are sent similarly to frame acks */
	if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
		TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		cs_ep_set_error(h2s->endp);
		if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
			h2s_close(h2s);
	}

	htx_to_buf(htx, buf);

	if (total > 0) {
		/* data was queued in the mux buffer: wake the connection-level
		 * sender unless it is already waiting on a send event.
		 */
		if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
			TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
			tasklet_wakeup(h2s->h2c->wait_event.tasklet);
		}

	}
	/* If we're waiting for flow control, and we got a shutr on the
	 * connection, we will never be unlocked, so add an error on
	 * the conn_stream.
	 */
	if (conn_xprt_read0_pending(h2s->h2c->conn) &&
	    !b_data(&h2s->h2c->dbuf) &&
	    (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
		TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		if (h2s->endp->flags & CS_EP_EOS)
			h2s->endp->flags |= CS_EP_ERROR;
		else
			h2s->endp->flags |= CS_EP_ERR_PENDING;
	}

	if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
	    !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* Ok we managed to send something, leave the send_list if we were still there */
		LIST_DEL_INIT(&h2s->list);
	}

	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
	return total;
}
6702
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006703/* for debugging with CLI's "show fd" command */
Willy Tarreau8050efe2021-01-21 08:26:06 +01006704static int h2_show_fd(struct buffer *msg, struct connection *conn)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006705{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01006706 struct h2c *h2c = conn->ctx;
Willy Tarreau987c0632018-12-18 10:32:05 +01006707 struct h2s *h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006708 struct eb32_node *node;
6709 int fctl_cnt = 0;
6710 int send_cnt = 0;
6711 int tree_cnt = 0;
6712 int orph_cnt = 0;
Willy Tarreau60f62682019-05-26 11:32:27 +02006713 struct buffer *hmbuf, *tmbuf;
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006714 int ret = 0;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006715
6716 if (!h2c)
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006717 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006718
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006719 list_for_each_entry(h2s, &h2c->fctl_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006720 fctl_cnt++;
6721
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006722 list_for_each_entry(h2s, &h2c->send_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006723 send_cnt++;
6724
Willy Tarreau3af37712018-12-18 14:34:41 +01006725 h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006726 node = eb32_first(&h2c->streams_by_id);
6727 while (node) {
6728 h2s = container_of(node, struct h2s, by_id);
6729 tree_cnt++;
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02006730 if (!h2s->endp->cs)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006731 orph_cnt++;
6732 node = eb32_next(node);
6733 }
6734
Willy Tarreau60f62682019-05-26 11:32:27 +02006735 hmbuf = br_head(h2c->mbuf);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006736 tmbuf = br_tail(h2c->mbuf);
Willy Tarreauab2ec452019-08-30 07:07:08 +02006737 chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
Willy Tarreau987c0632018-12-18 10:32:05 +01006738 " .nbst=%u .nbcs=%u .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
Willy Tarreau60f62682019-05-26 11:32:27 +02006739 " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u .msi=%d"
6740 " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006741 h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
Willy Tarreau616ac812018-07-24 14:12:42 +02006742 h2c->nb_streams, h2c->nb_cs, fctl_cnt, send_cnt, tree_cnt, orph_cnt,
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006743 h2c->wait_event.events, h2c->dsi,
Willy Tarreau987c0632018-12-18 10:32:05 +01006744 (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
6745 (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf),
6746 h2c->msi,
Willy Tarreau60f62682019-05-26 11:32:27 +02006747 br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
6748 (unsigned int)b_data(hmbuf), b_orig(hmbuf),
6749 (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
Willy Tarreaubcc45952019-05-26 10:05:50 +02006750 (unsigned int)b_data(tmbuf), b_orig(tmbuf),
6751 (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
Willy Tarreau987c0632018-12-18 10:32:05 +01006752
6753 if (h2s) {
Willy Tarreaued4464e2021-01-20 15:50:03 +01006754 chunk_appendf(msg, " last_h2s=%p .id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u .cs=%p",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006755 h2s, h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
Willy Tarreau987c0632018-12-18 10:32:05 +01006756 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
6757 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf),
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02006758 h2s->endp->cs);
6759 if (h2s->endp->cs)
Christopher Fauletf835dea2021-12-21 14:35:17 +01006760 chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
Willy Tarreaucd6bb1a2022-05-10 15:00:03 +02006761 h2s->endp->cs->flags, h2s->endp->cs->app);
Willy Tarreau98e40b92021-01-20 16:27:01 +01006762
Christopher Faulet22050e02022-04-13 12:08:09 +02006763 chunk_appendf(msg, "endp=%p", h2s->endp);
6764 if (h2s->endp)
6765 chunk_appendf(msg, "(.flg=0x%08x)",
6766 h2s->endp->flags);
6767
Willy Tarreau98e40b92021-01-20 16:27:01 +01006768 chunk_appendf(&trash, " .subs=%p", h2s->subs);
6769 if (h2s->subs) {
Christopher Faulet6c93c4e2021-02-25 10:06:29 +01006770 chunk_appendf(&trash, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6771 chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
6772 h2s->subs->tasklet->calls,
6773 h2s->subs->tasklet->context);
6774 if (h2s->subs->tasklet->calls >= 1000000)
6775 ret = 1;
6776 resolve_sym_name(&trash, NULL, h2s->subs->tasklet->process);
6777 chunk_appendf(&trash, ")");
Willy Tarreau98e40b92021-01-20 16:27:01 +01006778 }
Willy Tarreau987c0632018-12-18 10:32:05 +01006779 }
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006780 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006781}
Willy Tarreau62f52692017-10-08 23:01:42 +02006782
/* Migrate the connection to the current thread.
 * Return 0 if successful, non-zero otherwise.
 * Expected to be called with the old thread lock held.
 */
static int h2_takeover(struct connection *conn, int orig_tid)
{
	struct h2c *h2c = conn->ctx;
	struct task *task;

	/* first transfer FD ownership; nothing else may be touched on failure */
	if (fd_takeover(conn->handle.fd, conn) != 0)
		return -1;

	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
		/* We failed to takeover the xprt, even if the connection may
		 * still be valid, flag it as error'd, as we have already
		 * taken over the fd, and wake the tasklet, so that it will
		 * destroy it.
		 */
		conn->flags |= CO_FL_ERROR;
		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
		return -1;
	}

	/* detach the old thread's subscription before retiring its tasklet */
	if (h2c->wait_event.events)
		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
		    h2c->wait_event.events, &h2c->wait_event);
	/* To let the tasklet know it should free itself, and do nothing else,
	 * set its context to NULL.
	 */
	h2c->wait_event.tasklet->context = NULL;
	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);

	/* same for the timeout task: neutralize the old one (the store barrier
	 * makes the NULL context visible before task_kill()), then recreate a
	 * fresh task bound to the current thread.
	 */
	task = h2c->task;
	if (task) {
		task->context = NULL;
		h2c->task = NULL;
		__ha_barrier_store();
		task_kill(task);

		h2c->task = task_new_here();
		if (!h2c->task) {
			h2_release(h2c);
			return -1;
		}
		h2c->task->process = h2_timeout_task;
		h2c->task->context = h2c;
	}
	/* recreate the I/O tasklet on the current thread and re-arm reading */
	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet) {
		h2_release(h2c);
		return -1;
	}
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
	                           SUB_RETRY_RECV, &h2c->wait_event);

	return 0;
}
6842
Willy Tarreau62f52692017-10-08 23:01:42 +02006843/*******************************************************/
6844/* functions below are dedicated to the config parsers */
6845/*******************************************************/
6846
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006847/* config parser for global "tune.h2.header-table-size" */
6848static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006849 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006850 char **err)
6851{
6852 if (too_many_args(1, args, err, NULL))
6853 return -1;
6854
6855 h2_settings_header_table_size = atoi(args[1]);
6856 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
6857 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
6858 return -1;
6859 }
6860 return 0;
6861}
Willy Tarreau62f52692017-10-08 23:01:42 +02006862
Willy Tarreaue6baec02017-07-27 11:45:11 +02006863/* config parser for global "tune.h2.initial-window-size" */
6864static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006865 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02006866 char **err)
6867{
6868 if (too_many_args(1, args, err, NULL))
6869 return -1;
6870
6871 h2_settings_initial_window_size = atoi(args[1]);
6872 if (h2_settings_initial_window_size < 0) {
6873 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6874 return -1;
6875 }
6876 return 0;
6877}
6878
Willy Tarreau5242ef82017-07-27 11:47:28 +02006879/* config parser for global "tune.h2.max-concurrent-streams" */
6880static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006881 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02006882 char **err)
6883{
6884 if (too_many_args(1, args, err, NULL))
6885 return -1;
6886
6887 h2_settings_max_concurrent_streams = atoi(args[1]);
Willy Tarreau5a490b62019-01-31 10:39:51 +01006888 if ((int)h2_settings_max_concurrent_streams < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02006889 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6890 return -1;
6891 }
6892 return 0;
6893}
6894
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006895/* config parser for global "tune.h2.max-frame-size" */
6896static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006897 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006898 char **err)
6899{
6900 if (too_many_args(1, args, err, NULL))
6901 return -1;
6902
6903 h2_settings_max_frame_size = atoi(args[1]);
6904 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
6905 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
6906 return -1;
6907 }
6908 return 0;
6909}
6910
Willy Tarreau62f52692017-10-08 23:01:42 +02006911
/****************************************/
/* MUX initialization and instantiation */
/****************************************/
6915
/* The mux operations: callback table through which the connection layer
 * drives the H2 multiplexer. Registered for the "h2" token via
 * mux_proto_h2 below.
 */
static const struct mux_ops h2_ops = {
	.init = h2_init,
	.wake = h2_wake,
	.snd_buf = h2_snd_buf,
	.rcv_buf = h2_rcv_buf,
	.subscribe = h2_subscribe,
	.unsubscribe = h2_unsubscribe,
	.attach = h2_attach,
	.get_first_cs = h2_get_first_cs,
	.detach = h2_detach,
	.destroy = h2_destroy,
	.avail_streams = h2_avail_streams,
	.used_streams = h2_used_streams,
	.shutr = h2_shutr,
	.shutw = h2_shutw,
	.ctl = h2_ctl,
	.show_fd = h2_show_fd,
	.takeover = h2_takeover,
	/* HTX-only mux; flag names suggest head-of-line blocking risk and no
	 * upgrade support — see the MX_FL_* definitions for exact semantics.
	 */
	.flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
	.name = "H2",
};
6938
/* Descriptor binding the "h2" ALPN/proto token to the h2_ops callback
 * table, for both frontend and backend sides, in HTTP mode.
 */
static struct mux_proto_list mux_proto_h2 =
	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };

/* register the mux protocol at init time */
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
6943
/* config keyword parsers: global "tune.h2.*" settings handled by the
 * parser functions above. The list is NULL-terminated.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.h2.header-table-size", h2_parse_header_table_size },
	{ CFG_GLOBAL, "tune.h2.initial-window-size", h2_parse_initial_window_size },
	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.max-frame-size", h2_parse_max_frame_size },
	{ 0, NULL, NULL }
}};

/* register the keywords at init time */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006954
6955/* initialize internal structs after the config is parsed.
6956 * Returns zero on success, non-zero on error.
6957 */
6958static int init_h2()
6959{
6960 pool_head_hpack_tbl = create_pool("hpack_tbl",
6961 h2_settings_header_table_size,
6962 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01006963 if (!pool_head_hpack_tbl) {
6964 ha_alert("failed to allocate hpack_tbl memory pool\n");
6965 return (ERR_ALERT | ERR_FATAL);
6966 }
6967 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006968}
6969
6970REGISTER_POST_CHECK(init_h2);