blob: a16d972c99d92bf1c9f9fd557b9bf204a1762b84 [file] [log] [blame]
/*
 * HTTP/2 mux-demux for connections
 *
 * Copyright 2017 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau63617db2021-10-06 18:23:40 +020014#include <import/ebmbtree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020015#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020017#include <haproxy/connection.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010018#include <haproxy/conn_stream.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020019#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020020#include <haproxy/hpack-dec.h>
21#include <haproxy/hpack-enc.h>
22#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020023#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020024#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020025#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020026#include <haproxy/log.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020027#include <haproxy/net_helper.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020028#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010029#include <haproxy/stats.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020030#include <haproxy/stream.h>
Willy Tarreau5e539c92020-06-04 20:45:39 +020031#include <haproxy/stream_interface.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020032#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020033
34
/* Dummy streams returned for closed, error, refused and idle states: used as
 * shared placeholders so that demux code always has a valid h2s to point to
 * without allocating one per dead stream.
 */
static const struct h2s *h2_closed_stream;
static const struct h2s *h2_error_stream;
static const struct h2s *h2_refused_stream;
static const struct h2s *h2_idle_stream;
/* Connection flags (32 bit), in h2c->flags */
#define H2_CF_NONE              0x00000000

/* Flags indicating why writing to the mux is blocked. */
#define H2_CF_MUX_MALLOC        0x00000001  // mux blocked on lack of connection's mux buffer
#define H2_CF_MUX_MFULL         0x00000002  // mux blocked on connection's mux buffer full
#define H2_CF_MUX_BLOCK_ANY     0x00000003  // aggregate of the mux flags above

/* Flags indicating why writing to the demux is blocked.
 * The first two ones directly affect the ability for the mux to receive data
 * from the connection. The other ones affect the mux's ability to demux
 * received data.
 */
#define H2_CF_DEM_DALLOC        0x00000004  // demux blocked on lack of connection's demux buffer
#define H2_CF_DEM_DFULL         0x00000008  // demux blocked on connection's demux buffer full

#define H2_CF_DEM_MBUSY         0x00000010  // demux blocked on connection's mux side busy
#define H2_CF_DEM_MROOM         0x00000020  // demux blocked on lack of room in mux buffer
#define H2_CF_DEM_SALLOC        0x00000040  // demux blocked on lack of stream's request buffer
#define H2_CF_DEM_SFULL         0x00000080  // demux blocked on stream request buffer full
#define H2_CF_DEM_TOOMANY       0x00000100  // demux blocked waiting for some conn_streams to leave
#define H2_CF_DEM_BLOCK_ANY     0x000001F0  // aggregate of the demux flags above except DALLOC/DFULL
                                            // (SHORT_READ is also excluded)

#define H2_CF_DEM_SHORT_READ    0x00000200  // demux blocked on incomplete frame

/* other flags */
#define H2_CF_GOAWAY_SENT       0x00001000  // a GOAWAY frame was successfully sent
#define H2_CF_GOAWAY_FAILED     0x00002000  // a GOAWAY frame failed to be sent
#define H2_CF_WAIT_FOR_HS       0x00004000  // We did check that at least a stream was waiting for handshake
#define H2_CF_IS_BACK           0x00008000  // this is an outgoing connection
#define H2_CF_WINDOW_OPENED     0x00010000  // demux increased window already advertised
#define H2_CF_RCVD_SHUT         0x00020000  // a recv() attempt already failed on a shutdown
#define H2_CF_END_REACHED       0x00040000  // pending data too short with RCVD_SHUT present

#define H2_CF_RCVD_RFC8441      0x00100000  // settings from RFC8441 has been received indicating support for Extended CONNECT
#define H2_CF_SHTS_UPDATED      0x00200000  // SETTINGS_HEADER_TABLE_SIZE updated
#define H2_CF_DTSU_EMITTED      0x00400000  // HPACK Dynamic Table Size Update opcode emitted
/* H2 connection state, in h2c->st0. States progress mostly linearly from
 * PREFACE to FRAME_H/FRAME_P which loop for each received frame; FRAME_A/
 * FRAME_E are transient Tx states, and ERROR/ERROR2 drive connection teardown.
 */
enum h2_cs {
	H2_CS_PREFACE,   // init done, waiting for connection preface
	H2_CS_SETTINGS1, // preface OK, waiting for first settings frame
	H2_CS_FRAME_H,   // first settings frame ok, waiting for frame header
	H2_CS_FRAME_P,   // frame header OK, waiting for frame payload
	H2_CS_FRAME_A,   // frame payload OK, trying to send ACK frame
	H2_CS_FRAME_E,   // frame payload OK, trying to send RST frame
	H2_CS_ERROR,     // send GOAWAY(errcode) and close the connection ASAP
	H2_CS_ERROR2,    // GOAWAY(errcode) sent, close the connection ASAP
	H2_CS_ENTRIES    // must be last
} __attribute__((packed));
92
/* 32 buffers: one for the ring's root, rest for the mbuf itself */
#define H2C_MBUF_CNT 32
Willy Tarreau51330962019-05-26 09:38:07 +020096
/* H2 connection descriptor: per-connection mux/demux state shared by all
 * streams of the connection. Field order is chosen to limit padding; the
 * "hole" comments document the remaining alignment gaps.
 */
struct h2c {
	struct connection *conn;

	enum h2_cs st0;            /* mux state */
	enum h2_err errcode;       /* H2 err code (H2_ERR_*) */

	/* 16 bit hole here */
	uint32_t flags;            /* connection flags: H2_CF_* */
	uint32_t streams_limit;    /* maximum number of concurrent streams the peer supports */
	int32_t max_id;            /* highest ID known on this connection, <0 before preface */
	uint32_t rcvd_c;           /* newly received data to ACK for the connection */
	uint32_t rcvd_s;           /* newly received data to ACK for the current stream (dsi) */

	/* states for the demux direction */
	struct hpack_dht *ddht;    /* demux dynamic header table */
	struct buffer dbuf;        /* demux buffer */

	int32_t dsi;               /* demux stream ID (<0 = idle) */
	int32_t dfl;               /* demux frame length (if dsi >= 0) */
	int8_t dft;                /* demux frame type (if dsi >= 0) */
	int8_t dff;                /* demux frame flags (if dsi >= 0) */
	uint8_t dpl;               /* demux pad length (part of dfl), init to 0 */
	/* 8 bit hole here */
	int32_t last_sid;          /* last processed stream ID for GOAWAY, <0 before preface */

	/* states for the mux direction */
	struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
	int32_t msi;               /* mux stream ID (<0 = idle) */
	int32_t mfl;               /* mux frame length (if msi >= 0) */
	int8_t mft;                /* mux frame type (if msi >= 0) */
	int8_t mff;                /* mux frame flags (if msi >= 0) */
	/* 16 bit hole here */
	int32_t miw;               /* mux initial window size for all new streams */
	int32_t mws;               /* mux window size. Can be negative. */
	int32_t mfs;               /* mux's max frame size */

	int timeout;               /* idle timeout duration in ticks */
	int shut_timeout;          /* idle timeout duration in ticks after GOAWAY was sent */
	int idle_start;            /* date of the last time the connection went idle */
	/* 32-bit hole here */
	unsigned int nb_streams;   /* number of streams in the tree */
	unsigned int nb_cs;        /* number of attached conn_streams */
	unsigned int nb_reserved;  /* number of reserved streams */
	unsigned int stream_cnt;   /* total number of streams seen */
	struct proxy *proxy;       /* the proxy this connection was created for */
	struct task *task;         /* timeout management task */
	struct h2_counters *px_counters; /* h2 counters attached to proxy */
	struct eb_root streams_by_id; /* all active streams by their ID */
	struct list send_list;     /* list of blocked streams requesting to send */
	struct list fctl_list;     /* list of streams blocked by connection's fctl */
	struct list blocked_list;  /* list of streams blocked for other reasons (e.g. sfctl, dep) */
	struct buffer_wait buf_wait; /* wait list for buffer allocations */
	struct wait_event wait_event; /* To be used if we're waiting for I/Os */
};
152
/* H2 stream state, in h2s->st. Mirrors the RFC 7540 §5.1 stream state
 * machine, plus an internal ERROR state used to schedule an RST_STREAM.
 */
enum h2_ss {
	H2_SS_IDLE = 0, // idle
	H2_SS_RLOC,     // reserved(local)
	H2_SS_RREM,     // reserved(remote)
	H2_SS_OPEN,     // open
	H2_SS_HREM,     // half-closed(remote)
	H2_SS_HLOC,     // half-closed(local)
	H2_SS_ERROR,    // an error needs to be sent using RST_STREAM
	H2_SS_CLOSED,   // closed
	H2_SS_ENTRIES   // must be last
} __attribute__((packed));
165
/* Bit masks derived from enum h2_ss, so that sets of acceptable stream states
 * can be tested with a single AND instead of chained comparisons.
 */
#define H2_SS_MASK(state)   (1UL << (state))
#define H2_SS_IDLE_BIT      (1UL << H2_SS_IDLE)
#define H2_SS_RLOC_BIT      (1UL << H2_SS_RLOC)
#define H2_SS_RREM_BIT      (1UL << H2_SS_RREM)
#define H2_SS_OPEN_BIT      (1UL << H2_SS_OPEN)
#define H2_SS_HREM_BIT      (1UL << H2_SS_HREM)
#define H2_SS_HLOC_BIT      (1UL << H2_SS_HLOC)
#define H2_SS_ERROR_BIT     (1UL << H2_SS_ERROR)
#define H2_SS_CLOSED_BIT    (1UL << H2_SS_CLOSED)
/* HTTP/2 stream flags (32 bit), in h2s->flags */
#define H2_SF_NONE              0x00000000
#define H2_SF_ES_RCVD           0x00000001  // END_STREAM received
#define H2_SF_ES_SENT           0x00000002  // END_STREAM sent

#define H2_SF_RST_RCVD          0x00000004  // received RST_STREAM
#define H2_SF_RST_SENT          0x00000008  // sent RST_STREAM

/* stream flags indicating the reason the stream is blocked */
#define H2_SF_BLK_MBUSY         0x00000010  // blocked waiting for mux access (transient)
#define H2_SF_BLK_MROOM         0x00000020  // blocked waiting for room in the mux (must be in send list)
#define H2_SF_BLK_MFCTL         0x00000040  // blocked due to mux fctl (must be in fctl list)
#define H2_SF_BLK_SFCTL         0x00000080  // blocked due to stream fctl (must be in blocked list)
#define H2_SF_BLK_ANY           0x000000F0  // any of the reasons above

/* stream flags indicating how data is supposed to be sent */
#define H2_SF_DATA_CLEN         0x00000100  // data sent using content-length
#define H2_SF_BODYLESS_RESP     0x00000200  /* Bodyless response message */
#define H2_SF_BODY_TUNNEL       0x00000400  // Attempt to establish a Tunnelled stream (the result depends on the status code)


#define H2_SF_NOTIFIED          0x00000800  // a paused stream was notified to try to send again
#define H2_SF_HEADERS_SENT      0x00001000  // a HEADERS frame was sent for this stream
#define H2_SF_OUTGOING_DATA     0x00002000  // set whenever we've seen outgoing data

#define H2_SF_HEADERS_RCVD      0x00004000  // a HEADERS frame was received for this stream

#define H2_SF_WANT_SHUTR        0x00008000  // a stream couldn't shutr() (mux full/busy)
#define H2_SF_WANT_SHUTW        0x00010000  // a stream couldn't shutw() (mux full/busy)
#define H2_SF_KILL_CONN         0x00020000  // kill the whole connection with this stream

#define H2_SF_EXT_CONNECT_SENT  0x00040000  // rfc 8441 an Extended CONNECT has been sent
#define H2_SF_EXT_CONNECT_RCVD  0x00080000  // rfc 8441 an Extended CONNECT has been received and parsed

#define H2_SF_TUNNEL_ABRT       0x00100000  // A tunnel attempt was aborted
/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
 * it is being processed in the internal HTTP representation (HTX).
 */
struct h2s {
	struct conn_stream *cs;    /* attached conn_stream, NULL for orphaned streams */
	struct session *sess;      /* session this stream was initiated from */
	struct h2c *h2c;           /* parent H2 connection descriptor */
	struct eb32_node by_id;    /* place in h2c's streams_by_id */
	int32_t id;                /* stream ID */
	uint32_t flags;            /* H2_SF_* */
	int sws;                   /* stream window size, to be added to the mux's initial window size */
	enum h2_err errcode;       /* H2 err code (H2_ERR_*) */
	enum h2_ss st;             /* stream state (H2_SS_*) */
	uint16_t status;           /* HTTP response status */
	unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
	struct buffer rxbuf;       /* receive buffer, always valid (buf_empty or real buffer) */
	struct wait_event *subs;   /* recv wait_event the conn_stream associated is waiting on (via h2_subscribe) */
	struct list list;          /* To be used when adding in h2c->send_list or h2c->fctl_list */
	struct tasklet *shut_tl;   /* deferred shutdown tasklet, to retry to send an RST after we failed to,
	                            * in case there's no other subscription to do it */

	char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
};
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200235
/* descriptor for an h2 frame header, decoded from the 9-byte wire format */
struct h2_fh {
	uint32_t len; /* length, host order, 24 bits */
	uint32_t sid; /* stream id, host order, 31 bits */
	uint8_t ft;   /* frame type */
	uint8_t ff;   /* frame flags */
};
243
/* trace source and events */
/* Forward declaration of the trace callback registered in trace_h2 below;
 * it formats h2c/h2s-specific context for the trace subsystem.
 */
static void h2_trace(enum trace_level level, uint64_t mask, \
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);
Willy Tarreau12ae2122019-08-08 18:23:12 +0200249
250/* The event representation is split like this :
251 * strm - application layer
252 * h2s - internal H2 stream
253 * h2c - internal H2 connection
254 * conn - external connection
255 *
256 */
257static const struct trace_event h2_trace_events[] = {
258#define H2_EV_H2C_NEW (1ULL << 0)
Willy Tarreau87951942019-08-30 07:34:36 +0200259 { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200260#define H2_EV_H2C_RECV (1ULL << 1)
Willy Tarreau87951942019-08-30 07:34:36 +0200261 { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200262#define H2_EV_H2C_SEND (1ULL << 2)
Willy Tarreau87951942019-08-30 07:34:36 +0200263 { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200264#define H2_EV_H2C_FCTL (1ULL << 3)
Willy Tarreau87951942019-08-30 07:34:36 +0200265 { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200266#define H2_EV_H2C_BLK (1ULL << 4)
Willy Tarreau87951942019-08-30 07:34:36 +0200267 { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200268#define H2_EV_H2C_WAKE (1ULL << 5)
Willy Tarreau87951942019-08-30 07:34:36 +0200269 { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200270#define H2_EV_H2C_END (1ULL << 6)
Willy Tarreau87951942019-08-30 07:34:36 +0200271 { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200272#define H2_EV_H2C_ERR (1ULL << 7)
Willy Tarreau87951942019-08-30 07:34:36 +0200273 { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200274#define H2_EV_RX_FHDR (1ULL << 8)
Willy Tarreau87951942019-08-30 07:34:36 +0200275 { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200276#define H2_EV_RX_FRAME (1ULL << 9)
Willy Tarreau87951942019-08-30 07:34:36 +0200277 { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200278#define H2_EV_RX_EOI (1ULL << 10)
Willy Tarreau87951942019-08-30 07:34:36 +0200279 { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200280#define H2_EV_RX_PREFACE (1ULL << 11)
Willy Tarreau87951942019-08-30 07:34:36 +0200281 { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200282#define H2_EV_RX_DATA (1ULL << 12)
Willy Tarreau87951942019-08-30 07:34:36 +0200283 { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200284#define H2_EV_RX_HDR (1ULL << 13)
Willy Tarreau87951942019-08-30 07:34:36 +0200285 { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200286#define H2_EV_RX_PRIO (1ULL << 14)
Willy Tarreau87951942019-08-30 07:34:36 +0200287 { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200288#define H2_EV_RX_RST (1ULL << 15)
Willy Tarreau87951942019-08-30 07:34:36 +0200289 { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200290#define H2_EV_RX_SETTINGS (1ULL << 16)
Willy Tarreau87951942019-08-30 07:34:36 +0200291 { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200292#define H2_EV_RX_PUSH (1ULL << 17)
Willy Tarreau87951942019-08-30 07:34:36 +0200293 { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200294#define H2_EV_RX_PING (1ULL << 18)
Willy Tarreau87951942019-08-30 07:34:36 +0200295 { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200296#define H2_EV_RX_GOAWAY (1ULL << 19)
Willy Tarreau87951942019-08-30 07:34:36 +0200297 { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200298#define H2_EV_RX_WU (1ULL << 20)
Willy Tarreau87951942019-08-30 07:34:36 +0200299 { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200300#define H2_EV_RX_CONT (1ULL << 21)
Willy Tarreau87951942019-08-30 07:34:36 +0200301 { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200302#define H2_EV_TX_FRAME (1ULL << 22)
Willy Tarreau87951942019-08-30 07:34:36 +0200303 { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200304#define H2_EV_TX_EOI (1ULL << 23)
Willy Tarreau87951942019-08-30 07:34:36 +0200305 { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200306#define H2_EV_TX_PREFACE (1ULL << 24)
Willy Tarreau87951942019-08-30 07:34:36 +0200307 { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200308#define H2_EV_TX_DATA (1ULL << 25)
Willy Tarreau87951942019-08-30 07:34:36 +0200309 { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200310#define H2_EV_TX_HDR (1ULL << 26)
Willy Tarreau87951942019-08-30 07:34:36 +0200311 { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200312#define H2_EV_TX_PRIO (1ULL << 27)
Willy Tarreau87951942019-08-30 07:34:36 +0200313 { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200314#define H2_EV_TX_RST (1ULL << 28)
Willy Tarreau87951942019-08-30 07:34:36 +0200315 { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200316#define H2_EV_TX_SETTINGS (1ULL << 29)
Willy Tarreau87951942019-08-30 07:34:36 +0200317 { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200318#define H2_EV_TX_PUSH (1ULL << 30)
Willy Tarreau87951942019-08-30 07:34:36 +0200319 { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200320#define H2_EV_TX_PING (1ULL << 31)
Willy Tarreau87951942019-08-30 07:34:36 +0200321 { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200322#define H2_EV_TX_GOAWAY (1ULL << 32)
Willy Tarreau87951942019-08-30 07:34:36 +0200323 { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200324#define H2_EV_TX_WU (1ULL << 33)
Willy Tarreau87951942019-08-30 07:34:36 +0200325 { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200326#define H2_EV_TX_CONT (1ULL << 34)
Willy Tarreau87951942019-08-30 07:34:36 +0200327 { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200328#define H2_EV_H2S_NEW (1ULL << 35)
Willy Tarreau87951942019-08-30 07:34:36 +0200329 { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200330#define H2_EV_H2S_RECV (1ULL << 36)
Willy Tarreau87951942019-08-30 07:34:36 +0200331 { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200332#define H2_EV_H2S_SEND (1ULL << 37)
Willy Tarreau87951942019-08-30 07:34:36 +0200333 { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200334#define H2_EV_H2S_FCTL (1ULL << 38)
Willy Tarreau87951942019-08-30 07:34:36 +0200335 { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200336#define H2_EV_H2S_BLK (1ULL << 39)
Willy Tarreau87951942019-08-30 07:34:36 +0200337 { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200338#define H2_EV_H2S_WAKE (1ULL << 40)
Willy Tarreau87951942019-08-30 07:34:36 +0200339 { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200340#define H2_EV_H2S_END (1ULL << 41)
Willy Tarreau87951942019-08-30 07:34:36 +0200341 { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200342#define H2_EV_H2S_ERR (1ULL << 42)
Willy Tarreau87951942019-08-30 07:34:36 +0200343 { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200344#define H2_EV_STRM_NEW (1ULL << 43)
Willy Tarreau87951942019-08-30 07:34:36 +0200345 { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200346#define H2_EV_STRM_RECV (1ULL << 44)
Willy Tarreau87951942019-08-30 07:34:36 +0200347 { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200348#define H2_EV_STRM_SEND (1ULL << 45)
Willy Tarreau87951942019-08-30 07:34:36 +0200349 { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200350#define H2_EV_STRM_FULL (1ULL << 46)
Willy Tarreau87951942019-08-30 07:34:36 +0200351 { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200352#define H2_EV_STRM_WAKE (1ULL << 47)
Willy Tarreau87951942019-08-30 07:34:36 +0200353 { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200354#define H2_EV_STRM_SHUT (1ULL << 48)
Willy Tarreau87951942019-08-30 07:34:36 +0200355 { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200356#define H2_EV_STRM_END (1ULL << 49)
Willy Tarreau87951942019-08-30 07:34:36 +0200357 { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200358#define H2_EV_STRM_ERR (1ULL << 50)
Willy Tarreau87951942019-08-30 07:34:36 +0200359 { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200360#define H2_EV_PROTO_ERR (1ULL << 51)
Willy Tarreau87951942019-08-30 07:34:36 +0200361 { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200362 { }
363};
364
/* Arguments that "lockon" can be applied to in "trace h2" commands; arg1 is
 * implicitly the connection, so only the h2s slot is named here.
 */
static const struct name_desc h2_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the connection */ },
	/* arg2 */ { .name = "h2s", .desc = "H2 stream" },
	/* arg3 */ { },
	/* arg4 */ { }
};
371
/* Verbosity levels for trace decoding; the H2_VERB_* defines give each entry
 * its 1-based index in this table so code can compare against src->verbosity.
 */
static const struct name_desc h2_trace_decoding[] = {
#define H2_VERB_CLEAN    1
	{ .name = "clean",    .desc = "only user-friendly stuff, generally suitable for level \"user\"" },
#define H2_VERB_MINIMAL  2
	{ .name = "minimal",  .desc = "report only h2c/h2s state and flags, no real decoding" },
#define H2_VERB_SIMPLE   3
	{ .name = "simple",   .desc = "add request/response status line or frame info when available" },
#define H2_VERB_ADVANCED 4
	{ .name = "advanced", .desc = "add header fields or frame decoding when available" },
#define H2_VERB_COMPLETE 5
	{ .name = "complete", .desc = "add full data dump when available" },
	{ /* end */ }
};
385
/* Trace source descriptor for the H2 mux, registered below via INITCALL.
 * Read-mostly: written at config/CLI time, read on every traced event.
 */
static struct trace_source trace_h2 __read_mostly = {
	.name = IST("h2"),
	.desc = "HTTP/2 multiplexer",
	.arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
	.default_cb = h2_trace,
	.known_events = h2_trace_events,
	.lockon_args = h2_trace_lockon_args,
	.decoding = h2_trace_decoding,
	.report_events = ~0,       // report everything by default
};
396
/* Make TRACE() macros in this file log to the h2 source, and register it. */
#define TRACE_SOURCE &trace_h2
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
399
/* h2 stats module: indices into the h2_stats[] name/desc table and the
 * matching counters; order must stay in sync with that table.
 */
enum {
	H2_ST_HEADERS_RCVD,
	H2_ST_DATA_RCVD,
	H2_ST_SETTINGS_RCVD,
	H2_ST_RST_STREAM_RCVD,
	H2_ST_GOAWAY_RCVD,

	H2_ST_CONN_PROTO_ERR,
	H2_ST_STRM_PROTO_ERR,
	H2_ST_RST_STREAM_RESP,
	H2_ST_GOAWAY_RESP,

	H2_ST_OPEN_CONN,
	H2_ST_OPEN_STREAM,
	H2_ST_TOTAL_CONN,
	H2_ST_TOTAL_STREAM,

	H2_STATS_COUNT /* must be the last member of the enum */
};
420
421static struct name_desc h2_stats[] = {
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100422 [H2_ST_HEADERS_RCVD] = { .name = "h2_headers_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100423 .desc = "Total number of received HEADERS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100424 [H2_ST_DATA_RCVD] = { .name = "h2_data_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100425 .desc = "Total number of received DATA frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100426 [H2_ST_SETTINGS_RCVD] = { .name = "h2_settings_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100427 .desc = "Total number of received SETTINGS frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100428 [H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100429 .desc = "Total number of received RST_STREAM frames" },
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100430 [H2_ST_GOAWAY_RCVD] = { .name = "h2_goaway_rcvd",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100431 .desc = "Total number of received GOAWAY frames" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100432
433 [H2_ST_CONN_PROTO_ERR] = { .name = "h2_detected_conn_protocol_errors",
434 .desc = "Total number of connection protocol errors" },
435 [H2_ST_STRM_PROTO_ERR] = { .name = "h2_detected_strm_protocol_errors",
436 .desc = "Total number of stream protocol errors" },
437 [H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100438 .desc = "Total number of RST_STREAM sent on detected error" },
Amaury Denoyellea8879232020-10-27 17:16:03 +0100439 [H2_ST_GOAWAY_RESP] = { .name = "h2_goaway_resp",
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100440 .desc = "Total number of GOAWAY sent on detected error" },
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100441
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100442 [H2_ST_OPEN_CONN] = { .name = "h2_open_connections",
443 .desc = "Count of currently open connections" },
444 [H2_ST_OPEN_STREAM] = { .name = "h2_backend_open_streams",
445 .desc = "Count of currently open streams" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100446 [H2_ST_TOTAL_CONN] = { .name = "h2_total_connections",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100447 .desc = "Total number of connections" },
Amaury Denoyelle377d8782021-02-03 16:27:22 +0100448 [H2_ST_TOTAL_STREAM] = { .name = "h2_backend_total_streams",
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100449 .desc = "Total number of streams" },
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100450};
451
452static struct h2_counters {
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100453 long long headers_rcvd; /* total number of HEADERS frame received */
454 long long data_rcvd; /* total number of DATA frame received */
455 long long settings_rcvd; /* total number of SETTINGS frame received */
456 long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
457 long long goaway_rcvd; /* total number of GOAWAY frame received */
Amaury Denoyellea8879232020-10-27 17:16:03 +0100458
459 long long conn_proto_err; /* total number of protocol errors detected */
460 long long strm_proto_err; /* total number of protocol errors detected */
Amaury Denoyelle2ac34d92020-11-03 15:04:44 +0100461 long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
462 long long goaway_resp; /* total number of GOAWAY frame sent on error */
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100463
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100464 long long open_conns; /* count of currently open connections */
465 long long open_streams; /* count of currently open streams */
466 long long total_conns; /* total number of connections */
467 long long total_streams; /* total number of streams */
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100468} h2_counters;
469
470static void h2_fill_stats(void *data, struct field *stats)
471{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100472 struct h2_counters *counters = data;
473
474 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
475 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
476 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
477 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
478 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100479
480 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
481 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
482 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
483 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100484
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100485 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
486 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
487 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
488 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100489}
490
491static struct stats_module h2_stats_module = {
492 .name = "h2",
493 .fill_stats = h2_fill_stats,
494 .stats = h2_stats,
495 .stats_count = H2_STATS_COUNT,
496 .counters = &h2_counters,
497 .counters_size = sizeof(h2_counters),
498 .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
499 .clearable = 1,
500};
501
502INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
503
Willy Tarreau8ceae722018-11-26 11:58:30 +0100504/* the h2c connection pool */
505DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));
506
507/* the h2s stream pool */
508DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));
509
Willy Tarreaudc572362018-12-12 08:08:05 +0100510/* The default connection window size is 65535, it may only be enlarged using
511 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
512 * we'll pretend we already received the difference between the two to send
513 * an equivalent window update to enlarge it to 2G-1.
514 */
515#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)
516
Willy Tarreau455d5682019-05-24 19:42:18 +0200517/* maximum amount of data we're OK with re-aligning for buffer optimizations */
518#define MAX_DATA_REALIGN 1024
519
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200520/* a few settings from the global section */
521static int h2_settings_header_table_size = 4096; /* initial value */
Willy Tarreaue6baec02017-07-27 11:45:11 +0200522static int h2_settings_initial_window_size = 65535; /* initial value */
Willy Tarreau5a490b62019-01-31 10:39:51 +0100523static unsigned int h2_settings_max_concurrent_streams = 100;
Willy Tarreaua24b35c2019-02-21 13:24:36 +0100524static int h2_settings_max_frame_size = 0; /* unset */
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200525
Willy Tarreau2a856182017-05-16 15:20:39 +0200526/* a dummy closed stream */
527static const struct h2s *h2_closed_stream = &(const struct h2s){
528 .cs = NULL,
529 .h2c = NULL,
530 .st = H2_SS_CLOSED,
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +0100531 .errcode = H2_ERR_STREAM_CLOSED,
Willy Tarreauab837502017-12-27 15:07:30 +0100532 .flags = H2_SF_RST_RCVD,
Willy Tarreau2a856182017-05-16 15:20:39 +0200533 .id = 0,
534};
535
Willy Tarreauecb9dcd2019-01-03 12:00:17 +0100536/* a dummy closed stream returning a PROTOCOL_ERROR error */
537static const struct h2s *h2_error_stream = &(const struct h2s){
538 .cs = NULL,
539 .h2c = NULL,
540 .st = H2_SS_CLOSED,
541 .errcode = H2_ERR_PROTOCOL_ERROR,
542 .flags = 0,
543 .id = 0,
544};
545
Willy Tarreau8d0d58b2018-12-23 18:29:12 +0100546/* a dummy closed stream returning a REFUSED_STREAM error */
547static const struct h2s *h2_refused_stream = &(const struct h2s){
548 .cs = NULL,
549 .h2c = NULL,
550 .st = H2_SS_CLOSED,
551 .errcode = H2_ERR_REFUSED_STREAM,
552 .flags = 0,
553 .id = 0,
554};
555
Willy Tarreau2a856182017-05-16 15:20:39 +0200556/* and a dummy idle stream for use with any unannounced stream */
557static const struct h2s *h2_idle_stream = &(const struct h2s){
558 .cs = NULL,
559 .h2c = NULL,
560 .st = H2_SS_IDLE,
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +0100561 .errcode = H2_ERR_STREAM_CLOSED,
Willy Tarreau2a856182017-05-16 15:20:39 +0200562 .id = 0,
563};
564
Willy Tarreau144f84a2021-03-02 16:09:26 +0100565struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
Olivier Houchardd4dd22d2018-08-17 18:39:46 +0200566static int h2_send(struct h2c *h2c);
567static int h2_recv(struct h2c *h2c);
Olivier Houchard7505f942018-08-21 18:10:44 +0200568static int h2_process(struct h2c *h2c);
Willy Tarreau691d5032021-01-20 14:55:01 +0100569/* h2_io_cb is exported to see it resolved in "show fd" */
Willy Tarreau144f84a2021-03-02 16:09:26 +0100570struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
Willy Tarreau0b559072018-02-26 15:22:17 +0100571static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
Amaury Denoyelle74162742020-12-11 17:53:05 +0100572static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
Willy Tarreaua56a6de2018-02-26 15:59:07 +0100573static int h2_frt_transfer_data(struct h2s *h2s);
Willy Tarreau144f84a2021-03-02 16:09:26 +0100574struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
Olivier Houchardf502aca2018-12-14 19:42:40 +0100575static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess);
Willy Tarreau8b2757c2018-12-19 17:36:48 +0100576static void h2s_alert(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200577
Willy Tarreauab2ec452019-08-30 07:07:08 +0200578/* returns a h2c state as an abbreviated 3-letter string, or "???" if unknown */
579static inline const char *h2c_st_to_str(enum h2_cs st)
580{
581 switch (st) {
582 case H2_CS_PREFACE: return "PRF";
583 case H2_CS_SETTINGS1: return "STG";
584 case H2_CS_FRAME_H: return "FRH";
585 case H2_CS_FRAME_P: return "FRP";
586 case H2_CS_FRAME_A: return "FRA";
587 case H2_CS_FRAME_E: return "FRE";
588 case H2_CS_ERROR: return "ERR";
589 case H2_CS_ERROR2: return "ER2";
590 default: return "???";
591 }
592}
593
594/* returns a h2s state as an abbreviated 3-letter string, or "???" if unknown */
595static inline const char *h2s_st_to_str(enum h2_ss st)
596{
597 switch (st) {
598 case H2_SS_IDLE: return "IDL"; // idle
599 case H2_SS_RLOC: return "RSL"; // reserved local
600 case H2_SS_RREM: return "RSR"; // reserved remote
601 case H2_SS_OPEN: return "OPN"; // open
602 case H2_SS_HREM: return "HCR"; // half-closed remote
603 case H2_SS_HLOC: return "HCL"; // half-closed local
604 case H2_SS_ERROR : return "ERR"; // error
605 case H2_SS_CLOSED: return "CLO"; // closed
606 default: return "???";
607 }
608}
609
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200610/* the H2 traces always expect that arg1, if non-null, is of type connection
611 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
612 * that arg3, if non-null, is either of type htx for tx headers, or of type
613 * buffer for everything else.
614 */
615static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
616 const struct ist where, const struct ist func,
617 const void *a1, const void *a2, const void *a3, const void *a4)
618{
619 const struct connection *conn = a1;
620 const struct h2c *h2c = conn ? conn->ctx : NULL;
621 const struct h2s *h2s = a2;
622 const struct buffer *buf = a3;
623 const struct htx *htx;
624 int pos;
625
626 if (!h2c) // nothing to add
627 return;
628
Willy Tarreau17104d42019-08-30 07:12:55 +0200629 if (src->verbosity > H2_VERB_CLEAN) {
Willy Tarreau73db4342019-09-25 07:28:44 +0200630 chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
631
Willy Tarreau8e6f7492021-06-16 17:47:24 +0200632 if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
633 conn_append_debug_info(&trace_buf, conn, " : ");
634
Willy Tarreauf3ce0412019-11-24 14:57:00 +0100635 if (h2c->errcode)
636 chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);
637
Willy Tarreau73db4342019-09-25 07:28:44 +0200638 if (h2c->dsi >= 0 &&
639 (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
Willy Tarreau8520d872020-09-18 07:39:29 +0200640 chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
Willy Tarreau73db4342019-09-25 07:28:44 +0200641 }
642
643 if (h2s) {
644 if (h2s->id <= 0)
645 chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
646 chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
Willy Tarreauf3ce0412019-11-24 14:57:00 +0100647 if (h2s->id && h2s->errcode)
648 chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
Willy Tarreau73db4342019-09-25 07:28:44 +0200649 }
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200650 }
651
652 /* Let's dump decoded requests and responses right after parsing. They
653 * are traced at level USER with a few recognizable flags.
654 */
655 if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
656 mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
657 htx = htxbuf(buf); // recv req/res
658 else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
659 htx = a3; // send req/res
660 else
661 htx = NULL;
662
Willy Tarreau94f1dcf2019-08-30 07:11:30 +0200663 if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200664 const struct htx_blk *blk = htx_get_blk(htx, pos);
665 const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
666 enum htx_blk_type type = htx_get_blk_type(blk);
667
668 if (type == HTX_BLK_REQ_SL)
669 chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
Willy Tarreauc067a3a2019-08-30 07:28:24 +0200670 h2s ? h2s->id : h2c->dsi,
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200671 HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
672 HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
673 HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
674 else if (type == HTX_BLK_RES_SL)
675 chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
Willy Tarreauc067a3a2019-08-30 07:28:24 +0200676 h2s ? h2s->id : h2c->dsi,
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200677 HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
678 HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
679 HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
680 }
681}
682
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200683
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100684/* Detect a pending read0 for a H2 connection. It happens if a read0 was
685 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
686 * to parse pending data, confirming no more progress is possible because
687 * we're facing a truncated frame. The function returns 1 to report a read0
688 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200689 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100690static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200691{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100692 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200693}
694
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200695/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreau34395832022-03-18 14:59:54 +0100696 * connection may expire when it has no attached streams. As long as streams
697 * are attached, the application layer is responsible for timeout management,
698 * and each layer will detach when it doesn't want to wait anymore. When the
699 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200700 */
701static inline int h2c_may_expire(const struct h2c *h2c)
702{
Willy Tarreau34395832022-03-18 14:59:54 +0100703 return !h2c->nb_cs;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200704}
705
Willy Tarreau15a47332022-03-18 15:57:34 +0100706/* update h2c timeout if needed */
707static void h2c_update_timeout(struct h2c *h2c)
708{
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200709 int is_idle_conn = 0;
710
Willy Tarreau15a47332022-03-18 15:57:34 +0100711 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
712
713 if (!h2c->task)
714 goto leave;
715
716 if (h2c_may_expire(h2c)) {
717 /* no more streams attached */
718 if (h2c->last_sid >= 0) {
719 /* GOAWAY sent, closing in progress */
720 h2c->task->expire = tick_add_ifset(now_ms, h2c->shut_timeout);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200721 is_idle_conn = 1;
Willy Tarreau15a47332022-03-18 15:57:34 +0100722 } else if (br_data(h2c->mbuf)) {
723 /* pending output data: always the regular data timeout */
724 h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
725 } else if (h2c->max_id > 0 && !b_data(&h2c->dbuf)) {
726 /* idle after having seen one stream => keep-alive */
727 h2c->task->expire = tick_add_ifset(h2c->idle_start, h2c->proxy->timeout.httpka);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200728 is_idle_conn = 1;
Willy Tarreau15a47332022-03-18 15:57:34 +0100729 } else {
730 /* before first request, or started to deserialize a
731 * new req => http-request, but only set, not refresh.
732 */
733 int exp = (h2c->flags & H2_CF_IS_BACK) ? TICK_ETERNITY : h2c->proxy->timeout.httpreq;
734 h2c->task->expire = tick_add_ifset(h2c->idle_start, exp);
735 }
736 /* if a timeout above was not set, fall back to the default one */
737 if (!tick_isset(h2c->task->expire))
738 h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +0200739
740 if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
741 is_idle_conn && tick_isset(global.close_spread_end)) {
742 /* If a soft-stop is in progress and a close-spread-time
743 * is set, we want to spread idle connection closing roughly
744 * evenly across the defined window. This should only
745 * act on idle frontend connections.
746 * If the window end is already in the past, we wake the
747 * timeout task up immediately so that it can be closed.
748 */
749 int remaining_window = tick_remain(now_ms, global.close_spread_end);
750 if (remaining_window) {
751 /* We don't need to reset the expire if it would
752 * already happen before the close window end.
753 */
754 if (tick_isset(h2c->task->expire) &&
755 tick_is_le(global.close_spread_end, h2c->task->expire)) {
756 /* Set an expire value shorter than the current value
757 * because the close spread window end comes earlier.
758 */
759 h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
760 }
761 }
762 else {
763 /* We are past the soft close window end, wake the timeout
764 * task up immediately.
765 */
766 task_wakeup(h2c->task, TASK_WOKEN_TIMER);
767 }
768 }
769
Willy Tarreau15a47332022-03-18 15:57:34 +0100770 } else {
771 h2c->task->expire = TICK_ETERNITY;
772 }
773 task_queue(h2c->task);
774 leave:
775 TRACE_LEAVE(H2_EV_H2C_WAKE);
776}
777
Olivier Houchard7a977432019-03-21 15:47:13 +0100778static __inline int
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200779h2c_is_dead(const struct h2c *h2c)
Olivier Houchard7a977432019-03-21 15:47:13 +0100780{
781 if (eb_is_empty(&h2c->streams_by_id) && /* don't close if streams exist */
782 ((h2c->conn->flags & CO_FL_ERROR) || /* errors close immediately */
783 (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
784 (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
Willy Tarreau662fafc2019-05-26 09:43:07 +0200785 (!br_data(h2c->mbuf) && /* mux buffer empty, also process clean events below */
Olivier Houchard7a977432019-03-21 15:47:13 +0100786 (conn_xprt_read0_pending(h2c->conn) ||
787 (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
788 return 1;
789
790 return 0;
Olivier Houchard7a977432019-03-21 15:47:13 +0100791}
792
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200793/*****************************************************/
794/* functions below are for dynamic buffer management */
795/*****************************************************/
796
Willy Tarreau315d8072017-12-10 22:17:57 +0100797/* indicates whether or not the we may call the h2_recv() function to attempt
798 * to receive data into the buffer and/or demux pending data. The condition is
799 * a bit complex due to some API limits for now. The rules are the following :
800 * - if an error or a shutdown was detected on the connection and the buffer
801 * is empty, we must not attempt to receive
802 * - if the demux buf failed to be allocated, we must not try to receive and
803 * we know there is nothing pending
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100804 * - if no flag indicates a blocking condition, we may attempt to receive,
805 * regardless of whether the demux buffer is full or not, so that only
806 * de demux part decides whether or not to block. This is needed because
807 * the connection API indeed prevents us from re-enabling receipt that is
808 * already enabled in a polled state, so we must always immediately stop
809 * as soon as the demux can't proceed so as never to hit an end of read
810 * with data pending in the buffers.
Willy Tarreau315d8072017-12-10 22:17:57 +0100811 * - otherwise must may not attempt
812 */
813static inline int h2_recv_allowed(const struct h2c *h2c)
814{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200815 if (b_data(&h2c->dbuf) == 0 &&
Willy Tarreau315d8072017-12-10 22:17:57 +0100816 (h2c->st0 >= H2_CS_ERROR ||
817 h2c->conn->flags & CO_FL_ERROR ||
818 conn_xprt_read0_pending(h2c->conn)))
819 return 0;
820
821 if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100822 !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
Willy Tarreau315d8072017-12-10 22:17:57 +0100823 return 1;
824
825 return 0;
826}
827
Willy Tarreau47b515a2018-12-21 16:09:41 +0100828/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200829static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100830{
831 if (!h2_recv_allowed(h2c))
832 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200833 if ((!consider_buffer || !b_data(&h2c->dbuf))
834 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100835 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200836 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100837}
838
839
Willy Tarreaufa1d3572019-01-31 10:31:51 +0100840/* returns true if the front connection has too many conn_streams attached */
841static inline int h2_frt_has_too_many_cs(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200842{
Willy Tarreaua8754662018-12-23 20:43:58 +0100843 return h2c->nb_cs > h2_settings_max_concurrent_streams;
Willy Tarreauf2101912018-07-19 10:11:38 +0200844}
845
Willy Tarreau44e973f2018-03-01 17:49:30 +0100846/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
847 * flags are used to figure what buffer was requested. It returns 1 if the
848 * allocation succeeds, in which case the connection is woken up, or 0 if it's
849 * impossible to wake up and we prefer to be woken up later.
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200850 */
Willy Tarreau44e973f2018-03-01 17:49:30 +0100851static int h2_buf_available(void *target)
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200852{
853 struct h2c *h2c = target;
Willy Tarreau0b559072018-02-26 15:22:17 +0100854 struct h2s *h2s;
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200855
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100856 if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200857 h2c->flags &= ~H2_CF_DEM_DALLOC;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200858 h2c_restart_reading(h2c, 1);
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200859 return 1;
860 }
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200861
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100862 if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100863 h2c->flags &= ~H2_CF_MUX_MALLOC;
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200864
865 if (h2c->flags & H2_CF_DEM_MROOM) {
866 h2c->flags &= ~H2_CF_DEM_MROOM;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200867 h2c_restart_reading(h2c, 1);
Willy Tarreau1b62c5c2017-09-25 11:55:01 +0200868 }
Willy Tarreau14398122017-09-22 14:26:04 +0200869 return 1;
870 }
Willy Tarreau0b559072018-02-26 15:22:17 +0100871
872 if ((h2c->flags & H2_CF_DEM_SALLOC) &&
873 (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100874 b_alloc(&h2s->rxbuf)) {
Willy Tarreau0b559072018-02-26 15:22:17 +0100875 h2c->flags &= ~H2_CF_DEM_SALLOC;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200876 h2c_restart_reading(h2c, 1);
Willy Tarreau0b559072018-02-26 15:22:17 +0100877 return 1;
878 }
879
Willy Tarreau14398122017-09-22 14:26:04 +0200880 return 0;
881}
882
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200883static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200884{
885 struct buffer *buf = NULL;
886
Willy Tarreau2b718102021-04-21 07:32:39 +0200887 if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
Willy Tarreaud68d4f12021-03-22 14:44:31 +0100888 unlikely((buf = b_alloc(bptr)) == NULL)) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100889 h2c->buf_wait.target = h2c;
890 h2c->buf_wait.wakeup_cb = h2_buf_available;
Willy Tarreaub4e34762021-09-30 19:02:18 +0200891 LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
Willy Tarreau14398122017-09-22 14:26:04 +0200892 }
893 return buf;
894}
895
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200896static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200897{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200898 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100899 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100900 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200901 }
902}
903
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200904static inline void h2_release_mbuf(struct h2c *h2c)
905{
906 struct buffer *buf;
907 unsigned int count = 0;
908
909 while (b_size(buf = br_head_pick(h2c->mbuf))) {
910 b_free(buf);
911 count++;
912 }
913 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100914 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200915}
916
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100917/* returns the number of allocatable outgoing streams for the connection taking
918 * the last_sid and the reserved ones into account.
919 */
920static inline int h2_streams_left(const struct h2c *h2c)
921{
922 int ret;
923
924 /* consider the number of outgoing streams we're allowed to create before
925 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
926 * nb_reserved is the number of streams which don't yet have an ID.
927 */
928 ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
929 ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
930 if (ret < 0)
931 ret = 0;
932 return ret;
933}
934
Willy Tarreau00f18a32019-01-26 12:19:01 +0100935/* returns the number of streams in use on a connection to figure if it's
936 * idle or not. We check nb_cs and not nb_streams as the caller will want
937 * to know if it was the last one after a detach().
938 */
939static int h2_used_streams(struct connection *conn)
940{
941 struct h2c *h2c = conn->ctx;
942
943 return h2c->nb_cs;
944}
945
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100946/* returns the number of concurrent streams available on the connection */
Olivier Houchardd540b362018-11-05 18:37:53 +0100947static int h2_avail_streams(struct connection *conn)
948{
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100949 struct server *srv = objt_server(conn->target);
Willy Tarreau3d2ee552018-12-19 14:12:10 +0100950 struct h2c *h2c = conn->ctx;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100951 int ret1, ret2;
Olivier Houchardd540b362018-11-05 18:37:53 +0100952
Willy Tarreau6afec462019-01-28 06:40:19 +0100953 /* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
954 * streams on the connection.
955 */
956 if (h2c->last_sid >= 0)
957 return 0;
958
Willy Tarreauc61966f2019-10-31 15:10:03 +0100959 if (h2c->st0 >= H2_CS_ERROR)
960 return 0;
961
Willy Tarreau86949782019-01-31 10:42:05 +0100962 /* note: may be negative if a SETTINGS frame changes the limit */
963 ret1 = h2c->streams_limit - h2c->nb_streams;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100964
965 /* we must also consider the limit imposed by stream IDs */
966 ret2 = h2_streams_left(h2c);
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100967 ret1 = MIN(ret1, ret2);
Willy Tarreau86949782019-01-31 10:42:05 +0100968 if (ret1 > 0 && srv && srv->max_reuse >= 0) {
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100969 ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
970 ret1 = MIN(ret1, ret2);
971 }
972 return ret1;
Olivier Houchardd540b362018-11-05 18:37:53 +0100973}
974
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200975
Willy Tarreau62f52692017-10-08 23:01:42 +0200976/*****************************************************************/
977/* functions below are dedicated to the mux setup and management */
978/*****************************************************************/
979
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200980/* Initialize the mux once it's attached. For outgoing connections, the context
981 * is already initialized before installing the mux, so we detect incoming
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200982 * connections from the fact that the context is still NULL (even during mux
983 * upgrades). <input> is always used as Input buffer and may contain data. It is
984 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200985 */
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200986static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
987 struct buffer *input)
Willy Tarreau32218eb2017-09-22 08:07:25 +0200988{
989 struct h2c *h2c;
Willy Tarreauea392822017-10-31 10:02:25 +0100990 struct task *t = NULL;
Christopher Fauletf81ef032019-10-04 15:19:43 +0200991 void *conn_ctx = conn->ctx;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200992
Christopher Fauletf81ef032019-10-04 15:19:43 +0200993 TRACE_ENTER(H2_EV_H2C_NEW);
Willy Tarreau7838a792019-08-12 18:42:03 +0200994
Willy Tarreaubafbe012017-11-24 17:34:44 +0100995 h2c = pool_alloc(pool_head_h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +0200996 if (!h2c)
mildiscd2d7de2018-10-02 16:44:18 +0200997 goto fail_no_h2c;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200998
Christopher Faulete9b70722019-04-08 10:46:02 +0200999 if (conn_is_back(conn)) {
Willy Tarreau01b44822018-10-03 14:26:37 +02001000 h2c->flags = H2_CF_IS_BACK;
1001 h2c->shut_timeout = h2c->timeout = prx->timeout.server;
1002 if (tick_isset(prx->timeout.serverfin))
1003 h2c->shut_timeout = prx->timeout.serverfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +01001004
1005 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
1006 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +02001007 } else {
1008 h2c->flags = H2_CF_NONE;
1009 h2c->shut_timeout = h2c->timeout = prx->timeout.client;
1010 if (tick_isset(prx->timeout.clientfin))
1011 h2c->shut_timeout = prx->timeout.clientfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +01001012
1013 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
1014 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +02001015 }
Willy Tarreau3f133572017-10-31 19:21:06 +01001016
Willy Tarreau0b37d652018-10-03 10:33:02 +02001017 h2c->proxy = prx;
Willy Tarreau33400292017-11-05 11:23:40 +01001018 h2c->task = NULL;
Willy Tarreau15a47332022-03-18 15:57:34 +01001019 h2c->idle_start = now_ms;
Willy Tarreau3f133572017-10-31 19:21:06 +01001020 if (tick_isset(h2c->timeout)) {
Willy Tarreaubeeabf52021-10-01 18:23:30 +02001021 t = task_new_here();
Willy Tarreau3f133572017-10-31 19:21:06 +01001022 if (!t)
1023 goto fail;
1024
1025 h2c->task = t;
1026 t->process = h2_timeout_task;
1027 t->context = h2c;
1028 t->expire = tick_add(now_ms, h2c->timeout);
1029 }
Willy Tarreauea392822017-10-31 10:02:25 +01001030
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001031 h2c->wait_event.tasklet = tasklet_new();
1032 if (!h2c->wait_event.tasklet)
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001033 goto fail;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001034 h2c->wait_event.tasklet->process = h2_io_cb;
1035 h2c->wait_event.tasklet->context = h2c;
Willy Tarreau4f6516d2018-12-19 13:59:17 +01001036 h2c->wait_event.events = 0;
Amaury Denoyelled3a88c12021-05-03 10:47:51 +02001037 if (!conn_is_back(conn)) {
1038 /* Connection might already be in the stopping_list if subject
1039 * to h1->h2 upgrade.
1040 */
1041 if (!LIST_INLIST(&conn->stopping_list)) {
1042 LIST_APPEND(&mux_stopping_data[tid].list,
1043 &conn->stopping_list);
1044 }
1045 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001046
Willy Tarreau2bdcc702020-05-19 11:31:11 +02001047 h2c->ddht = hpack_dht_alloc();
Willy Tarreau32218eb2017-09-22 08:07:25 +02001048 if (!h2c->ddht)
1049 goto fail;
1050
1051 /* Initialise the context. */
1052 h2c->st0 = H2_CS_PREFACE;
1053 h2c->conn = conn;
Willy Tarreau2e2083a2019-01-31 10:34:07 +01001054 h2c->streams_limit = h2_settings_max_concurrent_streams;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001055 h2c->max_id = -1;
1056 h2c->errcode = H2_ERR_NO_ERROR;
Willy Tarreau97aaa672018-12-23 09:49:04 +01001057 h2c->rcvd_c = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001058 h2c->rcvd_s = 0;
Willy Tarreau49745612017-12-03 18:56:02 +01001059 h2c->nb_streams = 0;
Willy Tarreau7ac60e82018-07-19 09:04:05 +02001060 h2c->nb_cs = 0;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001061 h2c->nb_reserved = 0;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001062 h2c->stream_cnt = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001063
Christopher Faulet51f73eb2019-04-08 11:22:47 +02001064 h2c->dbuf = *input;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001065 h2c->dsi = -1;
1066 h2c->msi = -1;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001067
Willy Tarreau32218eb2017-09-22 08:07:25 +02001068 h2c->last_sid = -1;
1069
Willy Tarreau51330962019-05-26 09:38:07 +02001070 br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
Willy Tarreau32218eb2017-09-22 08:07:25 +02001071 h2c->miw = 65535; /* mux initial window size */
1072 h2c->mws = 65535; /* mux window size */
1073 h2c->mfs = 16384; /* initial max frame size */
Willy Tarreau751f2d02018-10-05 09:35:00 +02001074 h2c->streams_by_id = EB_ROOT;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001075 LIST_INIT(&h2c->send_list);
1076 LIST_INIT(&h2c->fctl_list);
Willy Tarreau9edf6db2019-10-02 10:49:59 +02001077 LIST_INIT(&h2c->blocked_list);
Willy Tarreau90f366b2021-02-20 11:49:49 +01001078 LIST_INIT(&h2c->buf_wait.list);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001079
Christopher Fauletf81ef032019-10-04 15:19:43 +02001080 conn->ctx = h2c;
1081
Willy Tarreau8e6f7492021-06-16 17:47:24 +02001082 TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);
1083
Willy Tarreau3f133572017-10-31 19:21:06 +01001084 if (t)
1085 task_queue(t);
Willy Tarreauea392822017-10-31 10:02:25 +01001086
Willy Tarreau01b44822018-10-03 14:26:37 +02001087 if (h2c->flags & H2_CF_IS_BACK) {
1088 /* FIXME: this is temporary, for outgoing connections we need
1089 * to immediately allocate a stream until the code is modified
1090 * so that the caller calls ->attach(). For now the outgoing cs
Christopher Fauletf81ef032019-10-04 15:19:43 +02001091 * is stored as conn->ctx by the caller and saved in conn_ctx.
Willy Tarreau01b44822018-10-03 14:26:37 +02001092 */
1093 struct h2s *h2s;
1094
Christopher Fauletf81ef032019-10-04 15:19:43 +02001095 h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
Willy Tarreau01b44822018-10-03 14:26:37 +02001096 if (!h2s)
1097 goto fail_stream;
1098 }
1099
Willy Tarreau4781b152021-04-06 13:53:36 +02001100 HA_ATOMIC_INC(&h2c->px_counters->open_conns);
1101 HA_ATOMIC_INC(&h2c->px_counters->total_conns);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001102
Willy Tarreau0f383582018-10-03 14:22:21 +02001103 /* prepare to read something */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02001104 h2c_restart_reading(h2c, 1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001105 TRACE_LEAVE(H2_EV_H2C_NEW, conn);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001106 return 0;
Willy Tarreau01b44822018-10-03 14:26:37 +02001107 fail_stream:
1108 hpack_dht_free(h2c->ddht);
mildiscd2d7de2018-10-02 16:44:18 +02001109 fail:
Willy Tarreauf6562792019-05-07 19:05:35 +02001110 task_destroy(t);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001111 if (h2c->wait_event.tasklet)
1112 tasklet_free(h2c->wait_event.tasklet);
Willy Tarreaubafbe012017-11-24 17:34:44 +01001113 pool_free(pool_head_h2c, h2c);
mildiscd2d7de2018-10-02 16:44:18 +02001114 fail_no_h2c:
Willy Tarreau3b990fe2022-01-12 17:24:26 +01001115 if (!conn_is_back(conn))
1116 LIST_DEL_INIT(&conn->stopping_list);
Christopher Fauletf81ef032019-10-04 15:19:43 +02001117 conn->ctx = conn_ctx; /* restore saved ctx */
1118 TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001119 return -1;
1120}
1121
Willy Tarreau751f2d02018-10-05 09:35:00 +02001122/* returns the next allocatable outgoing stream ID for the H2 connection, or
1123 * -1 if no more is allocatable.
1124 */
1125static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
1126{
1127 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +01001128
1129 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +02001130 id = -1;
1131 return id;
1132}
1133
Willy Tarreau2373acc2017-10-12 17:35:14 +02001134/* returns the stream associated with id <id> or NULL if not found */
1135static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1136{
1137 struct eb32_node *node;
1138
Willy Tarreau751f2d02018-10-05 09:35:00 +02001139 if (id == 0)
1140 return (struct h2s *)h2_closed_stream;
1141
Willy Tarreau2a856182017-05-16 15:20:39 +02001142 if (id > h2c->max_id)
1143 return (struct h2s *)h2_idle_stream;
1144
Willy Tarreau2373acc2017-10-12 17:35:14 +02001145 node = eb32_lookup(&h2c->streams_by_id, id);
1146 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001147 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001148
1149 return container_of(node, struct h2s, by_id);
1150}
1151
/* release function. This one should be called to free all resources allocated
 * to the mux. It releases the h2c itself (dbuf, mbuf, hpack table, task,
 * tasklet) and, when the connection still belongs to this mux, shuts it down
 * and frees it as well.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = NULL;

	TRACE_ENTER(H2_EV_H2C_END);

	if (h2c) {
		/* The connection must be attached to this mux to be released */
		if (h2c->conn && h2c->conn->ctx == h2c)
			conn = h2c->conn;

		TRACE_DEVEL("freeing h2c", H2_EV_H2C_END, conn);
		hpack_dht_free(h2c->ddht);

		/* drop any pending buffer-wait registration before freeing */
		if (LIST_INLIST(&h2c->buf_wait.list))
			LIST_DEL_INIT(&h2c->buf_wait.list);

		h2_release_buf(h2c, &h2c->dbuf);
		h2_release_mbuf(h2c);

		if (h2c->task) {
			/* detach the timeout task and wake it so it can
			 * release itself (it must not dereference h2c anymore)
			 */
			h2c->task->context = NULL;
			task_wakeup(h2c->task, TASK_WOKEN_OTHER);
			h2c->task = NULL;
		}
		if (h2c->wait_event.tasklet)
			tasklet_free(h2c->wait_event.tasklet);
		/* unsubscribe from the transport layer if we still had events
		 * armed, otherwise the xprt could wake a freed context
		 */
		if (conn && h2c->wait_event.events != 0)
			conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
						&h2c->wait_event);

		HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

		pool_free(pool_head_h2c, h2c);
	}

	if (conn) {
		/* frontend connections may sit in the per-thread stopping list */
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1220
1221
Willy Tarreau71681172017-10-23 14:39:06 +02001222/******************************************************/
1223/* functions below are for the H2 protocol processing */
1224/******************************************************/
1225
1226/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001227static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001228{
1229 return h2s ? h2s->id : 0;
1230}
1231
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001232/* returns the sum of the stream's own window size and the mux's initial
1233 * window, which together form the stream's effective window size.
1234 */
1235static inline int h2s_mws(const struct h2s *h2s)
1236{
1237 return h2s->sws + h2s->h2c->miw;
1238}
1239
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001240/* returns true of the mux is currently busy as seen from stream <h2s> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001241static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s)
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001242{
1243 if (h2c->msi < 0)
1244 return 0;
1245
1246 if (h2c->msi == h2s_id(h2s))
1247 return 0;
1248
1249 return 1;
1250}
1251
Willy Tarreau741d6df2017-10-17 08:00:59 +02001252/* marks an error on the connection */
Willy Tarreau1f094672017-11-20 21:27:45 +01001253static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
Willy Tarreau741d6df2017-10-17 08:00:59 +02001254{
Willy Tarreau022e5e52020-09-10 09:33:15 +02001255 TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
Willy Tarreau741d6df2017-10-17 08:00:59 +02001256 h2c->errcode = err;
1257 h2c->st0 = H2_CS_ERROR;
1258}
1259
Willy Tarreau175cebb2019-01-24 10:02:24 +01001260/* marks an error on the stream. It may also update an already closed stream
1261 * (e.g. to report an error after an RST was received).
1262 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001263static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
Willy Tarreau2e43f082017-10-17 08:03:59 +02001264{
Willy Tarreau175cebb2019-01-24 10:02:24 +01001265 if (h2s->id && h2s->st != H2_SS_ERROR) {
Willy Tarreau022e5e52020-09-10 09:33:15 +02001266 TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
Willy Tarreau2e43f082017-10-17 08:03:59 +02001267 h2s->errcode = err;
Willy Tarreau175cebb2019-01-24 10:02:24 +01001268 if (h2s->st < H2_SS_ERROR)
1269 h2s->st = H2_SS_ERROR;
Willy Tarreauec988c72018-12-19 18:00:29 +01001270 if (h2s->cs)
1271 cs_set_error(h2s->cs);
Willy Tarreau2e43f082017-10-17 08:03:59 +02001272 }
1273}
1274
Willy Tarreau7e094452018-12-19 18:08:52 +01001275/* attempt to notify the data layer of recv availability */
1276static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1277{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001278 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001279 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001280 tasklet_wakeup(h2s->subs->tasklet);
1281 h2s->subs->events &= ~SUB_RETRY_RECV;
1282 if (!h2s->subs->events)
1283 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001284 }
1285}
1286
1287/* attempt to notify the data layer of send availability */
1288static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1289{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001290 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001291 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001292 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001293 tasklet_wakeup(h2s->subs->tasklet);
1294 h2s->subs->events &= ~SUB_RETRY_SEND;
1295 if (!h2s->subs->events)
1296 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001297 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001298 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1299 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1300 tasklet_wakeup(h2s->shut_tl);
1301 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001302}
1303
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001304/* alerts the data layer, trying to wake it up by all means, following
1305 * this sequence :
1306 * - if the h2s' data layer is subscribed to recv, then it's woken up for recv
1307 * - if its subscribed to send, then it's woken up for send
1308 * - if it was subscribed to neither, its ->wake() callback is called
1309 * It is safe to call this function with a closed stream which doesn't have a
1310 * conn_stream anymore.
1311 */
1312static void __maybe_unused h2s_alert(struct h2s *h2s)
1313{
Willy Tarreau7838a792019-08-12 18:42:03 +02001314 TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
1315
Willy Tarreauf96508a2020-01-10 11:12:48 +01001316 if (h2s->subs ||
Willy Tarreau5723f292020-01-10 15:16:57 +01001317 (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001318 h2s_notify_recv(h2s);
1319 h2s_notify_send(h2s);
1320 }
Willy Tarreau7838a792019-08-12 18:42:03 +02001321 else if (h2s->cs && h2s->cs->data_cb->wake != NULL) {
1322 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001323 h2s->cs->data_cb->wake(h2s->cs);
Willy Tarreau7838a792019-08-12 18:42:03 +02001324 }
1325
1326 TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001327}
1328
Willy Tarreaue4820742017-07-27 13:37:23 +02001329/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001330static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001331{
1332 uint8_t *out = frame;
1333
1334 *out = len >> 16;
1335 write_n16(out + 1, len);
1336}
1337
Willy Tarreau54c15062017-10-10 17:10:03 +02001338/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
1339 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
1340 * the caller's responsibility to verify that there are at least <bytes> bytes
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001341 * available in the buffer's input prior to calling this function. The buffer
1342 * is assumed not to hold any output data.
Willy Tarreau54c15062017-10-10 17:10:03 +02001343 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001344static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
Willy Tarreau54c15062017-10-10 17:10:03 +02001345 const struct buffer *b, int o)
1346{
Willy Tarreau591d4452018-06-15 17:21:00 +02001347 readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001348}
1349
Willy Tarreau1f094672017-11-20 21:27:45 +01001350static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001351{
Willy Tarreau591d4452018-06-15 17:21:00 +02001352 return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001353}
1354
Willy Tarreau1f094672017-11-20 21:27:45 +01001355static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001356{
Willy Tarreau591d4452018-06-15 17:21:00 +02001357 return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001358}
1359
Willy Tarreau1f094672017-11-20 21:27:45 +01001360static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001361{
Willy Tarreau591d4452018-06-15 17:21:00 +02001362 return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001363}
1364
1365
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001366/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
1367 * The algorithm is not obvious. It turns out that H2 headers are neither
1368 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
1369 * may wrap so each byte read must be checked. The header is formed like this :
Willy Tarreau715d5312017-07-11 15:20:24 +02001370 *
1371 * b0 b1 b2 b3 b4 b5..b8
1372 * +----------+---------+--------+----+----+----------------------+
1373 * |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
1374 * +----------+---------+--------+----+----+----------------------+
1375 *
1376 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
1377 * we get the sid properly aligned and ordered, and 16 bits of len properly
1378 * ordered as well. The type and flags can be extracted using bit shifts from
1379 * the word, and only one extra read is needed to fetch len[16:23].
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001380 * Returns zero if some bytes are missing, otherwise non-zero on success. The
1381 * buffer is assumed not to contain any output data.
Willy Tarreau715d5312017-07-11 15:20:24 +02001382 */
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001383static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001384{
1385 uint64_t w;
1386
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001387 if (b_data(b) < o + 9)
Willy Tarreau715d5312017-07-11 15:20:24 +02001388 return 0;
1389
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001390 w = h2_get_n64(b, o + 1);
1391 h->len = *(uint8_t*)b_peek(b, o) << 16;
Willy Tarreau715d5312017-07-11 15:20:24 +02001392 h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
1393 h->ff = w >> 32;
1394 h->ft = w >> 40;
1395 h->len += w >> 48;
1396 return 1;
1397}
1398
1399/* skip the next 9 bytes corresponding to the frame header possibly parsed by
1400 * h2_peek_frame_hdr() above.
1401 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001402static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
Willy Tarreau715d5312017-07-11 15:20:24 +02001403{
Willy Tarreaue5f12ce2018-06-15 10:28:05 +02001404 b_del(b, 9);
Willy Tarreau715d5312017-07-11 15:20:24 +02001405}
1406
1407/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001408static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001409{
1410 int ret;
1411
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001412 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001413 if (ret > 0)
1414 h2_skip_frame_hdr(b);
1415 return ret;
1416}
1417
Willy Tarreaucb985a42019-10-07 16:56:34 +02001418
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	/* payload size of the current (unfragmented) frame */
	size_t remain = b->data - 9;
	/* number of CONTINUATION frames needed; the "-1" makes a payload of
	 * exactly k*mfs bytes need only k-1 extra frames
	 */
	int extra_frames = (remain - 1) / mfs;
	size_t fsize;
	char *fptr;
	int frame;

	/* frame already fits within <mfs>: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	/* work backwards from the last fragment so each memmove only shifts
	 * the tail that belongs to the fragment being created
	 */
	for (frame = extra_frames; frame; frame--) {
		/* last fragment takes the remainder (1..mfs bytes), others take mfs */
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data: destination leaves room for <frame> 9-byte headers */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header: CONTINUATION, EH set only on the
		 * final fragment, stream ID copied from the initial frame
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial HEADERS frame keeps only <remain> bytes and loses EH */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1464
1465
Willy Tarreau00dd0782018-03-01 16:31:34 +01001466/* marks stream <h2s> as CLOSED and decrement the number of active streams for
1467 * its connection if the stream was not yet closed. Please use this exclusively
1468 * before closing a stream to ensure stream count is well maintained.
Willy Tarreau91bfdd72017-12-14 12:00:14 +01001469 */
Willy Tarreau00dd0782018-03-01 16:31:34 +01001470static inline void h2s_close(struct h2s *h2s)
Willy Tarreau91bfdd72017-12-14 12:00:14 +01001471{
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001472 if (h2s->st != H2_SS_CLOSED) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001473 TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
Willy Tarreau91bfdd72017-12-14 12:00:14 +01001474 h2s->h2c->nb_streams--;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001475 if (!h2s->id)
1476 h2s->h2c->nb_reserved--;
Willy Tarreaua27db382019-03-25 18:13:16 +01001477 if (h2s->cs) {
Willy Tarreaua27db382019-03-25 18:13:16 +01001478 if (!(h2s->cs->flags & CS_FL_EOS) && !b_data(&h2s->rxbuf))
1479 h2s_notify_recv(h2s);
1480 }
Willy Tarreau4781b152021-04-06 13:53:36 +02001481 HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001482
Willy Tarreau7838a792019-08-12 18:42:03 +02001483 TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001484 }
Willy Tarreau91bfdd72017-12-14 12:00:14 +01001485 h2s->st = H2_SS_CLOSED;
1486}
1487
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	/* keep the connection pointer for the final trace: h2s is freed below */
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	/* close first so the per-connection stream counters stay accurate */
	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		/* a buffer was returned to the pool, offer it to waiters */
		offer_buffers(NULL, 1);
	}

	/* neutralize any remaining subscription so no stale event fires */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	LIST_DEL_INIT(&h2s->list);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1521
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function.
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* the deferred-shutdown tasklet allows closing from another thread */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->cs = NULL;
	h2s->sws = 0;                    /* stream window update, added to the mux's initial window */
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	/* index the stream by its ID; id==0 denotes a reserved stream */
	h2s->by_id.key = h2s->id = id;
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;
	h2c->stream_cnt++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1577
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it. <flags> are the stream flags known at creation time (e.g.
 * H2_SF_EXT_CONNECT_RCVD).
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct conn_stream *cs;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse to exceed the advertised concurrent stream limit */
	if (h2c->nb_streams >= h2_settings_max_concurrent_streams)
		goto out;

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out;

	cs = cs_new();
	if (!cs)
		goto out_close;
	cs->flags |= CS_FL_NOT_FIRST;
	cs_attach_endp(cs, &h2c->conn->obj_type, h2s);
	h2s->cs = cs;
	h2c->nb_cs++;

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		cs->flags |= CS_FL_WEBSOCKET;

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;

	if (!stream_new(h2c->conn->owner, cs, input))
		goto out_free_cs;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->tv_accept = now;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_cs(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_free_cs:
	/* undo the conn_stream accounting; restart the idle clock when the
	 * connection becomes totally idle again
	 */
	h2c->nb_cs--;
	if (!h2c->nb_cs)
		h2c->idle_start = now_ms;
	cs_free(cs);
	h2s->cs = NULL;
 out_close:
	h2s_destroy(h2s);
 out:
	/* log the failure at the session level since no stream exists yet */
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1651
Willy Tarreau751f2d02018-10-05 09:35:00 +02001652/* allocates a new stream associated to conn_stream <cs> on the h2c connection
1653 * and returns it, or NULL in case of memory allocation error or if the highest
1654 * possible stream ID was reached.
1655 */
Olivier Houchardf502aca2018-12-14 19:42:40 +01001656static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess)
Willy Tarreau751f2d02018-10-05 09:35:00 +02001657{
1658 struct h2s *h2s = NULL;
1659
Willy Tarreau7838a792019-08-12 18:42:03 +02001660 TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);
1661
Willy Tarreau86949782019-01-31 10:42:05 +01001662 if (h2c->nb_streams >= h2c->streams_limit)
Willy Tarreau751f2d02018-10-05 09:35:00 +02001663 goto out;
1664
Willy Tarreaua80dca82019-01-24 17:08:28 +01001665 if (h2_streams_left(h2c) < 1)
1666 goto out;
1667
Willy Tarreau751f2d02018-10-05 09:35:00 +02001668 /* Defer choosing the ID until we send the first message to create the stream */
1669 h2s = h2s_new(h2c, 0);
1670 if (!h2s)
1671 goto out;
1672
1673 h2s->cs = cs;
Olivier Houchardf502aca2018-12-14 19:42:40 +01001674 h2s->sess = sess;
Willy Tarreau751f2d02018-10-05 09:35:00 +02001675 cs->ctx = h2s;
1676 h2c->nb_cs++;
1677
Willy Tarreau751f2d02018-10-05 09:35:00 +02001678 out:
Willy Tarreau7838a792019-08-12 18:42:03 +02001679 if (likely(h2s))
1680 TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
1681 else
1682 TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
Willy Tarreau751f2d02018-10-05 09:35:00 +02001683 return h2s;
1684}
1685
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int mfs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	/* the mux is busy emitting another frame: note the blocking cause
	 * and let the caller retry later.
	 */
	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* the frame is first assembled in a local buffer; the 9-byte frame
	 * header is written with a zero payload length which is fixed up at
	 * the end, once the number of settings present is known.
	 */
	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	       "\x00\x00\x00"      /* length : 0 for now */
	       "\x04\x00"          /* type : 4 (settings), flags : 0 */
	       "\x00\x00\x00\x00", /* stream ID : 0 */
	       9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* below, each setting is only advertised when it differs from the
	 * protocol default (RFC7540#6.5.2), to keep the frame minimal.
	 */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_initial_window_size != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, h2_settings_initial_window_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_max_concurrent_streams != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, h2_settings_max_concurrent_streams);
		chunk_memcat(&buf, str, 6);
	}

	/* cap the advertised max frame size to the buffer size since larger
	 * frames could never be processed anyway; 0 means "use bufsize".
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* fix up the frame length (payload only, header excluded) */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the blocking cause */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output buffer full: try to append a new one to the
			 * ring and retry the emission there.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1791
Willy Tarreau52eed752017-09-22 15:05:09 +02001792/* Try to receive a connection preface, then upon success try to send our
1793 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
1794 * missing data. It may return an error in h2c.
1795 */
1796static int h2c_frt_recv_preface(struct h2c *h2c)
1797{
1798 int ret1;
Willy Tarreaube5b7152017-09-25 16:25:39 +02001799 int ret2;
Willy Tarreau52eed752017-09-22 15:05:09 +02001800
Willy Tarreau7838a792019-08-12 18:42:03 +02001801 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
1802
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001803 ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));
Willy Tarreau52eed752017-09-22 15:05:09 +02001804
1805 if (unlikely(ret1 <= 0)) {
Christopher Fauletb5f7b522021-07-26 12:06:53 +02001806 if (!ret1)
1807 h2c->flags |= H2_CF_DEM_SHORT_READ;
Amaury Denoyellea8879232020-10-27 17:16:03 +01001808 if (ret1 < 0 || conn_xprt_read0_pending(h2c->conn)) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01001809 TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02001810 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreauee4684f2021-06-17 08:08:48 +02001811 if (b_data(&h2c->dbuf) ||
1812 !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
1813 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Amaury Denoyellea8879232020-10-27 17:16:03 +01001814 }
Willy Tarreau7838a792019-08-12 18:42:03 +02001815 ret2 = 0;
1816 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02001817 }
1818
Willy Tarreau7f0cc492018-10-08 07:13:08 +02001819 ret2 = h2c_send_settings(h2c);
Willy Tarreaube5b7152017-09-25 16:25:39 +02001820 if (ret2 > 0)
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001821 b_del(&h2c->dbuf, ret1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001822 out:
1823 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreaube5b7152017-09-25 16:25:39 +02001824 return ret2;
Willy Tarreau52eed752017-09-22 15:05:09 +02001825}
1826
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	/* the mux is busy emitting another frame: wait for it */
	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the blocking cause */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* output buffer full: extend the ring and
				 * retry the emission in the new buffer.
				 */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	/* preface emitted (or already present): follow with our SETTINGS */
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1874
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	/* a previous attempt already failed in an unrecoverable way: pretend
	 * it worked so that callers don't retry forever.
	 */
	if (h2c->flags & H2_CF_GOAWAY_FAILED) {
		ret = 1; // claim that it worked
		goto out;
	}

	/* the mux is busy: record who is blocked so it can be woken later */
	if (h2c_mux_busy(h2c, h2s)) {
		if (h2s)
			h2s->flags |= H2_SF_BLK_MBUSY;
		else
			h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: note the blocking cause for
		 * whichever side requested the GOAWAY.
		 */
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output buffer full: extend the ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1963
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be update to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* nothing to do if the stream is unknown or already closed */
	if (!h2s || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* the mux is busy emitting another frame: wait for it */
	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the blocking cause */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output buffer full: extend the ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* even when the emission was skipped (RST-in-response-to-RST), the
	 * stream is marked as reset and closed.
	 */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2035
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	/* the mux is busy emitting another frame: block the demuxer */
	if (h2c_mux_busy(h2c, h2s)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* note: the stream ID comes from the demuxer (h2c->dsi), not from
	 * h2s->id, so that dummy streams can be reset as well.
	 */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the blocking cause */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* output buffer full: extend the ring and retry */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* dummy streams have id 0 and carry no state to update */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	/* NOTE(review): this counter is also incremented on the blocked paths
	 * above (ret == 0), where the caller will retry later and increment it
	 * again — this looks like it can over-count; confirm intent.
	 */
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2108
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	/* in these states an ES was already sent: claim success */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* the mux is busy emitting another frame: wait for it */
	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available: report the blocking cause */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* output buffer full: extend the ring and retry */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
2164
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05002165/* wake a specific stream and assign its conn_stream some CS_FL_* flags among
Willy Tarreau99ad1b32019-05-14 11:46:28 +02002166 * CS_FL_ERR_PENDING and CS_FL_ERROR if needed. The stream's state
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002167 * is automatically updated accordingly. If the stream is orphaned, it is
2168 * destroyed.
Christopher Fauletf02ca002019-03-07 16:21:34 +01002169 */
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002170static void h2s_wake_one_stream(struct h2s *h2s)
Christopher Fauletf02ca002019-03-07 16:21:34 +01002171{
Willy Tarreau7838a792019-08-12 18:42:03 +02002172 struct h2c *h2c = h2s->h2c;
2173
2174 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);
2175
Christopher Fauletf02ca002019-03-07 16:21:34 +01002176 if (!h2s->cs) {
2177 /* this stream was already orphaned */
2178 h2s_destroy(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002179 TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002180 return;
2181 }
2182
Christopher Fauletaade4ed2020-10-08 15:38:41 +02002183 if (h2c_read0_pending(h2s->h2c)) {
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002184 if (h2s->st == H2_SS_OPEN)
2185 h2s->st = H2_SS_HREM;
2186 else if (h2s->st == H2_SS_HLOC)
2187 h2s_close(h2s);
2188 }
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002189
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002190 if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) ||
2191 (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
2192 h2s->cs->flags |= CS_FL_ERR_PENDING;
2193 if (h2s->cs->flags & CS_FL_EOS)
2194 h2s->cs->flags |= CS_FL_ERROR;
Willy Tarreau23482912019-05-07 15:23:14 +02002195
Willy Tarreauaebbe5e2019-05-07 17:48:59 +02002196 if (h2s->st < H2_SS_ERROR)
2197 h2s->st = H2_SS_ERROR;
2198 }
Christopher Fauletf02ca002019-03-07 16:21:34 +01002199
2200 h2s_alert(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02002201 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002202}
2203
2204/* wake the streams attached to the connection, whose id is greater than <last>
2205 * or unassigned.
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002206 */
Willy Tarreau23482912019-05-07 15:23:14 +02002207static void h2_wake_some_streams(struct h2c *h2c, int last)
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002208{
2209 struct eb32_node *node;
2210 struct h2s *h2s;
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002211
Willy Tarreau7838a792019-08-12 18:42:03 +02002212 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
2213
Christopher Fauletf02ca002019-03-07 16:21:34 +01002214 /* Wake all streams with ID > last */
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002215 node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
2216 while (node) {
2217 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002218 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002219 h2s_wake_one_stream(h2s);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002220 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01002221
Christopher Fauletf02ca002019-03-07 16:21:34 +01002222 /* Wake all streams with unassigned ID (ID == 0) */
2223 node = eb32_lookup(&h2c->streams_by_id, 0);
2224 while (node) {
2225 h2s = container_of(node, struct h2s, by_id);
2226 if (h2s->id > 0)
2227 break;
2228 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002229 h2s_wake_one_stream(h2s);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002230 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002231
2232 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002233}
2234
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002235/* Wake up all blocked streams whose window size has become positive after the
2236 * mux's initial window was adjusted. This should be done after having processed
2237 * SETTINGS frames which have updated the mux's initial window size.
Willy Tarreau3421aba2017-07-27 15:41:03 +02002238 */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002239static void h2c_unblock_sfctl(struct h2c *h2c)
Willy Tarreau3421aba2017-07-27 15:41:03 +02002240{
2241 struct h2s *h2s;
2242 struct eb32_node *node;
2243
Willy Tarreau7838a792019-08-12 18:42:03 +02002244 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
2245
Willy Tarreau3421aba2017-07-27 15:41:03 +02002246 node = eb32_first(&h2c->streams_by_id);
2247 while (node) {
2248 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002249 if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002250 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002251 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002252 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2253 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002254 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002255 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02002256 node = eb32_next(node);
2257 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002258
2259 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002260}
2261
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	if (h2c->dff & H2_F_SETTINGS_ACK) {
		/* RFC7540#6.5: a SETTINGS ACK must carry an empty payload */
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: each setting is a 16-bit identifier followed by
	 * a 32-bit value, hence the 6-byte stride (RFC7540#6.5.1).
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t  arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			/* RFC7540#6.5.2: must stay between 2^14 and 2^24-1 */
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* only note the change here; the new HPACK encoder
			 * table size is committed later.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2_settings_max_concurrent_streams)
					arg = h2_settings_max_concurrent_streams;
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* peer announces RFC8441 extended CONNECT support */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	/* only log on the frontend side; backend errors are reported by the stream */
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2352
/* try to send an ACK for a settings frame on the connection. Returns > 0 on
 * success or one of the h2_status values.
 */
static int h2c_ack_settings(struct h2c *h2c)
{
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		/* mux output blocked: remember it so we're woken up later */
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* pre-built 9-byte frame header of an empty SETTINGS ACK */
	memcpy(str,
	       "\x00\x00\x00"     /* length : 0 (no data) */
	       "\x04" "\x01"      /* type : 4, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available yet */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* current buffer full: try to append one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* write error: kill the connection */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
2399
Willy Tarreaucf68c782017-10-10 17:11:41 +02002400/* processes a PING frame and schedules an ACK if needed. The caller must pass
2401 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002402 * missing data. The caller must have already verified frame length
2403 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002404 */
2405static int h2c_handle_ping(struct h2c *h2c)
2406{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002407 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002408 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002409 h2c->st0 = H2_CS_FRAME_A;
2410 return 1;
2411}
2412
/* Try to send a window update for stream id <sid> and value <increment>.
 * Returns > 0 on success or zero on missing room or failure. It may return an
 * error in h2c.
 */
static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		/* mux output blocked: remember it so we're woken up later */
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* length: 4, type: 8, flags: none */
	memcpy(str, "\x00\x00\x04\x08\x00", 5);
	write_n32(str + 5, sid);
	write_n32(str + 9, increment);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available yet */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* current buffer full: try to append one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* write error: kill the connection */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
	return ret;
}
2460
/* try to send pending window update for the connection. It's safe to call it
 * with no pending updates. Returns > 0 on success or zero on missing room or
 * failure. It may return an error in h2c.
 */
static int h2c_send_conn_wu(struct h2c *h2c)
{
	int ret = 1;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);

	/* nothing pending to advertise */
	if (h2c->rcvd_c <= 0)
		goto out;

	if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
		/* increase the advertised connection window to 2G on
		 * first update.
		 */
		h2c->flags |= H2_CF_WINDOW_OPENED;
		h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
	}

	/* send WU for the connection; rcvd_c is only reset once the
	 * frame was really emitted.
	 */
	ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
	if (ret > 0)
		h2c->rcvd_c = 0;

 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
	return ret;
}
2491
2492/* try to send pending window update for the current dmux stream. It's safe to
2493 * call it with no pending updates. Returns > 0 on success or zero on missing
2494 * room or failure. It may return an error in h2c.
2495 */
2496static int h2c_send_strm_wu(struct h2c *h2c)
2497{
2498 int ret = 1;
2499
Willy Tarreau7838a792019-08-12 18:42:03 +02002500 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2501
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002502 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002503 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002504
2505 /* send WU for the stream */
2506 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2507 if (ret > 0)
2508 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002509 out:
2510 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002511 return ret;
2512}
2513
/* try to send an ACK for a ping frame on the connection. Returns > 0 on
 * success, 0 on missing data or one of the h2_status values.
 */
static int h2c_ack_ping(struct h2c *h2c)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);

	/* the 8-byte PING payload must be fully received first since the
	 * ACK echoes it back.
	 */
	if (b_data(&h2c->dbuf) < 8)
		goto out;

	if (h2c_mux_busy(h2c, NULL)) {
		/* mux output blocked: remember it so we're woken up later */
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* 9-byte frame header of a PING ACK */
	memcpy(str,
	       "\x00\x00\x08"     /* length : 8 (same payload) */
	       "\x06" "\x01"      /* type : 6, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	/* copy the original payload */
	h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* no output buffer available yet */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* current buffer full: try to append one to the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* write error: kill the connection */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
	return ret;
}
2566
Willy Tarreau26f95952017-07-27 17:18:30 +02002567/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
2568 * Returns > 0 on success or zero on missing data. It may return an error in
Willy Tarreaub860c732019-01-30 15:39:55 +01002569 * h2c or h2s. The caller must have already verified frame length and stream ID
2570 * validity. Described in RFC7540#6.9.
Willy Tarreau26f95952017-07-27 17:18:30 +02002571 */
2572static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
2573{
2574 int32_t inc;
2575 int error;
2576
Willy Tarreau7838a792019-08-12 18:42:03 +02002577 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
2578
Willy Tarreau26f95952017-07-27 17:18:30 +02002579 /* process full frame only */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002580 if (b_data(&h2c->dbuf) < h2c->dfl) {
2581 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002582 goto out0;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002583 }
Willy Tarreau26f95952017-07-27 17:18:30 +02002584
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002585 inc = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau26f95952017-07-27 17:18:30 +02002586
2587 if (h2c->dsi != 0) {
2588 /* stream window update */
Willy Tarreau26f95952017-07-27 17:18:30 +02002589
2590 /* it's not an error to receive WU on a closed stream */
2591 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau7838a792019-08-12 18:42:03 +02002592 goto done;
Willy Tarreau26f95952017-07-27 17:18:30 +02002593
2594 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002595 TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002596 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002597 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002598 goto strm_err;
2599 }
2600
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002601 if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002602 TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002603 error = H2_ERR_FLOW_CONTROL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002604 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002605 goto strm_err;
2606 }
2607
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002608 h2s->sws += inc;
2609 if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
Willy Tarreau26f95952017-07-27 17:18:30 +02002610 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002611 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002612 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2613 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002614 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreau26f95952017-07-27 17:18:30 +02002615 }
2616 }
2617 else {
2618 /* connection window update */
2619 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002620 TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002621 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002622 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002623 goto conn_err;
2624 }
2625
2626 if (h2c->mws >= 0 && h2c->mws + inc < 0) {
2627 error = H2_ERR_FLOW_CONTROL_ERROR;
2628 goto conn_err;
2629 }
2630
2631 h2c->mws += inc;
2632 }
2633
Willy Tarreau7838a792019-08-12 18:42:03 +02002634 done:
2635 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002636 return 1;
2637
2638 conn_err:
2639 h2c_error(h2c, error);
Willy Tarreau7838a792019-08-12 18:42:03 +02002640 out0:
2641 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002642 return 0;
2643
2644 strm_err:
Willy Tarreau6432dc82019-01-30 15:42:44 +01002645 h2s_error(h2s, error);
2646 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002647 TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002648 return 0;
2649}
2650
/* processes a GOAWAY frame, and signals all streams whose ID is greater than
 * the last ID. Returns > 0 on success or zero on missing data. The caller must
 * have already verified frame length and stream ID validity. Described in
 * RFC7540#6.8.
 */
static int h2c_handle_goaway(struct h2c *h2c)
{
	int last;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		return 0;
	}

	/* payload: last stream ID (4 bytes) then error code (4 bytes) */
	last = h2_get_n32(&h2c->dbuf, 0);
	h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
	/* only record the last_sid from the first GOAWAY received */
	if (h2c->last_sid < 0)
		h2c->last_sid = last;
	h2_wake_some_streams(h2c, last);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
	return 1;
}
2676
Willy Tarreau92153fc2017-12-03 19:46:19 +01002677/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002678 * invalid. Returns > 0 on success or zero on missing data. It may return an
2679 * error in h2c. The caller must have already verified frame length and stream
2680 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002681 */
2682static int h2c_handle_priority(struct h2c *h2c)
2683{
Willy Tarreau7838a792019-08-12 18:42:03 +02002684 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2685
Willy Tarreau92153fc2017-12-03 19:46:19 +01002686 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002687 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002688 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02002689 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002690 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002691 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002692
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002693 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002694 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002695 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002696 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002697 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002698 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002699 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002700 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002701 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002702 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002703}
2704
/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
 * Returns > 0 on success or zero on missing data. The caller must have already
 * verified frame length and stream ID validity. Described in RFC7540#6.4.
 */
static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		return 0;
	}

	/* late RST, already handled */
	if (h2s->st == H2_SS_CLOSED) {
		TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
		return 1;
	}

	/* record the peer's error code and close the stream */
	h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
	h2s_close(h2s);

	if (h2s->cs) {
		/* a conn_stream is still attached: propagate the error and
		 * wake the upper layer up.
		 */
		cs_set_error(h2s->cs);
		h2s_alert(h2s);
	}

	h2s->flags |= H2_SF_RST_RCVD;
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
	return 1;
}
2738
/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
 * It may return an error in h2c or h2s. The caller must consider that the
 * return value is the new h2s in case one was allocated (most common case).
 * Described in RFC7540#6.2. Most of the errors here are reported as connection
 * errors since it's impossible to recover from such errors after the
 * compression context has been altered.
 */
static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer rxbuf = BUF_NULL;
	unsigned long long body_len = 0;
	uint32_t flags = 0;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out; // empty buffer
	}

	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */
	if (h2s->st != H2_SS_IDLE) {
		/* The stream exists/existed, this must be a trailers frame */
		if (h2s->st != H2_SS_CLOSED) {
			error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
			/* unrecoverable error ? */
			if (h2c->st0 >= H2_CS_ERROR)
				goto out;

			if (error == 0) {
				/* Demux not blocked because of the stream, it is an incomplete frame */
				if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
					h2c->flags |= H2_CF_DEM_SHORT_READ;
				goto out; // missing data
			}

			if (error < 0) {
				/* Failed to decode this frame (e.g. too large request)
				 * but the HPACK decompressor is still synchronized.
				 */
				h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
				h2c->st0 = H2_CS_FRAME_E;
				goto out;
			}
			goto done;
		}
		/* the connection was already killed by an RST, let's consume
		 * the data and send another RST.
		 */
		error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
		h2s = (struct h2s*)h2_error_stream;
		goto send_rst;
	}
	else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
		/* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
		error = H2_ERR_PROTOCOL_ERROR;
		TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		sess_log(h2c->conn->owner);
		goto conn_err;
	}
	else if (h2c->flags & H2_CF_DEM_TOOMANY)
		goto out; // IDLE but too many cs still present

	error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR)
		goto out;

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto out; // missing data
		}

		/* Failed to decode this stream (e.g. too large request)
		 * but the HPACK decompressor is still synchronized.
		 */
		h2s = (struct h2s*)h2_error_stream;
		goto send_rst;
	}

	TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);

	/* Note: we don't emit any other logs below because if we return
	 * positively from h2c_frt_stream_new(), the stream will report the error,
	 * and if we return in error, h2c_frt_stream_new() will emit the error.
	 *
	 * Xfer the rxbuf to the stream. On success, the new stream owns the
	 * rxbuf. On error, it is released here.
	 */
	h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
	if (!h2s) {
		/* stream refused (e.g. resource shortage): RST it */
		h2s = (struct h2s*)h2_refused_stream;
		goto send_rst;
	}

	h2s->st = H2_SS_OPEN;
	h2s->flags |= flags;
	h2s->body_len = body_len;

 done:
	if (h2c->dff & H2_F_HEADERS_END_STREAM)
		h2s->flags |= H2_SF_ES_RCVD;

	if (h2s->flags & H2_SF_ES_RCVD) {
		/* end of stream received: half-close or fully close */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);
	}

	/* update the max stream ID if the request is being processed */
	if (h2s->id > h2c->max_id)
		h2c->max_id = h2s->id;

	return h2s;

 conn_err:
	h2c_error(h2c, error);
	goto out;

 out:
	h2_release_buf(h2c, &rxbuf);
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
2887
/* processes a HEADERS frame received on the backend (server) side, i.e. a
 * response's headers. Returns h2s on success or NULL on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
 * errors here are reported as connection errors since it's impossible to
 * recover from such errors after the compression context has been altered.
 */
static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer rxbuf = BUF_NULL;
	unsigned long long body_len = 0;
	uint32_t flags = 0;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* wait until the whole frame is buffered, unless the buffer is
	 * already full and can't hold more.
	 */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	if (h2s->st != H2_SS_CLOSED) {
		error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
	}
	else {
		/* the connection was already killed by an RST, let's consume
		 * the data and send another RST. The headers must still be
		 * decoded into a throw-away buffer to keep the shared HPACK
		 * decoding context synchronized.
		 */
		error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
		h2s = (struct h2s*)h2_error_stream;
		h2c->st0 = H2_CS_FRAME_E;
		goto send_rst;
	}

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#5.1: a response may only arrive on a stream which
		 * is open or half-closed (local); anything else is a stream
		 * error.
		 */
		TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto fail; // missing data
		}

		/* stream error : send RST_STREAM */
		TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (h2c->dff & H2_F_HEADERS_END_STREAM)
		h2s->flags |= H2_SF_ES_RCVD;

	/* propagate an upper-layer error to the stream state, otherwise
	 * perform the regular state transitions for a received ES.
	 */
	if (h2s->cs && h2s->cs->flags & CS_FL_ERROR && h2s->st < H2_SS_ERROR)
		h2s->st = H2_SS_ERROR;
	else if (h2s->flags & H2_SF_ES_RCVD) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* Unblock busy server h2s waiting for the response headers to validate
	 * the tunnel establishment or the end of the response of an aborted
	 * tunnel
	 */
	if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
2995
/* processes a DATA frame. Returns > 0 on success or zero on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.1.
 */
static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
{
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* note that empty DATA frames are perfectly valid and sometimes used
	 * to signal an end of stream (with the ES flag).
	 */

	if (!b_size(&h2c->dbuf) && h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#6.1: DATA is only valid on open or half-closed
		 * (local) streams.
		 */
		error = H2_ERR_STREAM_CLOSED;
		goto strm_err;
	}

	/* h2c->dfl - h2c->dpl is the frame's payload minus its padding */
	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
		/* RFC7540#8.1.2: payload exceeds the announced content-length */
		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	if (!(h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
		/* a tunnel attempt was aborted but the client still tries to send some raw data.
		 * Thus the stream is closed with the CANCEL error. Here we take care it is not
		 * an empty DATA Frame with the ES flag. The error is only handled if ES was
		 * already sent to the client because depending on the scheduling, these data may
		 * have been sent before the server response but not handled here.
		 */
		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	/* move the payload from the demux buffer to the stream's rxbuf */
	if (!h2_frt_transfer_data(h2s))
		goto fail;

	/* call the upper layers to process the frame, then let the upper layer
	 * notify the stream about any change.
	 */
	if (!h2s->cs) {
		/* The upper layer has already closed, this may happen on
		 * 4xx/redirects during POST, or when receiving a response
		 * from an H2 server after the client has aborted.
		 */
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st >= H2_SS_ERROR) {
		/* stream error : send RST_STREAM */
		h2c->st0 = H2_CS_FRAME_E;
	}

	/* check for completion : the callee will change this to FRAME_A or
	 * FRAME_H once done.
	 */
	if (h2c->st0 == H2_CS_FRAME_P)
		goto fail;

	/* last frame */
	if (h2c->dff & H2_F_DATA_END_STREAM) {
		h2s->flags |= H2_SF_ES_RCVD;
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);

		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
			/* RFC7540#8.1.2: ES received before the announced
			 * content-length was fully consumed.
			 */
			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}
	}

	/* Unblock busy server h2s waiting for the end of the response for an
	 * aborted tunnel
	 */
	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;

 strm_err:
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
3113
/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
 * valid for the current stream state. This is needed only after parsing the
 * frame header but in practice it can be performed at any time during
 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 */
static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);

	if (h2s->st == H2_SS_IDLE &&
	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
		 * this state MUST be treated as a connection error
		 */
		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
			/* only log if no other stream can report the error */
			sess_log(h2c->conn->owner);
		}
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
		/* on the backend side a server may not initiate a stream;
		 * only PUSH_PROMISE would be permitted here
		 */
		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
		 * this state MUST be treated as a stream error.
		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
		 * PUSH_PROMISE/CONTINUATION cause connection errors.
		 */
		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		else {
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		}
		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* Below the management of frames received in closed state is a
	 * bit hackish because the spec makes strong differences between
	 * streams closed by receiving RST, sending RST, and seeing ES
	 * in both directions. In addition to this, the creation of a
	 * new stream reusing the identifier of a closed one will be
	 * detected here. Given that we cannot keep track of all closed
	 * streams forever, we consider that unknown closed streams were
	 * closed on RST received, which allows us to respond with an
	 * RST without breaking the connection (eg: to abort a transfer).
	 * Some frames have to be silently ignored as well.
	 */
	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* #5.1.1: The identifier of a newly
			 * established stream MUST be numerically
			 * greater than all streams that the initiating
			 * endpoint has opened or reserved. This
			 * governs streams that are opened using a
			 * HEADERS frame and streams that are reserved
			 * using PUSH_PROMISE. An endpoint that
			 * receives an unexpected stream identifier
			 * MUST respond with a connection error.
			 */
			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		if (h2s->flags & H2_SF_RST_RCVD &&
		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
			/* RFC7540#5.1:closed: an endpoint that
			 * receives any frame other than PRIORITY after
			 * receiving a RST_STREAM MUST treat that as a
			 * stream error of type STREAM_CLOSED.
			 *
			 * Note that old streams fall into this category
			 * and will lead to an RST being sent.
			 *
			 * However, we cannot generalize this to all frame types. Those
			 * carrying compression state must still be processed before
			 * being dropped or we'll desynchronize the decoder. This can
			 * happen with request trailers received after sending an
			 * RST_STREAM, or with header/trailers responses received after
			 * sending RST_STREAM (aborted stream).
			 *
			 * In addition, since our CLOSED streams always carry the
			 * RST_RCVD bit, we don't want to accidentally catch valid
			 * frames for a closed stream, i.e. RST/PRIO/WU.
			 */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
			h2c->st0 = H2_CS_FRAME_E;
			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		/* RFC7540#5.1:closed: if this state is reached as a
		 * result of sending a RST_STREAM frame, the peer that
		 * receives the RST_STREAM might have already sent
		 * frames on the stream that cannot be withdrawn. An
		 * endpoint MUST ignore frames that it receives on
		 * closed streams after it has sent a RST_STREAM
		 * frame. An endpoint MAY choose to limit the period
		 * over which it ignores frames and treat frames that
		 * arrive after this time as being in error.
		 */
		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
			/* RFC7540#5.1:closed: any frame other than
			 * PRIO/WU/RST in this state MUST be treated as
			 * a connection error
			 */
			if (h2c->dft != H2_FT_RST_STREAM &&
			    h2c->dft != H2_FT_PRIORITY &&
			    h2c->dft != H2_FT_WINDOW_UPDATE) {
				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
				return 0;
			}
		}
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
	return 1;
}
3250
Willy Tarreaubc933932017-10-09 16:21:43 +02003251/* process Rx frames to be demultiplexed */
3252static void h2_process_demux(struct h2c *h2c)
3253{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003254 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003255 struct h2_fh hdr;
3256 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003257 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003258
Willy Tarreau7838a792019-08-12 18:42:03 +02003259 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3260
Willy Tarreau081d4722017-05-16 21:51:05 +02003261 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003262 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003263
3264 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3265 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003266 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003267 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003268 goto out;
3269
Willy Tarreau52eed752017-09-22 15:05:09 +02003270 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3271 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003272 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003273 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003274 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauee4684f2021-06-17 08:08:48 +02003275 if (b_data(&h2c->dbuf) ||
Christopher Faulet3f35da22021-07-26 10:18:35 +02003276 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauee4684f2021-06-17 08:08:48 +02003277 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003278 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003279 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003280 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003281 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003282
3283 h2c->max_id = 0;
3284 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau7838a792019-08-12 18:42:03 +02003285 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003286 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003287
3288 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003289 /* ensure that what is pending is a valid SETTINGS frame
3290 * without an ACK.
3291 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003292 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003293 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003294 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003295 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003296 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003297 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003298 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003299 if (!(h2c->flags & H2_CF_IS_BACK))
3300 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003301 }
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003302 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003303 }
3304
3305 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3306 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003307 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003308 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3309 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003310 if (!(h2c->flags & H2_CF_IS_BACK))
3311 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003312 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003313 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003314 }
3315
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003316 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003317 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003318 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003319 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3320 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003321 if (!(h2c->flags & H2_CF_IS_BACK))
3322 sess_log(h2c->conn->owner);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003323 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003324 }
3325
Willy Tarreau3bf69182018-12-21 15:34:50 +01003326 /* that's OK, switch to FRAME_P to process it. This is
3327 * a SETTINGS frame whose header has already been
3328 * deleted above.
3329 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003330 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003331 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003332 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003333 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003334 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003335
3336 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003337 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003338 int ret = 0;
3339
Willy Tarreau7838a792019-08-12 18:42:03 +02003340 if (!b_data(&h2c->dbuf)) {
3341 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003342 h2c->flags |= H2_CF_DEM_SHORT_READ;
3343 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003344 }
3345
3346 if (h2c->st0 >= H2_CS_ERROR) {
3347 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003348 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003349 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003350
3351 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau30d05f32019-08-06 15:49:51 +02003352 h2c->rcvd_s = 0;
3353
Willy Tarreau7838a792019-08-12 18:42:03 +02003354 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003355 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3356 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003357 break;
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003358 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003359
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003360 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003361 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003362 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003363 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003364 /* only log if no other stream can report the error */
3365 sess_log(h2c->conn->owner);
3366 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003367 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003368 break;
3369 }
3370
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003371 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003372 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3373 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3374 * we read the pad length and drop it from the remaining
3375 * payload (one byte + the 9 remaining ones = 10 total
3376 * removed), so we have a frame payload starting after the
3377 * pad len. Flow controlled frames (DATA) also count the
3378 * padlen in the flow control, so it must be adjusted.
3379 */
3380 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003381 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003382 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003383 if (!(h2c->flags & H2_CF_IS_BACK))
3384 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003385 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003386 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003387 }
3388 hdr.len--;
3389
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003390 if (b_data(&h2c->dbuf) < 10) {
3391 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003392 break; // missing padlen
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003393 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003394
3395 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3396
3397 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003398 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003399 /* RFC7540#6.1 : pad length = length of
3400 * frame payload or greater => error.
3401 */
3402 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003403 if (!(h2c->flags & H2_CF_IS_BACK))
3404 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003405 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003406 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003407 }
3408
3409 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3410 h2c->rcvd_c++;
3411 h2c->rcvd_s++;
3412 }
3413 b_del(&h2c->dbuf, 1);
3414 }
3415 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003416
3417 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003418 h2c->dfl = hdr.len;
3419 h2c->dsi = hdr.sid;
3420 h2c->dft = hdr.ft;
3421 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003422 h2c->dpl = padlen;
Willy Tarreau73db4342019-09-25 07:28:44 +02003423 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003424 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003425
3426 /* check for minimum basic frame format validity */
3427 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3428 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003429 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003430 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003431 if (!(h2c->flags & H2_CF_IS_BACK))
3432 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003433 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003434 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003435 }
Willy Tarreau15a47332022-03-18 15:57:34 +01003436
3437 /* transition to HEADERS frame ends the keep-alive idle
3438 * timer and starts the http-request idle delay.
3439 */
3440 if (hdr.ft == H2_FT_HEADERS)
3441 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003442 }
3443
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003444 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3445 * H2_CS_FRAME_P indicates an incomplete previous operation
3446 * (most often the first attempt) and requires some validity
3447 * checks for the frame and the current state. The two other
3448 * ones are set after completion (or abortion) and must skip
3449 * validity checks.
3450 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003451 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3452
Willy Tarreau567beb82018-12-18 16:52:44 +01003453 if (tmp_h2s != h2s && h2s && h2s->cs &&
3454 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003455 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003456 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003457 (h2s->flags & H2_SF_ES_RCVD) ||
3458 (h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003459 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003460 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003461 h2s->cs->flags |= CS_FL_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003462 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003463 }
3464 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003465
Willy Tarreau63864812019-08-07 14:25:20 +02003466 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003467 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3468 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003469 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003470 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003471
Willy Tarreau7e98c052017-10-10 15:56:59 +02003472 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003473 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003474 if (h2c->st0 == H2_CS_FRAME_P) {
3475 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003476 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003477 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003478 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003479
Willy Tarreau7838a792019-08-12 18:42:03 +02003480 if (h2c->st0 == H2_CS_FRAME_A) {
3481 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003482 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003483 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003484 break;
3485
Willy Tarreaucf68c782017-10-10 17:11:41 +02003486 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003487 if (h2c->st0 == H2_CS_FRAME_P) {
3488 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003489 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003490 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003491
Willy Tarreau7838a792019-08-12 18:42:03 +02003492 if (h2c->st0 == H2_CS_FRAME_A) {
3493 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003494 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003495 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003496 break;
3497
Willy Tarreau26f95952017-07-27 17:18:30 +02003498 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003499 if (h2c->st0 == H2_CS_FRAME_P) {
3500 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003501 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003502 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003503 break;
3504
Willy Tarreau61290ec2017-10-17 08:19:21 +02003505 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003506 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003507 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3508 * frames' parsers consume all following CONTINUATION
3509 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003510 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003511 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003512 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003513 if (!(h2c->flags & H2_CF_IS_BACK))
3514 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003515 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003516 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003517
Willy Tarreau13278b42017-10-13 19:23:14 +02003518 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003519 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003520 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003521 if (h2c->flags & H2_CF_IS_BACK)
3522 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3523 else
3524 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003525 if (tmp_h2s) {
3526 h2s = tmp_h2s;
3527 ret = 1;
3528 }
3529 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003530 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003531 break;
3532
Willy Tarreau454f9052017-10-26 19:40:35 +02003533 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003534 if (h2c->st0 == H2_CS_FRAME_P) {
3535 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003536 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003537 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003538 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003539
Willy Tarreau7838a792019-08-12 18:42:03 +02003540 if (h2c->st0 == H2_CS_FRAME_A) {
3541 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
Willy Tarreau454f9052017-10-26 19:40:35 +02003542 ret = h2c_send_strm_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003543 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003544 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003545
Willy Tarreau92153fc2017-12-03 19:46:19 +01003546 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003547 if (h2c->st0 == H2_CS_FRAME_P) {
3548 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003549 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003550 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003551 break;
3552
Willy Tarreaucd234e92017-08-18 10:59:39 +02003553 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003554 if (h2c->st0 == H2_CS_FRAME_P) {
3555 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003556 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003557 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003558 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003559 break;
3560
Willy Tarreaue96b0922017-10-30 00:28:29 +01003561 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003562 if (h2c->st0 == H2_CS_FRAME_P) {
3563 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003564 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003565 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003566 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003567 break;
3568
Willy Tarreau1c661982017-10-30 13:52:01 +01003569 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003570 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003571 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003572 /* drop frames that we ignore. They may be larger than
3573 * the buffer so we drain all of their contents until
3574 * we reach the end.
3575 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003576 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3577 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003578 h2c->dfl -= ret;
3579 ret = h2c->dfl == 0;
3580 }
3581
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003582 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003583 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003584 if (h2s->st == H2_SS_ERROR) {
3585 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003586 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003587 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003588
Willy Tarreau7838a792019-08-12 18:42:03 +02003589 if (h2c->st0 == H2_CS_FRAME_E) {
3590 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003591 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003592 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003593
Willy Tarreau7e98c052017-10-10 15:56:59 +02003594 /* error or missing data condition met above ? */
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003595 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003596 break;
3597
3598 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003599 if (h2c->dfl)
3600 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003601 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3602 b_del(&h2c->dbuf, ret);
3603 h2c->dfl -= ret;
3604 if (!h2c->dfl) {
3605 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3606 h2c->st0 = H2_CS_FRAME_H;
3607 h2c->dsi = -1;
3608 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003609 }
3610 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003611
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003612 if (h2c->rcvd_c > 0 &&
Willy Tarreau7838a792019-08-12 18:42:03 +02003613 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
3614 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003615 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003616 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003617
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003618 done:
Christopher Fauletb5f7b522021-07-26 12:06:53 +02003619 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3620 if (h2c->flags & H2_CF_RCVD_SHUT)
3621 h2c->flags |= H2_CF_END_REACHED;
3622 }
3623
Willy Tarreau567beb82018-12-18 16:52:44 +01003624 if (h2s && h2s->cs &&
3625 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003626 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003627 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003628 (h2s->flags & H2_SF_ES_RCVD) ||
3629 (h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003630 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003631 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003632 h2s->cs->flags |= CS_FL_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003633 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003634 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003635
Willy Tarreau7838a792019-08-12 18:42:03 +02003636 if (old_iw != h2c->miw) {
3637 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003638 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003639 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003640
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003641 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003642 out:
3643 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003644 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003645}
3646
Willy Tarreau989539b2020-01-10 17:01:29 +01003647/* resume each h2s eligible for sending in list head <head> */
3648static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3649{
3650 struct h2s *h2s, *h2s_back;
3651
3652 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3653
3654 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3655 if (h2c->mws <= 0 ||
3656 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3657 h2c->st0 >= H2_CS_ERROR)
3658 break;
3659
3660 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003661
Willy Tarreaud9464162020-01-10 18:25:07 +01003662 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003663 continue;
3664
Willy Tarreau5723f292020-01-10 15:16:57 +01003665 /* If the sender changed his mind and unsubscribed, let's just
3666 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003667 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003668 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3669 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003670 LIST_DEL_INIT(&h2s->list);
3671 continue;
3672 }
3673
Willy Tarreauf96508a2020-01-10 11:12:48 +01003674 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003675 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003676 tasklet_wakeup(h2s->subs->tasklet);
3677 h2s->subs->events &= ~SUB_RETRY_SEND;
3678 if (!h2s->subs->events)
3679 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003680 }
3681 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3682 tasklet_wakeup(h2s->shut_tl);
3683 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003684 }
3685
3686 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3687}
3688
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end.
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	/* connection not yet past the preface/settings exchange */
	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		/* as a backend (outgoing) connection we must emit the client
		 * preface first.
		 */
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates */
	/* stream-level WU first: rcvd_s counts stream bytes acked to the peer */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	/* then the connection-level WU for rcvd_c bytes */
	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			/* only send a GOAWAY if at least one stream was seen
			 * (max_id >= 0); if the mux is blocked we must retry
			 * later, hence the 0 return via out0.
			 */
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3748
Willy Tarreau62f52692017-10-08 23:01:42 +02003749
/* Attempt to read data, and subscribe if none available.
 * The function returns 1 if data has been received, otherwise zero.
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* already subscribed for receipt: report whether data is pending */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	/* make sure we have a demux buffer; otherwise mark the allocation
	 * failure so we get woken up once a buffer becomes available.
	 */
	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	/* read0 already seen: nothing more will come from the transport */
	if (h2c->flags & H2_CF_RCVD_SHUT) {
		TRACE_DEVEL("leaving on rcvd_shut", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		/* nothing read but room was available: subscribe for a
		 * transport wake-up when data arrives.
		 */
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		/* progress was made, clear the short-read condition */
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}

	if (!b_data(buf)) {
		/* buffer still empty: give it back to the pool and report
		 * only error/shutdown conditions.
		 */
		h2_release_buf(h2c, &h2c->dbuf);
		TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
		return (conn->flags & CO_FL_ERROR || conn_xprt_read0_pending(conn));
	}

	if (b_data(buf) == buf->size) {
		/* demux buffer is full; demuxer must make room before we
		 * can read again.
		 */
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn);
}
3828
/* Try to send data if possible.
 * The function returns 1 if data have been sent, otherwise zero.
 */
static int h2_send(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	int done;
	int sent = 0;

	TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);

	if (conn->flags & CO_FL_ERROR) {
		/* connection is dead; report 1 so the caller processes the
		 * error path instead of subscribing again.
		 */
		TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
		return 1;
	}

	if (conn->flags & CO_FL_WAIT_XPRT) {
		/* a handshake was requested */
		goto schedule;
	}

	/* This loop is quite simple : it tries to fill as much as it can from
	 * pending streams into the existing buffer until it's reportedly full
	 * or the end of send requests is reached. Then it tries to send this
	 * buffer's contents out, marks it not full if at least one byte could
	 * be sent, and tries again.
	 *
	 * The snd_buf() function normally takes a "flags" argument which may
	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
	 * data immediately comes and CO_SFL_STREAMER to indicate that the
	 * connection is streaming lots of data (used to increase TLS record
	 * size at the expense of latency). The former can be sent any time
	 * there's a buffer full flag, as it indicates at least one stream
	 * attempted to send and failed so there are pending data. An
	 * alternative would be to set it as long as there's an active stream
	 * but that would be problematic for ACKs until we have an absolute
	 * guarantee that all waiters have at least one byte to send. The
	 * latter should possibly not be set for now.
	 */

	done = 0;
	while (!done) {
		unsigned int flags = 0;
		unsigned int released = 0;
		struct buffer *buf;

		/* fill as much as we can into the current buffer */
		while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
			done = h2_process_mux(h2c);

		if (h2c->flags & H2_CF_MUX_MALLOC)
			done = 1; // we won't go further without extra buffers

		if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
		    (h2c->flags & H2_CF_GOAWAY_FAILED))
			break;

		if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))
			flags |= CO_SFL_MSG_MORE;

		/* flush the mux ring buffer head-first; each fully-sent
		 * buffer is freed and counted in <released> so the freed
		 * buffers can be offered back to waiters below.
		 */
		for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
			if (b_data(buf)) {
				int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), flags);
				if (!ret) {
					/* transport accepted nothing: stop trying */
					done = 1;
					break;
				}
				sent = 1;
				TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
				b_del(buf, ret);
				if (b_data(buf)) {
					/* partial send: keep the remainder, stop */
					done = 1;
					break;
				}
			}
			b_free(buf);
			released++;
		}

		if (released)
			offer_buffers(NULL, released);

		/* wrote at least one byte, the buffer is not full anymore */
		if (sent)
			h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
	}

	if (conn->flags & CO_FL_SOCK_WR_SH) {
		/* output closed, nothing to send, clear the buffer to release it */
		b_reset(br_tail(h2c->mbuf));
	}
	/* We're not full anymore, so we can wake any task that are waiting
	 * for us.
	 */
	if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H)
		h2_resume_each_sending_h2s(h2c, &h2c->send_list);

	/* We're done, no more to send */
	if (!br_data(h2c->mbuf)) {
		TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
		return sent;
	}
schedule:
	/* data remain pending: subscribe for a send-ready notification
	 * unless the connection errored out.
	 */
	if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
		TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
	}

	TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
	return sent;
}
3940
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02003941/* this is the tasklet referenced in h2c->wait_event.tasklet */
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003942struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
Olivier Houchard29fb89d2018-08-02 18:56:36 +02003943{
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003944 struct connection *conn;
3945 struct tasklet *tl = (struct tasklet *)t;
3946 int conn_in_list;
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003947 struct h2c *h2c = ctx;
Olivier Houchard7505f942018-08-21 18:10:44 +02003948 int ret = 0;
Olivier Houchard29fb89d2018-08-02 18:56:36 +02003949
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003950 if (state & TASK_F_USR1) {
3951 /* the tasklet was idling on an idle connection, it might have
3952 * been stolen, let's be careful!
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003953 */
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003954 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
3955 if (t->context == NULL) {
3956 /* The connection has been taken over by another thread,
3957 * we're no longer responsible for it, so just free the
3958 * tasklet, and do nothing.
3959 */
3960 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
3961 tasklet_free(tl);
Willy Tarreau74163142021-03-13 11:30:19 +01003962 t = NULL;
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003963 goto leave;
3964 }
3965 conn = h2c->conn;
3966 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003967
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003968 conn_in_list = conn->flags & CO_FL_LIST_MASK;
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003969
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003970 /* Remove the connection from the list, to be sure nobody attempts
3971 * to use it while we handle the I/O events
3972 */
3973 if (conn_in_list)
3974 conn_delete_from_tree(&conn->hash_node->node);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003975
Willy Tarreaue388f2f2021-03-02 16:51:09 +01003976 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
3977 } else {
3978 /* we're certain the connection was not in an idle list */
3979 conn = h2c->conn;
3980 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
3981 conn_in_list = 0;
3982 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003983
Willy Tarreau4f6516d2018-12-19 13:59:17 +01003984 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Olivier Houchard7505f942018-08-21 18:10:44 +02003985 ret = h2_send(h2c);
Willy Tarreau4f6516d2018-12-19 13:59:17 +01003986 if (!(h2c->wait_event.events & SUB_RETRY_RECV))
Olivier Houchard7505f942018-08-21 18:10:44 +02003987 ret |= h2_recv(h2c);
Willy Tarreaucef5c8e2018-12-18 10:29:54 +01003988 if (ret || b_data(&h2c->dbuf))
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003989 ret = h2_process(h2c);
3990
3991 /* If we were in an idle list, we want to add it back into it,
3992 * unless h2_process() returned -1, which mean it has destroyed
3993 * the connection (testing !ret is enough, if h2_process() wasn't
3994 * called then ret will be 0 anyway.
3995 */
Willy Tarreau74163142021-03-13 11:30:19 +01003996 if (ret < 0)
3997 t = NULL;
3998
Olivier Houchardcd4159f2020-03-10 18:39:42 +01003999 if (!ret && conn_in_list) {
4000 struct server *srv = objt_server(conn->target);
4001
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004002 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004003 if (conn_in_list == CO_FL_SAFE_LIST)
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004004 ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004005 else
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004006 ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004007 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004008 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004009
Willy Tarreau38468772020-06-28 00:31:13 +02004010leave:
Willy Tarreau7838a792019-08-12 18:42:03 +02004011 TRACE_LEAVE(H2_EV_H2C_WAKE);
Willy Tarreau74163142021-03-13 11:30:19 +01004012 return t;
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004013}
Willy Tarreaua2af5122017-10-09 11:56:46 +02004014
Willy Tarreau62f52692017-10-08 23:01:42 +02004015/* callback called on any event by the connection handler.
4016 * It applies changes and returns zero, or < 0 if it wants immediate
4017 * destruction of the connection (which normally doesn not happen in h2).
4018 */
Olivier Houchard7505f942018-08-21 18:10:44 +02004019static int h2_process(struct h2c *h2c)
Willy Tarreau62f52692017-10-08 23:01:42 +02004020{
Olivier Houchard7505f942018-08-21 18:10:44 +02004021 struct connection *conn = h2c->conn;
Willy Tarreaua2af5122017-10-09 11:56:46 +02004022
Willy Tarreau7838a792019-08-12 18:42:03 +02004023 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4024
Willy Tarreauf0961222021-02-05 11:41:46 +01004025 if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
4026 (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
Willy Tarreaud13bf272017-12-14 10:34:52 +01004027 h2_process_demux(h2c);
4028
4029 if (h2c->st0 >= H2_CS_ERROR || conn->flags & CO_FL_ERROR)
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004030 b_reset(&h2c->dbuf);
Willy Tarreaud13bf272017-12-14 10:34:52 +01004031
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004032 if (!b_full(&h2c->dbuf))
Willy Tarreaud13bf272017-12-14 10:34:52 +01004033 h2c->flags &= ~H2_CF_DEM_DFULL;
4034 }
Olivier Houchard7505f942018-08-21 18:10:44 +02004035 h2_send(h2c);
Willy Tarreaud13bf272017-12-14 10:34:52 +01004036
Christopher Fauletdfd10ab2021-10-06 14:24:19 +02004037 if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +02004038 int send_goaway = 1;
4039 /* If a close-spread-time option is set, we want to avoid
4040 * closing all the active HTTP2 connections at once so we add a
4041 * random factor that will spread the closing.
4042 */
4043 if (tick_isset(global.close_spread_end)) {
4044 int remaining_window = tick_remain(now_ms, global.close_spread_end);
4045 if (remaining_window) {
4046 /* This should increase the closing rate the
4047 * further along the window we are. */
4048 send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
4049 }
4050 }
Willy Tarreau8ec14062017-12-30 18:08:13 +01004051 /* frontend is stopping, reload likely in progress, let's try
4052 * to announce a graceful shutdown if not yet done. We don't
4053 * care if it fails, it will be tried again later.
4054 */
Remi Tricot-Le Bretonb5d968d2022-04-08 18:04:18 +02004055 if (send_goaway) {
4056 TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
4057 if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
4058 if (h2c->last_sid < 0)
4059 h2c->last_sid = (1U << 31) - 1;
4060 h2c_send_goaway_error(h2c, NULL);
4061 }
Willy Tarreau8ec14062017-12-30 18:08:13 +01004062 }
4063 }
4064
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004065 /*
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004066 * If we received early data, and the handshake is done, wake
4067 * any stream that was waiting for it.
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004068 */
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004069 if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
Willy Tarreau911db9b2020-01-23 16:27:54 +01004070 (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004071 struct eb32_node *node;
4072 struct h2s *h2s;
4073
4074 h2c->flags |= H2_CF_WAIT_FOR_HS;
4075 node = eb32_lookup_ge(&h2c->streams_by_id, 1);
4076
4077 while (node) {
4078 h2s = container_of(node, struct h2s, by_id);
Willy Tarreaufde287c2018-12-19 18:33:16 +01004079 if (h2s->cs && h2s->cs->flags & CS_FL_WAIT_FOR_HS)
Willy Tarreau7e094452018-12-19 18:08:52 +01004080 h2s_notify_recv(h2s);
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004081 node = eb32_next(node);
4082 }
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004083 }
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004084
Christopher Fauletaade4ed2020-10-08 15:38:41 +02004085 if (conn->flags & CO_FL_ERROR || h2c_read0_pending(h2c) ||
Willy Tarreau29a98242017-10-31 06:59:15 +01004086 h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
4087 (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
4088 h2c->max_id >= h2c->last_sid)) {
Willy Tarreau23482912019-05-07 15:23:14 +02004089 h2_wake_some_streams(h2c, 0);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004090
4091 if (eb_is_empty(&h2c->streams_by_id)) {
4092 /* no more stream, kill the connection now */
Christopher Faulet73c12072019-04-08 11:23:22 +02004093 h2_release(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004094 TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004095 return -1;
4096 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004097
4098 /* connections in error must be removed from the idle lists */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004099 if (conn->flags & CO_FL_LIST_MASK) {
4100 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004101 conn_delete_from_tree(&conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004102 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4103 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004104 }
4105 else if (h2c->st0 == H2_CS_ERROR) {
4106 /* connections in error must be removed from the idle lists */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004107 if (conn->flags & CO_FL_LIST_MASK) {
4108 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004109 conn_delete_from_tree(&conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004110 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4111 }
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004112 }
4113
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004114 if (!b_data(&h2c->dbuf))
Willy Tarreau44e973f2018-03-01 17:49:30 +01004115 h2_release_buf(h2c, &h2c->dbuf);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004116
Olivier Houchard53216e72018-10-10 15:46:36 +02004117 if ((conn->flags & CO_FL_SOCK_WR_SH) ||
4118 h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
4119 (h2c->st0 != H2_CS_ERROR &&
Willy Tarreau662fafc2019-05-26 09:43:07 +02004120 !br_data(h2c->mbuf) &&
Olivier Houchard53216e72018-10-10 15:46:36 +02004121 (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
4122 ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
Willy Tarreau2e3c0002019-05-26 09:45:23 +02004123 h2_release_mbuf(h2c);
Willy Tarreaua2af5122017-10-09 11:56:46 +02004124
Willy Tarreau15a47332022-03-18 15:57:34 +01004125 h2c_update_timeout(h2c);
Olivier Houchard7505f942018-08-21 18:10:44 +02004126 h2_send(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004127 TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
Willy Tarreau62f52692017-10-08 23:01:42 +02004128 return 0;
4129}
4130
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004131/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004132static int h2_wake(struct connection *conn)
4133{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004134 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02004135 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004136
Willy Tarreau7838a792019-08-12 18:42:03 +02004137 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4138 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01004139 if (ret >= 0)
4140 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02004141 TRACE_LEAVE(H2_EV_H2C_WAKE);
4142 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004143}
4144
Willy Tarreauea392822017-10-31 10:02:25 +01004145/* Connection timeout management. The principle is that if there's no receipt
4146 * nor sending for a certain amount of time, the connection is closed. If the
4147 * MUX buffer still has lying data or is not allocatable, the connection is
4148 * immediately killed. If it's allocatable and empty, we attempt to send a
4149 * GOAWAY frame.
4150 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01004151struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
Willy Tarreauea392822017-10-31 10:02:25 +01004152{
Olivier Houchard9f6af332018-05-25 14:04:04 +02004153 struct h2c *h2c = context;
Willy Tarreauea392822017-10-31 10:02:25 +01004154 int expired = tick_is_expired(t->expire, now_ms);
4155
Willy Tarreau7838a792019-08-12 18:42:03 +02004156 TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);
4157
Willy Tarreaubd42e922020-06-30 11:19:23 +02004158 if (h2c) {
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004159 /* Make sure nobody stole the connection from us */
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004160 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004161
4162 /* Somebody already stole the connection from us, so we should not
4163 * free it, we just have to free the task.
4164 */
4165 if (!t->context) {
4166 h2c = NULL;
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004167 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004168 goto do_leave;
4169 }
4170
4171
Willy Tarreaubd42e922020-06-30 11:19:23 +02004172 if (!expired) {
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004173 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004174 TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
4175 return t;
4176 }
Willy Tarreauea392822017-10-31 10:02:25 +01004177
Willy Tarreaubd42e922020-06-30 11:19:23 +02004178 if (!h2c_may_expire(h2c)) {
4179 /* we do still have streams but all of them are idle, waiting
4180 * for the data layer, so we must not enforce the timeout here.
4181 */
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004182 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004183 t->expire = TICK_ETERNITY;
4184 return t;
4185 }
Willy Tarreauc2ea47f2019-10-01 10:12:00 +02004186
Willy Tarreaubd42e922020-06-30 11:19:23 +02004187 /* We're about to destroy the connection, so make sure nobody attempts
4188 * to steal it from us.
4189 */
Willy Tarreaubd42e922020-06-30 11:19:23 +02004190 if (h2c->conn->flags & CO_FL_LIST_MASK)
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004191 conn_delete_from_tree(&h2c->conn->hash_node->node);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004192
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004193 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004194 }
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004195
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004196do_leave:
Olivier Houchard3f795f72019-04-17 22:51:06 +02004197 task_destroy(t);
Willy Tarreau0975f112018-03-29 15:22:59 +02004198
4199 if (!h2c) {
4200 /* resources were already deleted */
Willy Tarreau7838a792019-08-12 18:42:03 +02004201 TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
Willy Tarreau0975f112018-03-29 15:22:59 +02004202 return NULL;
4203 }
4204
4205 h2c->task = NULL;
Willy Tarreauea392822017-10-31 10:02:25 +01004206 h2c_error(h2c, H2_ERR_NO_ERROR);
Willy Tarreau23482912019-05-07 15:23:14 +02004207 h2_wake_some_streams(h2c, 0);
Willy Tarreauea392822017-10-31 10:02:25 +01004208
Willy Tarreau662fafc2019-05-26 09:43:07 +02004209 if (br_data(h2c->mbuf)) {
Willy Tarreauea392822017-10-31 10:02:25 +01004210 /* don't even try to send a GOAWAY, the buffer is stuck */
4211 h2c->flags |= H2_CF_GOAWAY_FAILED;
4212 }
4213
4214 /* try to send but no need to insist */
Willy Tarreau599391a2017-11-24 10:16:00 +01004215 h2c->last_sid = h2c->max_id;
Willy Tarreauea392822017-10-31 10:02:25 +01004216 if (h2c_send_goaway_error(h2c, NULL) <= 0)
4217 h2c->flags |= H2_CF_GOAWAY_FAILED;
4218
Willy Tarreau662fafc2019-05-26 09:43:07 +02004219 if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02004220 unsigned int released = 0;
4221 struct buffer *buf;
4222
4223 for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
4224 if (b_data(buf)) {
4225 int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
4226 if (!ret)
4227 break;
4228 b_del(buf, ret);
4229 if (b_data(buf))
4230 break;
4231 b_free(buf);
4232 released++;
4233 }
Willy Tarreau787db9a2018-06-14 18:31:46 +02004234 }
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02004235
4236 if (released)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +01004237 offer_buffers(NULL, released);
Willy Tarreau787db9a2018-06-14 18:31:46 +02004238 }
Willy Tarreauea392822017-10-31 10:02:25 +01004239
Willy Tarreau4481e262019-10-31 15:36:30 +01004240 /* in any case this connection must not be considered idle anymore */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004241 if (h2c->conn->flags & CO_FL_LIST_MASK) {
4242 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004243 conn_delete_from_tree(&h2c->conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004244 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4245 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004246
Willy Tarreau0975f112018-03-29 15:22:59 +02004247 /* either we can release everything now or it will be done later once
4248 * the last stream closes.
4249 */
4250 if (eb_is_empty(&h2c->streams_by_id))
Christopher Faulet73c12072019-04-08 11:23:22 +02004251 h2_release(h2c);
Willy Tarreauea392822017-10-31 10:02:25 +01004252
Willy Tarreau7838a792019-08-12 18:42:03 +02004253 TRACE_LEAVE(H2_EV_H2C_WAKE);
Willy Tarreauea392822017-10-31 10:02:25 +01004254 return NULL;
4255}
4256
4257
Willy Tarreau62f52692017-10-08 23:01:42 +02004258/*******************************************/
4259/* functions below are used by the streams */
4260/*******************************************/
4261
4262/*
4263 * Attach a new stream to a connection
4264 * (Used for outgoing connections)
4265 */
Christopher Faulete00ad352021-12-16 14:44:31 +01004266static int h2_attach(struct connection *conn, struct conn_stream *cs, struct session *sess)
Willy Tarreau62f52692017-10-08 23:01:42 +02004267{
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004268 struct h2s *h2s;
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004269 struct h2c *h2c = conn->ctx;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004270
Willy Tarreau7838a792019-08-12 18:42:03 +02004271 TRACE_ENTER(H2_EV_H2S_NEW, conn);
Olivier Houchardf502aca2018-12-14 19:42:40 +01004272 h2s = h2c_bck_stream_new(h2c, cs, sess);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004273 if (!h2s) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004274 TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Christopher Faulete00ad352021-12-16 14:44:31 +01004275 return -1;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004276 }
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004277
4278 /* the connection is not idle anymore, let's mark this */
4279 HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004280 xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004281
Willy Tarreau7838a792019-08-12 18:42:03 +02004282 TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
Christopher Faulete00ad352021-12-16 14:44:31 +01004283 return 0;
Willy Tarreau62f52692017-10-08 23:01:42 +02004284}
4285
Willy Tarreaufafd3982018-11-18 21:29:20 +01004286/* Retrieves the first valid conn_stream from this connection, or returns NULL.
4287 * We have to scan because we may have some orphan streams. It might be
4288 * beneficial to scan backwards from the end to reduce the likeliness to find
4289 * orphans.
4290 */
4291static const struct conn_stream *h2_get_first_cs(const struct connection *conn)
4292{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004293 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004294 struct h2s *h2s;
4295 struct eb32_node *node;
4296
4297 node = eb32_first(&h2c->streams_by_id);
4298 while (node) {
4299 h2s = container_of(node, struct h2s, by_id);
4300 if (h2s->cs)
4301 return h2s->cs;
4302 node = eb32_next(node);
4303 }
4304 return NULL;
4305}
4306
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004307static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4308{
4309 int ret = 0;
4310 struct h2c *h2c = conn->ctx;
4311
4312 switch (mux_ctl) {
4313 case MUX_STATUS:
4314 /* Only consider the mux to be ready if we're done with
4315 * the preface and settings, and we had no error.
4316 */
4317 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4318 ret |= MUX_STATUS_READY;
4319 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004320 case MUX_EXIT_STATUS:
4321 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004322 default:
4323 return -1;
4324 }
4325}
4326
Willy Tarreau62f52692017-10-08 23:01:42 +02004327/*
Olivier Houchard060ed432018-11-06 16:32:42 +01004328 * Destroy the mux and the associated connection, if it is no longer used
4329 */
Christopher Faulet73c12072019-04-08 11:23:22 +02004330static void h2_destroy(void *ctx)
Olivier Houchard060ed432018-11-06 16:32:42 +01004331{
Christopher Faulet73c12072019-04-08 11:23:22 +02004332 struct h2c *h2c = ctx;
Olivier Houchard060ed432018-11-06 16:32:42 +01004333
Willy Tarreau7838a792019-08-12 18:42:03 +02004334 TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
Christopher Faulet39a96ee2019-04-08 10:52:21 +02004335 if (eb_is_empty(&h2c->streams_by_id) || !h2c->conn || h2c->conn->ctx != h2c)
Christopher Faulet73c12072019-04-08 11:23:22 +02004336 h2_release(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004337 TRACE_LEAVE(H2_EV_H2C_END);
Olivier Houchard060ed432018-11-06 16:32:42 +01004338}
4339
4340/*
Willy Tarreau62f52692017-10-08 23:01:42 +02004341 * Detach the stream from the connection and possibly release the connection.
4342 */
4343static void h2_detach(struct conn_stream *cs)
4344{
Willy Tarreau60935142017-10-16 18:11:19 +02004345 struct h2s *h2s = cs->ctx;
4346 struct h2c *h2c;
Olivier Houchardf502aca2018-12-14 19:42:40 +01004347 struct session *sess;
Willy Tarreau60935142017-10-16 18:11:19 +02004348
Willy Tarreau7838a792019-08-12 18:42:03 +02004349 TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);
4350
Willy Tarreau60935142017-10-16 18:11:19 +02004351 cs->ctx = NULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02004352 if (!h2s) {
4353 TRACE_LEAVE(H2_EV_STRM_END);
Willy Tarreau60935142017-10-16 18:11:19 +02004354 return;
Willy Tarreau7838a792019-08-12 18:42:03 +02004355 }
Willy Tarreau60935142017-10-16 18:11:19 +02004356
Willy Tarreaud9464162020-01-10 18:25:07 +01004357 /* there's no txbuf so we're certain not to be able to send anything */
4358 h2s->flags &= ~H2_SF_NOTIFIED;
Olivier Houchard998410a2019-04-15 19:23:37 +02004359
Olivier Houchardf502aca2018-12-14 19:42:40 +01004360 sess = h2s->sess;
Willy Tarreau60935142017-10-16 18:11:19 +02004361 h2c = h2s->h2c;
4362 h2s->cs = NULL;
Willy Tarreau7ac60e82018-07-19 09:04:05 +02004363 h2c->nb_cs--;
Willy Tarreau15a47332022-03-18 15:57:34 +01004364 if (!h2c->nb_cs)
4365 h2c->idle_start = now_ms;
4366
Willy Tarreaufa1d3572019-01-31 10:31:51 +01004367 if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
4368 !h2_frt_has_too_many_cs(h2c)) {
4369 /* frontend connection was blocking new streams creation */
Willy Tarreauf2101912018-07-19 10:11:38 +02004370 h2c->flags &= ~H2_CF_DEM_TOOMANY;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02004371 h2c_restart_reading(h2c, 1);
Willy Tarreauf2101912018-07-19 10:11:38 +02004372 }
Willy Tarreau60935142017-10-16 18:11:19 +02004373
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004374 /* this stream may be blocked waiting for some data to leave (possibly
4375 * an ES or RST frame), so orphan it in this case.
4376 */
Christopher Faulet897d6122021-12-17 17:28:35 +01004377 if (!(h2c->conn->flags & CO_FL_ERROR) &&
Willy Tarreaua2b51812018-07-27 09:55:14 +02004378 (h2c->st0 < H2_CS_ERROR) &&
Willy Tarreau5723f292020-01-10 15:16:57 +01004379 (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
Willy Tarreauf96508a2020-01-10 11:12:48 +01004380 ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004381 TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau15a47332022-03-18 15:57:34 +01004382 /* refresh the timeout if none was active, so that the last
4383 * leaving stream may arm it.
4384 */
4385 if (!tick_isset(h2c->task->expire))
4386 h2c_update_timeout(h2c);
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004387 return;
Willy Tarreau7838a792019-08-12 18:42:03 +02004388 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004389
Willy Tarreau45f752e2017-10-30 15:44:59 +01004390 if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) ||
4391 (h2c->flags & H2_CF_MUX_BLOCK_ANY && h2s->id == h2c->msi)) {
4392 /* unblock the connection if it was blocked on this
4393 * stream.
4394 */
4395 h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
4396 h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02004397 h2c_restart_reading(h2c, 1);
Willy Tarreau45f752e2017-10-30 15:44:59 +01004398 }
4399
Willy Tarreau71049cc2018-03-28 13:56:39 +02004400 h2s_destroy(h2s);
Willy Tarreau60935142017-10-16 18:11:19 +02004401
Christopher Faulet9b79a102019-07-15 11:22:56 +02004402 if (h2c->flags & H2_CF_IS_BACK) {
Olivier Houchard8a786902018-12-15 16:05:40 +01004403 if (!(h2c->conn->flags &
4404 (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
Christopher Fauletc5579d12020-07-01 15:45:41 +02004405 if (h2c->conn->flags & CO_FL_PRIVATE) {
Christopher Faulet08016ab2020-07-01 16:10:06 +02004406 /* Add the connection in the session server list, if not already done */
4407 if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
4408 h2c->conn->owner = NULL;
4409 if (eb_is_empty(&h2c->streams_by_id)) {
4410 h2c->conn->mux->destroy(h2c);
4411 TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
4412 return;
Christopher Fauletc5579d12020-07-01 15:45:41 +02004413 }
4414 }
Christopher Faulet08016ab2020-07-01 16:10:06 +02004415 if (eb_is_empty(&h2c->streams_by_id)) {
Christopher Fauletc5579d12020-07-01 15:45:41 +02004416 if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
4417 /* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
4418 TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
Olivier Houchard351411f2018-12-27 17:20:54 +01004419 return;
4420 }
4421 }
Olivier Houchard8a786902018-12-15 16:05:40 +01004422 }
Christopher Fauletc5579d12020-07-01 15:45:41 +02004423 else {
4424 if (eb_is_empty(&h2c->streams_by_id)) {
Amaury Denoyelle6b8daef2020-10-14 18:17:10 +02004425 /* If the connection is owned by the session, first remove it
4426 * from its list
4427 */
4428 if (h2c->conn->owner) {
4429 session_unown_conn(h2c->conn->owner, h2c->conn);
4430 h2c->conn->owner = NULL;
4431 }
4432
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004433 /* mark that the tasklet may lose its context to another thread and
4434 * that the handler needs to check it under the idle conns lock.
4435 */
4436 HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004437 xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
4438
Olivier Houcharddc2f2752020-02-13 19:12:07 +01004439 if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
Olivier Houchard2444aa52020-01-20 13:56:01 +01004440 /* The server doesn't want it, let's kill the connection right away */
4441 h2c->conn->mux->destroy(h2c);
4442 TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
4443 return;
4444 }
Olivier Houchard199d4fa2020-03-22 23:25:51 +01004445 /* At this point, the connection has been added to the
4446 * server idle list, so another thread may already have
4447 * hijacked it, so we can't do anything with it.
4448 */
Olivier Houchard2444aa52020-01-20 13:56:01 +01004449 TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
4450 return;
Olivier Houchard8a786902018-12-15 16:05:40 +01004451
Olivier Houchard8a786902018-12-15 16:05:40 +01004452 }
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004453 else if (!h2c->conn->hash_node->node.node.leaf_p &&
Amaury Denoyelle6b8daef2020-10-14 18:17:10 +02004454 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02004455 !LIST_INLIST(&h2c->conn->session_list)) {
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004456 ebmb_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004457 &h2c->conn->hash_node->node,
4458 sizeof(h2c->conn->hash_node->hash));
Christopher Fauletc5579d12020-07-01 15:45:41 +02004459 }
Olivier Houchard8a786902018-12-15 16:05:40 +01004460 }
4461 }
4462 }
4463
Willy Tarreaue323f342018-03-28 13:51:45 +02004464 /* We don't want to close right now unless we're removing the
4465 * last stream, and either the connection is in error, or it
4466 * reached the ID already specified in a GOAWAY frame received
4467 * or sent (as seen by last_sid >= 0).
4468 */
Olivier Houchard7a977432019-03-21 15:47:13 +01004469 if (h2c_is_dead(h2c)) {
Willy Tarreaue323f342018-03-28 13:51:45 +02004470 /* no more stream will come, kill it now */
Willy Tarreau7838a792019-08-12 18:42:03 +02004471 TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
Christopher Faulet73c12072019-04-08 11:23:22 +02004472 h2_release(h2c);
Willy Tarreaue323f342018-03-28 13:51:45 +02004473 }
4474 else if (h2c->task) {
Willy Tarreau15a47332022-03-18 15:57:34 +01004475 h2c_update_timeout(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004476 TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
Willy Tarreau60935142017-10-16 18:11:19 +02004477 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004478 else
4479 TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
Willy Tarreau62f52692017-10-08 23:01:42 +02004480}
4481
Willy Tarreau88bdba32019-05-13 18:17:53 +02004482/* Performs a synchronous or asynchronous shutr(). */
4483static void h2_do_shutr(struct h2s *h2s)
Willy Tarreau62f52692017-10-08 23:01:42 +02004484{
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004485 struct h2c *h2c = h2s->h2c;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004486
Willy Tarreauf983d002019-05-14 10:40:21 +02004487 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau88bdba32019-05-13 18:17:53 +02004488 goto done;
Willy Tarreauc7576ea2017-10-29 22:00:09 +01004489
Willy Tarreau7838a792019-08-12 18:42:03 +02004490 TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
4491
Willy Tarreau18059042019-01-31 19:12:48 +01004492 /* a connstream may require us to immediately kill the whole connection
4493 * for example because of a "tcp-request content reject" rule that is
4494 * normally used to limit abuse. In this case we schedule a goaway to
4495 * close the connection.
Willy Tarreau926fa4c2017-11-07 14:42:12 +01004496 */
Willy Tarreau3cf69fe2019-05-14 10:44:40 +02004497 if ((h2s->flags & H2_SF_KILL_CONN) &&
Willy Tarreau18059042019-01-31 19:12:48 +01004498 !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004499 TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau18059042019-01-31 19:12:48 +01004500 h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
4501 h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
4502 }
Christopher Faulet35757d32019-03-07 15:51:33 +01004503 else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
4504 /* Nothing was never sent for this stream, so reset with
4505 * REFUSED_STREAM error to let the client retry the
4506 * request.
4507 */
Willy Tarreau7838a792019-08-12 18:42:03 +02004508 TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
Christopher Faulet35757d32019-03-07 15:51:33 +01004509 h2s_error(h2s, H2_ERR_REFUSED_STREAM);
4510 }
Willy Tarreaucfba9d62019-08-06 10:30:58 +02004511 else {
4512 /* a final response was already provided, we don't want this
4513 * stream anymore. This may happen when the server responds
4514 * before the end of an upload and closes quickly (redirect,
4515 * deny, ...)
4516 */
4517 h2s_error(h2s, H2_ERR_CANCEL);
4518 }
Willy Tarreau18059042019-01-31 19:12:48 +01004519
Willy Tarreau90c32322017-11-24 08:00:30 +01004520 if (!(h2s->flags & H2_SF_RST_SENT) &&
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004521 h2s_send_rst_stream(h2c, h2s) <= 0)
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004522 goto add_to_list;
Willy Tarreau90c32322017-11-24 08:00:30 +01004523
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004524 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02004525 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau00dd0782018-03-01 16:31:34 +01004526 h2s_close(h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004527 done:
4528 h2s->flags &= ~H2_SF_WANT_SHUTR;
Willy Tarreau7838a792019-08-12 18:42:03 +02004529 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004530 return;
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004531add_to_list:
Willy Tarreau5723f292020-01-10 15:16:57 +01004532 /* Let the handler know we want to shutr, and add ourselves to the
4533 * most relevant list if not yet done. h2_deferred_shut() will be
4534 * automatically called via the shut_tl tasklet when there's room
4535 * again.
4536 */
4537 h2s->flags |= H2_SF_WANT_SHUTR;
Willy Tarreau2b718102021-04-21 07:32:39 +02004538 if (!LIST_INLIST(&h2s->list)) {
Willy Tarreau5723f292020-01-10 15:16:57 +01004539 if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02004540 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreau5723f292020-01-10 15:16:57 +01004541 else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
Willy Tarreau2b718102021-04-21 07:32:39 +02004542 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub2e290a2018-03-30 17:35:38 +02004543 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004544 TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
Willy Tarreau88bdba32019-05-13 18:17:53 +02004545 return;
Willy Tarreau62f52692017-10-08 23:01:42 +02004546}
4547
/* Performs a synchronous or asynchronous shutw(). Closes the local side of
 * the stream: either cleanly with an empty DATA frame carrying END_STREAM
 * when headers were already sent, or abruptly with RST_STREAM (and possibly
 * a connection-level GOAWAY) otherwise. If the required frame cannot be
 * emitted right now (mux buffer full/blocked), the stream is queued and the
 * operation is retried later from the shut_tl tasklet via h2_deferred_shut().
 */
static void h2_do_shutw(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	/* already closed on our side: nothing to send, just clear the flag */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->st != H2_SS_ERROR && (h2s->flags & H2_SF_HEADERS_SENT)) {
		/* we can cleanly close using an empty data frame only after headers */

		/* no need for an empty DATA frame if END_STREAM or RST was
		 * already sent; a return <= 0 means it couldn't be emitted now.
		 */
		if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
		    h2_send_empty_data_es(h2s) <= 0)
			goto add_to_list;

		/* remote side already closed -> fully closed, else half-closed local */
		if (h2s->st == H2_SS_HREM)
			h2s_close(h2s);
		else
			h2s->st = H2_SS_HLOC;
	} else {
		/* a connstream may require us to immediately kill the whole connection
		 * for example because of a "tcp-request content reject" rule that is
		 * normally used to limit abuse. In this case we schedule a goaway to
		 * close the connection.
		 */
		if ((h2s->flags & H2_SF_KILL_CONN) &&
		    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
			TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
			h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
		}
		else {
			/* Nothing was sent yet for this stream, so reset with
			 * REFUSED_STREAM error to let the client retry the
			 * request.
			 */
			TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_REFUSED_STREAM);
		}

		/* emit the RST_STREAM now unless one was already sent;
		 * <= 0 means it must be deferred.
		 */
		if (!(h2s->flags & H2_SF_RST_SENT) &&
		    h2s_send_rst_stream(h2c, h2s) <= 0)
			goto add_to_list;

		h2s_close(h2s);
	}

	/* make sure the connection-level send tasklet will flush our frame */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);

	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);

 done:
	h2s->flags &= ~H2_SF_WANT_SHUTW;
	return;

 add_to_list:
	/* Let the handler know we want to shutw, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTW;
	if (!LIST_INLIST(&h2s->list)) {
		/* blocked on mux flow control vs blocked on mux buffer */
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4622
/* This is the tasklet referenced in h2s->shut_tl, it is used for
 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
 * and prevented the last frame from being emitted.
 *
 * Returns the task <t> to keep it scheduled, or NULL when the stream's
 * connection was released here (the task must not be touched anymore then).
 */
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
{
	struct h2s *h2s = ctx;
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->flags & H2_SF_NOTIFIED) {
		/* some data processing remains to be done first */
		goto end;
	}

	/* retry whichever direction(s) were previously deferred; these
	 * calls clear the WANT_* flags themselves on success.
	 */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		h2_do_shutw(h2s);

	if (h2s->flags & H2_SF_WANT_SHUTR)
		h2_do_shutr(h2s);

	if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* We're done trying to send, remove ourself from the send_list */
		LIST_DEL_INIT(&h2s->list);

		/* no conn_stream attached anymore: the stream can be freed,
		 * and possibly the whole connection with it.
		 */
		if (!h2s->cs) {
			h2s_destroy(h2s);
			if (h2c_is_dead(h2c)) {
				h2_release(h2c);
				/* tell the scheduler the task is gone */
				t = NULL;
			}
		}
	}
 end:
	TRACE_LEAVE(H2_EV_STRM_SHUT);
	return t;
}
4661
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004662/* shutr() called by the conn_stream (mux_ops.shutr) */
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004663static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
4664{
4665 struct h2s *h2s = cs->ctx;
4666
Willy Tarreau7838a792019-08-12 18:42:03 +02004667 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau3cf69fe2019-05-14 10:44:40 +02004668 if (cs->flags & CS_FL_KILL_CONN)
4669 h2s->flags |= H2_SF_KILL_CONN;
4670
Willy Tarreau7838a792019-08-12 18:42:03 +02004671 if (mode)
4672 h2_do_shutr(h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004673
Willy Tarreau7838a792019-08-12 18:42:03 +02004674 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004675}
4676
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004677/* shutw() called by the conn_stream (mux_ops.shutw) */
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004678static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
4679{
4680 struct h2s *h2s = cs->ctx;
4681
Willy Tarreau7838a792019-08-12 18:42:03 +02004682 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau3cf69fe2019-05-14 10:44:40 +02004683 if (cs->flags & CS_FL_KILL_CONN)
4684 h2s->flags |= H2_SF_KILL_CONN;
4685
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004686 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004687 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004688}
4689
/* Decode the payload of a HEADERS frame and produce the HTX request or response
 * depending on the connection's side. Returns a positive value on success, a
 * negative value on failure, or 0 if it couldn't proceed. May report connection
 * errors in h2c->errcode if the frame is non-decodable and the connection
 * unrecoverable. In absence of connection error when a failure is reported, the
 * caller must assume a stream error.
 *
 * The function may fold CONTINUATION frames into the initial HEADERS frame
 * by removing padding and next frame header, then moving the CONTINUATION
 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
 * leaving a hole between the main frame and the beginning of the next one.
 * The possibly remaining incomplete or next frame at the end may be moved
 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
 * HEADERS frames are unwrapped into a temporary buffer before decoding.
 *
 * A buffer at the beginning of processing may look like this :
 *
 *  ,---.---------.-----.--------------.--------------.------.---.
 *  |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
 *  `---^---------^-----^--------------^--------------^------^---'
 *  |   |         <-----’                                    |
 * area |           dpl                                      `--- wrap
 *      `--------------’
 *             dfl
 *      `-------------------------------------------------—--’
 *    head                    data
 *
 * Padding is automatically overwritten when folding, participating to the
 * hole size after dfl :
 *
 *  ,---.------------------------.-----.--------------.------.---.
 *  |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
 *  `---^------------------------^-----^--------------^------^---'
 *  |   |                        <-----’                     |
 * area |                          hole                      `--- wrap
 *      `-----------------------’
 *                 dfl
 *      `-------------------------------------------------—--’
 *    head                    data
 *
 * Please note that the HEADERS frame is always deprived from its PADLEN byte
 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
 * bit.
 *
 * The <flags> field must point to either the stream's flags or to a copy of it
 * so that the function can update the following flags :
 *   - H2_SF_DATA_CLEN when content-length is seen
 *   - H2_SF_HEADERS_RCVD once the frame is successfully decoded
 *
 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
 * decoding, in order to detect if we're dealing with a headers or a trailers
 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
 */
static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
{
	const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
	struct buffer *tmp = get_trash_chunk();
	struct http_hdr list[global.tune.max_http_hdr * 2];
	struct buffer *copy = NULL;
	unsigned int msgf;
	struct htx *htx = NULL;
	int flen; // header frame len
	int hole = 0;
	int ret = 0;
	int outlen;
	int wrap;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);

next_frame:
	if (b_data(&h2c->dbuf) - hole < h2c->dfl)
		goto leave; // incomplete input frame

	/* No END_HEADERS means there's one or more CONTINUATION frames. In
	 * this case, we'll try to paste it immediately after the initial
	 * HEADERS frame payload and kill any possible padding. The initial
	 * frame's length will be increased to represent the concatenation
	 * of the two frames. The next frame is read from position <tlen>
	 * and written at position <flen> (minus padding if some is present).
	 */
	if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
		struct h2_fh hdr;
		int clen; // CONTINUATION frame's payload length

		TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
		if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
			/* no more data, the buffer may be full, either due to
			 * too large a frame or because of too large a hole that
			 * we're going to compact at the end.
			 */
			goto leave;
		}

		if (hdr.ft != H2_FT_CONTINUATION) {
			/* RFC7540#6.10: frame of unexpected type */
			TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (hdr.sid != h2c->dsi) {
			/* RFC7540#6.10: frame of different stream */
			TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
			/* RFC7540#4.2: invalid frame length */
			TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		/* detect when we must stop aggregating frames */
		h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;

		/* Take as much as we can of the CONTINUATION frame's payload */
		clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
		if (clen > hdr.len)
			clen = hdr.len;

		/* Move the frame's payload over the padding, hole and frame
		 * header. At least one of hole or dpl is null (see diagrams
		 * above). The hole moves after the new aggregated frame.
		 */
		b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
		h2c->dfl += hdr.len - h2c->dpl;
		hole     += h2c->dpl + 9;
		h2c->dpl  = 0;
		TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
		goto next_frame;
	}

	/* flen is the payload to decode, padding excluded */
	flen = h2c->dfl - h2c->dpl;

	/* if the input buffer wraps, take a temporary copy of it (rare) */
	wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
	if (wrap < h2c->dfl) {
		copy = alloc_trash_chunk();
		if (!copy) {
			TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			goto fail;
		}
		memcpy(copy->area, b_head(&h2c->dbuf), wrap);
		memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
		hdrs = (uint8_t *) copy->area;
	}

	/* Skip StreamDep and weight for now (we don't support PRIORITY) */
	if (h2c->dff & H2_F_HEADERS_PRIORITY) {
		if (read_n32(hdrs) == h2c->dsi) {
			/* RFC7540#5.3.1 : stream dep may not depend on itself */
			TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
			goto fail;
		}

		if (flen < 5) {
			TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
			goto fail;
		}

		hdrs += 5; // stream dep = 4, weight = 1
		flen -= 5;
	}

	if (!h2_get_buf(h2c, rxbuf)) {
		TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SALLOC;
		goto leave;
	}

	/* we can't retry a failed decompression operation so we must be very
	 * careful not to take any risks. In practice the output buffer is
	 * always empty except maybe for trailers, in which case we simply have
	 * to wait for the upper layer to finish consuming what is available.
	 */
	htx = htx_from_buf(rxbuf);
	if (!htx_is_empty(htx)) {
		TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
		h2c->flags |= H2_CF_DEM_SFULL;
		goto leave;
	}

	/* past this point we cannot roll back in case of error */
	outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
	                            sizeof(list)/sizeof(list[0]), tmp);
	if (outlen < 0) {
		TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
		goto fail;
	}

	/* The PACK decompressor was updated, let's update the input buffer and
	 * the parser's state to commit these changes and allow us to later
	 * fail solely on the stream if needed.
	 */
	b_del(&h2c->dbuf, h2c->dfl + hole);
	h2c->dfl = hole = 0;
	h2c->st0 = H2_CS_FRAME_H;

	/* OK now we have our header list in <list> */
	msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
	msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
	/* If an Extended CONNECT has been sent on this stream, set message flag
	 * to convert 200 response to 101 htx response */
	msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;

	/* a second HEADERS block on an already-started message is a trailers block */
	if (*flags & H2_SF_HEADERS_RCVD)
		goto trailers;

	/* This is the first HEADERS frame so it's a headers block */
	if (h2c->flags & H2_CF_IS_BACK)
		outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
	else
		outlen = h2_make_htx_request(list, htx, &msgf, body_len);

	if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
		/* too large headers? this is a stream error only */
		TRACE_STATE("message headers too large", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
		htx->flags |= HTX_FL_PARSING_ERROR;
		goto fail;
	}

	if (msgf & H2_MSGF_BODY) {
		/* a payload is present */
		if (msgf & H2_MSGF_BODY_CL) {
			*flags |= H2_SF_DATA_CLEN;
			htx->extra = *body_len;
		}
	}
	if (msgf & H2_MSGF_BODYLESS_RSP)
		*flags |= H2_SF_BODYLESS_RESP;

	if (msgf & H2_MSGF_BODY_TUNNEL)
		*flags |= H2_SF_BODY_TUNNEL;
	else {
		/* Abort the tunnel attempt, if any */
		if (*flags & H2_SF_BODY_TUNNEL)
			*flags |= H2_SF_TUNNEL_ABRT;
		*flags &= ~H2_SF_BODY_TUNNEL;
	}

 done:
	/* indicate that a HEADERS frame was received for this stream, except
	 * for 1xx responses. For 1xx responses, another HEADERS frame is
	 * expected.
	 */
	if (!(msgf & H2_MSGF_RSP_1XX))
		*flags |= H2_SF_HEADERS_RCVD;

	if (h2c->dff & H2_F_HEADERS_END_STREAM) {
		/* no more data are expected for this message */
		htx->flags |= HTX_FL_EOM;
	}

	if (msgf & H2_MSGF_EXT_CONNECT)
		*flags |= H2_SF_EXT_CONNECT_RCVD;

	/* success */
	ret = 1;

 leave:
	/* If there is a hole left and it's not at the end, we are forced to
	 * move the remaining data over it.
	 */
	if (hole) {
		if (b_data(&h2c->dbuf) > h2c->dfl + hole)
			b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
			       b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
		b_sub(&h2c->dbuf, hole);
	}

	if (b_full(&h2c->dbuf) && h2c->dfl) {
		/* too large frames */
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = -1;
	}

	if (htx)
		htx_to_buf(htx, rxbuf);
	free_trash_chunk(copy);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
	return ret;

 fail:
	ret = -1;
	goto leave;

 trailers:
	/* This is the last HEADERS frame hence a trailer */
	if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
		/* It's a trailer but it's missing ES flag */
		TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		goto fail;
	}

	/* Trailers terminate a DATA sequence */
	if (h2_make_htx_trailers(list, htx) <= 0) {
		TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
		goto fail;
	}
	goto done;
}
5002
Christopher Faulet9b79a102019-07-15 11:22:56 +02005003/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005004 * parser state is automatically updated. Returns > 0 if it could completely
5005 * send the current frame, 0 if it couldn't complete, in which case
5006 * CS_FL_RCV_MORE must be checked to know if some data remain pending (an empty
5007 * DATA frame can return 0 as a valid result). Stream errors are reported in
5008 * h2s->errcode and connection errors in h2c->errcode. The caller must already
5009 * have checked the frame header and ensured that the frame was complete or the
5010 * buffer full. It changes the frame state to FRAME_A once done.
Willy Tarreau454f9052017-10-26 19:40:35 +02005011 */
Willy Tarreau454b57b2018-02-26 15:50:05 +01005012static int h2_frt_transfer_data(struct h2s *h2s)
Willy Tarreau454f9052017-10-26 19:40:35 +02005013{
5014 struct h2c *h2c = h2s->h2c;
Christopher Faulet9b79a102019-07-15 11:22:56 +02005015 int block;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005016 unsigned int flen = 0;
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005017 struct htx *htx = NULL;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005018 struct buffer *csbuf;
Christopher Faulet9b79a102019-07-15 11:22:56 +02005019 unsigned int sent;
Willy Tarreau454f9052017-10-26 19:40:35 +02005020
Willy Tarreau7838a792019-08-12 18:42:03 +02005021 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
5022
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005023 h2c->flags &= ~H2_CF_DEM_SFULL;
Willy Tarreau454f9052017-10-26 19:40:35 +02005024
Olivier Houchard638b7992018-08-16 15:41:52 +02005025 csbuf = h2_get_buf(h2c, &h2s->rxbuf);
Willy Tarreaud755ea62018-02-26 15:44:54 +01005026 if (!csbuf) {
5027 h2c->flags |= H2_CF_DEM_SALLOC;
Willy Tarreau7838a792019-08-12 18:42:03 +02005028 TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005029 goto fail;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005030 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02005031 htx = htx_from_buf(csbuf);
Willy Tarreaud755ea62018-02-26 15:44:54 +01005032
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005033try_again:
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005034 flen = h2c->dfl - h2c->dpl;
5035 if (!flen)
Willy Tarreau4a28da12018-01-04 14:41:00 +01005036 goto end_transfer;
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005037
Willy Tarreauc9fa0482018-07-10 17:43:27 +02005038 if (flen > b_data(&h2c->dbuf)) {
5039 flen = b_data(&h2c->dbuf);
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005040 if (!flen)
Willy Tarreau454b57b2018-02-26 15:50:05 +01005041 goto fail;
Willy Tarreaud755ea62018-02-26 15:44:54 +01005042 }
5043
Christopher Faulet9b79a102019-07-15 11:22:56 +02005044 block = htx_free_data_space(htx);
5045 if (!block) {
5046 h2c->flags |= H2_CF_DEM_SFULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02005047 TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005048 goto fail;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005049 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02005050 if (flen > block)
5051 flen = block;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005052
Christopher Faulet9b79a102019-07-15 11:22:56 +02005053 /* here, flen is the max we can copy into the output buffer */
5054 block = b_contig_data(&h2c->dbuf, 0);
5055 if (flen > block)
5056 flen = block;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005057
Christopher Faulet9b79a102019-07-15 11:22:56 +02005058 sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
Willy Tarreau022e5e52020-09-10 09:33:15 +02005059 TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);
Willy Tarreau454f9052017-10-26 19:40:35 +02005060
Christopher Faulet9b79a102019-07-15 11:22:56 +02005061 b_del(&h2c->dbuf, sent);
5062 h2c->dfl -= sent;
5063 h2c->rcvd_c += sent;
5064 h2c->rcvd_s += sent; // warning, this can also affect the closed streams!
Willy Tarreau454f9052017-10-26 19:40:35 +02005065
Christopher Faulet9b79a102019-07-15 11:22:56 +02005066 if (h2s->flags & H2_SF_DATA_CLEN) {
5067 h2s->body_len -= sent;
5068 htx->extra = h2s->body_len;
Willy Tarreaueba10f22018-04-25 20:44:22 +02005069 }
5070
Christopher Faulet9b79a102019-07-15 11:22:56 +02005071 if (sent < flen) {
Willy Tarreaud755ea62018-02-26 15:44:54 +01005072 h2c->flags |= H2_CF_DEM_SFULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02005073 TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005074 goto fail;
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005075 }
5076
Christopher Faulet9b79a102019-07-15 11:22:56 +02005077 goto try_again;
5078
Willy Tarreau4a28da12018-01-04 14:41:00 +01005079 end_transfer:
Willy Tarreau8fc016d2017-12-11 18:27:15 +01005080 /* here we're done with the frame, all the payload (except padding) was
5081 * transferred.
5082 */
Willy Tarreaueba10f22018-04-25 20:44:22 +02005083
Christopher Faulet5be651d2021-01-22 15:28:03 +01005084 if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
 5085		/* no more data are expected for this message. This adds the EOM
 5086		 * flag but only on the response path or if no tunnel attempt
 5087		 * was aborted. Otherwise (request path + tunnel aborted), the
 5088		 * EOM was already reported.
5089 */
Christopher Faulet33724322021-02-10 09:04:59 +01005090 if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
5091 /* If we receive an empty DATA frame with ES flag while the HTX
5092 * message is empty, we must be sure to push a block to be sure
5093 * the HTX EOM flag will be handled on the other side. It is a
5094 * workaround because for now it is not possible to push empty
5095 * HTX DATA block. And without this block, there is no way to
5096 * "commit" the end of the message.
5097 */
5098 if (htx_is_empty(htx)) {
5099 if (!htx_add_endof(htx, HTX_BLK_EOT))
5100 goto fail;
5101 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005102 htx->flags |= HTX_FL_EOM;
Christopher Faulet33724322021-02-10 09:04:59 +01005103 }
Willy Tarreaueba10f22018-04-25 20:44:22 +02005104 }
5105
Willy Tarreaud1023bb2018-03-22 16:53:12 +01005106 h2c->rcvd_c += h2c->dpl;
5107 h2c->rcvd_s += h2c->dpl;
5108 h2c->dpl = 0;
Willy Tarreau454f9052017-10-26 19:40:35 +02005109 h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
Christopher Faulet9b79a102019-07-15 11:22:56 +02005110 htx_to_buf(htx, csbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02005111 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau61ea7dc2018-12-01 23:23:04 +01005112 return 1;
Willy Tarreau454b57b2018-02-26 15:50:05 +01005113 fail:
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01005114 if (htx)
5115 htx_to_buf(htx, csbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02005116 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Willy Tarreau454b57b2018-02-26 15:50:05 +01005117 return 0;
Willy Tarreau454f9052017-10-26 19:40:35 +02005118}
5119
/* Try to send a HEADERS frame matching HTX response present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_frt_make_resp_headers(struct h2s *h2s, struct htx *htx)
{
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_sl *sl;
	enum htx_blk_type type;
	int es_now = 0;       /* set when END_STREAM must be put on this frame */
	int ret = 0;          /* number of HTX bytes consumed, the return value */
	int hdr;
	
	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	
	if (h2c_mux_busy(h2c, h2s)) {
		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
		return 0;
	}
	
	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);
		
		if (type == HTX_BLK_UNUSED)
			continue;
		
		if (type == HTX_BLK_EOH)
			break;
		
		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			/* reserve one slot for the end-of-list marker added below */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			
			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else if (type == HTX_BLK_RES_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			h2s->status = sl->info.res.status;
			if (h2s->status == 204 || h2s->status == 304)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (h2s->status < 100 || h2s->status > 999) {
				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			else if (h2s->status == 101) {
				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
					h2s->status = 200;
					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
				}
				else {
					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
					goto fail;
				}
			}
			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
				/* Abort the tunnel attempt */
				h2s->flags &= ~H2_SF_BODY_TUNNEL;
				h2s->flags |= H2_SF_TUNNEL_ABRT;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}
	
	/* The start-line must be defined */
	BUG_ON(!sl);
	
	/* marker for end of headers */
	list[hdr].n = ist("");
	
	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}
	
	chunk_reset(&outbuf);
	
	/* 9 bytes = H2 frame header (3-byte length + type + flags + 4-byte
	 * stream id); realign the buffer if contiguous room is too small but
	 * wrapped space exists.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}
	
	if (outbuf.size < 9)
		goto full;
	
	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;
	
	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
		 * dynamic table size update so that some clients are not
		 * confused. In practice we only need to send the DTSU when the
		 * advertised size is lower than the current one, and since we
		 * don't use it and don't care about the default 4096 bytes,
		 * we only ack it with a zero size thus we at most have to deal
		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
		 * below for the whole context and interoperability risks:
		 *   https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
		 */
		if (b_room(&outbuf) < 1)
			goto full;
		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes
		
		/* let's not update the flags now but only once the buffer is
		 * really committed.
		 */
	}
	
	/* encode status, which necessarily is the first one */
	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}
	
	/* encode all headers, stop at empty name */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(list[hdr].n, ist("connection")) ||
		    isteq(list[hdr].n, ist("proxy-connection")) ||
		    isteq(list[hdr].n, ist("keep-alive")) ||
		    isteq(list[hdr].n, ist("upgrade")) ||
		    isteq(list[hdr].n, ist("transfer-encoding")))
			continue;
		
		/* Skip all pseudo-headers */
		if (*(list[hdr].n.ptr) == ':')
			continue;
		
		if (isteq(list[hdr].n, ist("")))
			break; // end
		
		if (!hpack_encode_header(&outbuf, list[hdr].n, list[hdr].v)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);
	
	/* payload exceeds the peer's max frame size: split into
	 * HEADERS + CONTINUATION frames.
	 */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}
	
	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
	
	/* remove all header blocks including the EOH and compute the
	 * corresponding size.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}
	
	if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
		/* Response already closed: add END_STREAM */
		es_now = 1;
	}
	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
		/* EOM+empty: we may need to add END_STREAM except for 1xx
		 * responses and tunneled responses.
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
			es_now = 1;
	}
	
	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
	
	/* commit the H2 response */
	b_add(mbuf, outbuf.data);
	
	/* indicates the HEADERS frame was sent, except for 1xx responses. For
	 * 1xx responses, another HEADERS frame is expected.
	 */
	if (h2s->status >= 200)
		h2s->flags |= H2_SF_HEADERS_SENT;
	
	if (h2c->flags & H2_CF_SHTS_UPDATED) {
		/* was sent above */
		h2c->flags |= H2_CF_DTSU_EMITTED;
		h2c->flags &= ~H2_CF_SHTS_UPDATED;
	}
	
	if (es_now) {
		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);
	}
	
	/* OK we could properly deliver the response */
 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* current mbuf is full: try to append a new one to the ring and
	 * restart, otherwise report the mux as full and let the caller retry.
	 */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5381
Willy Tarreau80739692018-10-05 11:35:57 +02005382/* Try to send a HEADERS frame matching HTX request present in HTX message
5383 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5384 * must check the stream's status to detect any error which might have happened
5385 * subsequently to a successful send. The htx blocks are automatically removed
5386 * from the message. The htx message is assumed to be valid since produced from
5387 * the internal code, hence it contains a start line, an optional series of
5388 * header blocks and an end of header, otherwise an invalid frame could be
5389 * emitted and the resulting htx message could be left in an inconsistent state.
5390 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02005391static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx)
Willy Tarreau80739692018-10-05 11:35:57 +02005392{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005393 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau80739692018-10-05 11:35:57 +02005394 struct h2c *h2c = h2s->h2c;
5395 struct htx_blk *blk;
Willy Tarreau80739692018-10-05 11:35:57 +02005396 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005397 struct buffer *mbuf;
Willy Tarreau80739692018-10-05 11:35:57 +02005398 struct htx_sl *sl;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005399 struct ist meth, uri, auth, host = IST_NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005400 enum htx_blk_type type;
5401 int es_now = 0;
5402 int ret = 0;
5403 int hdr;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005404 int extended_connect = 0;
Willy Tarreau80739692018-10-05 11:35:57 +02005405
Willy Tarreau7838a792019-08-12 18:42:03 +02005406 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5407
Willy Tarreau80739692018-10-05 11:35:57 +02005408 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005409 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005410 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02005411 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005412 return 0;
5413 }
5414
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005415 /* get the start line (we do have one) and the rest of the headers,
5416 * that we dump starting at header 0 */
5417 sl = NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005418 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005419 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005420 type = htx_get_blk_type(blk);
5421
5422 if (type == HTX_BLK_UNUSED)
5423 continue;
5424
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005425 if (type == HTX_BLK_EOH)
Willy Tarreau80739692018-10-05 11:35:57 +02005426 break;
5427
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005428 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005429 BUG_ON(!sl); /* The start-line must be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005430 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5431 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5432 goto fail;
5433 }
Willy Tarreau80739692018-10-05 11:35:57 +02005434
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005435 list[hdr].n = htx_get_blk_name(htx, blk);
5436 list[hdr].v = htx_get_blk_value(htx, blk);
Christopher Faulet67d58092019-10-02 10:51:38 +02005437
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005438 /* Skip header if same name is used to add the server name */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005439 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
5440 isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005441 continue;
Christopher Faulet67d58092019-10-02 10:51:38 +02005442
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005443 /* Convert connection: upgrade to Extended connect from rfc 8441 */
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005444 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005445 /* rfc 7230 #6.1 Connection = list of tokens */
5446 struct ist connection_ist = list[hdr].v;
5447 do {
5448 if (isteqi(iststop(connection_ist, ','),
5449 ist("upgrade"))) {
Amaury Denoyelle0df04362021-10-18 09:43:29 +02005450 if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
5451 TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5452 goto fail;
5453 }
5454
Amaury Denoyellee0c258c2021-10-18 10:05:16 +02005455 TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005456 h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
5457 sl->info.req.meth = HTTP_METH_CONNECT;
5458 meth = ist("CONNECT");
5459
5460 extended_connect = 1;
5461 break;
5462 }
5463
5464 connection_ist = istadv(istfind(connection_ist, ','), 1);
5465 } while (istlen(connection_ist));
5466 }
5467
Christopher Faulet52a5ec22021-09-09 09:52:51 +02005468 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005469 /* rfc 7230 #6.7 Upgrade = list of protocols
5470 * rfc 8441 #4 Extended connect = :protocol is single-valued
5471 *
5472 * only first HTTP/1 protocol is preserved
5473 */
5474 const struct ist protocol = iststop(list[hdr].v, ',');
5475 /* upgrade_protocol field is 16 bytes long in h2s */
5476 istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
5477 }
5478
5479 if (isteq(list[hdr].n, ist("host")))
5480 host = list[hdr].v;
5481
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005482 hdr++;
5483 }
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005484 else if (type == HTX_BLK_REQ_SL) {
5485 BUG_ON(sl); /* Only one start-line expected */
5486 sl = htx_get_blk_ptr(htx, blk);
5487 meth = htx_sl_req_meth(sl);
5488 uri = htx_sl_req_uri(sl);
5489 if (sl->info.req.meth == HTTP_METH_HEAD)
5490 h2s->flags |= H2_SF_BODYLESS_RESP;
5491 if (unlikely(uri.len == 0)) {
5492 TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5493 goto fail;
5494 }
5495 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005496 else {
5497 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5498 goto fail;
5499 }
Willy Tarreau80739692018-10-05 11:35:57 +02005500 }
5501
Christopher Faulet56498132021-01-29 11:39:43 +01005502 /* The start-line must be defined */
5503 BUG_ON(!sl);
5504
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005505 /* Now add the server name to a header (if requested) */
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005506 if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005507 struct server *srv = objt_server(h2c->conn->target);
5508
5509 if (srv) {
Tim Duesterhusb4b03772022-03-05 00:52:43 +01005510 list[hdr].n = h2c->proxy->server_id_hdr_name;
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005511 list[hdr].v = ist(srv->id);
5512 hdr++;
5513 }
5514 }
5515
Willy Tarreau80739692018-10-05 11:35:57 +02005516 /* marker for end of headers */
5517 list[hdr].n = ist("");
5518
Willy Tarreau9c218e72019-05-26 10:08:28 +02005519 mbuf = br_tail(h2c->mbuf);
5520 retry:
5521 if (!h2_get_buf(h2c, mbuf)) {
5522 h2c->flags |= H2_CF_MUX_MALLOC;
5523 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005524 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005525 return 0;
5526 }
5527
Willy Tarreau80739692018-10-05 11:35:57 +02005528 chunk_reset(&outbuf);
5529
5530 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005531 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5532 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005533 break;
5534 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005535 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau80739692018-10-05 11:35:57 +02005536 }
5537
5538 if (outbuf.size < 9)
5539 goto full;
5540
5541 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5542 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5543 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5544 outbuf.data = 9;
5545
5546 /* encode the method, which necessarily is the first one */
Willy Tarreaubdabc3a2018-12-10 18:25:11 +01005547 if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005548 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005549 goto realign_again;
5550 goto full;
5551 }
5552
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005553 auth = ist(NULL);
5554
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005555 /* RFC7540 #8.3: the CONNECT method must have :
5556 * - :authority set to the URI part (host:port)
5557 * - :method set to CONNECT
5558 * - :scheme and :path omitted
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005559 *
5560 * Note that this is not applicable in case of the Extended CONNECT
5561 * protocol from rfc 8441.
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005562 */
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005563 if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005564 auth = uri;
5565
5566 if (!hpack_encode_header(&outbuf, ist(":authority"), auth)) {
5567 /* output full */
5568 if (b_space_wraps(mbuf))
5569 goto realign_again;
5570 goto full;
5571 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005572 h2s->flags |= H2_SF_BODY_TUNNEL;
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005573 } else {
5574 /* other methods need a :scheme. If an authority is known from
5575 * the request line, it must be sent, otherwise only host is
5576 * sent. Host is never sent as the authority.
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005577 *
5578 * This code is also applicable for Extended CONNECT protocol
5579 * from rfc 8441.
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005580 */
5581 struct ist scheme = { };
Christopher Faulet3b44c542019-06-14 10:46:51 +02005582
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005583 if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
5584 /* the URI seems to start with a scheme */
5585 int len = 1;
5586
5587 while (len < uri.len && uri.ptr[len] != ':')
5588 len++;
5589
5590 if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
5591 /* make the uri start at the authority now */
Tim Duesterhus9f75ed12021-03-02 18:57:26 +01005592 scheme = ist2(uri.ptr, len);
Tim Duesterhus154374c2021-03-02 18:57:27 +01005593 uri = istadv(uri, len + 3);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005594
5595 /* find the auth part of the URI */
Tim Duesterhus92c696e2021-02-28 16:11:36 +01005596 auth = ist2(uri.ptr, 0);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005597 while (auth.len < uri.len && auth.ptr[auth.len] != '/')
5598 auth.len++;
5599
Tim Duesterhus154374c2021-03-02 18:57:27 +01005600 uri = istadv(uri, auth.len);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005601 }
5602 }
5603
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005604 /* For Extended CONNECT, the :authority must be present.
5605 * Use host value for it.
5606 */
5607 if (unlikely(extended_connect) && isttest(host))
5608 auth = host;
5609
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005610 if (!scheme.len) {
5611 /* no explicit scheme, we're using an origin-form URI,
5612 * probably from an H1 request transcoded to H2 via an
5613 * external layer, then received as H2 without authority.
5614 * So we have to look up the scheme from the HTX flags.
5615 * In such a case only http and https are possible, and
5616 * https is the default (sent by browsers).
5617 */
5618 if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
5619 scheme = ist("http");
5620 else
5621 scheme = ist("https");
5622 }
Christopher Faulet3b44c542019-06-14 10:46:51 +02005623
5624 if (!hpack_encode_scheme(&outbuf, scheme)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005625 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005626 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005627 goto realign_again;
5628 goto full;
5629 }
Willy Tarreau80739692018-10-05 11:35:57 +02005630
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005631 if (auth.len && !hpack_encode_header(&outbuf, ist(":authority"), auth)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005632 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005633 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005634 goto realign_again;
5635 goto full;
5636 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005637
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005638 /* encode the path. RFC7540#8.1.2.3: if path is empty it must
5639 * be sent as '/' or '*'.
5640 */
5641 if (unlikely(!uri.len)) {
5642 if (sl->info.req.meth == HTTP_METH_OPTIONS)
5643 uri = ist("*");
5644 else
5645 uri = ist("/");
Willy Tarreau053c1572019-02-01 16:13:59 +01005646 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005647
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005648 if (!hpack_encode_path(&outbuf, uri)) {
5649 /* output full */
5650 if (b_space_wraps(mbuf))
5651 goto realign_again;
5652 goto full;
5653 }
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005654
5655 /* encode the pseudo-header protocol from rfc8441 if using
5656 * Extended CONNECT method.
5657 */
5658 if (unlikely(extended_connect)) {
5659 const struct ist protocol = ist(h2s->upgrade_protocol);
5660 if (isttest(protocol)) {
5661 if (!hpack_encode_header(&outbuf,
5662 ist(":protocol"),
5663 protocol)) {
5664 /* output full */
5665 if (b_space_wraps(mbuf))
5666 goto realign_again;
5667 goto full;
5668 }
5669 }
5670 }
Willy Tarreau80739692018-10-05 11:35:57 +02005671 }
5672
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005673 /* encode all headers, stop at empty name. Host is only sent if we
5674 * do not provide an authority.
5675 */
Willy Tarreau80739692018-10-05 11:35:57 +02005676 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005677 struct ist n = list[hdr].n;
5678 struct ist v = list[hdr].v;
5679
Willy Tarreau80739692018-10-05 11:35:57 +02005680 /* these ones do not exist in H2 and must be dropped. */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005681 if (isteq(n, ist("connection")) ||
5682 (auth.len && isteq(n, ist("host"))) ||
5683 isteq(n, ist("proxy-connection")) ||
5684 isteq(n, ist("keep-alive")) ||
5685 isteq(n, ist("upgrade")) ||
5686 isteq(n, ist("transfer-encoding")))
Willy Tarreau80739692018-10-05 11:35:57 +02005687 continue;
5688
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005689 if (isteq(n, ist("te"))) {
5690 /* "te" may only be sent with "trailers" if this value
5691 * is present, otherwise it must be deleted.
5692 */
5693 v = istist(v, ist("trailers"));
Tim Duesterhus7b5777d2021-03-02 18:57:28 +01005694 if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005695 continue;
5696 v = ist("trailers");
5697 }
5698
Christopher Faulet86d144c2019-08-14 16:32:25 +02005699 /* Skip all pseudo-headers */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005700 if (*(n.ptr) == ':')
Christopher Faulet86d144c2019-08-14 16:32:25 +02005701 continue;
5702
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005703 if (isteq(n, ist("")))
Willy Tarreau80739692018-10-05 11:35:57 +02005704 break; // end
5705
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005706 if (!hpack_encode_header(&outbuf, n, v)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005707 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005708 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005709 goto realign_again;
5710 goto full;
5711 }
5712 }
5713
Willy Tarreaucb985a42019-10-07 16:56:34 +02005714 /* update the frame's size */
5715 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5716
5717 if (outbuf.data > h2c->mfs + 9) {
5718 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5719 /* output full */
5720 if (b_space_wraps(mbuf))
5721 goto realign_again;
5722 goto full;
5723 }
5724 }
5725
Willy Tarreau3a537072021-06-17 08:40:04 +02005726 TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5727
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005728 /* remove all header blocks including the EOH and compute the
5729 * corresponding size.
Willy Tarreau80739692018-10-05 11:35:57 +02005730 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005731 ret = 0;
5732 blk = htx_get_head_blk(htx);
5733 while (blk) {
5734 type = htx_get_blk_type(blk);
5735 ret += htx_get_blksz(blk);
5736 blk = htx_remove_blk(htx, blk);
5737 /* The removed block is the EOH */
5738 if (type == HTX_BLK_EOH)
5739 break;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005740 }
Willy Tarreau80739692018-10-05 11:35:57 +02005741
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005742 if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
5743 /* Request already closed: add END_STREAM */
Willy Tarreau80739692018-10-05 11:35:57 +02005744 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005745 }
5746 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
5747 /* EOM+empty: we may need to add END_STREAM (except for CONNECT
5748 * request)
5749 */
5750 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5751 es_now = 1;
5752 }
Willy Tarreau80739692018-10-05 11:35:57 +02005753
Willy Tarreau80739692018-10-05 11:35:57 +02005754 if (es_now)
5755 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5756
5757 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005758 b_add(mbuf, outbuf.data);
Willy Tarreau80739692018-10-05 11:35:57 +02005759 h2s->flags |= H2_SF_HEADERS_SENT;
5760 h2s->st = H2_SS_OPEN;
5761
Willy Tarreau80739692018-10-05 11:35:57 +02005762 if (es_now) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005763 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02005764 // trim any possibly pending data (eg: inconsistent content-length)
5765 h2s->flags |= H2_SF_ES_SENT;
5766 h2s->st = H2_SS_HLOC;
5767 }
5768
Willy Tarreau80739692018-10-05 11:35:57 +02005769 end:
5770 return ret;
5771 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005772 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5773 goto retry;
Willy Tarreau80739692018-10-05 11:35:57 +02005774 h2c->flags |= H2_CF_MUX_MFULL;
5775 h2s->flags |= H2_SF_BLK_MROOM;
5776 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005777 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005778 goto end;
5779 fail:
5780 /* unparsable HTX messages, too large ones to be produced in the local
5781 * list etc go here (unrecoverable errors).
5782 */
5783 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5784 ret = 0;
5785 goto end;
5786}
5787
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005788/* Try to send a DATA frame matching HTTP response present in HTX structure
Willy Tarreau98de12a2018-12-12 07:03:00 +01005789 * present in <buf>, for stream <h2s>. The
 5790 * caller must check the stream's status to detect any error which might have
 5791 * happened subsequently to a successful send. Returns the number of data bytes
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005792 * consumed, or zero if nothing done.
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005793 */
Christopher Faulet142854b2020-12-02 15:12:40 +01005794static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005795{
 5796	struct h2c *h2c = h2s->h2c;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005797	struct htx *htx;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005798	struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005799	struct buffer *mbuf;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005800	size_t total = 0;
 5801	int es_now = 0;
 5802	int bsize; /* htx block size */
 5803	int fsize; /* h2 frame size */
 5804	struct htx_blk *blk;
 5805	enum htx_blk_type type;
Willy Tarreauc7ce4e32020-01-14 11:42:59 +01005806	int trunc_out; /* non-zero if truncated on out buf */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005807
Willy Tarreau7838a792019-08-12 18:42:03 +02005808	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 5809
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005810	if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005811		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005812		h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02005813		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005814		goto end;
 5815	}
 5816
Willy Tarreau98de12a2018-12-12 07:03:00 +01005817	htx = htx_from_buf(buf);
 5818
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005819	/* We only come here with HTX_BLK_DATA blocks */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005820
 5821 new_frame:
Willy Tarreauee573762018-12-04 15:25:57 +01005822	if (!count || htx_is_empty(htx))
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005823		goto end;
 5824
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005825	if ((h2c->flags & H2_CF_IS_BACK) &&
Christopher Fauletf95f8762021-01-22 11:59:07 +01005826	    (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
 5827		/* The response HEADERS frame not received yet. Thus the tunnel
 5828		 * is not fully established yet. In this situation, we block
 5829		 * data sending.
 5830		 */
 5831		h2s->flags |= H2_SF_BLK_MBUSY;
 5832		TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 5833		goto end;
 5834	}
Christopher Faulet91b21dc2021-01-22 12:13:15 +01005835	else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
 5836		/* a tunnel attempt was aborted but there is pending raw data to xfer to the server.
 5837		 * Thus the stream is closed with the CANCEL error. The error will be reported to
 5838		 * the upper layer as a server abort. But at this stage there is nothing more we can
 5839		 * do. We just wait for the end of the response to be sure to not truncate it.
 5840		 */
 5841		if (!(h2s->flags & H2_SF_ES_RCVD)) {
 5842			TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 5843			h2s->flags |= H2_SF_BLK_MBUSY;
 5844		}
 5845		else {
 5846			TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
 5847			h2s_error(h2s, H2_ERR_CANCEL);
 5848		}
 5849		goto end;
 5850	}
Willy Tarreau98de12a2018-12-12 07:03:00 +01005851
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005852	blk = htx_get_head_blk(htx);
 5853	type = htx_get_blk_type(blk);
 5854	bsize = htx_get_blksz(blk);
 5855	fsize = bsize;
 5856	trunc_out = 0;
 5857	if (type != HTX_BLK_DATA)
 5858		goto end;
 5859
Willy Tarreau9c218e72019-05-26 10:08:28 +02005860	mbuf = br_tail(h2c->mbuf);
 5861 retry:
 5862	if (!h2_get_buf(h2c, mbuf)) {
 5863		h2c->flags |= H2_CF_MUX_MALLOC;
 5864		h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005865		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005866		goto end;
 5867	}
 5868
Willy Tarreau98de12a2018-12-12 07:03:00 +01005869	/* Perform some optimizations to reduce the number of buffer copies.
 5870	 * First, if the mux's buffer is empty and the htx area contains
 5871	 * exactly one data block of the same size as the requested count, and
 5872	 * this count fits within the frame size, the stream's window size, and
 5873	 * the connection's window size, then it's possible to simply swap the
 5874	 * caller's buffer with the mux's output buffer and adjust offsets and
 5875	 * length to match the entire DATA HTX block in the middle. In this
 5876	 * case we perform a true zero-copy operation from end-to-end. This is
 5877	 * the situation that happens all the time with large files. Second, if
 5878	 * this is not possible, but the mux's output buffer is empty, we still
 5879	 * have an opportunity to avoid the copy to the intermediary buffer, by
 5880	 * making the intermediary buffer's area point to the output buffer's
 5881	 * area. In this case we want to skip the HTX header to make sure that
 5882	 * copies remain aligned and that this operation remains possible all
 5883	 * the time. This goes for headers, data blocks and any data extracted
 5884	 * from the HTX blocks.
 5885	 */
 5886	if (unlikely(fsize == count &&
Christopher Faulet192c6a22019-06-11 16:32:24 +02005887	             htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005888	             fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005889		void *old_area = mbuf->area;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005890
Willy Tarreaubcc45952019-05-26 10:05:50 +02005891		if (b_data(mbuf)) {
Willy Tarreau8ab128c2019-03-21 17:47:28 +01005892			/* Too bad there are data left there. We're willing to memcpy/memmove
 5893			 * up to 1/4 of the buffer, which means that it's OK to copy a large
 5894			 * frame into a buffer containing few data if it needs to be realigned,
 5895			 * and that it's also OK to copy few data without realigning. Otherwise
 5896			 * we'll pretend the mbuf is full and wait for it to become empty.
Willy Tarreau98de12a2018-12-12 07:03:00 +01005897			 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005898			if (fsize + 9 <= b_room(mbuf) &&
 5899			    (b_data(mbuf) <= b_size(mbuf) / 4 ||
Willy Tarreau7838a792019-08-12 18:42:03 +02005900			     (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
 5901				TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005902				goto copy;
Willy Tarreau7838a792019-08-12 18:42:03 +02005903			}
Willy Tarreau8ab128c2019-03-21 17:47:28 +01005904
Willy Tarreau9c218e72019-05-26 10:08:28 +02005905			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
 5906				goto retry;
 5907
Willy Tarreau98de12a2018-12-12 07:03:00 +01005908			h2c->flags |= H2_CF_MUX_MFULL;
 5909			h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005910			TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005911			goto end;
 5912		}
 5913
Christopher Faulet925abdf2021-04-27 22:51:07 +02005914		if (htx->flags & HTX_FL_EOM) {
 5915			/* EOM+empty: we may need to add END_STREAM (except for tunneled
 5916			 * message)
 5917			 */
 5918			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
 5919				es_now = 1;
 5920		}
Willy Tarreau98de12a2018-12-12 07:03:00 +01005921		/* map an H2 frame to the HTX block so that we can put the
 5922		 * frame header there.
 5923		 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005924		*mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
 5925		outbuf.area = b_head(mbuf);
Willy Tarreau98de12a2018-12-12 07:03:00 +01005926
 5927		/* prepend an H2 DATA frame header just before the DATA block */
 5928		memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
 5929		write_n32(outbuf.area + 5, h2s->id); // 4 bytes
Christopher Faulet925abdf2021-04-27 22:51:07 +02005930		if (es_now)
 5931			outbuf.area[4] |= H2_F_DATA_END_STREAM;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005932		h2_set_frame_size(outbuf.area, fsize);
 5933
 5934		/* update windows */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005935		h2s->sws -= fsize;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005936		h2c->mws -= fsize;
 5937
 5938		/* and exchange with our old area */
 5939		buf->area = old_area;
 5940		buf->data = buf->head = 0;
 5941		total += fsize;
Christopher Faulet925abdf2021-04-27 22:51:07 +02005942		fsize = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005943
 5944		TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	/* fsize was reset to 0 above, so the "out:" path will neither
	 * attempt another frame nor re-commit any data.
	 */
Christopher Faulet925abdf2021-04-27 22:51:07 +02005945		goto out;
Willy Tarreau98de12a2018-12-12 07:03:00 +01005946	}
Willy Tarreau2fb1d4c2018-12-04 15:28:03 +01005947
Willy Tarreau98de12a2018-12-12 07:03:00 +01005948 copy:
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005949	/* for DATA and EOM we'll have to emit a frame, even if empty */
 5950
 5951	while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005952		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
 5953		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005954			break;
 5955	realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005956		b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005957	}
 5958
 5959	if (outbuf.size < 9) {
Willy Tarreau9c218e72019-05-26 10:08:28 +02005960		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
 5961			goto retry;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005962		h2c->flags |= H2_CF_MUX_MFULL;
 5963		h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005964		TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005965		goto end;
 5966	}
 5967
 5968	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
 5969	memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
 5970	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
 5971	outbuf.data = 9;
 5972
 5973	/* we have in <fsize> the exact number of bytes we need to copy from
 5974	 * the HTX buffer. We need to check this against the connection's and
 5975	 * the stream's send windows, and to ensure that this fits in the max
 5976	 * frame size and in the buffer's available space minus 9 bytes (for
 5977	 * the frame header). The connection's flow control is applied last so
 5978	 * that we can use a separate list of streams which are immediately
 5979	 * unblocked on window opening. Note: we don't implement padding.
 5980	 */
 5981
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005982	if (!fsize)
 5983		goto send_empty;
 5984
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005985	if (h2s_mws(h2s) <= 0) {
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005986		h2s->flags |= H2_SF_BLK_SFCTL;
Willy Tarreau2b718102021-04-21 07:32:39 +02005987		if (LIST_INLIST(&h2s->list))
Olivier Houchardbfe2a832019-05-10 14:02:21 +02005988			LIST_DEL_INIT(&h2s->list);
Willy Tarreau2b718102021-04-21 07:32:39 +02005989		LIST_APPEND(&h2c->blocked_list, &h2s->list);
Willy Tarreau7838a792019-08-12 18:42:03 +02005990		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005991		goto end;
 5992	}
 5993
Willy Tarreauee573762018-12-04 15:25:57 +01005994	if (fsize > count)
 5995		fsize = count;
 5996
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02005997	if (fsize > h2s_mws(h2s))
 5998		fsize = h2s_mws(h2s); // >0
Willy Tarreau0c535fd2018-12-01 19:25:56 +01005999
 6000	if (h2c->mfs && fsize > h2c->mfs)
 6001		fsize = h2c->mfs; // >0
 6002
 6003	if (fsize + 9 > outbuf.size) {
Willy Tarreau455d5682019-05-24 19:42:18 +02006004		/* It doesn't fit at once. If it at least fits once split and
 6005		 * the amount of data to move is low, let's defragment the
 6006		 * buffer now.
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006007		 */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006008		if (b_space_wraps(mbuf) &&
 6009		    (fsize + 9 <= b_room(mbuf)) &&
 6010		    b_data(mbuf) <= MAX_DATA_REALIGN)
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006011			goto realign_again;
 6012		fsize = outbuf.size - 9;
Willy Tarreauc7ce4e32020-01-14 11:42:59 +01006013		trunc_out = 1;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006014
 6015		if (fsize <= 0) {
 6016			/* no need to send an empty frame here */
Willy Tarreau9c218e72019-05-26 10:08:28 +02006017			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
 6018				goto retry;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006019			h2c->flags |= H2_CF_MUX_MFULL;
 6020			h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006021			TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006022			goto end;
 6023		}
 6024	}
 6025
 6026	if (h2c->mws <= 0) {
 6027		h2s->flags |= H2_SF_BLK_MFCTL;
Willy Tarreau7838a792019-08-12 18:42:03 +02006028		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006029		goto end;
 6030	}
 6031
 6032	if (fsize > h2c->mws)
 6033		fsize = h2c->mws;
 6034
 6035	/* now let's copy this into the output buffer */
 6036	memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02006037	h2s->sws -= fsize;
Willy Tarreau0f799ca2018-12-04 15:20:11 +01006038	h2c->mws -= fsize;
Willy Tarreauee573762018-12-04 15:25:57 +01006039	count -= fsize;
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006040
 6041 send_empty:
 6042	/* update the frame's size */
 6043	h2_set_frame_size(outbuf.area, fsize);
 6044
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006045	/* consume incoming HTX block */
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006046	total += fsize;
 6047	if (fsize == bsize) {
 6048		htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006049		if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
 6050			/* EOM+empty: we may need to add END_STREAM (except for tunneled
 6051			 * message)
 6052			 */
 6053			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
 6054				es_now = 1;
Willy Tarreau7838a792019-08-12 18:42:03 +02006055		}
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006056	}
 6057	else {
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006058		/* we've truncated this block */
 6059		htx_cut_data_blk(htx, blk, fsize);
 6060	}
 6061
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006062	if (es_now)
 6063		outbuf.area[4] |= H2_F_DATA_END_STREAM;
 6064
 6065	/* commit the H2 response */
 6066	b_add(mbuf, fsize + 9);
 6067
Christopher Faulet925abdf2021-04-27 22:51:07 +02006068 out:
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006069	if (es_now) {
 6070		if (h2s->st == H2_SS_OPEN)
 6071			h2s->st = H2_SS_HLOC;
 6072		else
 6073			h2s_close(h2s);
 6074
 6075		h2s->flags |= H2_SF_ES_SENT;
Willy Tarreau7838a792019-08-12 18:42:03 +02006076		TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006077	}
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006078	else if (fsize) {
 6079		if (fsize == bsize) {
 6080			TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6081			goto new_frame;
 6082		}
 6083		else if (trunc_out) {
 6084			/* we've truncated this block */
 6085			goto new_frame;
 6086		}
 6087	}
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006088
 6089 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006090	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006091	return total;
 6092}
6093
Christopher Faulet991febd2020-12-02 15:17:31 +01006094/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
 6095 * ES flag set for stream <h2s>. This function is called for responses known to
 6096 * have no payload. Only DATA blocks are skipped. This means the trailers are
Ilya Shipitsinacf84592021-02-06 22:29:08 +05006097 * still emitted. The caller must check the stream's status to detect any error
Christopher Faulet991febd2020-12-02 15:17:31 +01006098 * which might have happened subsequently to a successful send. Returns the
 6099 * number of data bytes consumed, or zero if nothing done.
 6100 */
 6101static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
 6102{
 6103	struct h2c *h2c = h2s->h2c;
 6104	struct htx *htx;
 6105	int bsize; /* htx block size */
 6106	int fsize; /* h2 frame size */
 6107	struct htx_blk *blk;
 6108	enum htx_blk_type type;
 6109	size_t total = 0;
 6110
 6111	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6112
 6113	if (h2c_mux_busy(h2c, h2s)) {
 6114		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6115		h2s->flags |= H2_SF_BLK_MBUSY;
 6116		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6117		goto end;
 6118	}
 6119
 6120	htx = htx_from_buf(buf);
 6121
 6122 next_data:
 6123	if (!count || htx_is_empty(htx))
 6124		goto end;
 6125	blk = htx_get_head_blk(htx);
 6126	type = htx_get_blk_type(blk);
 6127	bsize = htx_get_blksz(blk);
 6128	fsize = bsize;
 6129	if (type != HTX_BLK_DATA)
 6130		goto end;
 6131
 6132	if (fsize > count)
 6133		fsize = count;
 6134
 6135	if (fsize != bsize)
 6136		goto skip_data;
 6137
 6138	if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
 6139		goto skip_data;
 6140
 6141	/* Here, it is the last block and it is also the end of the message. So
 6142	 * we can emit an empty DATA frame with the ES flag set
 6143	 */
 6144	if (h2_send_empty_data_es(h2s) <= 0)
 6145		goto end;
 6146
	/* the ES was emitted: move the stream forward in its state machine */
 6147	if (h2s->st == H2_SS_OPEN)
 6148		h2s->st = H2_SS_HLOC;
 6149	else
 6150		h2s_close(h2s);
 6151
 6152 skip_data:
 6153	/* consume incoming HTX block */
 6154	total += fsize;
 6155	if (fsize == bsize) {
 6156		TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6157		htx_remove_blk(htx, blk);
 6158		goto next_data;
 6159	}
 6160	else {
 6161		/* we've truncated this block */
 6162		htx_cut_data_blk(htx, blk, fsize);
 6163	}
 6164
 6165 end:
 6166	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
 6167	return total;
 6168}
6169
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006170/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
 6171 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
 6172 * processed. The caller must check the stream's status to detect any error
 6173 * which might have happened subsequently to a successful send. The htx blocks
 6174 * are automatically removed from the message. The htx message is assumed to be
 6175 * valid since produced from the internal code. Processing stops when meeting
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006176 * the EOT, which *is* removed. All trailers are processed at once and sent as a
 6177 * single frame. The ES flag is always set.
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006178 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006179static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006180{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02006181	struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006182	struct h2c *h2c = h2s->h2c;
 6183	struct htx_blk *blk;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006184	struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02006185	struct buffer *mbuf;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006186	enum htx_blk_type type;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006187	int ret = 0;
 6188	int hdr;
 6189	int idx;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006190
Willy Tarreau7838a792019-08-12 18:42:03 +02006191	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
 6192
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006193	if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006194		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006195		h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02006196		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006197		goto end;
 6198	}
 6199
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006200	/* get trailers. */
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006201	hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006202	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006203		type = htx_get_blk_type(blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006204
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006205		if (type == HTX_BLK_UNUSED)
 6206			continue;
 6207
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006208		if (type == HTX_BLK_EOT)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006209			break;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006210		if (type == HTX_BLK_TLR) {
 6211			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
 6212				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
 6213				goto fail;
 6214			}
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006215
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006216			list[hdr].n = htx_get_blk_name(htx, blk);
 6217			list[hdr].v = htx_get_blk_value(htx, blk);
 6218			hdr++;
 6219		}
 6220		else {
 6221			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006222			goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02006223		}
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006224	}
 6225
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006226	/* marker for end of trailers */
 6227	list[hdr].n = ist("");
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006228
Willy Tarreau9c218e72019-05-26 10:08:28 +02006229	mbuf = br_tail(h2c->mbuf);
 6230 retry:
 6231	if (!h2_get_buf(h2c, mbuf)) {
 6232		h2c->flags |= H2_CF_MUX_MALLOC;
 6233		h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006234		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02006235		goto end;
 6236	}
 6237
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006238	chunk_reset(&outbuf);
 6239
 6240	while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02006241		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
 6242		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006243			break;
 6244	realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02006245		b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006246	}
 6247
 6248	if (outbuf.size < 9)
 6249		goto full;
 6250
 6251	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
 6252	memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
 6253	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
 6254	outbuf.data = 9;
 6255
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006256	/* encode all headers */
 6257	for (idx = 0; idx < hdr; idx++) {
 6258		/* these ones do not exist in H2 or must not appear in
 6259		 * trailers and must be dropped.
 6260		 */
 6261		if (isteq(list[idx].n, ist("host")) ||
 6262		    isteq(list[idx].n, ist("content-length")) ||
 6263		    isteq(list[idx].n, ist("connection")) ||
 6264		    isteq(list[idx].n, ist("proxy-connection")) ||
 6265		    isteq(list[idx].n, ist("keep-alive")) ||
 6266		    isteq(list[idx].n, ist("upgrade")) ||
 6267		    isteq(list[idx].n, ist("te")) ||
 6268		    isteq(list[idx].n, ist("transfer-encoding")))
 6269			continue;
 6270
Christopher Faulet86d144c2019-08-14 16:32:25 +02006271		/* Skip all pseudo-headers */
 6272		if (*(list[idx].n.ptr) == ':')
 6273			continue;
 6274
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006275		if (!hpack_encode_header(&outbuf, list[idx].n, list[idx].v)) {
 6276			/* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006277			if (b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006278				goto realign_again;
 6279			goto full;
 6280		}
 6281	}
 6282
Willy Tarreau5121e5d2019-05-06 15:13:41 +02006283	if (outbuf.data == 9) {
 6284		/* here we have a problem, we have nothing to emit (either we
 6285		 * received an empty trailers block, or we removed its
 6286		 * contents above). Because of this we can't send a HEADERS
 6287		 * frame, so we have to cheat and instead send an empty DATA
 6288		 * frame conveying the ES flag.
Willy Tarreau67b8cae2019-02-21 18:16:35 +01006289		 */
 6290		outbuf.area[3] = H2_FT_DATA;
 6291		outbuf.area[4] = H2_F_DATA_END_STREAM;
 6292	}
 6293
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006294	/* update the frame's size */
 6295	h2_set_frame_size(outbuf.area, outbuf.data - 9);
 6296
Willy Tarreau572d9f52019-10-11 16:58:37 +02006297	if (outbuf.data > h2c->mfs + 9) {
 6298		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
 6299			/* output full */
 6300			if (b_space_wraps(mbuf))
 6301				goto realign_again;
 6302			goto full;
 6303		}
 6304	}
 6305
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006306	/* commit the H2 response */
Willy Tarreau7838a792019-08-12 18:42:03 +02006307	TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006308	b_add(mbuf, outbuf.data);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006309	h2s->flags |= H2_SF_ES_SENT;
 6310
 6311	if (h2s->st == H2_SS_OPEN)
 6312		h2s->st = H2_SS_HLOC;
 6313	else
 6314		h2s_close(h2s);
 6315
 6316	/* OK we could properly deliver the response */
	/* NOTE(review): no "goto done" exists within this function; the label
	 * is only reached by falling through from the success path above.
	 */
 6317 done:
Willy Tarreaufb07b3f2019-05-06 11:23:29 +02006318	/* remove all header blocks till the end and compute the corresponding size. */
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006319	ret = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006320	blk = htx_get_head_blk(htx);
 6321	while (blk) {
 6322		type = htx_get_blk_type(blk);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006323		ret += htx_get_blksz(blk);
 6324		blk = htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006325		/* The removed block is the EOT */
 6326		if (type == HTX_BLK_EOT)
 6327			break;
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006328	}
 6329
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006330 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006331	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006332	return ret;
 6333 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02006334	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
 6335		goto retry;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006336	h2c->flags |= H2_CF_MUX_MFULL;
 6337	h2s->flags |= H2_SF_BLK_MROOM;
 6338	ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006339	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006340	goto end;
 6341 fail:
 6342	/* unparsable HTX messages, too large ones to be produced in the local
 6343	 * list etc go here (unrecoverable errors).
 6344	 */
 6345	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
 6346	ret = 0;
 6347	goto end;
 6348}
6349
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006350/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 6351 * event subscriber <es> is not allowed to change from a previous call as long
 6352 * as at least one event is still subscribed. The <event_type> must only be a
 6353 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006354 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006355static int h2_subscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006356{
Olivier Houchard6ff20392018-07-17 18:46:31 +02006357	struct h2s *h2s = cs->ctx;
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006358	struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006359
Willy Tarreau7838a792019-08-12 18:42:03 +02006360	TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006361
	/* only the two RETRY events are supported, and the subscriber must not
	 * change while a subscription is still active.
	 */
 6362	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006363	BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006364
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006365	es->events |= event_type;
 6366	h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006367
 6368	if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006369		TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006370
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006371	if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006372		TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
		/* if the stream is not blocked on stream flow control and not
		 * queued anywhere yet, queue it either to the connection
		 * flow-control list or to the regular send list.
		 */
Olivier Houchardf8338152019-05-14 17:50:32 +02006373		if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006374		    !LIST_INLIST(&h2s->list)) {
Olivier Houchardf8338152019-05-14 17:50:32 +02006375			if (h2s->flags & H2_SF_BLK_MFCTL)
Willy Tarreau2b718102021-04-21 07:32:39 +02006376				LIST_APPEND(&h2c->fctl_list, &h2s->list);
Olivier Houchardf8338152019-05-14 17:50:32 +02006377			else
Willy Tarreau2b718102021-04-21 07:32:39 +02006378				LIST_APPEND(&h2c->send_list, &h2s->list);
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006379		}
Olivier Houchard6ff20392018-07-17 18:46:31 +02006380	}
Willy Tarreau7838a792019-08-12 18:42:03 +02006381	TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006382	return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006383}
6384
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006385/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6386 * The <es> pointer is not allowed to differ from the one passed to the
6387 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006388 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006389static int h2_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006390{
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006391 struct h2s *h2s = cs->ctx;
6392
Willy Tarreau7838a792019-08-12 18:42:03 +02006393 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006394
6395 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006396 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006397
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006398 es->events &= ~event_type;
6399 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006400 h2s->subs = NULL;
6401
6402 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006403 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006404
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006405 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006406 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006407 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006408 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
6409 LIST_DEL_INIT(&h2s->list);
Olivier Houchardd846c262018-10-19 17:24:29 +02006410 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006411
Willy Tarreau7838a792019-08-12 18:42:03 +02006412 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006413 return 0;
6414}
6415
6416
/* Called from the upper layer, to receive data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 *
 * Returns the number of bytes moved from the stream's rxbuf to <buf>, and
 * updates CS_FL_* flags on the conn_stream to reflect EOI/EOS/errors.
 */
static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = cs->ctx;
	struct h2c *h2c = h2s->h2c;
	struct htx *h2s_htx = NULL;
	struct htx *buf_htx = NULL;
	size_t ret = 0;

	TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);

	/* transfer possibly pending data to the upper layer */
	h2s_htx = htx_from_buf(&h2s->rxbuf);
	if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
		/* Here htx_to_buf() will set buffer data to 0 because
		 * the HTX is empty.
		 */
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		goto end;
	}

	/* <ret> starts as the amount pending; what remains after the transfer
	 * is subtracted below so <ret> ends up being the amount moved.
	 */
	ret = h2s_htx->data;
	buf_htx = htx_from_buf(buf);

	/* <buf> is empty and the message is small enough, swap the
	 * buffers. */
	if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
		htx_to_buf(buf_htx, buf);
		htx_to_buf(h2s_htx, &h2s->rxbuf);
		b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
		goto end;
	}

	htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);

	if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
		/* propagate the parsing error to the output message */
		buf_htx->flags |= HTX_FL_PARSING_ERROR;
		if (htx_is_empty(buf_htx))
			cs->flags |= CS_FL_EOI;
	}
	else if (htx_is_empty(h2s_htx))
		/* everything was transferred: forward the end-of-message flag */
		buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);

	buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
	htx_to_buf(buf_htx, buf);
	htx_to_buf(h2s_htx, &h2s->rxbuf);
	ret -= h2s_htx->data;

  end:
	if (b_data(&h2s->rxbuf))
		/* data remains: ask the caller to come back with more room */
		cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
	else {
		cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
		if (h2s->flags & H2_SF_ES_RCVD) {
			cs->flags |= CS_FL_EOI;
			/* Add EOS flag for tunnel */
			if (h2s->flags & H2_SF_BODY_TUNNEL)
				cs->flags |= CS_FL_EOS;
		}
		if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
			cs->flags |= CS_FL_EOS;
		if (cs->flags & CS_FL_ERR_PENDING)
			cs->flags |= CS_FL_ERROR;
		if (b_size(&h2s->rxbuf)) {
			/* rxbuf fully consumed: release it and offer it to
			 * waiters short on buffers
			 */
			b_free(&h2s->rxbuf);
			offer_buffers(NULL, 1);
		}
	}

	if (ret && h2c->dsi == h2s->id) {
		/* demux is blocking on this stream's buffer */
		h2c->flags &= ~H2_CF_DEM_SFULL;
		h2c_restart_reading(h2c, 1);
	}

	TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
	return ret;
}
6506
Olivier Houchardd846c262018-10-19 17:24:29 +02006507
/* Called from the upper layer, to send data from buffer <buf> for no more than
 * <count> bytes. Returns the number of bytes effectively sent. Some status
 * flags may be updated on the conn_stream.
 *
 * The HTX message in <buf> is consumed block by block; start-line, data and
 * trailer blocks each trigger the emission of the corresponding HTTP/2
 * frame(s) via the h2s_*_make_* helpers. The loop stops as soon as the
 * stream is locally closed, blocked (H2_SF_BLK_ANY) or out of data/quota.
 */
static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h2s *h2s = cs->ctx;
	size_t total = 0;
	size_t ret;
	struct htx *htx;
	struct htx_blk *blk;
	enum htx_blk_type btype;
	uint32_t bsize;
	int32_t idx;

	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);

	/* If we were not just woken because we wanted to send but couldn't,
	 * and there's somebody else that is waiting to send, do nothing,
	 * we will subscribe later and be put at the end of the list
	 */
	if (!(h2s->flags & H2_SF_NOTIFIED) &&
	    (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
		TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}
	h2s->flags &= ~H2_SF_NOTIFIED;

	/* connection must have finished its settings exchange before we
	 * may emit stream frames
	 */
	if (h2s->h2c->st0 < H2_CS_FRAME_H) {
		TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
		return 0;
	}

	if (h2s->h2c->st0 >= H2_CS_ERROR) {
		cs->flags |= CS_FL_ERROR;
		TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		return 0;
	}

	htx = htx_from_buf(buf);

	if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
		h2s->flags |= H2_SF_OUTGOING_DATA;

	/* stream ID 0 means the (backend) stream was not assigned an ID
	 * yet: allocate the next one now, at first send.
	 */
	if (h2s->id == 0) {
		int32_t id = h2c_get_next_sid(h2s->h2c);

		if (id < 0) {
			cs->flags |= CS_FL_ERROR;
			TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
			return 0;
		}

		/* re-index the stream under its real ID */
		eb32_delete(&h2s->by_id);
		h2s->by_id.key = h2s->id = id;
		h2s->h2c->max_id = id;
		h2s->h2c->nb_reserved--;
		eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
	}

	while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
	       count && !htx_is_empty(htx)) {
		idx   = htx_get_head(htx);
		blk   = htx_get_blk(htx, idx);
		btype = htx_get_blk_type(blk);
		bsize = htx_get_blksz(blk);

		switch (btype) {
		case HTX_BLK_REQ_SL:
			/* start-line before headers */
			ret = h2s_bck_make_req_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_RES_SL:
			/* start-line before headers */
			ret = h2s_frt_make_resp_headers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_DATA:
			/* all these cause the emission of a DATA frame (possibly empty);
			 * bodyless responses (non-tunnel) have their payload skipped
			 */
			if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
			    (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
				ret = h2s_skip_data(h2s, buf, count);
			else
				ret = h2s_make_data(h2s, buf, count);
			if (ret > 0) {
				/* h2s_make_data() may have realigned <buf>: re-read the htx */
				htx = htx_from_buf(buf);
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		case HTX_BLK_TLR:
		case HTX_BLK_EOT:
			/* This is the first trailers block, all the subsequent ones */
			ret = h2s_make_trailers(h2s, htx);
			if (ret > 0) {
				total += ret;
				count -= ret;
				if (ret < bsize)
					goto done;
			}
			break;

		default:
			/* unexpected block types are dropped and accounted as consumed */
			htx_remove_blk(htx, blk);
			total += bsize;
			count -= bsize;
			break;
		}
	}

  done:
	if (h2s->st >= H2_SS_HLOC) {
		/* trim any possibly pending data after we close (extra CR-LF,
		 * unprocessed trailers, abnormal extra data, ...)
		 */
		total += count;
		count = 0;
	}

	/* RST are sent similarly to frame acks */
	if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
		TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		cs_set_error(cs);
		if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
			h2s_close(h2s);
	}

	htx_to_buf(htx, buf);

	if (total > 0) {
		if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
			TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
			tasklet_wakeup(h2s->h2c->wait_event.tasklet);
		}

	}
	/* If we're waiting for flow control, and we got a shutr on the
	 * connection, we will never be unlocked, so add an error on
	 * the conn_stream.
	 */
	if (conn_xprt_read0_pending(h2s->h2c->conn) &&
	    !b_data(&h2s->h2c->dbuf) &&
	    (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
		TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
		if (cs->flags & CS_FL_EOS)
			cs->flags |= CS_FL_ERROR;
		else
			cs->flags |= CS_FL_ERR_PENDING;
	}

	if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
	    !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* Ok we managed to send something, leave the send_list if we were still there */
		LIST_DEL_INIT(&h2s->list);
	}

	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
	return total;
}
6683
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006684/* for debugging with CLI's "show fd" command */
Willy Tarreau8050efe2021-01-21 08:26:06 +01006685static int h2_show_fd(struct buffer *msg, struct connection *conn)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006686{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01006687 struct h2c *h2c = conn->ctx;
Willy Tarreau987c0632018-12-18 10:32:05 +01006688 struct h2s *h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006689 struct eb32_node *node;
6690 int fctl_cnt = 0;
6691 int send_cnt = 0;
6692 int tree_cnt = 0;
6693 int orph_cnt = 0;
Willy Tarreau60f62682019-05-26 11:32:27 +02006694 struct buffer *hmbuf, *tmbuf;
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006695 int ret = 0;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006696
6697 if (!h2c)
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006698 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006699
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006700 list_for_each_entry(h2s, &h2c->fctl_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006701 fctl_cnt++;
6702
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006703 list_for_each_entry(h2s, &h2c->send_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006704 send_cnt++;
6705
Willy Tarreau3af37712018-12-18 14:34:41 +01006706 h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006707 node = eb32_first(&h2c->streams_by_id);
6708 while (node) {
6709 h2s = container_of(node, struct h2s, by_id);
6710 tree_cnt++;
6711 if (!h2s->cs)
6712 orph_cnt++;
6713 node = eb32_next(node);
6714 }
6715
Willy Tarreau60f62682019-05-26 11:32:27 +02006716 hmbuf = br_head(h2c->mbuf);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006717 tmbuf = br_tail(h2c->mbuf);
Willy Tarreauab2ec452019-08-30 07:07:08 +02006718 chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
Willy Tarreau987c0632018-12-18 10:32:05 +01006719 " .nbst=%u .nbcs=%u .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
Willy Tarreau60f62682019-05-26 11:32:27 +02006720 " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u .msi=%d"
6721 " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006722 h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
Willy Tarreau616ac812018-07-24 14:12:42 +02006723 h2c->nb_streams, h2c->nb_cs, fctl_cnt, send_cnt, tree_cnt, orph_cnt,
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006724 h2c->wait_event.events, h2c->dsi,
Willy Tarreau987c0632018-12-18 10:32:05 +01006725 (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
6726 (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf),
6727 h2c->msi,
Willy Tarreau60f62682019-05-26 11:32:27 +02006728 br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
6729 (unsigned int)b_data(hmbuf), b_orig(hmbuf),
6730 (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
Willy Tarreaubcc45952019-05-26 10:05:50 +02006731 (unsigned int)b_data(tmbuf), b_orig(tmbuf),
6732 (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
Willy Tarreau987c0632018-12-18 10:32:05 +01006733
6734 if (h2s) {
Willy Tarreaued4464e2021-01-20 15:50:03 +01006735 chunk_appendf(msg, " last_h2s=%p .id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u .cs=%p",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006736 h2s, h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
Willy Tarreau987c0632018-12-18 10:32:05 +01006737 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
6738 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf),
6739 h2s->cs);
6740 if (h2s->cs)
Christopher Fauletf835dea2021-12-21 14:35:17 +01006741 chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
6742 h2s->cs->flags, h2s->cs->app);
Willy Tarreau98e40b92021-01-20 16:27:01 +01006743
6744 chunk_appendf(&trash, " .subs=%p", h2s->subs);
6745 if (h2s->subs) {
Christopher Faulet6c93c4e2021-02-25 10:06:29 +01006746 chunk_appendf(&trash, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6747 chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
6748 h2s->subs->tasklet->calls,
6749 h2s->subs->tasklet->context);
6750 if (h2s->subs->tasklet->calls >= 1000000)
6751 ret = 1;
6752 resolve_sym_name(&trash, NULL, h2s->subs->tasklet->process);
6753 chunk_appendf(&trash, ")");
Willy Tarreau98e40b92021-01-20 16:27:01 +01006754 }
Willy Tarreau987c0632018-12-18 10:32:05 +01006755 }
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006756 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006757}
Willy Tarreau62f52692017-10-08 23:01:42 +02006758
/* Migrate the connection to the current thread.
 * Return 0 if successful, non-zero otherwise.
 * Expected to be called with the old thread lock held.
 */
static int h2_takeover(struct connection *conn, int orig_tid)
{
	struct h2c *h2c = conn->ctx;
	struct task *task;

	/* first move the FD itself over to the current thread */
	if (fd_takeover(conn->handle.fd, conn) != 0)
		return -1;

	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
		/* We failed to takeover the xprt, even if the connection may
		 * still be valid, flag it as error'd, as we have already
		 * taken over the fd, and wake the tasklet, so that it will
		 * destroy it.
		 */
		conn->flags |= CO_FL_ERROR;
		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
		return -1;
	}

	if (h2c->wait_event.events)
		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
					     h2c->wait_event.events, &h2c->wait_event);
	/* To let the tasklet know it should free itself, and do nothing else,
	 * set its context to NULL.
	 */
	h2c->wait_event.tasklet->context = NULL;
	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);

	/* kill the old timeout task on the original thread and recreate it
	 * bound to the current thread
	 */
	task = h2c->task;
	if (task) {
		task->context = NULL;
		h2c->task = NULL;
		__ha_barrier_store();
		task_kill(task);

		h2c->task = task_new_here();
		if (!h2c->task) {
			h2_release(h2c);
			return -1;
		}
		h2c->task->process = h2_timeout_task;
		h2c->task->context = h2c;
	}
	/* recreate the I/O tasklet on the current thread and resubscribe for reads */
	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet) {
		h2_release(h2c);
		return -1;
	}
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
		                   SUB_RETRY_RECV, &h2c->wait_event);

	return 0;
}
6818
Willy Tarreau62f52692017-10-08 23:01:42 +02006819/*******************************************************/
6820/* functions below are dedicated to the config parsers */
6821/*******************************************************/
6822
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006823/* config parser for global "tune.h2.header-table-size" */
6824static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006825 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006826 char **err)
6827{
6828 if (too_many_args(1, args, err, NULL))
6829 return -1;
6830
6831 h2_settings_header_table_size = atoi(args[1]);
6832 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
6833 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
6834 return -1;
6835 }
6836 return 0;
6837}
Willy Tarreau62f52692017-10-08 23:01:42 +02006838
Willy Tarreaue6baec02017-07-27 11:45:11 +02006839/* config parser for global "tune.h2.initial-window-size" */
6840static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006841 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02006842 char **err)
6843{
6844 if (too_many_args(1, args, err, NULL))
6845 return -1;
6846
6847 h2_settings_initial_window_size = atoi(args[1]);
6848 if (h2_settings_initial_window_size < 0) {
6849 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6850 return -1;
6851 }
6852 return 0;
6853}
6854
Willy Tarreau5242ef82017-07-27 11:47:28 +02006855/* config parser for global "tune.h2.max-concurrent-streams" */
6856static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006857 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02006858 char **err)
6859{
6860 if (too_many_args(1, args, err, NULL))
6861 return -1;
6862
6863 h2_settings_max_concurrent_streams = atoi(args[1]);
Willy Tarreau5a490b62019-01-31 10:39:51 +01006864 if ((int)h2_settings_max_concurrent_streams < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02006865 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6866 return -1;
6867 }
6868 return 0;
6869}
6870
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006871/* config parser for global "tune.h2.max-frame-size" */
6872static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006873 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006874 char **err)
6875{
6876 if (too_many_args(1, args, err, NULL))
6877 return -1;
6878
6879 h2_settings_max_frame_size = atoi(args[1]);
6880 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
6881 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
6882 return -1;
6883 }
6884 return 0;
6885}
6886
Willy Tarreau62f52692017-10-08 23:01:42 +02006887
/****************************************/
/* MUX initialization and instantiation */
/****************************************/
6891
6892/* The mux operations */
Willy Tarreau680b2bd2018-11-27 07:30:17 +01006893static const struct mux_ops h2_ops = {
Willy Tarreau62f52692017-10-08 23:01:42 +02006894 .init = h2_init,
Olivier Houchard21df6cc2018-09-14 23:21:44 +02006895 .wake = h2_wake,
Willy Tarreau62f52692017-10-08 23:01:42 +02006896 .snd_buf = h2_snd_buf,
Olivier Houchard511efea2018-08-16 15:30:32 +02006897 .rcv_buf = h2_rcv_buf,
Olivier Houchard6ff20392018-07-17 18:46:31 +02006898 .subscribe = h2_subscribe,
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006899 .unsubscribe = h2_unsubscribe,
Willy Tarreau62f52692017-10-08 23:01:42 +02006900 .attach = h2_attach,
Willy Tarreaufafd3982018-11-18 21:29:20 +01006901 .get_first_cs = h2_get_first_cs,
Willy Tarreau62f52692017-10-08 23:01:42 +02006902 .detach = h2_detach,
Olivier Houchard060ed432018-11-06 16:32:42 +01006903 .destroy = h2_destroy,
Olivier Houchardd540b362018-11-05 18:37:53 +01006904 .avail_streams = h2_avail_streams,
Willy Tarreau00f18a32019-01-26 12:19:01 +01006905 .used_streams = h2_used_streams,
Willy Tarreau62f52692017-10-08 23:01:42 +02006906 .shutr = h2_shutr,
6907 .shutw = h2_shutw,
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02006908 .ctl = h2_ctl,
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006909 .show_fd = h2_show_fd,
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006910 .takeover = h2_takeover,
Christopher Fauleta4600572021-03-08 15:28:28 +01006911 .flags = MX_FL_CLEAN_ABRT|MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
Willy Tarreau62f52692017-10-08 23:01:42 +02006912 .name = "H2",
6913};
6914
/* register the "h2" token for HTTP mode on both frontend and backend sides */
static struct mux_proto_list mux_proto_h2 =
	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
6919
/* config keyword parsers: global "tune.h2.*" settings handled by the
 * h2_parse_* functions above; registered at init below.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.h2.header-table-size",      h2_parse_header_table_size      },
	{ CFG_GLOBAL, "tune.h2.initial-window-size",    h2_parse_initial_window_size    },
	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.max-frame-size",         h2_parse_max_frame_size         },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006930
6931/* initialize internal structs after the config is parsed.
6932 * Returns zero on success, non-zero on error.
6933 */
6934static int init_h2()
6935{
6936 pool_head_hpack_tbl = create_pool("hpack_tbl",
6937 h2_settings_header_table_size,
6938 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01006939 if (!pool_head_hpack_tbl) {
6940 ha_alert("failed to allocate hpack_tbl memory pool\n");
6941 return (ERR_ALERT | ERR_FATAL);
6942 }
6943 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02006944}
6945
6946REGISTER_POST_CHECK(init_h2);