blob: 15021eca1cf1e9898f8a03316a2b8c7cf896217c [file] [log] [blame]
Willy Tarreau62f52692017-10-08 23:01:42 +02001/*
2 * HTTP/2 mux-demux for connections
3 *
4 * Copyright 2017 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreaudfd3de82020-06-04 23:46:14 +020013#include <import/eb32tree.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020014#include <haproxy/api.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020015#include <haproxy/cfgparse.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020016#include <haproxy/connection.h>
Willy Tarreaubf073142020-06-03 12:04:01 +020017#include <haproxy/h2.h>
Willy Tarreaube327fa2020-06-03 09:09:57 +020018#include <haproxy/hpack-dec.h>
19#include <haproxy/hpack-enc.h>
20#include <haproxy/hpack-tbl.h>
Willy Tarreau87735332020-06-04 09:08:41 +020021#include <haproxy/http_htx.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020022#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020023#include <haproxy/istbuf.h>
Willy Tarreau36979d92020-06-05 17:27:29 +020024#include <haproxy/log.h>
Willy Tarreau6131d6a2020-06-02 16:48:09 +020025#include <haproxy/net_helper.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020026#include <haproxy/session-t.h>
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +010027#include <haproxy/stats.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020028#include <haproxy/stream.h>
Willy Tarreau5e539c92020-06-04 20:45:39 +020029#include <haproxy/stream_interface.h>
Willy Tarreauc6d61d72020-06-04 19:02:42 +020030#include <haproxy/trace.h>
Willy Tarreau62f52692017-10-08 23:01:42 +020031
32
Willy Tarreauecb9dcd2019-01-03 12:00:17 +010033/* dummy streams returned for closed, error, refused, idle and states */
Willy Tarreau2a856182017-05-16 15:20:39 +020034static const struct h2s *h2_closed_stream;
Willy Tarreauecb9dcd2019-01-03 12:00:17 +010035static const struct h2s *h2_error_stream;
Willy Tarreau8d0d58b2018-12-23 18:29:12 +010036static const struct h2s *h2_refused_stream;
Willy Tarreau2a856182017-05-16 15:20:39 +020037static const struct h2s *h2_idle_stream;
38
Willy Tarreau5ab6b572017-09-22 08:05:00 +020039/* Connection flags (32 bit), in h2c->flags */
40#define H2_CF_NONE 0x00000000
41
Willy Tarreau2e5b60e2017-09-25 11:49:03 +020042/* Flags indicating why writing to the mux is blocked. */
43#define H2_CF_MUX_MALLOC 0x00000001 // mux blocked on lack of connection's mux buffer
44#define H2_CF_MUX_MFULL 0x00000002 // mux blocked on connection's mux buffer full
45#define H2_CF_MUX_BLOCK_ANY 0x00000003 // aggregate of the mux flags above
46
Willy Tarreau315d8072017-12-10 22:17:57 +010047/* Flags indicating why writing to the demux is blocked.
48 * The first two ones directly affect the ability for the mux to receive data
49 * from the connection. The other ones affect the mux's ability to demux
50 * received data.
51 */
Willy Tarreau2e5b60e2017-09-25 11:49:03 +020052#define H2_CF_DEM_DALLOC 0x00000004 // demux blocked on lack of connection's demux buffer
53#define H2_CF_DEM_DFULL 0x00000008 // demux blocked on connection's demux buffer full
Willy Tarreau315d8072017-12-10 22:17:57 +010054
Willy Tarreau2e5b60e2017-09-25 11:49:03 +020055#define H2_CF_DEM_MBUSY 0x00000010 // demux blocked on connection's mux side busy
56#define H2_CF_DEM_MROOM 0x00000020 // demux blocked on lack of room in mux buffer
57#define H2_CF_DEM_SALLOC 0x00000040 // demux blocked on lack of stream's request buffer
58#define H2_CF_DEM_SFULL 0x00000080 // demux blocked on stream request buffer full
Willy Tarreauf2101912018-07-19 10:11:38 +020059#define H2_CF_DEM_TOOMANY 0x00000100 // demux blocked waiting for some conn_streams to leave
60#define H2_CF_DEM_BLOCK_ANY 0x000001F0 // aggregate of the demux flags above except DALLOC/DFULL
Christopher Fauleta9cc1e82021-07-26 12:06:53 +020061 // (SHORT_READ is also excluded)
62
Christopher Faulet484f10a2021-11-10 17:50:10 +010063#define H2_CF_DEM_SHORT_READ 0x00000200 // demux blocked on incomplete frame
Willy Tarreau6c3eeca2022-08-18 11:19:57 +020064#define H2_CF_DEM_IN_PROGRESS 0x00000400 // demux in progress (dsi,dfl,dft are valid)
Willy Tarreau2e5b60e2017-09-25 11:49:03 +020065
Willy Tarreau081d4722017-05-16 21:51:05 +020066/* other flags */
Willy Tarreauf2101912018-07-19 10:11:38 +020067#define H2_CF_GOAWAY_SENT 0x00001000 // a GOAWAY frame was successfully sent
68#define H2_CF_GOAWAY_FAILED 0x00002000 // a GOAWAY frame failed to be sent
69#define H2_CF_WAIT_FOR_HS 0x00004000 // We did check that at least a stream was waiting for handshake
Willy Tarreaub3fb56d2018-10-03 13:56:38 +020070#define H2_CF_IS_BACK 0x00008000 // this is an outgoing connection
Willy Tarreau3d4631f2021-01-20 10:53:13 +010071#define H2_CF_WINDOW_OPENED 0x00010000 // demux increased window already advertised
72#define H2_CF_RCVD_SHUT 0x00020000 // a recv() attempt already failed on a shutdown
73#define H2_CF_END_REACHED 0x00040000 // pending data too short with RCVD_SHUT present
Willy Tarreau081d4722017-05-16 21:51:05 +020074
Amaury Denoyelle68993a12021-10-18 09:43:29 +020075#define H2_CF_RCVD_RFC8441 0x00100000 // settings from RFC8441 has been received indicating support for Extended CONNECT
Willy Tarreau8a4dca02022-01-13 16:00:12 +010076#define H2_CF_SHTS_UPDATED 0x00200000 // SETTINGS_HEADER_TABLE_SIZE updated
77#define H2_CF_DTSU_EMITTED 0x00400000 // HPACK Dynamic Table Size Update opcode emitted
Amaury Denoyelle68993a12021-10-18 09:43:29 +020078
Willy Tarreau54f4c192023-10-17 08:25:19 +020079/* Note: changed value from 2.7 (0x00000010 there) */
80#define H2_CF_WAIT_INLIST 0x00800000 // there is at least one stream blocked by another stream in send_list/fctl_list
81
Willy Tarreau5ab6b572017-09-22 08:05:00 +020082/* H2 connection state, in h2c->st0 */
83enum h2_cs {
84 H2_CS_PREFACE, // init done, waiting for connection preface
85 H2_CS_SETTINGS1, // preface OK, waiting for first settings frame
86 H2_CS_FRAME_H, // first settings frame ok, waiting for frame header
87 H2_CS_FRAME_P, // frame header OK, waiting for frame payload
Willy Tarreaua20a5192017-12-27 11:02:06 +010088 H2_CS_FRAME_A, // frame payload OK, trying to send ACK frame
89 H2_CS_FRAME_E, // frame payload OK, trying to send RST frame
Willy Tarreau5ab6b572017-09-22 08:05:00 +020090 H2_CS_ERROR, // send GOAWAY(errcode) and close the connection ASAP
91 H2_CS_ERROR2, // GOAWAY(errcode) sent, close the connection ASAP
92 H2_CS_ENTRIES // must be last
93} __attribute__((packed));
94
Willy Tarreau51330962019-05-26 09:38:07 +020095
Willy Tarreau9c218e72019-05-26 10:08:28 +020096/* 32 buffers: one for the ring's root, rest for the mbuf itself */
97#define H2C_MBUF_CNT 32
Willy Tarreau51330962019-05-26 09:38:07 +020098
Willy Tarreau5ab6b572017-09-22 08:05:00 +020099/* H2 connection descriptor */
100struct h2c {
101 struct connection *conn;
102
103 enum h2_cs st0; /* mux state */
104 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
105
106 /* 16 bit hole here */
107 uint32_t flags; /* connection flags: H2_CF_* */
Willy Tarreau2e2083a2019-01-31 10:34:07 +0100108 uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200109 int32_t max_id; /* highest ID known on this connection, <0 before preface */
110 uint32_t rcvd_c; /* newly received data to ACK for the connection */
Willy Tarreau1d7138e2022-06-08 16:32:22 +0200111 uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) or zero */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200112
113 /* states for the demux direction */
114 struct hpack_dht *ddht; /* demux dynamic header table */
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200115 struct buffer dbuf; /* demux buffer */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200116
117 int32_t dsi; /* demux stream ID (<0 = idle) */
118 int32_t dfl; /* demux frame length (if dsi >= 0) */
119 int8_t dft; /* demux frame type (if dsi >= 0) */
120 int8_t dff; /* demux frame flags (if dsi >= 0) */
Willy Tarreau05e5daf2017-12-11 15:17:36 +0100121 uint8_t dpl; /* demux pad length (part of dfl), init to 0 */
122 /* 8 bit hole here */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200123 int32_t last_sid; /* last processed stream ID for GOAWAY, <0 before preface */
124
125 /* states for the mux direction */
Willy Tarreau51330962019-05-26 09:38:07 +0200126 struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200127 int32_t msi; /* mux stream ID (<0 = idle) */
128 int32_t mfl; /* mux frame length (if dsi >= 0) */
129 int8_t mft; /* mux frame type (if dsi >= 0) */
130 int8_t mff; /* mux frame flags (if dsi >= 0) */
131 /* 16 bit hole here */
132 int32_t miw; /* mux initial window size for all new streams */
133 int32_t mws; /* mux window size. Can be negative. */
134 int32_t mfs; /* mux's max frame size */
135
Willy Tarreauea392822017-10-31 10:02:25 +0100136 int timeout; /* idle timeout duration in ticks */
Willy Tarreau599391a2017-11-24 10:16:00 +0100137 int shut_timeout; /* idle timeout duration in ticks after GOAWAY was sent */
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +0100138 int idle_start; /* date of the last time the connection went idle */
139 /* 32-bit hole here */
Willy Tarreau49745612017-12-03 18:56:02 +0100140 unsigned int nb_streams; /* number of streams in the tree */
Willy Tarreau7ac60e82018-07-19 09:04:05 +0200141 unsigned int nb_cs; /* number of attached conn_streams */
Willy Tarreaud64a3eb2019-01-23 10:22:21 +0100142 unsigned int nb_reserved; /* number of reserved streams */
Willy Tarreaue9634bd2019-01-23 10:25:10 +0100143 unsigned int stream_cnt; /* total number of streams seen */
Willy Tarreau0b37d652018-10-03 10:33:02 +0200144 struct proxy *proxy; /* the proxy this connection was created for */
Willy Tarreauea392822017-10-31 10:02:25 +0100145 struct task *task; /* timeout management task */
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100146 struct h2_counters *px_counters; /* h2 counters attached to proxy */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200147 struct eb_root streams_by_id; /* all active streams by their ID */
148 struct list send_list; /* list of blocked streams requesting to send */
149 struct list fctl_list; /* list of streams blocked by connection's fctl */
Willy Tarreau9edf6db2019-10-02 10:49:59 +0200150 struct list blocked_list; /* list of streams blocked for other reasons (e.g. sfctl, dep) */
Willy Tarreau44e973f2018-03-01 17:49:30 +0100151 struct buffer_wait buf_wait; /* wait list for buffer allocations */
Olivier Houchardfa8aa862018-10-10 18:25:41 +0200152 struct wait_event wait_event; /* To be used if we're waiting for I/Os */
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200153};
154
Willy Tarreau18312642017-10-11 07:57:07 +0200155/* H2 stream state, in h2s->st */
156enum h2_ss {
157 H2_SS_IDLE = 0, // idle
158 H2_SS_RLOC, // reserved(local)
159 H2_SS_RREM, // reserved(remote)
160 H2_SS_OPEN, // open
161 H2_SS_HREM, // half-closed(remote)
162 H2_SS_HLOC, // half-closed(local)
Willy Tarreau96060ba2017-10-16 18:34:34 +0200163 H2_SS_ERROR, // an error needs to be sent using RST_STREAM
Willy Tarreau18312642017-10-11 07:57:07 +0200164 H2_SS_CLOSED, // closed
165 H2_SS_ENTRIES // must be last
166} __attribute__((packed));
167
Willy Tarreau4c688eb2019-05-14 11:44:03 +0200168#define H2_SS_MASK(state) (1UL << (state))
169#define H2_SS_IDLE_BIT (1UL << H2_SS_IDLE)
170#define H2_SS_RLOC_BIT (1UL << H2_SS_RLOC)
171#define H2_SS_RREM_BIT (1UL << H2_SS_RREM)
172#define H2_SS_OPEN_BIT (1UL << H2_SS_OPEN)
173#define H2_SS_HREM_BIT (1UL << H2_SS_HREM)
174#define H2_SS_HLOC_BIT (1UL << H2_SS_HLOC)
175#define H2_SS_ERROR_BIT (1UL << H2_SS_ERROR)
176#define H2_SS_CLOSED_BIT (1UL << H2_SS_CLOSED)
Willy Tarreau4c688eb2019-05-14 11:44:03 +0200177
Willy Tarreau18312642017-10-11 07:57:07 +0200178/* HTTP/2 stream flags (32 bit), in h2s->flags */
179#define H2_SF_NONE 0x00000000
180#define H2_SF_ES_RCVD 0x00000001
181#define H2_SF_ES_SENT 0x00000002
182
183#define H2_SF_RST_RCVD 0x00000004 // received RST_STREAM
184#define H2_SF_RST_SENT 0x00000008 // sent RST_STREAM
185
Willy Tarreau2e5b60e2017-09-25 11:49:03 +0200186/* stream flags indicating the reason the stream is blocked */
187#define H2_SF_BLK_MBUSY 0x00000010 // blocked waiting for mux access (transient)
Willy Tarreau9edf6db2019-10-02 10:49:59 +0200188#define H2_SF_BLK_MROOM 0x00000020 // blocked waiting for room in the mux (must be in send list)
189#define H2_SF_BLK_MFCTL 0x00000040 // blocked due to mux fctl (must be in fctl list)
190#define H2_SF_BLK_SFCTL 0x00000080 // blocked due to stream fctl (must be in blocked list)
Willy Tarreau2e5b60e2017-09-25 11:49:03 +0200191#define H2_SF_BLK_ANY 0x000000F0 // any of the reasons above
192
Willy Tarreau454f9052017-10-26 19:40:35 +0200193/* stream flags indicating how data is supposed to be sent */
194#define H2_SF_DATA_CLEN 0x00000100 // data sent using content-length
Christopher Faulet7d247f02020-12-02 14:26:36 +0100195#define H2_SF_BODYLESS_RESP 0x00000200 /* Bodyless response message */
Christopher Fauletd0db4232021-01-22 11:46:30 +0100196#define H2_SF_BODY_TUNNEL 0x00000400 // Attempt to establish a Tunnelled stream (the result depends on the status code)
197
Willy Tarreau454f9052017-10-26 19:40:35 +0200198
Willy Tarreaud9464162020-01-10 18:25:07 +0100199#define H2_SF_NOTIFIED 0x00000800 // a paused stream was notified to try to send again
Willy Tarreau67434202017-11-06 20:20:51 +0100200#define H2_SF_HEADERS_SENT 0x00001000 // a HEADERS frame was sent for this stream
Willy Tarreauc4312d32017-11-07 12:01:53 +0100201#define H2_SF_OUTGOING_DATA 0x00002000 // set whenever we've seen outgoing data
Willy Tarreau67434202017-11-06 20:20:51 +0100202
Willy Tarreau6cc85a52019-01-02 15:49:20 +0100203#define H2_SF_HEADERS_RCVD 0x00004000 // a HEADERS frame was received for this stream
204
Willy Tarreau2c249eb2019-05-13 18:06:17 +0200205#define H2_SF_WANT_SHUTR 0x00008000 // a stream couldn't shutr() (mux full/busy)
206#define H2_SF_WANT_SHUTW 0x00010000 // a stream couldn't shutw() (mux full/busy)
Willy Tarreau3cf69fe2019-05-14 10:44:40 +0200207#define H2_SF_KILL_CONN 0x00020000 // kill the whole connection with this stream
Willy Tarreau2c249eb2019-05-13 18:06:17 +0200208
Amaury Denoyelle5fb48ea2020-12-11 17:53:04 +0100209#define H2_SF_EXT_CONNECT_SENT 0x00040000 // rfc 8441 an Extended CONNECT has been sent
Amaury Denoyelleefe22762020-12-11 17:53:08 +0100210#define H2_SF_EXT_CONNECT_RCVD 0x00080000 // rfc 8441 an Extended CONNECT has been received and parsed
Christopher Fauletd0db4232021-01-22 11:46:30 +0100211
Amaury Denoyelle5fb48ea2020-12-11 17:53:04 +0100212#define H2_SF_TUNNEL_ABRT 0x00100000 // A tunnel attempt was aborted
Willy Tarreau2c249eb2019-05-13 18:06:17 +0200213
Willy Tarreau18312642017-10-11 07:57:07 +0200214/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
Christopher Fauletfafd1b02020-11-03 18:25:52 +0100215 * it is being processed in the internal HTTP representation (HTX).
Willy Tarreau18312642017-10-11 07:57:07 +0200216 */
217struct h2s {
218 struct conn_stream *cs;
Olivier Houchardf502aca2018-12-14 19:42:40 +0100219 struct session *sess;
Willy Tarreau18312642017-10-11 07:57:07 +0200220 struct h2c *h2c;
Willy Tarreau18312642017-10-11 07:57:07 +0200221 struct eb32_node by_id; /* place in h2c's streams_by_id */
Willy Tarreau18312642017-10-11 07:57:07 +0200222 int32_t id; /* stream ID */
223 uint32_t flags; /* H2_SF_* */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +0200224 int sws; /* stream window size, to be added to the mux's initial window size */
Willy Tarreau18312642017-10-11 07:57:07 +0200225 enum h2_err errcode; /* H2 err code (H2_ERR_*) */
226 enum h2_ss st;
Willy Tarreau9c5e22e2018-09-11 19:22:14 +0200227 uint16_t status; /* HTTP response status */
Willy Tarreau1915ca22019-01-24 11:49:37 +0100228 unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
Olivier Houchard638b7992018-08-16 15:41:52 +0200229 struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
Willy Tarreauf96508a2020-01-10 11:12:48 +0100230 struct wait_event *subs; /* recv wait_event the conn_stream associated is waiting on (via h2_subscribe) */
Olivier Houchardfa8aa862018-10-10 18:25:41 +0200231 struct list list; /* To be used when adding in h2c->send_list or h2c->fctl_lsit */
Willy Tarreau5723f292020-01-10 15:16:57 +0100232 struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to send an RST after we failed to,
233 * in case there's no other subscription to do it */
Amaury Denoyelle74162742020-12-11 17:53:05 +0100234
235 char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
Willy Tarreau18312642017-10-11 07:57:07 +0200236};
Willy Tarreau5ab6b572017-09-22 08:05:00 +0200237
Willy Tarreauc6405142017-09-21 20:23:50 +0200238/* descriptor for an h2 frame header */
239struct h2_fh {
240 uint32_t len; /* length, host order, 24 bits */
241 uint32_t sid; /* stream id, host order, 31 bits */
242 uint8_t ft; /* frame type */
243 uint8_t ff; /* frame flags */
244};
245
Willy Tarreau12ae2122019-08-08 18:23:12 +0200246/* trace source and events */
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200247static void h2_trace(enum trace_level level, uint64_t mask, \
248 const struct trace_source *src,
249 const struct ist where, const struct ist func,
250 const void *a1, const void *a2, const void *a3, const void *a4);
Willy Tarreau12ae2122019-08-08 18:23:12 +0200251
252/* The event representation is split like this :
253 * strm - application layer
254 * h2s - internal H2 stream
255 * h2c - internal H2 connection
256 * conn - external connection
257 *
258 */
259static const struct trace_event h2_trace_events[] = {
260#define H2_EV_H2C_NEW (1ULL << 0)
Willy Tarreau87951942019-08-30 07:34:36 +0200261 { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200262#define H2_EV_H2C_RECV (1ULL << 1)
Willy Tarreau87951942019-08-30 07:34:36 +0200263 { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200264#define H2_EV_H2C_SEND (1ULL << 2)
Willy Tarreau87951942019-08-30 07:34:36 +0200265 { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200266#define H2_EV_H2C_FCTL (1ULL << 3)
Willy Tarreau87951942019-08-30 07:34:36 +0200267 { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200268#define H2_EV_H2C_BLK (1ULL << 4)
Willy Tarreau87951942019-08-30 07:34:36 +0200269 { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200270#define H2_EV_H2C_WAKE (1ULL << 5)
Willy Tarreau87951942019-08-30 07:34:36 +0200271 { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200272#define H2_EV_H2C_END (1ULL << 6)
Willy Tarreau87951942019-08-30 07:34:36 +0200273 { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200274#define H2_EV_H2C_ERR (1ULL << 7)
Willy Tarreau87951942019-08-30 07:34:36 +0200275 { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200276#define H2_EV_RX_FHDR (1ULL << 8)
Willy Tarreau87951942019-08-30 07:34:36 +0200277 { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200278#define H2_EV_RX_FRAME (1ULL << 9)
Willy Tarreau87951942019-08-30 07:34:36 +0200279 { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200280#define H2_EV_RX_EOI (1ULL << 10)
Willy Tarreau87951942019-08-30 07:34:36 +0200281 { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200282#define H2_EV_RX_PREFACE (1ULL << 11)
Willy Tarreau87951942019-08-30 07:34:36 +0200283 { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200284#define H2_EV_RX_DATA (1ULL << 12)
Willy Tarreau87951942019-08-30 07:34:36 +0200285 { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200286#define H2_EV_RX_HDR (1ULL << 13)
Willy Tarreau87951942019-08-30 07:34:36 +0200287 { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200288#define H2_EV_RX_PRIO (1ULL << 14)
Willy Tarreau87951942019-08-30 07:34:36 +0200289 { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200290#define H2_EV_RX_RST (1ULL << 15)
Willy Tarreau87951942019-08-30 07:34:36 +0200291 { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200292#define H2_EV_RX_SETTINGS (1ULL << 16)
Willy Tarreau87951942019-08-30 07:34:36 +0200293 { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200294#define H2_EV_RX_PUSH (1ULL << 17)
Willy Tarreau87951942019-08-30 07:34:36 +0200295 { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200296#define H2_EV_RX_PING (1ULL << 18)
Willy Tarreau87951942019-08-30 07:34:36 +0200297 { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200298#define H2_EV_RX_GOAWAY (1ULL << 19)
Willy Tarreau87951942019-08-30 07:34:36 +0200299 { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200300#define H2_EV_RX_WU (1ULL << 20)
Willy Tarreau87951942019-08-30 07:34:36 +0200301 { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200302#define H2_EV_RX_CONT (1ULL << 21)
Willy Tarreau87951942019-08-30 07:34:36 +0200303 { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200304#define H2_EV_TX_FRAME (1ULL << 22)
Willy Tarreau87951942019-08-30 07:34:36 +0200305 { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200306#define H2_EV_TX_EOI (1ULL << 23)
Willy Tarreau87951942019-08-30 07:34:36 +0200307 { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200308#define H2_EV_TX_PREFACE (1ULL << 24)
Willy Tarreau87951942019-08-30 07:34:36 +0200309 { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200310#define H2_EV_TX_DATA (1ULL << 25)
Willy Tarreau87951942019-08-30 07:34:36 +0200311 { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200312#define H2_EV_TX_HDR (1ULL << 26)
Willy Tarreau87951942019-08-30 07:34:36 +0200313 { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200314#define H2_EV_TX_PRIO (1ULL << 27)
Willy Tarreau87951942019-08-30 07:34:36 +0200315 { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200316#define H2_EV_TX_RST (1ULL << 28)
Willy Tarreau87951942019-08-30 07:34:36 +0200317 { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200318#define H2_EV_TX_SETTINGS (1ULL << 29)
Willy Tarreau87951942019-08-30 07:34:36 +0200319 { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200320#define H2_EV_TX_PUSH (1ULL << 30)
Willy Tarreau87951942019-08-30 07:34:36 +0200321 { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200322#define H2_EV_TX_PING (1ULL << 31)
Willy Tarreau87951942019-08-30 07:34:36 +0200323 { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200324#define H2_EV_TX_GOAWAY (1ULL << 32)
Willy Tarreau87951942019-08-30 07:34:36 +0200325 { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200326#define H2_EV_TX_WU (1ULL << 33)
Willy Tarreau87951942019-08-30 07:34:36 +0200327 { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200328#define H2_EV_TX_CONT (1ULL << 34)
Willy Tarreau87951942019-08-30 07:34:36 +0200329 { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200330#define H2_EV_H2S_NEW (1ULL << 35)
Willy Tarreau87951942019-08-30 07:34:36 +0200331 { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200332#define H2_EV_H2S_RECV (1ULL << 36)
Willy Tarreau87951942019-08-30 07:34:36 +0200333 { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200334#define H2_EV_H2S_SEND (1ULL << 37)
Willy Tarreau87951942019-08-30 07:34:36 +0200335 { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200336#define H2_EV_H2S_FCTL (1ULL << 38)
Willy Tarreau87951942019-08-30 07:34:36 +0200337 { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200338#define H2_EV_H2S_BLK (1ULL << 39)
Willy Tarreau87951942019-08-30 07:34:36 +0200339 { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200340#define H2_EV_H2S_WAKE (1ULL << 40)
Willy Tarreau87951942019-08-30 07:34:36 +0200341 { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200342#define H2_EV_H2S_END (1ULL << 41)
Willy Tarreau87951942019-08-30 07:34:36 +0200343 { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200344#define H2_EV_H2S_ERR (1ULL << 42)
Willy Tarreau87951942019-08-30 07:34:36 +0200345 { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200346#define H2_EV_STRM_NEW (1ULL << 43)
Willy Tarreau87951942019-08-30 07:34:36 +0200347 { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200348#define H2_EV_STRM_RECV (1ULL << 44)
Willy Tarreau87951942019-08-30 07:34:36 +0200349 { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200350#define H2_EV_STRM_SEND (1ULL << 45)
Willy Tarreau87951942019-08-30 07:34:36 +0200351 { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200352#define H2_EV_STRM_FULL (1ULL << 46)
Willy Tarreau87951942019-08-30 07:34:36 +0200353 { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200354#define H2_EV_STRM_WAKE (1ULL << 47)
Willy Tarreau87951942019-08-30 07:34:36 +0200355 { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200356#define H2_EV_STRM_SHUT (1ULL << 48)
Willy Tarreau87951942019-08-30 07:34:36 +0200357 { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200358#define H2_EV_STRM_END (1ULL << 49)
Willy Tarreau87951942019-08-30 07:34:36 +0200359 { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200360#define H2_EV_STRM_ERR (1ULL << 50)
Willy Tarreau87951942019-08-30 07:34:36 +0200361 { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200362#define H2_EV_PROTO_ERR (1ULL << 51)
Willy Tarreau87951942019-08-30 07:34:36 +0200363 { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
Willy Tarreau12ae2122019-08-08 18:23:12 +0200364 { }
365};
366
367static const struct name_desc h2_trace_lockon_args[4] = {
368 /* arg1 */ { /* already used by the connection */ },
369 /* arg2 */ { .name="h2s", .desc="H2 stream" },
370 /* arg3 */ { },
371 /* arg4 */ { }
372};
373
374static const struct name_desc h2_trace_decoding[] = {
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200375#define H2_VERB_CLEAN 1
376 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
377#define H2_VERB_MINIMAL 2
Willy Tarreau12ae2122019-08-08 18:23:12 +0200378 { .name="minimal", .desc="report only h2c/h2s state and flags, no real decoding" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200379#define H2_VERB_SIMPLE 3
Willy Tarreau12ae2122019-08-08 18:23:12 +0200380 { .name="simple", .desc="add request/response status line or frame info when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200381#define H2_VERB_ADVANCED 4
Willy Tarreau12ae2122019-08-08 18:23:12 +0200382 { .name="advanced", .desc="add header fields or frame decoding when available" },
Willy Tarreauf7dd5192019-08-30 07:21:18 +0200383#define H2_VERB_COMPLETE 5
Willy Tarreau12ae2122019-08-08 18:23:12 +0200384 { .name="complete", .desc="add full data dump when available" },
385 { /* end */ }
386};
387
Willy Tarreau6eb3d372021-04-10 19:29:26 +0200388static struct trace_source trace_h2 __read_mostly = {
Willy Tarreau12ae2122019-08-08 18:23:12 +0200389 .name = IST("h2"),
390 .desc = "HTTP/2 multiplexer",
391 .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
Willy Tarreaudb3cfff2019-08-19 17:56:27 +0200392 .default_cb = h2_trace,
Willy Tarreau12ae2122019-08-08 18:23:12 +0200393 .known_events = h2_trace_events,
394 .lockon_args = h2_trace_lockon_args,
395 .decoding = h2_trace_decoding,
396 .report_events = ~0, // report everything by default
397};
398
399#define TRACE_SOURCE &trace_h2
400INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
401
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100402/* h2 stats module */
403enum {
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100404 H2_ST_HEADERS_RCVD,
405 H2_ST_DATA_RCVD,
406 H2_ST_SETTINGS_RCVD,
407 H2_ST_RST_STREAM_RCVD,
408 H2_ST_GOAWAY_RCVD,
409
Amaury Denoyellea8879232020-10-27 17:16:03 +0100410 H2_ST_CONN_PROTO_ERR,
411 H2_ST_STRM_PROTO_ERR,
412 H2_ST_RST_STREAM_RESP,
413 H2_ST_GOAWAY_RESP,
414
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100415 H2_ST_OPEN_CONN,
416 H2_ST_OPEN_STREAM,
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100417 H2_ST_TOTAL_CONN,
418 H2_ST_TOTAL_STREAM,
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100419
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100420 H2_STATS_COUNT /* must be the last member of the enum */
421};
422
/* Human-readable names and descriptions of the h2 stats fields, indexed
 * by the H2_ST_* enum above.
 */
static struct name_desc h2_stats[] = {
	[H2_ST_HEADERS_RCVD]    = { .name = "h2_headers_rcvd",
	                            .desc = "Total number of received HEADERS frames" },
	[H2_ST_DATA_RCVD]       = { .name = "h2_data_rcvd",
	                            .desc = "Total number of received DATA frames" },
	[H2_ST_SETTINGS_RCVD]   = { .name = "h2_settings_rcvd",
	                            .desc = "Total number of received SETTINGS frames" },
	[H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
	                            .desc = "Total number of received RST_STREAM frames" },
	[H2_ST_GOAWAY_RCVD]     = { .name = "h2_goaway_rcvd",
	                            .desc = "Total number of received GOAWAY frames" },

	[H2_ST_CONN_PROTO_ERR]  = { .name = "h2_detected_conn_protocol_errors",
	                            .desc = "Total number of connection protocol errors" },
	[H2_ST_STRM_PROTO_ERR]  = { .name = "h2_detected_strm_protocol_errors",
	                            .desc = "Total number of stream protocol errors" },
	[H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
	                            .desc = "Total number of RST_STREAM sent on detected error" },
	[H2_ST_GOAWAY_RESP]     = { .name = "h2_goaway_resp",
	                            .desc = "Total number of GOAWAY sent on detected error" },

	[H2_ST_OPEN_CONN]       = { .name = "h2_open_connections",
	                            .desc = "Count of currently open connections" },
	[H2_ST_OPEN_STREAM]     = { .name = "h2_backend_open_streams",
	                            .desc = "Count of currently open streams" },
	[H2_ST_TOTAL_CONN]      = { .name = "h2_total_connections",
	                            .desc = "Total number of connections" },
	[H2_ST_TOTAL_STREAM]    = { .name = "h2_backend_total_streams",
	                            .desc = "Total number of streams" },
};
453
/* Live counters backing the h2 stats module. Field order mirrors the
 * H2_ST_* enum; h2_fill_stats() below exports them.
 */
static struct h2_counters {
	long long headers_rcvd;    /* total number of HEADERS frame received */
	long long data_rcvd;       /* total number of DATA frame received */
	long long settings_rcvd;   /* total number of SETTINGS frame received */
	long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
	long long goaway_rcvd;     /* total number of GOAWAY frame received */

	long long conn_proto_err;  /* total number of connection-level protocol errors detected */
	long long strm_proto_err;  /* total number of stream-level protocol errors detected */
	long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
	long long goaway_resp;     /* total number of GOAWAY frame sent on error */

	long long open_conns;      /* count of currently open connections */
	long long open_streams;    /* count of currently open streams */
	long long total_conns;     /* total number of connections */
	long long total_streams;   /* total number of streams */
} h2_counters;
471
472static void h2_fill_stats(void *data, struct field *stats)
473{
Amaury Denoyelle2dec1eb2020-10-27 17:16:02 +0100474 struct h2_counters *counters = data;
475
476 stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
477 stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
478 stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
479 stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
480 stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
Amaury Denoyellea8879232020-10-27 17:16:03 +0100481
482 stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
483 stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
484 stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
485 stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
Amaury Denoyelle66942c12020-10-27 17:16:04 +0100486
Amaury Denoyellee7b891f2020-11-03 15:04:45 +0100487 stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
488 stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
489 stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
490 stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
Amaury Denoyelle3238b3f2020-10-27 17:16:00 +0100491}
492
/* Registration descriptor for the h2 stats module: exposes h2_stats[]
 * backed by h2_counters on both frontend and backend proxy domains, and
 * marks the counters as clearable.
 */
static struct stats_module h2_stats_module = {
	.name          = "h2",
	.fill_stats    = h2_fill_stats,
	.stats         = h2_stats,
	.stats_count   = H2_STATS_COUNT,
	.counters      = &h2_counters,
	.counters_size = sizeof(h2_counters),
	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
	.clearable     = 1,
};

INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
505
/* the h2c connection pool */
DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));

/* the h2s stream pool */
DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));

/* The default connection window size is 65535, it may only be enlarged using
 * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
 * we'll pretend we already received the difference between the two to send
 * an equivalent window update to enlarge it to 2G-1.
 */
#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)

/* maximum amount of data we're OK with re-aligning for buffer optimizations */
#define MAX_DATA_REALIGN 1024

/* a few settings from the global section, presumably overridable through
 * the configuration parser (values below are the compile-time defaults)
 */
static int h2_settings_header_table_size = 4096;    /* initial value */
static int h2_settings_initial_window_size = 65535; /* initial value */
static unsigned int h2_settings_max_concurrent_streams = 100;
static int h2_settings_max_frame_size = 0;          /* unset */
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200527
/* a dummy closed stream: returned in place of a real h2s so that generic
 * code can keep dereferencing a valid stream for closed-stream frames
 */
static const struct h2s *h2_closed_stream = &(const struct h2s){
	.cs        = NULL,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_STREAM_CLOSED,
	.flags     = H2_SF_RST_RCVD,
	.id        = 0,
};
537
/* a dummy closed stream returning a PROTOCOL_ERROR error */
static const struct h2s *h2_error_stream = &(const struct h2s){
	.cs        = NULL,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_PROTOCOL_ERROR,
	.flags     = 0,
	.id        = 0,
};
547
/* a dummy closed stream returning a REFUSED_STREAM error */
static const struct h2s *h2_refused_stream = &(const struct h2s){
	.cs        = NULL,
	.h2c       = NULL,
	.st        = H2_SS_CLOSED,
	.errcode   = H2_ERR_REFUSED_STREAM,
	.flags     = 0,
	.id        = 0,
};
557
/* and a dummy idle stream for use with any unannounced stream */
static const struct h2s *h2_idle_stream = &(const struct h2s){
	.cs        = NULL,
	.h2c       = NULL,
	.st        = H2_SS_IDLE,
	.errcode   = H2_ERR_STREAM_CLOSED,
	.id        = 0,
};
566
Willy Tarreau54f4c192023-10-17 08:25:19 +0200567
/* forward declarations of functions used before their definition */
struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
static int h2_send(struct h2c *h2c);
static int h2_recv(struct h2c *h2c);
static int h2_process(struct h2c *h2c);
/* h2_io_cb is exported to see it resolved in "show fd" */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
static int h2_frt_transfer_data(struct h2s *h2s);
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess);
static void h2s_alert(struct h2s *h2s);
static inline void h2_remove_from_list(struct h2s *h2s);
Willy Tarreaufe20e5b2017-07-27 11:42:14 +0200581
Willy Tarreauab2ec452019-08-30 07:07:08 +0200582/* returns a h2c state as an abbreviated 3-letter string, or "???" if unknown */
583static inline const char *h2c_st_to_str(enum h2_cs st)
584{
585 switch (st) {
586 case H2_CS_PREFACE: return "PRF";
587 case H2_CS_SETTINGS1: return "STG";
588 case H2_CS_FRAME_H: return "FRH";
589 case H2_CS_FRAME_P: return "FRP";
590 case H2_CS_FRAME_A: return "FRA";
591 case H2_CS_FRAME_E: return "FRE";
592 case H2_CS_ERROR: return "ERR";
593 case H2_CS_ERROR2: return "ER2";
594 default: return "???";
595 }
596}
597
598/* returns a h2s state as an abbreviated 3-letter string, or "???" if unknown */
599static inline const char *h2s_st_to_str(enum h2_ss st)
600{
601 switch (st) {
602 case H2_SS_IDLE: return "IDL"; // idle
603 case H2_SS_RLOC: return "RSL"; // reserved local
604 case H2_SS_RREM: return "RSR"; // reserved remote
605 case H2_SS_OPEN: return "OPN"; // open
606 case H2_SS_HREM: return "HCR"; // half-closed remote
607 case H2_SS_HLOC: return "HCL"; // half-closed local
608 case H2_SS_ERROR : return "ERR"; // error
609 case H2_SS_CLOSED: return "CLO"; // closed
610 default: return "???";
611 }
612}
613
/* the H2 traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
 * that arg3, if non-null, is either of type htx for tx headers, or of type
 * buffer for everything else.
 */
static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct h2c *h2c = conn ? conn->ctx : NULL;
	const struct h2s *h2s = a2;
	const struct buffer *buf = a3;
	const struct htx *htx;
	int pos;

	if (!h2c) // nothing to add
		return;

	/* above "clean" verbosity, decorate the trace with connection and
	 * stream state, error codes and pending frame header info.
	 */
	if (src->verbosity > H2_VERB_CLEAN) {
		chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));

		if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
			conn_append_debug_info(&trace_buf, conn, " : ");

		if (h2c->errcode)
			chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);

		if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
		    (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
			chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
		}

		if (h2s) {
			/* dummy streams have id <= 0; show the demux stream id instead */
			if (h2s->id <= 0)
				chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
			chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
			if (h2s->id && h2s->errcode)
				chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
		}
	}

	/* Let's dump decoded requests and responses right after parsing. They
	 * are traced at level USER with a few recognizable flags.
	 */
	if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
	     mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
		htx = htxbuf(buf); // recv req/res
	else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
		htx = a3; // send req/res
	else
		htx = NULL;

	if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
		const struct htx_blk *blk = htx_get_blk(htx, pos);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		/* dump the start line (method/uri/version or version/status/reason) */
		if (type == HTX_BLK_REQ_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
		else if (type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
				      h2s ? h2s->id : h2c->dsi,
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}
}
686
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200687
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100688/* Detect a pending read0 for a H2 connection. It happens if a read0 was
689 * already reported on a previous xprt->rcvbuf() AND a frame parser failed
690 * to parse pending data, confirming no more progress is possible because
691 * we're facing a truncated frame. The function returns 1 to report a read0
692 * or 0 otherwise.
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200693 */
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100694static inline int h2c_read0_pending(struct h2c *h2c)
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200695{
Willy Tarreau3d4631f2021-01-20 10:53:13 +0100696 return !!(h2c->flags & H2_CF_END_REACHED);
Christopher Fauletaade4ed2020-10-08 15:38:41 +0200697}
698
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200699/* returns true if the connection is allowed to expire, false otherwise. A
Willy Tarreaud34d25a2022-03-18 14:59:54 +0100700 * connection may expire when it has no attached streams. As long as streams
701 * are attached, the application layer is responsible for timeout management,
702 * and each layer will detach when it doesn't want to wait anymore. When the
703 * last one leaves, the connection must take over timeout management.
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200704 */
705static inline int h2c_may_expire(const struct h2c *h2c)
706{
Willy Tarreaud34d25a2022-03-18 14:59:54 +0100707 return !h2c->nb_cs;
Willy Tarreauc2ea47f2019-10-01 10:12:00 +0200708}
709
/* Update the h2c task's expiration date if needed, and requeue it. The
 * applicable timeout depends on the connection's phase: shutting down,
 * flushing output, idle between requests, or waiting for a first request.
 */
static void h2c_update_timeout(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	if (!h2c->task)
		goto leave;

	if (h2c_may_expire(h2c)) {
		/* no more streams attached */
		if (h2c->last_sid >= 0) {
			/* GOAWAY sent, closing in progress */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->shut_timeout);
		} else if (br_data(h2c->mbuf)) {
			/* pending output data: always the regular data timeout */
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
		} else if (!(h2c->flags & H2_CF_IS_BACK) && h2c->max_id > 0 && !b_data(&h2c->dbuf)) {
			/* idle after having seen one stream => keep-alive,
			 * falling back to http-request when no keep-alive
			 * timeout is configured.
			 */
			int to;

			if (tick_isset(h2c->proxy->timeout.httpka))
				to = h2c->proxy->timeout.httpka;
			else
				to = h2c->proxy->timeout.httpreq;

			h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
		} else {
			/* before first request, or started to deserialize a
			 * new req => http-request, but only set, not refresh.
			 */
			int exp = (h2c->flags & H2_CF_IS_BACK) ? TICK_ETERNITY : h2c->proxy->timeout.httpreq;
			h2c->task->expire = tick_add_ifset(h2c->idle_start, exp);
		}
		/* if a timeout above was not set, fall back to the default one */
		if (!tick_isset(h2c->task->expire))
			h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
	} else {
		/* streams are attached: the upper layers handle timeouts, so
		 * the connection itself never expires.
		 */
		h2c->task->expire = TICK_ETERNITY;
	}
	task_queue(h2c->task);
 leave:
	TRACE_LEAVE(H2_EV_H2C_WAKE);
}
753
/* Returns non-zero if the connection may be released: no stream remains
 * attached AND one of the terminal conditions below holds.
 */
static __inline int
h2c_is_dead(const struct h2c *h2c)
{
	if (eb_is_empty(&h2c->streams_by_id) &&     /* don't close if streams exist */
	    ((h2c->conn->flags & CO_FL_ERROR) ||    /* errors close immediately */
	     (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
	     (!(h2c->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
	     (!br_data(h2c->mbuf) &&  /* mux buffer empty, also process clean events below */
	      (conn_xprt_read0_pending(h2c->conn) ||
	       (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
		return 1;

	return 0;
}
768
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200769/*****************************************************/
770/* functions below are for dynamic buffer management */
771/*****************************************************/
772
Willy Tarreau315d8072017-12-10 22:17:57 +0100773/* indicates whether or not the we may call the h2_recv() function to attempt
774 * to receive data into the buffer and/or demux pending data. The condition is
775 * a bit complex due to some API limits for now. The rules are the following :
776 * - if an error or a shutdown was detected on the connection and the buffer
777 * is empty, we must not attempt to receive
778 * - if the demux buf failed to be allocated, we must not try to receive and
779 * we know there is nothing pending
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100780 * - if no flag indicates a blocking condition, we may attempt to receive,
781 * regardless of whether the demux buffer is full or not, so that only
782 * de demux part decides whether or not to block. This is needed because
783 * the connection API indeed prevents us from re-enabling receipt that is
784 * already enabled in a polled state, so we must always immediately stop
785 * as soon as the demux can't proceed so as never to hit an end of read
786 * with data pending in the buffers.
Willy Tarreau315d8072017-12-10 22:17:57 +0100787 * - otherwise must may not attempt
788 */
789static inline int h2_recv_allowed(const struct h2c *h2c)
790{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200791 if (b_data(&h2c->dbuf) == 0 &&
Willy Tarreau315d8072017-12-10 22:17:57 +0100792 (h2c->st0 >= H2_CS_ERROR ||
793 h2c->conn->flags & CO_FL_ERROR ||
794 conn_xprt_read0_pending(h2c->conn)))
795 return 0;
796
797 if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
Willy Tarreau6042aeb2017-12-12 11:01:44 +0100798 !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
Willy Tarreau315d8072017-12-10 22:17:57 +0100799 return 1;
800
801 return 0;
802}
803
Willy Tarreau47b515a2018-12-21 16:09:41 +0100804/* restarts reading on the connection if it was not enabled */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200805static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
Willy Tarreau47b515a2018-12-21 16:09:41 +0100806{
807 if (!h2_recv_allowed(h2c))
808 return;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +0200809 if ((!consider_buffer || !b_data(&h2c->dbuf))
810 && (h2c->wait_event.events & SUB_RETRY_RECV))
Willy Tarreau47b515a2018-12-21 16:09:41 +0100811 return;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +0200812 tasklet_wakeup(h2c->wait_event.tasklet);
Willy Tarreau47b515a2018-12-21 16:09:41 +0100813}
814
815
Willy Tarreaufa1d3572019-01-31 10:31:51 +0100816/* returns true if the front connection has too many conn_streams attached */
817static inline int h2_frt_has_too_many_cs(const struct h2c *h2c)
Willy Tarreauf2101912018-07-19 10:11:38 +0200818{
Willy Tarreaua8754662018-12-23 20:43:58 +0100819 return h2c->nb_cs > h2_settings_max_concurrent_streams;
Willy Tarreauf2101912018-07-19 10:11:38 +0200820}
821
/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
 * flags are used to figure what buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 */
static int h2_buf_available(void *target)
{
	struct h2c *h2c = target;
	struct h2s *h2s;

	/* demux buffer was awaited */
	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
		h2c->flags &= ~H2_CF_DEM_DALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	/* mux buffer was awaited; also unblocks the demux side if it was
	 * waiting for room in the mux buffer.
	 */
	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
		h2c->flags &= ~H2_CF_MUX_MALLOC;

		if (h2c->flags & H2_CF_DEM_MROOM) {
			h2c->flags &= ~H2_CF_DEM_MROOM;
			h2c_restart_reading(h2c, 1);
		}
		return 1;
	}

	/* rx buffer of the stream currently being demuxed was awaited */
	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
	    b_alloc(&h2s->rxbuf)) {
		h2c->flags &= ~H2_CF_DEM_SALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	return 0;
}
858
/* Tries to allocate buffer <bptr> for connection <h2c>. On failure, the
 * connection is enqueued in the current thread's buffer wait queue so that
 * h2_buf_available() gets called back once buffers are released. Returns
 * the allocated buffer or NULL.
 */
static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
	    unlikely((buf = b_alloc(bptr)) == NULL)) {
		h2c->buf_wait.target = h2c;
		h2c->buf_wait.wakeup_cb = h2_buf_available;
		LIST_APPEND(&ti->buffer_wq, &h2c->buf_wait.list);
	}
	return buf;
}
871
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200872static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
Willy Tarreau14398122017-09-22 14:26:04 +0200873{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200874 if (bptr->size) {
Willy Tarreau44e973f2018-03-01 17:49:30 +0100875 b_free(bptr);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100876 offer_buffers(NULL, 1);
Willy Tarreau14398122017-09-22 14:26:04 +0200877 }
878}
879
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200880static inline void h2_release_mbuf(struct h2c *h2c)
881{
882 struct buffer *buf;
883 unsigned int count = 0;
884
885 while (b_size(buf = br_head_pick(h2c->mbuf))) {
886 b_free(buf);
887 count++;
888 }
889 if (count)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100890 offer_buffers(NULL, count);
Willy Tarreau2e3c0002019-05-26 09:45:23 +0200891}
892
/* returns the number of allocatable outgoing streams for the connection taking
 * the last_sid and the reserved ones into account.
 */
static inline int h2_streams_left(const struct h2c *h2c)
{
	int ret;

	/* consider the number of outgoing streams we're allowed to create before
	 * reaching the last GOAWAY frame seen. max_id is the last assigned id,
	 * nb_reserved is the number of streams which don't yet have an ID.
	 * The unsigned division by 2 accounts for client-initiated streams
	 * using only odd IDs.
	 */
	ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
	ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
	if (ret < 0)
		ret = 0;
	return ret;
}
910
Willy Tarreau00f18a32019-01-26 12:19:01 +0100911/* returns the number of streams in use on a connection to figure if it's
912 * idle or not. We check nb_cs and not nb_streams as the caller will want
913 * to know if it was the last one after a detach().
914 */
915static int h2_used_streams(struct connection *conn)
916{
917 struct h2c *h2c = conn->ctx;
918
919 return h2c->nb_cs;
920}
921
/* returns the number of concurrent streams available on the connection,
 * taking into account GOAWAY/error states, the negotiated concurrency
 * limit, remaining stream IDs, and the server's max_reuse setting.
 */
static int h2_avail_streams(struct connection *conn)
{
	struct server *srv = objt_server(conn->target);
	struct h2c *h2c = conn->ctx;
	int ret1, ret2;

	/* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
	 * streams on the connection.
	 */
	if (h2c->last_sid >= 0)
		return 0;

	if (h2c->st0 >= H2_CS_ERROR)
		return 0;

	/* note: may be negative if a SETTINGS frame changes the limit */
	ret1 = h2c->streams_limit - h2c->nb_streams;

	/* we must also consider the limit imposed by stream IDs */
	ret2 = h2_streams_left(h2c);
	ret1 = MIN(ret1, ret2);
	if (ret1 > 0 && srv && srv->max_reuse >= 0) {
		ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
		ret1 = MIN(ret1, ret2);
	}
	return ret1;
}
950
Willy Tarreau35dbd5d2017-09-22 09:13:49 +0200951
Willy Tarreau62f52692017-10-08 23:01:42 +0200952/*****************************************************************/
953/* functions below are dedicated to the mux setup and management */
954/*****************************************************************/
955
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200956/* Initialize the mux once it's attached. For outgoing connections, the context
957 * is already initialized before installing the mux, so we detect incoming
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200958 * connections from the fact that the context is still NULL (even during mux
959 * upgrades). <input> is always used as Input buffer and may contain data. It is
960 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
Willy Tarreau7dc24e42018-10-03 13:52:41 +0200961 */
Christopher Faulet51f73eb2019-04-08 11:22:47 +0200962static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
963 struct buffer *input)
Willy Tarreau32218eb2017-09-22 08:07:25 +0200964{
965 struct h2c *h2c;
Willy Tarreauea392822017-10-31 10:02:25 +0100966 struct task *t = NULL;
Christopher Fauletf81ef032019-10-04 15:19:43 +0200967 void *conn_ctx = conn->ctx;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200968
Christopher Fauletf81ef032019-10-04 15:19:43 +0200969 TRACE_ENTER(H2_EV_H2C_NEW);
Willy Tarreau7838a792019-08-12 18:42:03 +0200970
Willy Tarreaubafbe012017-11-24 17:34:44 +0100971 h2c = pool_alloc(pool_head_h2c);
Willy Tarreau32218eb2017-09-22 08:07:25 +0200972 if (!h2c)
mildiscd2d7de2018-10-02 16:44:18 +0200973 goto fail_no_h2c;
Willy Tarreau32218eb2017-09-22 08:07:25 +0200974
Christopher Faulete9b70722019-04-08 10:46:02 +0200975 if (conn_is_back(conn)) {
Willy Tarreau01b44822018-10-03 14:26:37 +0200976 h2c->flags = H2_CF_IS_BACK;
977 h2c->shut_timeout = h2c->timeout = prx->timeout.server;
978 if (tick_isset(prx->timeout.serverfin))
979 h2c->shut_timeout = prx->timeout.serverfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100980
981 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
982 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200983 } else {
984 h2c->flags = H2_CF_NONE;
985 h2c->shut_timeout = h2c->timeout = prx->timeout.client;
986 if (tick_isset(prx->timeout.clientfin))
987 h2c->shut_timeout = prx->timeout.clientfin;
Amaury Denoyellec92697d2020-10-27 17:16:01 +0100988
989 h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
990 &h2_stats_module);
Willy Tarreau01b44822018-10-03 14:26:37 +0200991 }
Willy Tarreau3f133572017-10-31 19:21:06 +0100992
Willy Tarreau0b37d652018-10-03 10:33:02 +0200993 h2c->proxy = prx;
Willy Tarreau33400292017-11-05 11:23:40 +0100994 h2c->task = NULL;
Willy Tarreau4c8241a2023-03-20 19:16:04 +0100995 h2c->wait_event.tasklet = NULL;
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +0100996 h2c->idle_start = now_ms;
Willy Tarreau3f133572017-10-31 19:21:06 +0100997 if (tick_isset(h2c->timeout)) {
998 t = task_new(tid_bit);
999 if (!t)
1000 goto fail;
1001
1002 h2c->task = t;
1003 t->process = h2_timeout_task;
1004 t->context = h2c;
1005 t->expire = tick_add(now_ms, h2c->timeout);
1006 }
Willy Tarreauea392822017-10-31 10:02:25 +01001007
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001008 h2c->wait_event.tasklet = tasklet_new();
1009 if (!h2c->wait_event.tasklet)
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001010 goto fail;
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001011 h2c->wait_event.tasklet->process = h2_io_cb;
1012 h2c->wait_event.tasklet->context = h2c;
Willy Tarreau4f6516d2018-12-19 13:59:17 +01001013 h2c->wait_event.events = 0;
Amaury Denoyelled3a88c12021-05-03 10:47:51 +02001014 if (!conn_is_back(conn)) {
1015 /* Connection might already be in the stopping_list if subject
1016 * to h1->h2 upgrade.
1017 */
1018 if (!LIST_INLIST(&conn->stopping_list)) {
1019 LIST_APPEND(&mux_stopping_data[tid].list,
1020 &conn->stopping_list);
1021 }
1022 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +02001023
Willy Tarreau2bdcc702020-05-19 11:31:11 +02001024 h2c->ddht = hpack_dht_alloc();
Willy Tarreau32218eb2017-09-22 08:07:25 +02001025 if (!h2c->ddht)
1026 goto fail;
1027
1028 /* Initialise the context. */
1029 h2c->st0 = H2_CS_PREFACE;
1030 h2c->conn = conn;
Willy Tarreau2e2083a2019-01-31 10:34:07 +01001031 h2c->streams_limit = h2_settings_max_concurrent_streams;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001032 h2c->max_id = -1;
1033 h2c->errcode = H2_ERR_NO_ERROR;
Willy Tarreau97aaa672018-12-23 09:49:04 +01001034 h2c->rcvd_c = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001035 h2c->rcvd_s = 0;
Willy Tarreau49745612017-12-03 18:56:02 +01001036 h2c->nb_streams = 0;
Willy Tarreau7ac60e82018-07-19 09:04:05 +02001037 h2c->nb_cs = 0;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01001038 h2c->nb_reserved = 0;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001039 h2c->stream_cnt = 0;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001040
Christopher Faulet51f73eb2019-04-08 11:22:47 +02001041 h2c->dbuf = *input;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001042 h2c->dsi = -1;
1043 h2c->msi = -1;
Willy Tarreaue9634bd2019-01-23 10:25:10 +01001044
Willy Tarreau32218eb2017-09-22 08:07:25 +02001045 h2c->last_sid = -1;
1046
Willy Tarreau51330962019-05-26 09:38:07 +02001047 br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
Willy Tarreau32218eb2017-09-22 08:07:25 +02001048 h2c->miw = 65535; /* mux initial window size */
1049 h2c->mws = 65535; /* mux window size */
1050 h2c->mfs = 16384; /* initial max frame size */
Willy Tarreau751f2d02018-10-05 09:35:00 +02001051 h2c->streams_by_id = EB_ROOT;
Willy Tarreau32218eb2017-09-22 08:07:25 +02001052 LIST_INIT(&h2c->send_list);
1053 LIST_INIT(&h2c->fctl_list);
Willy Tarreau9edf6db2019-10-02 10:49:59 +02001054 LIST_INIT(&h2c->blocked_list);
Willy Tarreau90f366b2021-02-20 11:49:49 +01001055 LIST_INIT(&h2c->buf_wait.list);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001056
Christopher Fauletf81ef032019-10-04 15:19:43 +02001057 conn->ctx = h2c;
1058
Willy Tarreaue2f68e92021-06-16 17:47:24 +02001059 TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);
1060
Willy Tarreau3f133572017-10-31 19:21:06 +01001061 if (t)
1062 task_queue(t);
Willy Tarreauea392822017-10-31 10:02:25 +01001063
Willy Tarreau01b44822018-10-03 14:26:37 +02001064 if (h2c->flags & H2_CF_IS_BACK) {
1065 /* FIXME: this is temporary, for outgoing connections we need
1066 * to immediately allocate a stream until the code is modified
1067 * so that the caller calls ->attach(). For now the outgoing cs
Christopher Fauletf81ef032019-10-04 15:19:43 +02001068 * is stored as conn->ctx by the caller and saved in conn_ctx.
Willy Tarreau01b44822018-10-03 14:26:37 +02001069 */
1070 struct h2s *h2s;
1071
Christopher Fauletf81ef032019-10-04 15:19:43 +02001072 h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
Willy Tarreau01b44822018-10-03 14:26:37 +02001073 if (!h2s)
1074 goto fail_stream;
1075 }
1076
Willy Tarreau4781b152021-04-06 13:53:36 +02001077 HA_ATOMIC_INC(&h2c->px_counters->open_conns);
1078 HA_ATOMIC_INC(&h2c->px_counters->total_conns);
Amaury Denoyelle66942c12020-10-27 17:16:04 +01001079
Willy Tarreau0f383582018-10-03 14:22:21 +02001080 /* prepare to read something */
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02001081 h2c_restart_reading(h2c, 1);
Willy Tarreau7838a792019-08-12 18:42:03 +02001082 TRACE_LEAVE(H2_EV_H2C_NEW, conn);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001083 return 0;
Willy Tarreau01b44822018-10-03 14:26:37 +02001084 fail_stream:
1085 hpack_dht_free(h2c->ddht);
mildiscd2d7de2018-10-02 16:44:18 +02001086 fail:
Willy Tarreauf6562792019-05-07 19:05:35 +02001087 task_destroy(t);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02001088 if (h2c->wait_event.tasklet)
1089 tasklet_free(h2c->wait_event.tasklet);
Willy Tarreaubafbe012017-11-24 17:34:44 +01001090 pool_free(pool_head_h2c, h2c);
mildiscd2d7de2018-10-02 16:44:18 +02001091 fail_no_h2c:
Willy Tarreau8802bf12022-01-12 17:24:26 +01001092 if (!conn_is_back(conn))
1093 LIST_DEL_INIT(&conn->stopping_list);
Christopher Fauletf81ef032019-10-04 15:19:43 +02001094 conn->ctx = conn_ctx; /* restore saved ctx */
1095 TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
Willy Tarreau32218eb2017-09-22 08:07:25 +02001096 return -1;
1097}
1098
Willy Tarreau751f2d02018-10-05 09:35:00 +02001099/* returns the next allocatable outgoing stream ID for the H2 connection, or
1100 * -1 if no more is allocatable.
1101 */
1102static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
1103{
1104 int32_t id = (h2c->max_id + 1) | 1;
Willy Tarreaua80dca82019-01-24 17:08:28 +01001105
1106 if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
Willy Tarreau751f2d02018-10-05 09:35:00 +02001107 id = -1;
1108 return id;
1109}
1110
Willy Tarreau2373acc2017-10-12 17:35:14 +02001111/* returns the stream associated with id <id> or NULL if not found */
1112static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
1113{
1114 struct eb32_node *node;
1115
Willy Tarreau751f2d02018-10-05 09:35:00 +02001116 if (id == 0)
1117 return (struct h2s *)h2_closed_stream;
1118
Willy Tarreau2a856182017-05-16 15:20:39 +02001119 if (id > h2c->max_id)
1120 return (struct h2s *)h2_idle_stream;
1121
Willy Tarreau2373acc2017-10-12 17:35:14 +02001122 node = eb32_lookup(&h2c->streams_by_id, id);
1123 if (!node)
Willy Tarreau2a856182017-05-16 15:20:39 +02001124 return (struct h2s *)h2_closed_stream;
Willy Tarreau2373acc2017-10-12 17:35:14 +02001125
1126 return container_of(node, struct h2s, by_id);
1127}
1128
/* release function. This one should be called to free all resources allocated
 * to the mux. It releases the h2c and, when the connection still belongs to
 * this mux, shuts down and frees the connection as well.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = NULL;

	TRACE_ENTER(H2_EV_H2C_END);

	if (h2c) {
		/* The connection must be attached to this mux to be released */
		if (h2c->conn && h2c->conn->ctx == h2c)
			conn = h2c->conn;

		TRACE_DEVEL("freeing h2c", H2_EV_H2C_END, conn);
		hpack_dht_free(h2c->ddht);

		/* stop waiting for a buffer allocation if we were queued */
		if (LIST_INLIST(&h2c->buf_wait.list))
			LIST_DEL_INIT(&h2c->buf_wait.list);

		h2_release_buf(h2c, &h2c->dbuf);
		h2_release_mbuf(h2c);

		if (h2c->task) {
			/* detach the timeout task and let it kill itself on wakeup */
			h2c->task->context = NULL;
			task_wakeup(h2c->task, TASK_WOKEN_OTHER);
			h2c->task = NULL;
		}
		if (h2c->wait_event.tasklet)
			tasklet_free(h2c->wait_event.tasklet);
		/* drop any pending xprt subscription before freeing h2c */
		if (conn && h2c->wait_event.events != 0)
			conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
						&h2c->wait_event);

		HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

		pool_free(pool_head_h2c, h2c);
	}

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
1197
1198
Willy Tarreau71681172017-10-23 14:39:06 +02001199/******************************************************/
1200/* functions below are for the H2 protocol processing */
1201/******************************************************/
1202
1203/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
Willy Tarreau1f094672017-11-20 21:27:45 +01001204static inline __maybe_unused int h2s_id(const struct h2s *h2s)
Willy Tarreau71681172017-10-23 14:39:06 +02001205{
1206 return h2s ? h2s->id : 0;
1207}
1208
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02001209/* returns the sum of the stream's own window size and the mux's initial
1210 * window, which together form the stream's effective window size.
1211 */
1212static inline int h2s_mws(const struct h2s *h2s)
1213{
1214 return h2s->sws + h2s->h2c->miw;
1215}
1216
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001217/* returns true of the mux is currently busy as seen from stream <h2s> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001218static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s)
Willy Tarreau5b5e6872017-09-25 16:17:25 +02001219{
1220 if (h2c->msi < 0)
1221 return 0;
1222
1223 if (h2c->msi == h2s_id(h2s))
1224 return 0;
1225
1226 return 1;
1227}
1228
Willy Tarreauec26f622022-04-13 09:40:52 +02001229/* marks an error on the connection. Before settings are sent, we must not send
1230 * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
1231 * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
1232 * even try.
1233 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001234static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
Willy Tarreau741d6df2017-10-17 08:00:59 +02001235{
Willy Tarreau022e5e52020-09-10 09:33:15 +02001236 TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
Willy Tarreau741d6df2017-10-17 08:00:59 +02001237 h2c->errcode = err;
Willy Tarreauec26f622022-04-13 09:40:52 +02001238 if (h2c->st0 < H2_CS_SETTINGS1)
1239 h2c->flags |= H2_CF_GOAWAY_FAILED;
Willy Tarreau741d6df2017-10-17 08:00:59 +02001240 h2c->st0 = H2_CS_ERROR;
1241}
1242
Willy Tarreau175cebb2019-01-24 10:02:24 +01001243/* marks an error on the stream. It may also update an already closed stream
1244 * (e.g. to report an error after an RST was received).
1245 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001246static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
Willy Tarreau2e43f082017-10-17 08:03:59 +02001247{
Willy Tarreau175cebb2019-01-24 10:02:24 +01001248 if (h2s->id && h2s->st != H2_SS_ERROR) {
Willy Tarreau022e5e52020-09-10 09:33:15 +02001249 TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
Willy Tarreau2e43f082017-10-17 08:03:59 +02001250 h2s->errcode = err;
Willy Tarreau175cebb2019-01-24 10:02:24 +01001251 if (h2s->st < H2_SS_ERROR)
1252 h2s->st = H2_SS_ERROR;
Willy Tarreauec988c72018-12-19 18:00:29 +01001253 if (h2s->cs)
1254 cs_set_error(h2s->cs);
Willy Tarreau2e43f082017-10-17 08:03:59 +02001255 }
1256}
1257
Willy Tarreau7e094452018-12-19 18:08:52 +01001258/* attempt to notify the data layer of recv availability */
1259static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
1260{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001261 if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001262 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01001263 tasklet_wakeup(h2s->subs->tasklet);
1264 h2s->subs->events &= ~SUB_RETRY_RECV;
1265 if (!h2s->subs->events)
1266 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001267 }
1268}
1269
1270/* attempt to notify the data layer of send availability */
1271static void __maybe_unused h2s_notify_send(struct h2s *h2s)
1272{
Willy Tarreauf96508a2020-01-10 11:12:48 +01001273 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02001274 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01001275 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01001276 tasklet_wakeup(h2s->subs->tasklet);
1277 h2s->subs->events &= ~SUB_RETRY_SEND;
1278 if (!h2s->subs->events)
1279 h2s->subs = NULL;
Willy Tarreau7e094452018-12-19 18:08:52 +01001280 }
Willy Tarreau5723f292020-01-10 15:16:57 +01001281 else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
1282 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
1283 tasklet_wakeup(h2s->shut_tl);
1284 }
Willy Tarreau7e094452018-12-19 18:08:52 +01001285}
1286
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001287/* alerts the data layer, trying to wake it up by all means, following
1288 * this sequence :
1289 * - if the h2s' data layer is subscribed to recv, then it's woken up for recv
1290 * - if its subscribed to send, then it's woken up for send
1291 * - if it was subscribed to neither, its ->wake() callback is called
1292 * It is safe to call this function with a closed stream which doesn't have a
1293 * conn_stream anymore.
1294 */
1295static void __maybe_unused h2s_alert(struct h2s *h2s)
1296{
Willy Tarreau7838a792019-08-12 18:42:03 +02001297 TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
1298
Willy Tarreauf96508a2020-01-10 11:12:48 +01001299 if (h2s->subs ||
Willy Tarreau5723f292020-01-10 15:16:57 +01001300 (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001301 h2s_notify_recv(h2s);
1302 h2s_notify_send(h2s);
1303 }
Willy Tarreau7838a792019-08-12 18:42:03 +02001304 else if (h2s->cs && h2s->cs->data_cb->wake != NULL) {
1305 TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001306 h2s->cs->data_cb->wake(h2s->cs);
Willy Tarreau7838a792019-08-12 18:42:03 +02001307 }
1308
1309 TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
Willy Tarreau8b2757c2018-12-19 17:36:48 +01001310}
1311
Willy Tarreaue4820742017-07-27 13:37:23 +02001312/* writes the 24-bit frame size <len> at address <frame> */
Willy Tarreau1f094672017-11-20 21:27:45 +01001313static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
Willy Tarreaue4820742017-07-27 13:37:23 +02001314{
1315 uint8_t *out = frame;
1316
1317 *out = len >> 16;
1318 write_n16(out + 1, len);
1319}
1320
Willy Tarreau54c15062017-10-10 17:10:03 +02001321/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
1322 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
1323 * the caller's responsibility to verify that there are at least <bytes> bytes
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001324 * available in the buffer's input prior to calling this function. The buffer
1325 * is assumed not to hold any output data.
Willy Tarreau54c15062017-10-10 17:10:03 +02001326 */
Willy Tarreau1f094672017-11-20 21:27:45 +01001327static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
Willy Tarreau54c15062017-10-10 17:10:03 +02001328 const struct buffer *b, int o)
1329{
Willy Tarreau591d4452018-06-15 17:21:00 +02001330 readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001331}
1332
Willy Tarreau1f094672017-11-20 21:27:45 +01001333static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001334{
Willy Tarreau591d4452018-06-15 17:21:00 +02001335 return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001336}
1337
Willy Tarreau1f094672017-11-20 21:27:45 +01001338static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001339{
Willy Tarreau591d4452018-06-15 17:21:00 +02001340 return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001341}
1342
Willy Tarreau1f094672017-11-20 21:27:45 +01001343static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
Willy Tarreau54c15062017-10-10 17:10:03 +02001344{
Willy Tarreau591d4452018-06-15 17:21:00 +02001345 return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
Willy Tarreau54c15062017-10-10 17:10:03 +02001346}
1347
1348
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001349/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
1350 * The algorithm is not obvious. It turns out that H2 headers are neither
1351 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
1352 * may wrap so each byte read must be checked. The header is formed like this :
Willy Tarreau715d5312017-07-11 15:20:24 +02001353 *
1354 * b0 b1 b2 b3 b4 b5..b8
1355 * +----------+---------+--------+----+----+----------------------+
1356 * |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
1357 * +----------+---------+--------+----+----+----------------------+
1358 *
1359 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
1360 * we get the sid properly aligned and ordered, and 16 bits of len properly
1361 * ordered as well. The type and flags can be extracted using bit shifts from
1362 * the word, and only one extra read is needed to fetch len[16:23].
Willy Tarreau9c7f2d12018-06-15 11:51:32 +02001363 * Returns zero if some bytes are missing, otherwise non-zero on success. The
1364 * buffer is assumed not to contain any output data.
Willy Tarreau715d5312017-07-11 15:20:24 +02001365 */
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001366static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001367{
1368 uint64_t w;
1369
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001370 if (b_data(b) < o + 9)
Willy Tarreau715d5312017-07-11 15:20:24 +02001371 return 0;
1372
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001373 w = h2_get_n64(b, o + 1);
1374 h->len = *(uint8_t*)b_peek(b, o) << 16;
Willy Tarreau715d5312017-07-11 15:20:24 +02001375 h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
1376 h->ff = w >> 32;
1377 h->ft = w >> 40;
1378 h->len += w >> 48;
1379 return 1;
1380}
1381
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above. The caller must have verified that at least 9
 * bytes are present.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	b_del(b, 9); /* fixed 9-byte H2 frame header */
}
1389
1390/* same as above, automatically advances the buffer on success */
Willy Tarreau1f094672017-11-20 21:27:45 +01001391static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
Willy Tarreau715d5312017-07-11 15:20:24 +02001392{
1393 int ret;
1394
Willy Tarreaua4428bd2018-12-22 18:11:41 +01001395 ret = h2_peek_frame_hdr(b, 0, h);
Willy Tarreau715d5312017-07-11 15:20:24 +02001396 if (ret > 0)
1397 h2_skip_frame_hdr(b);
1398 return ret;
1399}
1400
Willy Tarreaucb985a42019-10-07 16:56:34 +02001401
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	size_t remain = b->data - 9;           /* payload bytes after the 9-byte header */
	int extra_frames = (remain - 1) / mfs; /* CONTINUATION frames to insert */
	size_t fsize;
	char *fptr;
	int frame;

	/* frame already fits within <mfs>: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	for (frame = extra_frames; frame; frame--) {
		/* this tail fragment carries 1..mfs bytes */
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data: shift this fragment right to make room for its
		 * own 9-byte frame header
		 */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; only the last CONTINUATION (the one
		 * processed first, frame == extra_frames) keeps END_HEADERS,
		 * and the stream ID is copied from the initial frame
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial frame keeps only <remain> bytes and loses END_HEADERS */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
1447
1448
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained.
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* id 0 means a reserved (not yet announced) stream, counted
		 * separately (see the matching increment in h2s_new())
		 */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		if (h2s->cs) {
			/* wake the app layer if it hasn't seen EOS yet and no
			 * data remains pending, so it learns nothing more comes
			 */
			if (!(h2s->cs->flags & CS_FL_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	h2s->st = H2_SS_CLOSED;
}
1470
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	/* keep the connection pointer for the final trace: h2s is freed below */
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		/* a buffer was returned to the pool: offer it to waiters */
		offer_buffers(NULL, 1);
	}

	/* neutralize any remaining subscription before freeing the stream */
	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	h2_remove_from_list(h2s);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
1504
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function.
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* tasklet used for deferred shutdowns on this stream */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->cs = NULL;
	h2s->sws = 0;          /* per-stream window delta vs initial window */
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	h2s->by_id.key = h2s->id = id;
	/* id > 0 is a real stream; id 0 marks a reserved stream slot */
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;
	h2c->stream_cnt++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
 out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1560
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it.
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct conn_stream *cs;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse the stream if the concurrency limit is already reached */
	if (h2c->nb_streams >= h2_settings_max_concurrent_streams) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		session_inc_http_req_ctr(sess);
		session_inc_http_err_ctr(sess);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	cs = cs_new(h2c->conn, h2c->conn->target);
	if (!cs)
		goto out_close;

	cs->flags |= CS_FL_NOT_FIRST;
	h2s->cs = cs;
	cs->ctx = h2s;
	h2c->nb_cs++;

	/* FIXME wrong analogy between ext-connect and websocket, this need to
	 * be refine.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		cs->flags |= CS_FL_WEBSOCKET;

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = tv_ms_elapsed(&sess->tv_accept, &now) - sess->t_handshake;
	if (stream_create_from_cs(cs, input) < 0)
		goto out_free_cs;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->tv_accept = now;
	sess->t_handshake = 0;
	sess->t_idle = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_cs(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_free_cs:
	/* undo the conn_stream accounting before destroying the stream */
	h2c->nb_cs--;
	if (!h2c->nb_cs)
		h2c->idle_start = now_ms;
	cs_free(cs);
	h2s->cs = NULL;
 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	/* emit a log for the failed request, as no stream will do it */
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
1641
/* allocates a new stream associated to conn_stream <cs> on the h2c connection
 * and returns it, or NULL in case of memory allocation error or if the highest
 * possible stream ID was reached. The stream ID itself is not assigned here:
 * it stays 0 until the first frame is emitted for this stream. On success the
 * stream takes a reference on <cs> (cs->ctx) and records <sess> as its session.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* refuse to create a stream beyond the peer's advertised limit */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* refuse as well when no more stream IDs can ever be allocated on
	 * this connection
	 */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* attach the conn_stream and account for it on the connection */
	h2s->cs = cs;
	h2s->sess = sess;
	cs->ctx = h2s;
	h2c->nb_cs++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
1681
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes.
 *
 * The frame is first assembled in a local buffer (<buf_data>) so that its
 * length field can be fixed up once all settings are appended, then it is
 * copied into the mux buffer in one shot. On mux-buffer shortage the relevant
 * H2_CF_* blocking flags are set so the caller retries later.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int mfs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* 9-byte H2 frame header; the 24-bit length is patched at the end */
	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	       "\x00\x00\x00"      /* length : 0 for now */
	       "\x04\x00"          /* type : 4 (settings), flags : 0 */
	       "\x00\x00\x00\x00", /* stream ID : 0 */
	       9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* each setting below is only emitted when it differs from the
	 * protocol's default value (RFC7540#6.5.2)
	 */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_initial_window_size != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, h2_settings_initial_window_size);
		chunk_memcat(&buf, str, 6);
	}

	if (h2_settings_max_concurrent_streams != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, h2_settings_max_concurrent_streams);
		chunk_memcat(&buf, str, 6);
	}

	/* clamp the advertised max frame size to the buffer size; 0 in the
	 * config means "use the buffer size"
	 */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* patch the frame length now that the payload size is known */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* buffer full: try to append a new one to the ring,
			 * otherwise mark the mux full
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
1787
/* Try to receive a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 *
 * The demux buffer is only consumed (b_del) once our SETTINGS frame could be
 * emitted, so that a blocked mux buffer leaves the preface in place for the
 * next attempt.
 */
static int h2c_frt_recv_preface(struct h2c *h2c)
{
	int ret1;
	int ret2;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);

	/* ret1 > 0: full preface matched; 0: not enough data yet; < 0: mismatch */
	ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));

	if (unlikely(ret1 <= 0)) {
		if (!ret1)
			h2c->flags |= H2_CF_DEM_SHORT_READ;
		if (ret1 < 0 || conn_xprt_read0_pending(h2c->conn)) {
			TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			/* do not count the error if the connection was closed
			 * without any data and the frontend ignores probes
			 * (PR_O_IGNORE_PRB)
			 */
			if (b_data(&h2c->dbuf) ||
			    !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		ret2 = 0;
		goto out;
	}

	ret2 = h2c_send_settings(h2c);
	if (ret2 > 0)
		b_del(&h2c->dbuf, ret1);
 out:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
	return ret2;
}
1822
/* Try to send a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c.
 *
 * An empty tail mux buffer is used as the indication that the preface was not
 * sent yet; once it carries data, only the SETTINGS frame still needs to go.
 */
static int h2c_bck_send_preface(struct h2c *h2c)
{
	struct buffer *res;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);

	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	if (!b_data(res)) {
		/* preface not yet sent */
		ret = b_istput(res, ist(H2_CONN_PREFACE));
		if (unlikely(ret <= 0)) {
			if (!ret) {
				/* full: try the next ring buffer before giving up */
				if ((res = br_tail_add(h2c->mbuf)) != NULL)
					goto retry;
				h2c->flags |= H2_CF_MUX_MFULL;
				h2c->flags |= H2_CF_DEM_MROOM;
				goto out;
			}
			else {
				h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
				ret = 0;
				goto out;
			}
		}
	}
	ret = h2c_send_settings(h2c);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
	return ret;
}
1870
/* try to send a GOAWAY frame on the connection to report an error or a graceful
 * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
 * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
 * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
 * the message, it subscribes the requester (either <h2s> or <h2c>) to future
 * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
 * on unrecoverable failure. It will not attempt to send one again in this last
 * case, nor will it send one if settings were not sent (e.g. still waiting for
 * a preface) so that it is safe to use h2c_error() to report such errors.
 */
static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);

	/* already failed once, or SETTINGS not sent yet: pretend it worked */
	if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
		ret = 1; // claim that it worked
		goto out;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		/* block either the stream or the demuxer depending on who asked */
		if (h2s)
			h2s->flags |= H2_SF_BLK_MBUSY;
		else
			h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 8, type: 7, flags: none, sid: 0 */
	memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);

	if (h2c->last_sid < 0)
		h2c->last_sid = h2c->max_id;

	write_n32(str + 9, h2c->last_sid);
	write_n32(str + 13, h2c->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		if (h2s)
			h2s->flags |= H2_SF_BLK_MROOM;
		else
			h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			if (h2s)
				h2s->flags |= H2_SF_BLK_MROOM;
			else
				h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			/* we cannot report this error using GOAWAY, so we mark
			 * it and claim a success.
			 */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			h2c->flags |= H2_CF_GOAWAY_FAILED;
			ret = 1;
			goto out;
		}
	}
	h2c->flags |= H2_CF_GOAWAY_SENT;

	/* some codes are not for real errors, just attempts to close cleanly */
	switch (h2c->errcode) {
	case H2_ERR_NO_ERROR:
	case H2_ERR_ENHANCE_YOUR_CALM:
	case H2_ERR_REFUSED_STREAM:
	case H2_ERR_CANCEL:
		break;
	default:
		HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
	return ret;
}
1960
/* Try to send an RST_STREAM frame on the connection for the indicated stream
 * during mux operations. This stream must be valid and cannot be closed
 * already. h2s->id will be used for the stream ID and h2s->errcode will be
 * used for the error code. h2s->st will be update to H2_SS_CLOSED if it was
 * not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it subscribes the stream to future notifications.
 */
static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* nothing to do on a missing or already-closed stream */
	if (!h2s || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);
	write_n32(str + 5, h2s->id);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* full: try to append a new ring buffer */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* whether sent or deliberately skipped, the stream is now reset */
	h2s->flags |= H2_SF_RST_SENT;
	h2s_close(h2s);
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2032
/* Try to send an RST_STREAM frame on the connection for the stream being
 * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
 * error code, even if the stream is one of the dummy ones, and will update
 * h2s->st to H2_SS_CLOSED if it was not yet.
 *
 * Returns > 0 on success or zero if nothing was done. In case of lack of room
 * to write the message, it blocks the demuxer and subscribes it to future
 * notifications. It's worth mentioning that an RST may even be sent for a
 * closed stream.
 */
static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);

	/* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
	 * RST_STREAM in response to a RST_STREAM frame.
	 */
	if (h2c->dft == H2_FT_RST_STREAM) {
		ret = 1;
		goto ignore;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* len: 4, type: 3, flags: none */
	memcpy(str, "\x00\x00\x04\x03\x00", 5);

	/* the stream ID comes from the demuxer (h2c->dsi), not from h2s,
	 * since h2s may be a dummy stream here
	 */
	write_n32(str + 5, h2c->dsi);
	write_n32(str + 9, h2s->errcode);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
			goto out;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
			goto out;
		}
	}

 ignore:
	/* only real streams (id != 0) get their state updated; dummy streams
	 * are shared read-only placeholders
	 */
	if (h2s->id) {
		h2s->flags |= H2_SF_RST_SENT;
		h2s_close(h2s);
	}

 out:
	HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
	return ret;
}
2105
/* try to send an empty DATA frame with the ES flag set to notify about the
 * end of stream and match a shutdown(write). If an ES was already sent as
 * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
 * on success or zero if nothing was done. In case of lack of room to write the
 * message, it subscribes the requesting stream to future notifications.
 */
static int h2_send_empty_data_es(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);

	/* local side already half-closed or further: ES was already conveyed */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
		ret = 1;
		goto out;
	}

	if (h2c_mux_busy(h2c, h2s)) {
		h2s->flags |= H2_SF_BLK_MBUSY;
		goto out;
	}

	/* len: 0x000000, type: 0(DATA), flags: ES=1 */
	memcpy(str, "\x00\x00\x00\x00\x01", 5);
	write_n32(str + 5, h2s->id);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (likely(ret > 0)) {
		h2s->flags |= H2_SF_ES_SENT;
	}
	else if (!ret) {
		/* full: try to append a new ring buffer */
		if ((res = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
	}
	else {
		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
		ret = 0;
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	return ret;
}
2161
/* wake a specific stream and assign its conn_stream some CS_FL_* flags among
 * CS_FL_ERR_PENDING and CS_FL_ERROR if needed. The stream's state
 * is automatically updated accordingly. If the stream is orphaned, it is
 * destroyed.
 */
static void h2s_wake_one_stream(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);

	if (!h2s->cs) {
		/* this stream was already orphaned */
		h2s_destroy(h2s);
		TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
		return;
	}

	/* a pending read0 means the remote side finished sending: advance the
	 * stream state accordingly (OPEN -> half-closed remote, HLOC -> closed)
	 */
	if (h2c_read0_pending(h2s->h2c)) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* propagate connection-level errors, or a GOAWAY whose last_sid
	 * excludes this stream (unassigned ID or ID above last_sid)
	 */
	if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) ||
	    (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
		h2s->cs->flags |= CS_FL_ERR_PENDING;
		if (h2s->cs->flags & CS_FL_EOS)
			h2s->cs->flags |= CS_FL_ERROR;

		if (h2s->st < H2_SS_ERROR)
			h2s->st = H2_SS_ERROR;
	}

	h2s_alert(h2s);
	TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
}
2200
2201/* wake the streams attached to the connection, whose id is greater than <last>
2202 * or unassigned.
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002203 */
Willy Tarreau23482912019-05-07 15:23:14 +02002204static void h2_wake_some_streams(struct h2c *h2c, int last)
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002205{
2206 struct eb32_node *node;
2207 struct h2s *h2s;
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002208
Willy Tarreau7838a792019-08-12 18:42:03 +02002209 TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
2210
Christopher Fauletf02ca002019-03-07 16:21:34 +01002211 /* Wake all streams with ID > last */
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002212 node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
2213 while (node) {
2214 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002215 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002216 h2s_wake_one_stream(h2s);
Christopher Fauletf02ca002019-03-07 16:21:34 +01002217 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01002218
Christopher Fauletf02ca002019-03-07 16:21:34 +01002219 /* Wake all streams with unassigned ID (ID == 0) */
2220 node = eb32_lookup(&h2c->streams_by_id, 0);
2221 while (node) {
2222 h2s = container_of(node, struct h2s, by_id);
2223 if (h2s->id > 0)
2224 break;
2225 node = eb32_next(node);
Willy Tarreau13b6c2e2019-05-07 17:26:05 +02002226 h2s_wake_one_stream(h2s);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002227 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002228
2229 TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau23b92aa2017-10-30 00:26:54 +01002230}
2231
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002232/* Wake up all blocked streams whose window size has become positive after the
2233 * mux's initial window was adjusted. This should be done after having processed
2234 * SETTINGS frames which have updated the mux's initial window size.
Willy Tarreau3421aba2017-07-27 15:41:03 +02002235 */
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002236static void h2c_unblock_sfctl(struct h2c *h2c)
Willy Tarreau3421aba2017-07-27 15:41:03 +02002237{
2238 struct h2s *h2s;
2239 struct eb32_node *node;
2240
Willy Tarreau7838a792019-08-12 18:42:03 +02002241 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
2242
Willy Tarreau3421aba2017-07-27 15:41:03 +02002243 node = eb32_first(&h2c->streams_by_id);
2244 while (node) {
2245 h2s = container_of(node, struct h2s, by_id);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002246 if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002247 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002248 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002249 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2250 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002251 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreaub1c9edc2019-01-30 16:11:20 +01002252 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02002253 node = eb32_next(node);
2254 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002255
2256 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3421aba2017-07-27 15:41:03 +02002257}
2258
/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
 * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
 * return an error in h2c. The caller must have already verified frame length
 * and stream ID validity. Described in RFC7540#6.5.
 */
static int h2c_handle_settings(struct h2c *h2c)
{
	unsigned int offset;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);

	if (h2c->dff & H2_F_SETTINGS_ACK) {
		/* RFC7540#6.5: a SETTINGS ACK must carry an empty payload */
		if (h2c->dfl) {
			error = H2_ERR_FRAME_SIZE_ERROR;
			goto fail;
		}
		goto done;
	}

	/* process full frame only */
	if (b_data(&h2c->dbuf) < h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto out0;
	}

	/* parse the frame: a sequence of 6-byte entries, each made of a
	 * 16-bit identifier followed by a 32-bit value. Unknown identifiers
	 * are silently ignored as required by the spec.
	 */
	for (offset = 0; offset < h2c->dfl; offset += 6) {
		uint16_t type = h2_get_n16(&h2c->dbuf, offset);
		int32_t arg = h2_get_n32(&h2c->dbuf, offset + 2);

		switch (type) {
		case H2_SETTINGS_INITIAL_WINDOW_SIZE:
			/* we need to update all existing streams with the
			 * difference from the previous iws.
			 */
			if (arg < 0) { // RFC7540#6.5.2
				error = H2_ERR_FLOW_CONTROL_ERROR;
				goto fail;
			}
			h2c->miw = arg;
			break;
		case H2_SETTINGS_MAX_FRAME_SIZE:
			if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
				TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			h2c->mfs = arg;
			break;
		case H2_SETTINGS_HEADER_TABLE_SIZE:
			/* remember that the peer changed its header table size
			 * so that the HPACK encoder can announce the update.
			 */
			h2c->flags |= H2_CF_SHTS_UPDATED;
			break;
		case H2_SETTINGS_ENABLE_PUSH:
			if (arg < 0 || arg > 1) { // RFC7540#6.5.2
				TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
				error = H2_ERR_PROTOCOL_ERROR;
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
				goto fail;
			}
			break;
		case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
			if (h2c->flags & H2_CF_IS_BACK) {
				/* the limit is only for the backend; for the frontend it is our limit */
				if ((unsigned int)arg > h2_settings_max_concurrent_streams)
					arg = h2_settings_max_concurrent_streams;
				h2c->streams_limit = arg;
			}
			break;
		case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
			/* peer advertises support for RFC8441 extended CONNECT */
			if (arg == 1)
				h2c->flags |= H2_CF_RCVD_RFC8441;
			break;
		}
	}

	/* need to ACK this frame now */
	h2c->st0 = H2_CS_FRAME_A;
 done:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 1;
 fail:
	/* only log on the frontend side: the session owner is the listener's session */
	if (!(h2c->flags & H2_CF_IS_BACK))
		sess_log(h2c->conn->owner);
	h2c_error(h2c, error);
 out0:
	TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
	return 0;
}
2349
/* try to send an ACK for a settings frame on the connection. Returns > 0 on
 * success or one of the h2_status values.
 */
static int h2c_ack_settings(struct h2c *h2c)
{
	struct buffer *res;
	char str[9];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	/* another frame is being assembled in the mux buffer: wait for it */
	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* 9-byte H2 frame header: empty SETTINGS frame with the ACK flag set */
	memcpy(str,
	       "\x00\x00\x00"     /* length : 0 (no data) */
	       "\x04" "\x01"      /* type : 4, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* buffer allocation failed: report it to the demuxer */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 9));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the
			 * ring and retry the emission.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* the frame cannot fit at all: internal error */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
2396
Willy Tarreaucf68c782017-10-10 17:11:41 +02002397/* processes a PING frame and schedules an ACK if needed. The caller must pass
2398 * the pointer to the payload in <payload>. Returns > 0 on success or zero on
Willy Tarreaub860c732019-01-30 15:39:55 +01002399 * missing data. The caller must have already verified frame length
2400 * and stream ID validity.
Willy Tarreaucf68c782017-10-10 17:11:41 +02002401 */
2402static int h2c_handle_ping(struct h2c *h2c)
2403{
Willy Tarreaucf68c782017-10-10 17:11:41 +02002404 /* schedule a response */
Willy Tarreau68ed6412017-12-03 18:15:56 +01002405 if (!(h2c->dff & H2_F_PING_ACK))
Willy Tarreaucf68c782017-10-10 17:11:41 +02002406 h2c->st0 = H2_CS_FRAME_A;
2407 return 1;
2408}
2409
/* Try to send a window update for stream id <sid> and value <increment>.
 * Returns > 0 on success or zero on missing room or failure. It may return an
 * error in h2c.
 */
static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
{
	struct buffer *res;
	char str[13];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);

	/* another frame is being assembled in the mux buffer: wait for it */
	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* length: 4, type: 8, flags: none */
	memcpy(str, "\x00\x00\x04\x08\x00", 5);
	write_n32(str + 5, sid);       /* stream ID (0 == whole connection) */
	write_n32(str + 9, increment); /* window size increment */

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* buffer allocation failed: report it to the demuxer */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 13));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the
			 * ring and retry the emission.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* the frame cannot fit at all: internal error */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
	return ret;
}
2457
2458/* try to send pending window update for the connection. It's safe to call it
2459 * with no pending updates. Returns > 0 on success or zero on missing room or
2460 * failure. It may return an error in h2c.
2461 */
2462static int h2c_send_conn_wu(struct h2c *h2c)
2463{
2464 int ret = 1;
2465
Willy Tarreau7838a792019-08-12 18:42:03 +02002466 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2467
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002468 if (h2c->rcvd_c <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002469 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002470
Willy Tarreau97aaa672018-12-23 09:49:04 +01002471 if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
2472 /* increase the advertised connection window to 2G on
2473 * first update.
2474 */
2475 h2c->flags |= H2_CF_WINDOW_OPENED;
2476 h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
2477 }
2478
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002479 /* send WU for the connection */
2480 ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
2481 if (ret > 0)
2482 h2c->rcvd_c = 0;
2483
Willy Tarreau7838a792019-08-12 18:42:03 +02002484 out:
2485 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002486 return ret;
2487}
2488
2489/* try to send pending window update for the current dmux stream. It's safe to
2490 * call it with no pending updates. Returns > 0 on success or zero on missing
2491 * room or failure. It may return an error in h2c.
2492 */
2493static int h2c_send_strm_wu(struct h2c *h2c)
2494{
2495 int ret = 1;
2496
Willy Tarreau7838a792019-08-12 18:42:03 +02002497 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
2498
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002499 if (h2c->rcvd_s <= 0)
Willy Tarreau7838a792019-08-12 18:42:03 +02002500 goto out;
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002501
2502 /* send WU for the stream */
2503 ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
2504 if (ret > 0)
2505 h2c->rcvd_s = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002506 out:
2507 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02002508 return ret;
2509}
2510
/* try to send an ACK for a ping frame on the connection. Returns > 0 on
 * success, 0 on missing data or one of the h2_status values.
 */
static int h2c_ack_ping(struct h2c *h2c)
{
	struct buffer *res;
	char str[17];
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);

	/* the 8-byte PING payload must be fully received before echoing it */
	if (b_data(&h2c->dbuf) < 8)
		goto out;

	/* another frame is being assembled in the mux buffer: wait for it */
	if (h2c_mux_busy(h2c, NULL)) {
		h2c->flags |= H2_CF_DEM_MBUSY;
		goto out;
	}

	/* 9-byte H2 frame header: PING response carrying the same payload */
	memcpy(str,
	       "\x00\x00\x08"     /* length : 8 (same payload) */
	       "\x06" "\x01"      /* type : 6, flags : ACK */
	       "\x00\x00\x00\x00" /* stream ID */, 9);

	/* copy the original payload */
	h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		/* buffer allocation failed: report it to the demuxer */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(str, 17));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* tail buffer full: try to append a new one to the
			 * ring and retry the emission.
			 */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			/* the frame cannot fit at all: internal error */
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
	return ret;
}
2563
Willy Tarreau26f95952017-07-27 17:18:30 +02002564/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
2565 * Returns > 0 on success or zero on missing data. It may return an error in
Willy Tarreaub860c732019-01-30 15:39:55 +01002566 * h2c or h2s. The caller must have already verified frame length and stream ID
2567 * validity. Described in RFC7540#6.9.
Willy Tarreau26f95952017-07-27 17:18:30 +02002568 */
2569static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
2570{
2571 int32_t inc;
2572 int error;
2573
Willy Tarreau7838a792019-08-12 18:42:03 +02002574 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
2575
Willy Tarreau26f95952017-07-27 17:18:30 +02002576 /* process full frame only */
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002577 if (b_data(&h2c->dbuf) < h2c->dfl) {
2578 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002579 goto out0;
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002580 }
Willy Tarreau26f95952017-07-27 17:18:30 +02002581
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002582 inc = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau26f95952017-07-27 17:18:30 +02002583
2584 if (h2c->dsi != 0) {
2585 /* stream window update */
Willy Tarreau26f95952017-07-27 17:18:30 +02002586
2587 /* it's not an error to receive WU on a closed stream */
2588 if (h2s->st == H2_SS_CLOSED)
Willy Tarreau7838a792019-08-12 18:42:03 +02002589 goto done;
Willy Tarreau26f95952017-07-27 17:18:30 +02002590
2591 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002592 TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002593 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002594 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002595 goto strm_err;
2596 }
2597
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002598 if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002599 TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02002600 error = H2_ERR_FLOW_CONTROL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002601 HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002602 goto strm_err;
2603 }
2604
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02002605 h2s->sws += inc;
2606 if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
Willy Tarreau26f95952017-07-27 17:18:30 +02002607 h2s->flags &= ~H2_SF_BLK_SFCTL;
Willy Tarreau9edf6db2019-10-02 10:49:59 +02002608 LIST_DEL_INIT(&h2s->list);
Willy Tarreauf96508a2020-01-10 11:12:48 +01002609 if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
2610 h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
Willy Tarreau2b718102021-04-21 07:32:39 +02002611 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreau26f95952017-07-27 17:18:30 +02002612 }
2613 }
2614 else {
2615 /* connection window update */
2616 if (!inc) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002617 TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002618 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau4781b152021-04-06 13:53:36 +02002619 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau26f95952017-07-27 17:18:30 +02002620 goto conn_err;
2621 }
2622
2623 if (h2c->mws >= 0 && h2c->mws + inc < 0) {
2624 error = H2_ERR_FLOW_CONTROL_ERROR;
2625 goto conn_err;
2626 }
2627
2628 h2c->mws += inc;
2629 }
2630
Willy Tarreau7838a792019-08-12 18:42:03 +02002631 done:
2632 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002633 return 1;
2634
2635 conn_err:
2636 h2c_error(h2c, error);
Willy Tarreau7838a792019-08-12 18:42:03 +02002637 out0:
2638 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002639 return 0;
2640
2641 strm_err:
Willy Tarreau6432dc82019-01-30 15:42:44 +01002642 h2s_error(h2s, error);
2643 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002644 TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreau26f95952017-07-27 17:18:30 +02002645 return 0;
2646}
2647
Willy Tarreaue96b0922017-10-30 00:28:29 +01002648/* processes a GOAWAY frame, and signals all streams whose ID is greater than
Willy Tarreaub860c732019-01-30 15:39:55 +01002649 * the last ID. Returns > 0 on success or zero on missing data. The caller must
2650 * have already verified frame length and stream ID validity. Described in
2651 * RFC7540#6.8.
Willy Tarreaue96b0922017-10-30 00:28:29 +01002652 */
2653static int h2c_handle_goaway(struct h2c *h2c)
2654{
Willy Tarreaue96b0922017-10-30 00:28:29 +01002655 int last;
2656
Willy Tarreau7838a792019-08-12 18:42:03 +02002657 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002658 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002659 if (b_data(&h2c->dbuf) < h2c->dfl) {
2660 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002661 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002662 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002663 }
Willy Tarreaue96b0922017-10-30 00:28:29 +01002664
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002665 last = h2_get_n32(&h2c->dbuf, 0);
2666 h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
Willy Tarreau11cc2d62017-12-03 10:27:47 +01002667 if (h2c->last_sid < 0)
2668 h2c->last_sid = last;
Willy Tarreau23482912019-05-07 15:23:14 +02002669 h2_wake_some_streams(h2c, last);
Willy Tarreau7838a792019-08-12 18:42:03 +02002670 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
Willy Tarreaue96b0922017-10-30 00:28:29 +01002671 return 1;
Willy Tarreaue96b0922017-10-30 00:28:29 +01002672}
2673
Willy Tarreau92153fc2017-12-03 19:46:19 +01002674/* processes a PRIORITY frame, and either skips it or rejects if it is
Willy Tarreaub860c732019-01-30 15:39:55 +01002675 * invalid. Returns > 0 on success or zero on missing data. It may return an
2676 * error in h2c. The caller must have already verified frame length and stream
2677 * ID validity. Described in RFC7540#6.3.
Willy Tarreau92153fc2017-12-03 19:46:19 +01002678 */
2679static int h2c_handle_priority(struct h2c *h2c)
2680{
Willy Tarreau7838a792019-08-12 18:42:03 +02002681 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
2682
Willy Tarreau92153fc2017-12-03 19:46:19 +01002683 /* process full frame only */
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002684 if (b_data(&h2c->dbuf) < h2c->dfl) {
Willy Tarreau7838a792019-08-12 18:42:03 +02002685 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002686 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002687 return 0;
Willy Tarreaue7bbbca2019-08-30 15:02:22 +02002688 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01002689
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002690 if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
Willy Tarreau92153fc2017-12-03 19:46:19 +01002691 /* 7540#5.3 : can't depend on itself */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002692 TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002693 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02002694 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7838a792019-08-12 18:42:03 +02002695 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreaub860c732019-01-30 15:39:55 +01002696 return 0;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002697 }
Willy Tarreau7838a792019-08-12 18:42:03 +02002698 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
Willy Tarreau92153fc2017-12-03 19:46:19 +01002699 return 1;
Willy Tarreau92153fc2017-12-03 19:46:19 +01002700}
2701
Willy Tarreaucd234e92017-08-18 10:59:39 +02002702/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
Willy Tarreaub860c732019-01-30 15:39:55 +01002703 * Returns > 0 on success or zero on missing data. The caller must have already
2704 * verified frame length and stream ID validity. Described in RFC7540#6.4.
Willy Tarreaucd234e92017-08-18 10:59:39 +02002705 */
2706static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
2707{
Willy Tarreau7838a792019-08-12 18:42:03 +02002708 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
2709
Willy Tarreaucd234e92017-08-18 10:59:39 +02002710 /* process full frame only */
Willy Tarreau7838a792019-08-12 18:42:03 +02002711 if (b_data(&h2c->dbuf) < h2c->dfl) {
2712 TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002713 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreaucd234e92017-08-18 10:59:39 +02002714 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02002715 }
Willy Tarreaucd234e92017-08-18 10:59:39 +02002716
2717 /* late RST, already handled */
Willy Tarreau7838a792019-08-12 18:42:03 +02002718 if (h2s->st == H2_SS_CLOSED) {
2719 TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002720 return 1;
Willy Tarreau7838a792019-08-12 18:42:03 +02002721 }
Willy Tarreaucd234e92017-08-18 10:59:39 +02002722
Willy Tarreauc9fa0482018-07-10 17:43:27 +02002723 h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
Willy Tarreau00dd0782018-03-01 16:31:34 +01002724 h2s_close(h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002725
2726 if (h2s->cs) {
Willy Tarreauec988c72018-12-19 18:00:29 +01002727 cs_set_error(h2s->cs);
Willy Tarreauf830f012018-12-19 17:44:55 +01002728 h2s_alert(h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002729 }
2730
2731 h2s->flags |= H2_SF_RST_RCVD;
Willy Tarreau7838a792019-08-12 18:42:03 +02002732 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02002733 return 1;
Willy Tarreaucd234e92017-08-18 10:59:39 +02002734}
2735
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002736/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
2737 * It may return an error in h2c or h2s. The caller must consider that the
2738 * return value is the new h2s in case one was allocated (most common case).
2739 * Described in RFC7540#6.2. Most of the
Willy Tarreau13278b42017-10-13 19:23:14 +02002740 * errors here are reported as connection errors since it's impossible to
2741 * recover from such errors after the compression context has been altered.
2742 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002743static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
Willy Tarreau13278b42017-10-13 19:23:14 +02002744{
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002745 struct buffer rxbuf = BUF_NULL;
Willy Tarreau4790f7c2019-01-24 11:33:02 +01002746 unsigned long long body_len = 0;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002747 uint32_t flags = 0;
Willy Tarreau13278b42017-10-13 19:23:14 +02002748 int error;
2749
Willy Tarreau7838a792019-08-12 18:42:03 +02002750 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
2751
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002752 if (!b_size(&h2c->dbuf)) {
2753 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002754 goto out; // empty buffer
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002755 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002756
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002757 if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
2758 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7838a792019-08-12 18:42:03 +02002759 goto out; // incomplete frame
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02002760 }
Willy Tarreau13278b42017-10-13 19:23:14 +02002761
2762 /* now either the frame is complete or the buffer is complete */
2763 if (h2s->st != H2_SS_IDLE) {
Willy Tarreau88d138e2019-01-02 19:38:14 +01002764 /* The stream exists/existed, this must be a trailers frame */
2765 if (h2s->st != H2_SS_CLOSED) {
Amaury Denoyelle74162742020-12-11 17:53:05 +01002766 error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002767 /* unrecoverable error ? */
Willy Tarreau8c05c692023-01-19 23:58:11 +01002768 if (h2c->st0 >= H2_CS_ERROR) {
2769 TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreau3f9e0402023-01-19 23:22:03 +01002770 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002771 goto out;
Willy Tarreau8c05c692023-01-19 23:58:11 +01002772 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002773
Christopher Faulet0de3f552021-10-08 08:56:00 +02002774 if (error == 0) {
2775 /* Demux not blocked because of the stream, it is an incomplete frame */
2776 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2777 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002778 goto out; // missing data
Christopher Faulet0de3f552021-10-08 08:56:00 +02002779 }
Willy Tarreauaab1a602019-05-06 11:12:18 +02002780
2781 if (error < 0) {
2782 /* Failed to decode this frame (e.g. too large request)
2783 * but the HPACK decompressor is still synchronized.
2784 */
Willy Tarreau3f9e0402023-01-19 23:22:03 +01002785 sess_log(h2c->conn->owner);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002786 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
Willy Tarreau8c05c692023-01-19 23:58:11 +01002787 TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreauaab1a602019-05-06 11:12:18 +02002788 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau88d138e2019-01-02 19:38:14 +01002789 goto out;
Willy Tarreauaab1a602019-05-06 11:12:18 +02002790 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01002791 goto done;
2792 }
Willy Tarreau1f035502019-01-30 11:44:07 +01002793 /* the connection was already killed by an RST, let's consume
2794 * the data and send another RST.
2795 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01002796 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau3f9e0402023-01-19 23:22:03 +01002797 sess_log(h2c->conn->owner);
Willy Tarreau1f035502019-01-30 11:44:07 +01002798 h2s = (struct h2s*)h2_error_stream;
2799 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002800 }
2801 else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
2802 /* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
2803 error = H2_ERR_PROTOCOL_ERROR;
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01002804 TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau4781b152021-04-06 13:53:36 +02002805 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau22de8d32018-09-05 19:55:58 +02002806 sess_log(h2c->conn->owner);
Willy Tarreau62bc7202023-10-20 18:38:34 +02002807 session_inc_http_req_ctr(h2c->conn->owner);
2808 session_inc_http_err_ctr(h2c->conn->owner);
Willy Tarreau13278b42017-10-13 19:23:14 +02002809 goto conn_err;
2810 }
Willy Tarreau415b1ee2019-01-02 13:59:43 +01002811 else if (h2c->flags & H2_CF_DEM_TOOMANY)
2812 goto out; // IDLE but too many cs still present
Willy Tarreau13278b42017-10-13 19:23:14 +02002813
Amaury Denoyelle74162742020-12-11 17:53:05 +01002814 error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002815
Willy Tarreau25919232019-01-03 14:48:18 +01002816 /* unrecoverable error ? */
Willy Tarreau8c05c692023-01-19 23:58:11 +01002817 if (h2c->st0 >= H2_CS_ERROR) {
2818 TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreau3f9e0402023-01-19 23:22:03 +01002819 sess_log(h2c->conn->owner);
Willy Tarreau62bc7202023-10-20 18:38:34 +02002820 session_inc_http_req_ctr(h2c->conn->owner);
2821 session_inc_http_err_ctr(h2c->conn->owner);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002822 goto out;
Willy Tarreau8c05c692023-01-19 23:58:11 +01002823 }
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002824
Willy Tarreau25919232019-01-03 14:48:18 +01002825 if (error <= 0) {
Christopher Faulet0de3f552021-10-08 08:56:00 +02002826 if (error == 0) {
2827 /* Demux not blocked because of the stream, it is an incomplete frame */
2828 if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
2829 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau25919232019-01-03 14:48:18 +01002830 goto out; // missing data
Christopher Faulet0de3f552021-10-08 08:56:00 +02002831 }
Willy Tarreau25919232019-01-03 14:48:18 +01002832
2833 /* Failed to decode this stream (e.g. too large request)
2834 * but the HPACK decompressor is still synchronized.
2835 */
Willy Tarreau3f9e0402023-01-19 23:22:03 +01002836 sess_log(h2c->conn->owner);
Willy Tarreau62bc7202023-10-20 18:38:34 +02002837 session_inc_http_req_ctr(h2c->conn->owner);
2838 session_inc_http_err_ctr(h2c->conn->owner);
2839
Willy Tarreau25919232019-01-03 14:48:18 +01002840 h2s = (struct h2s*)h2_error_stream;
Willy Tarreau19599992023-10-20 17:51:12 +02002841
2842 /* This stream ID is now opened anyway until we send the RST on
2843 * it, it must not be reused.
2844 */
2845 if (h2c->dsi > h2c->max_id)
2846 h2c->max_id = h2c->dsi;
2847
Willy Tarreau25919232019-01-03 14:48:18 +01002848 goto send_rst;
2849 }
2850
Willy Tarreau3c10d512021-06-17 08:29:14 +02002851 TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);
2852
Willy Tarreauca45e442022-05-12 09:08:51 +02002853 /* Now we cannot roll back and we won't come back here anymore for this
2854 * stream, this stream ID is open.
2855 */
2856 if (h2c->dsi > h2c->max_id)
2857 h2c->max_id = h2c->dsi;
2858
Willy Tarreau22de8d32018-09-05 19:55:58 +02002859 /* Note: we don't emit any other logs below because ff we return
Willy Tarreaua8e49542018-10-03 18:53:55 +02002860 * positively from h2c_frt_stream_new(), the stream will report the error,
2861 * and if we return in error, h2c_frt_stream_new() will emit the error.
Christopher Faulet7d013e72020-12-15 16:56:50 +01002862 *
2863 * Xfer the rxbuf to the stream. On success, the new stream owns the
2864 * rxbuf. On error, it is released here.
Willy Tarreau22de8d32018-09-05 19:55:58 +02002865 */
Amaury Denoyelleee7fcd52021-10-18 14:45:49 +02002866 h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
Willy Tarreau13278b42017-10-13 19:23:14 +02002867 if (!h2s) {
Willy Tarreau96a10c22018-12-23 18:30:44 +01002868 h2s = (struct h2s*)h2_refused_stream;
2869 goto send_rst;
Willy Tarreau13278b42017-10-13 19:23:14 +02002870 }
2871
2872 h2s->st = H2_SS_OPEN;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002873 h2s->flags |= flags;
Willy Tarreau1915ca22019-01-24 11:49:37 +01002874 h2s->body_len = body_len;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002875
Willy Tarreau88d138e2019-01-02 19:38:14 +01002876 done:
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002877 if (h2c->dff & H2_F_HEADERS_END_STREAM)
Willy Tarreau13278b42017-10-13 19:23:14 +02002878 h2s->flags |= H2_SF_ES_RCVD;
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002879
2880 if (h2s->flags & H2_SF_ES_RCVD) {
Willy Tarreaufc10f592019-01-30 19:28:32 +01002881 if (h2s->st == H2_SS_OPEN)
2882 h2s->st = H2_SS_HREM;
2883 else
2884 h2s_close(h2s);
Willy Tarreau13278b42017-10-13 19:23:14 +02002885 }
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002886 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002887
2888 conn_err:
2889 h2c_error(h2c, error);
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002890 goto out;
Willy Tarreau13278b42017-10-13 19:23:14 +02002891
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01002892 out:
2893 h2_release_buf(h2c, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002894 TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01002895 return NULL;
Willy Tarreau96a10c22018-12-23 18:30:44 +01002896
2897 send_rst:
2898 /* make the demux send an RST for the current stream. We may only
2899 * do this if we're certain that the HEADERS frame was properly
2900 * decompressed so that the HPACK decoder is still kept up to date.
2901 */
2902 h2_release_buf(h2c, &rxbuf);
2903 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02002904
Willy Tarreau022e5e52020-09-10 09:33:15 +02002905 TRACE_USER("rejected H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
Willy Tarreau7838a792019-08-12 18:42:03 +02002906 TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreau96a10c22018-12-23 18:30:44 +01002907 return h2s;
Willy Tarreau13278b42017-10-13 19:23:14 +02002908}
2909
/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
 * errors here are reported as connection errors since it's impossible to
 * recover from such errors after the compression context has been altered.
 */
static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
{
	/* local storage used only when the stream is already closed: the
	 * frame must still be decoded to keep the HPACK context in sync,
	 * but its output is discarded.
	 */
	struct buffer rxbuf = BUF_NULL;
	unsigned long long body_len = 0;
	uint32_t flags = 0;
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);

	if (!b_size(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* wait for the whole frame unless the demux buffer cannot hold it */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	if (h2s->st != H2_SS_CLOSED) {
		error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
	}
	else {
		/* the connection was already killed by an RST, let's consume
		 * the data and send another RST.
		 */
		error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len, NULL);
		h2s = (struct h2s*)h2_error_stream;
		h2c->st0 = H2_CS_FRAME_E;
		goto send_rst;
	}

	/* unrecoverable error ? */
	if (h2c->st0 >= H2_CS_ERROR) {
		TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		goto fail;
	}

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#5.1: a response may only arrive on a stream in the
		 * open or half-closed(local) state; anything else is a stream
		 * error of type STREAM_CLOSED.
		 */
		TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (error <= 0) {
		if (error == 0) {
			/* Demux not blocked because of the stream, it is an incomplete frame */
			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
				h2c->flags |= H2_CF_DEM_SHORT_READ;
			goto fail; // missing data
		}

		/* stream error : send RST_STREAM */
		TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
		h2c->st0 = H2_CS_FRAME_E;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto fail;
	}

	if (h2c->dff & H2_F_HEADERS_END_STREAM)
		h2s->flags |= H2_SF_ES_RCVD;

	/* an upper-layer error takes precedence over the normal ES-driven
	 * state transitions (open -> half-closed(remote), hloc -> closed).
	 */
	if (h2s->cs && h2s->cs->flags & CS_FL_ERROR && h2s->st < H2_SS_ERROR)
		h2s->st = H2_SS_ERROR;
	else if (h2s->flags & H2_SF_ES_RCVD) {
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else if (h2s->st == H2_SS_HLOC)
			h2s_close(h2s);
	}

	/* Unblock busy server h2s waiting for the response headers to validate
	 * the tunnel establishment or the end of the response of an aborted
	 * tunnel
	 */
	if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return NULL;

 send_rst:
	/* make the demux send an RST for the current stream. We may only
	 * do this if we're certain that the HEADERS frame was properly
	 * decompressed so that the HPACK decoder is still kept up to date.
	 */
	h2_release_buf(h2c, &rxbuf);
	h2c->st0 = H2_CS_FRAME_E;

	TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
	return h2s;
}
3019
/* processes a DATA frame. Returns > 0 on success or zero on missing data.
 * It may return an error in h2c or h2s. Described in RFC7540#6.1.
 */
static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
{
	int error;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* note that empty DATA frames are perfectly valid and sometimes used
	 * to signal an end of stream (with the ES flag).
	 */

	if (!b_size(&h2c->dbuf) && h2c->dfl) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // empty buffer
	}

	/* wait for the whole frame unless the demux buffer cannot hold it */
	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
		h2c->flags |= H2_CF_DEM_SHORT_READ;
		goto fail; // incomplete frame
	}

	/* now either the frame is complete or the buffer is complete */

	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
		/* RFC7540#6.1: DATA is only valid on open or
		 * half-closed(local) streams.
		 */
		error = H2_ERR_STREAM_CLOSED;
		goto strm_err;
	}

	if (!(h2s->flags & H2_SF_HEADERS_RCVD)) {
		/* RFC9113#8.1: The header section must be received before the message content */
		TRACE_ERROR("Unexpected DATA frame before the message headers", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	/* dfl-dpl is the actual payload size once padding is excluded */
	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
		/* RFC7540#8.1.2: payload exceeds the announced content-length */
		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_PROTOCOL_ERROR;
		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
		goto strm_err;
	}
	if (!(h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
		/* a tunnel attempt was aborted but the client still try to send some raw data.
		 * Thus the stream is closed with the CANCEL error. Here we take care it is not
		 * an empty DATA Frame with the ES flag. The error is only handled if ES was
		 * already sent to the client because depending on the scheduling, these data may
		 * have been sent before the server response but not handle here.
		 */
		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (!h2_frt_transfer_data(h2s))
		goto fail;

	/* call the upper layers to process the frame, then let the upper layer
	 * notify the stream about any change.
	 */
	if (!h2s->cs) {
		/* The upper layer has already closed, this may happen on
		 * 4xx/redirects during POST, or when receiving a response
		 * from an H2 server after the client has aborted.
		 */
		error = H2_ERR_CANCEL;
		goto strm_err;
	}

	if (h2c->st0 >= H2_CS_ERROR)
		goto fail;

	if (h2s->st >= H2_SS_ERROR) {
		/* stream error : send RST_STREAM */
		h2c->st0 = H2_CS_FRAME_E;
	}

	/* check for completion : the callee will change this to FRAME_A or
	 * FRAME_H once done.
	 */
	if (h2c->st0 == H2_CS_FRAME_P)
		goto fail;

	/* last frame */
	if (h2c->dff & H2_F_DATA_END_STREAM) {
		h2s->flags |= H2_SF_ES_RCVD;
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HREM;
		else
			h2s_close(h2s);

		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
			/* RFC7540#8.1.2: ES seen while content-length bytes
			 * are still expected.
			 */
			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			error = H2_ERR_PROTOCOL_ERROR;
			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
			goto strm_err;
		}
	}

	/* Unblock busy server h2s waiting for the end of the response for an
	 * aborted tunnel
	 */
	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
		h2s->flags &= ~H2_SF_BLK_MBUSY;
	}

	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;

 strm_err:
	h2s_error(h2s, error);
	h2c->st0 = H2_CS_FRAME_E;
 fail:
	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
3144
/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
 * valid for the current stream state. This is needed only after parsing the
 * frame header but in practice it can be performed at any time during
 * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
 * or 0 in case of error, in which case either h2s or h2c will carry an error.
 */
static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);

	if (h2s->st == H2_SS_IDLE &&
	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
		 * this state MUST be treated as a connection error
		 */
		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
			/* only log if no other stream can report the error */
			sess_log(h2c->conn->owner);
		}
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
		/* only PUSH_PROMISE would be permitted here */
		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
		 * this state MUST be treated as a stream error.
		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
		 * PUSH_PROMISE/CONTINUATION cause connection errors.
		 */
		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		else {
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
		}
		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
		return 0;
	}

	/* Below the management of frames received in closed state is a
	 * bit hackish because the spec makes strong differences between
	 * streams closed by receiving RST, sending RST, and seeing ES
	 * in both directions. In addition to this, the creation of a
	 * new stream reusing the identifier of a closed one will be
	 * detected here. Given that we cannot keep track of all closed
	 * streams forever, we consider that unknown closed streams were
	 * closed on RST received, which allows us to respond with an
	 * RST without breaking the connection (eg: to abort a transfer).
	 * Some frames have to be silently ignored as well.
	 */
	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
			/* #5.1.1: The identifier of a newly
			 * established stream MUST be numerically
			 * greater than all streams that the initiating
			 * endpoint has opened or reserved. This
			 * governs streams that are opened using a
			 * HEADERS frame and streams that are reserved
			 * using PUSH_PROMISE. An endpoint that
			 * receives an unexpected stream identifier
			 * MUST respond with a connection error.
			 */
			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		if (h2s->flags & H2_SF_RST_RCVD &&
		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
			/* RFC7540#5.1:closed: an endpoint that
			 * receives any frame other than PRIORITY after
			 * receiving a RST_STREAM MUST treat that as a
			 * stream error of type STREAM_CLOSED.
			 *
			 * Note that old streams fall into this category
			 * and will lead to an RST being sent.
			 *
			 * However, we cannot generalize this to all frame types. Those
			 * carrying compression state must still be processed before
			 * being dropped or we'll desynchronize the decoder. This can
			 * happen with request trailers received after sending an
			 * RST_STREAM, or with header/trailers responses received after
			 * sending RST_STREAM (aborted stream).
			 *
			 * In addition, since our CLOSED streams always carry the
			 * RST_RCVD bit, we don't want to accidentally catch valid
			 * frames for a closed stream, i.e. RST/PRIO/WU.
			 */
			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
			h2c->st0 = H2_CS_FRAME_E;
			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
			return 0;
		}

		/* RFC7540#5.1:closed: if this state is reached as a
		 * result of sending a RST_STREAM frame, the peer that
		 * receives the RST_STREAM might have already sent
		 * frames on the stream that cannot be withdrawn. An
		 * endpoint MUST ignore frames that it receives on
		 * closed streams after it has sent a RST_STREAM
		 * frame. An endpoint MAY choose to limit the period
		 * over which it ignores frames and treat frames that
		 * arrive after this time as being in error.
		 */
		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
			/* RFC7540#5.1:closed: any frame other than
			 * PRIO/WU/RST in this state MUST be treated as
			 * a connection error
			 */
			if (h2c->dft != H2_FT_RST_STREAM &&
			    h2c->dft != H2_FT_PRIORITY &&
			    h2c->dft != H2_FT_WINDOW_UPDATE) {
				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
				return 0;
			}
		}
	}
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
	return 1;
}
3281
Willy Tarreaubc933932017-10-09 16:21:43 +02003282/* process Rx frames to be demultiplexed */
3283static void h2_process_demux(struct h2c *h2c)
3284{
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003285 struct h2s *h2s = NULL, *tmp_h2s;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003286 struct h2_fh hdr;
3287 unsigned int padlen = 0;
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003288 int32_t old_iw = h2c->miw;
Willy Tarreauf3ee0692017-10-17 08:18:25 +02003289
Willy Tarreau7838a792019-08-12 18:42:03 +02003290 TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
3291
Willy Tarreau081d4722017-05-16 21:51:05 +02003292 if (h2c->st0 >= H2_CS_ERROR)
Willy Tarreau7838a792019-08-12 18:42:03 +02003293 goto out;
Willy Tarreau52eed752017-09-22 15:05:09 +02003294
3295 if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
3296 if (h2c->st0 == H2_CS_PREFACE) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003297 TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau01b44822018-10-03 14:26:37 +02003298 if (h2c->flags & H2_CF_IS_BACK)
Willy Tarreau7838a792019-08-12 18:42:03 +02003299 goto out;
3300
Willy Tarreau52eed752017-09-22 15:05:09 +02003301 if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
3302 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau22de8d32018-09-05 19:55:58 +02003303 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003304 TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003305 h2c->st0 = H2_CS_ERROR2;
Willy Tarreauc23d6d12021-06-17 08:08:48 +02003306 if (b_data(&h2c->dbuf) ||
Christopher Faulet79b347d2021-07-26 10:18:35 +02003307 !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
Willy Tarreauc23d6d12021-06-17 08:08:48 +02003308 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003309 }
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003310 goto done;
Willy Tarreau52eed752017-09-22 15:05:09 +02003311 }
Willy Tarreau7838a792019-08-12 18:42:03 +02003312 TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003313
3314 h2c->max_id = 0;
3315 h2c->st0 = H2_CS_SETTINGS1;
Willy Tarreau7838a792019-08-12 18:42:03 +02003316 TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
Willy Tarreau52eed752017-09-22 15:05:09 +02003317 }
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003318
3319 if (h2c->st0 == H2_CS_SETTINGS1) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003320 /* ensure that what is pending is a valid SETTINGS frame
3321 * without an ACK.
3322 */
Willy Tarreau7838a792019-08-12 18:42:03 +02003323 TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003324 if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003325 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003326 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau22de8d32018-09-05 19:55:58 +02003327 if (h2c->st0 == H2_CS_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003328 TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003329 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003330 if (!(h2c->flags & H2_CF_IS_BACK))
3331 sess_log(h2c->conn->owner);
Willy Tarreau22de8d32018-09-05 19:55:58 +02003332 }
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003333 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003334 }
3335
3336 if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
3337 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003338 TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003339 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
3340 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003341 if (!(h2c->flags & H2_CF_IS_BACK))
3342 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003343 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003344 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003345 }
3346
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003347 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003348 /* RFC7540#3.5: a GOAWAY frame MAY be omitted */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003349 TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003350 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
3351 h2c->st0 = H2_CS_ERROR2;
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003352 if (!(h2c->flags & H2_CF_IS_BACK))
3353 sess_log(h2c->conn->owner);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003354 goto done;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003355 }
3356
Willy Tarreau3bf69182018-12-21 15:34:50 +01003357 /* that's OK, switch to FRAME_P to process it. This is
3358 * a SETTINGS frame whose header has already been
3359 * deleted above.
3360 */
Willy Tarreau54f46e52019-01-30 15:11:03 +01003361 padlen = 0;
Willy Tarreau4781b152021-04-06 13:53:36 +02003362 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003363 goto new_frame;
Willy Tarreau4c3690b2017-10-10 15:16:55 +02003364 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003365 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003366
3367 /* process as many incoming frames as possible below */
Willy Tarreau7838a792019-08-12 18:42:03 +02003368 while (1) {
Willy Tarreau7e98c052017-10-10 15:56:59 +02003369 int ret = 0;
3370
Willy Tarreau7838a792019-08-12 18:42:03 +02003371 if (!b_data(&h2c->dbuf)) {
3372 TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003373 h2c->flags |= H2_CF_DEM_SHORT_READ;
3374 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003375 }
3376
3377 if (h2c->st0 >= H2_CS_ERROR) {
3378 TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003379 break;
Willy Tarreau7838a792019-08-12 18:42:03 +02003380 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003381
3382 if (h2c->st0 == H2_CS_FRAME_H) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003383 TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003384 if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
3385 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003386 break;
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003387 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003388
Willy Tarreau3f0e1ec2018-04-17 10:28:27 +02003389 if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003390 TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003391 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003392 if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau22de8d32018-09-05 19:55:58 +02003393 /* only log if no other stream can report the error */
3394 sess_log(h2c->conn->owner);
3395 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003396 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003397 break;
3398 }
3399
Willy Tarreau1d7138e2022-06-08 16:32:22 +02003400 if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
3401 /* changed stream with a pending WU, need to
3402 * send it now.
3403 */
3404 TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
3405 ret = h2c_send_strm_wu(h2c);
3406 if (ret <= 0)
3407 break;
3408 }
3409
Christopher Fauletdd2a5622019-06-18 12:22:38 +02003410 padlen = 0;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003411 if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
3412 /* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
3413 * we read the pad length and drop it from the remaining
3414 * payload (one byte + the 9 remaining ones = 10 total
3415 * removed), so we have a frame payload starting after the
3416 * pad len. Flow controlled frames (DATA) also count the
3417 * padlen in the flow control, so it must be adjusted.
3418 */
3419 if (hdr.len < 1) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003420 TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003421 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003422 if (!(h2c->flags & H2_CF_IS_BACK))
3423 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003424 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003425 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003426 }
3427 hdr.len--;
3428
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003429 if (b_data(&h2c->dbuf) < 10) {
3430 h2c->flags |= H2_CF_DEM_SHORT_READ;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003431 break; // missing padlen
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003432 }
Willy Tarreau3bf69182018-12-21 15:34:50 +01003433
3434 padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
3435
3436 if (padlen > hdr.len) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003437 TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau3bf69182018-12-21 15:34:50 +01003438 /* RFC7540#6.1 : pad length = length of
3439 * frame payload or greater => error.
3440 */
3441 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003442 if (!(h2c->flags & H2_CF_IS_BACK))
3443 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003444 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003445 goto done;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003446 }
3447
3448 if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
3449 h2c->rcvd_c++;
3450 h2c->rcvd_s++;
3451 }
3452 b_del(&h2c->dbuf, 1);
3453 }
3454 h2_skip_frame_hdr(&h2c->dbuf);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003455
3456 new_frame:
Willy Tarreau7e98c052017-10-10 15:56:59 +02003457 h2c->dfl = hdr.len;
3458 h2c->dsi = hdr.sid;
3459 h2c->dft = hdr.ft;
3460 h2c->dff = hdr.ff;
Willy Tarreau3bf69182018-12-21 15:34:50 +01003461 h2c->dpl = padlen;
Willy Tarreau6c3eeca2022-08-18 11:19:57 +02003462 h2c->flags |= H2_CF_DEM_IN_PROGRESS;
Willy Tarreau73db4342019-09-25 07:28:44 +02003463 TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003464 h2c->st0 = H2_CS_FRAME_P;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003465
3466 /* check for minimum basic frame format validity */
3467 ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
3468 if (ret != H2_ERR_NO_ERROR) {
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003469 TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau54f46e52019-01-30 15:11:03 +01003470 h2c_error(h2c, ret);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003471 if (!(h2c->flags & H2_CF_IS_BACK))
3472 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003473 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003474 goto done;
Willy Tarreau54f46e52019-01-30 15:11:03 +01003475 }
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01003476
3477 /* transition to HEADERS frame ends the keep-alive idle
3478 * timer and starts the http-request idle delay.
3479 */
3480 if (hdr.ft == H2_FT_HEADERS)
3481 h2c->idle_start = now_ms;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003482 }
3483
Willy Tarreau9fd5aa82019-08-06 15:21:45 +02003484 /* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
3485 * H2_CS_FRAME_P indicates an incomplete previous operation
3486 * (most often the first attempt) and requires some validity
3487 * checks for the frame and the current state. The two other
3488 * ones are set after completion (or abortion) and must skip
3489 * validity checks.
3490 */
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003491 tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
3492
Willy Tarreau567beb82018-12-18 16:52:44 +01003493 if (tmp_h2s != h2s && h2s && h2s->cs &&
3494 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003495 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003496 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003497 (h2s->flags & H2_SF_ES_RCVD) ||
3498 (h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003499 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003500 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003501 h2s->cs->flags |= CS_FL_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003502 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003503 }
3504 h2s = tmp_h2s;
Willy Tarreau7e98c052017-10-10 15:56:59 +02003505
Willy Tarreau63864812019-08-07 14:25:20 +02003506 if (h2c->st0 == H2_CS_FRAME_E ||
Willy Tarreau7838a792019-08-12 18:42:03 +02003507 (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
3508 TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003509 goto strm_err;
Willy Tarreau7838a792019-08-12 18:42:03 +02003510 }
Willy Tarreauc0da1962017-10-30 18:38:00 +01003511
Willy Tarreau7e98c052017-10-10 15:56:59 +02003512 switch (h2c->dft) {
Willy Tarreau3421aba2017-07-27 15:41:03 +02003513 case H2_FT_SETTINGS:
Willy Tarreau7838a792019-08-12 18:42:03 +02003514 if (h2c->st0 == H2_CS_FRAME_P) {
3515 TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003516 ret = h2c_handle_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003517 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003518 HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003519
Willy Tarreau7838a792019-08-12 18:42:03 +02003520 if (h2c->st0 == H2_CS_FRAME_A) {
3521 TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
Willy Tarreau3421aba2017-07-27 15:41:03 +02003522 ret = h2c_ack_settings(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003523 }
Willy Tarreau3421aba2017-07-27 15:41:03 +02003524 break;
3525
Willy Tarreaucf68c782017-10-10 17:11:41 +02003526 case H2_FT_PING:
Willy Tarreau7838a792019-08-12 18:42:03 +02003527 if (h2c->st0 == H2_CS_FRAME_P) {
3528 TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003529 ret = h2c_handle_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003530 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003531
Willy Tarreau7838a792019-08-12 18:42:03 +02003532 if (h2c->st0 == H2_CS_FRAME_A) {
3533 TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
Willy Tarreaucf68c782017-10-10 17:11:41 +02003534 ret = h2c_ack_ping(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003535 }
Willy Tarreaucf68c782017-10-10 17:11:41 +02003536 break;
3537
Willy Tarreau26f95952017-07-27 17:18:30 +02003538 case H2_FT_WINDOW_UPDATE:
Willy Tarreau7838a792019-08-12 18:42:03 +02003539 if (h2c->st0 == H2_CS_FRAME_P) {
3540 TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
Willy Tarreau26f95952017-07-27 17:18:30 +02003541 ret = h2c_handle_window_update(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003542 }
Willy Tarreau26f95952017-07-27 17:18:30 +02003543 break;
3544
Willy Tarreau61290ec2017-10-17 08:19:21 +02003545 case H2_FT_CONTINUATION:
Ilya Shipitsin46a030c2020-07-05 16:36:08 +05003546 /* RFC7540#6.10: CONTINUATION may only be preceded by
Willy Tarreauea18f862018-12-22 20:19:26 +01003547 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
3548 * frames' parsers consume all following CONTINUATION
3549 * frames so this one is out of sequence.
Willy Tarreau61290ec2017-10-17 08:19:21 +02003550 */
Willy Tarreau5dd36ac2020-12-01 10:24:29 +01003551 TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
Willy Tarreauea18f862018-12-22 20:19:26 +01003552 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau9364a5f2019-10-23 11:06:35 +02003553 if (!(h2c->flags & H2_CF_IS_BACK))
3554 sess_log(h2c->conn->owner);
Willy Tarreau4781b152021-04-06 13:53:36 +02003555 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003556 goto done;
Willy Tarreau61290ec2017-10-17 08:19:21 +02003557
Willy Tarreau13278b42017-10-13 19:23:14 +02003558 case H2_FT_HEADERS:
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003559 if (h2c->st0 == H2_CS_FRAME_P) {
Willy Tarreau7838a792019-08-12 18:42:03 +02003560 TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
Willy Tarreauc12f38f2018-10-08 14:53:27 +02003561 if (h2c->flags & H2_CF_IS_BACK)
3562 tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
3563 else
3564 tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003565 if (tmp_h2s) {
3566 h2s = tmp_h2s;
3567 ret = 1;
3568 }
3569 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003570 HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
Willy Tarreau13278b42017-10-13 19:23:14 +02003571 break;
3572
Willy Tarreau454f9052017-10-26 19:40:35 +02003573 case H2_FT_DATA:
Willy Tarreau7838a792019-08-12 18:42:03 +02003574 if (h2c->st0 == H2_CS_FRAME_P) {
3575 TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
Christopher Fauletfac0f8f2020-12-07 18:27:03 +01003576 ret = h2c_handle_data(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003577 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003578 HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
Willy Tarreau454f9052017-10-26 19:40:35 +02003579
Willy Tarreau7838a792019-08-12 18:42:03 +02003580 if (h2c->st0 == H2_CS_FRAME_A) {
Willy Tarreau1d7138e2022-06-08 16:32:22 +02003581 /* rcvd_s will suffice to trigger the sending of a WU */
3582 h2c->st0 = H2_CS_FRAME_H;
Willy Tarreau7838a792019-08-12 18:42:03 +02003583 }
Willy Tarreau454f9052017-10-26 19:40:35 +02003584 break;
Willy Tarreaucd234e92017-08-18 10:59:39 +02003585
Willy Tarreau92153fc2017-12-03 19:46:19 +01003586 case H2_FT_PRIORITY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003587 if (h2c->st0 == H2_CS_FRAME_P) {
3588 TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
Willy Tarreau92153fc2017-12-03 19:46:19 +01003589 ret = h2c_handle_priority(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003590 }
Willy Tarreau92153fc2017-12-03 19:46:19 +01003591 break;
3592
Willy Tarreaucd234e92017-08-18 10:59:39 +02003593 case H2_FT_RST_STREAM:
Willy Tarreau7838a792019-08-12 18:42:03 +02003594 if (h2c->st0 == H2_CS_FRAME_P) {
3595 TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003596 ret = h2c_handle_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003597 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003598 HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
Willy Tarreaucd234e92017-08-18 10:59:39 +02003599 break;
3600
Willy Tarreaue96b0922017-10-30 00:28:29 +01003601 case H2_FT_GOAWAY:
Willy Tarreau7838a792019-08-12 18:42:03 +02003602 if (h2c->st0 == H2_CS_FRAME_P) {
3603 TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003604 ret = h2c_handle_goaway(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003605 }
Willy Tarreau4781b152021-04-06 13:53:36 +02003606 HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
Willy Tarreaue96b0922017-10-30 00:28:29 +01003607 break;
3608
Willy Tarreau1c661982017-10-30 13:52:01 +01003609 /* implement all extra frame types here */
Willy Tarreau7e98c052017-10-10 15:56:59 +02003610 default:
Willy Tarreau7838a792019-08-12 18:42:03 +02003611 TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003612 /* drop frames that we ignore. They may be larger than
3613 * the buffer so we drain all of their contents until
3614 * we reach the end.
3615 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003616 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3617 b_del(&h2c->dbuf, ret);
Willy Tarreau7e98c052017-10-10 15:56:59 +02003618 h2c->dfl -= ret;
3619 ret = h2c->dfl == 0;
3620 }
3621
Willy Tarreauf182a9a2017-10-30 12:03:50 +01003622 strm_err:
Willy Tarreaua20a5192017-12-27 11:02:06 +01003623 /* We may have to send an RST if not done yet */
Willy Tarreau7838a792019-08-12 18:42:03 +02003624 if (h2s->st == H2_SS_ERROR) {
3625 TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003626 h2c->st0 = H2_CS_FRAME_E;
Willy Tarreau7838a792019-08-12 18:42:03 +02003627 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003628
Willy Tarreau7838a792019-08-12 18:42:03 +02003629 if (h2c->st0 == H2_CS_FRAME_E) {
3630 TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaua20a5192017-12-27 11:02:06 +01003631 ret = h2c_send_rst_stream(h2c, h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02003632 }
Willy Tarreau27a84c92017-10-17 08:10:17 +02003633
Willy Tarreau7e98c052017-10-10 15:56:59 +02003634 /* error or missing data condition met above ? */
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003635 if (ret <= 0)
Willy Tarreau7e98c052017-10-10 15:56:59 +02003636 break;
3637
3638 if (h2c->st0 != H2_CS_FRAME_H) {
Willy Tarreaubba7a4d2020-09-18 07:41:28 +02003639 if (h2c->dfl)
3640 TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
Christopher Faulet5112a602019-09-26 16:38:28 +02003641 ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
3642 b_del(&h2c->dbuf, ret);
3643 h2c->dfl -= ret;
3644 if (!h2c->dfl) {
Willy Tarreau6c3eeca2022-08-18 11:19:57 +02003645 h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
Christopher Faulet5112a602019-09-26 16:38:28 +02003646 TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
3647 h2c->st0 = H2_CS_FRAME_H;
Christopher Faulet5112a602019-09-26 16:38:28 +02003648 }
Willy Tarreau7e98c052017-10-10 15:56:59 +02003649 }
3650 }
Willy Tarreau52eed752017-09-22 15:05:09 +02003651
Willy Tarreau1d7138e2022-06-08 16:32:22 +02003652 if (h2c->rcvd_s > 0 &&
3653 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
3654 TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
3655 h2c_send_strm_wu(h2c);
3656 }
3657
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003658 if (h2c->rcvd_c > 0 &&
Willy Tarreau7838a792019-08-12 18:42:03 +02003659 !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))) {
3660 TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003661 h2c_send_conn_wu(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003662 }
Willy Tarreaucc0b8c32017-10-26 16:55:59 +02003663
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003664 done:
Christopher Fauleta9cc1e82021-07-26 12:06:53 +02003665 if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
3666 if (h2c->flags & H2_CF_RCVD_SHUT)
3667 h2c->flags |= H2_CF_END_REACHED;
3668 }
3669
Willy Tarreau567beb82018-12-18 16:52:44 +01003670 if (h2s && h2s->cs &&
3671 (b_data(&h2s->rxbuf) ||
Christopher Fauletaade4ed2020-10-08 15:38:41 +02003672 h2c_read0_pending(h2c) ||
Willy Tarreau76c83822019-06-15 09:55:50 +02003673 h2s->st == H2_SS_CLOSED ||
Christopher Fauletfa922f02019-05-07 10:55:17 +02003674 (h2s->flags & H2_SF_ES_RCVD) ||
3675 (h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003676 /* we may have to signal the upper layers */
Willy Tarreau7838a792019-08-12 18:42:03 +02003677 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003678 h2s->cs->flags |= CS_FL_RCV_MORE;
Willy Tarreau7e094452018-12-19 18:08:52 +01003679 h2s_notify_recv(h2s);
Willy Tarreau2a761dc2018-02-26 18:50:57 +01003680 }
Willy Tarreau1ed87b72018-11-25 08:45:16 +01003681
Willy Tarreau7838a792019-08-12 18:42:03 +02003682 if (old_iw != h2c->miw) {
3683 TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003684 h2c_unblock_sfctl(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02003685 }
Willy Tarreau1d4a0f82019-08-02 07:52:08 +02003686
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02003687 h2c_restart_reading(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02003688 out:
3689 TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
Willy Tarreau3d4631f2021-01-20 10:53:13 +01003690 return;
Willy Tarreaubc933932017-10-09 16:21:43 +02003691}
3692
Willy Tarreau989539b2020-01-10 17:01:29 +01003693/* resume each h2s eligible for sending in list head <head> */
3694static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
3695{
3696 struct h2s *h2s, *h2s_back;
3697
3698 TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3699
3700 list_for_each_entry_safe(h2s, h2s_back, head, list) {
3701 if (h2c->mws <= 0 ||
3702 h2c->flags & H2_CF_MUX_BLOCK_ANY ||
3703 h2c->st0 >= H2_CS_ERROR)
3704 break;
3705
3706 h2s->flags &= ~H2_SF_BLK_ANY;
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003707
Willy Tarreaud9464162020-01-10 18:25:07 +01003708 if (h2s->flags & H2_SF_NOTIFIED)
Willy Tarreau70c5b0e2020-01-10 18:20:15 +01003709 continue;
3710
Willy Tarreau5723f292020-01-10 15:16:57 +01003711 /* If the sender changed his mind and unsubscribed, let's just
3712 * remove the stream from the send_list.
Willy Tarreau989539b2020-01-10 17:01:29 +01003713 */
Willy Tarreauf96508a2020-01-10 11:12:48 +01003714 if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
3715 (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
Willy Tarreau989539b2020-01-10 17:01:29 +01003716 LIST_DEL_INIT(&h2s->list);
3717 continue;
3718 }
3719
Willy Tarreauf96508a2020-01-10 11:12:48 +01003720 if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
Willy Tarreau5723f292020-01-10 15:16:57 +01003721 h2s->flags |= H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01003722 tasklet_wakeup(h2s->subs->tasklet);
3723 h2s->subs->events &= ~SUB_RETRY_SEND;
3724 if (!h2s->subs->events)
3725 h2s->subs = NULL;
Willy Tarreau5723f292020-01-10 15:16:57 +01003726 }
3727 else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
3728 tasklet_wakeup(h2s->shut_tl);
3729 }
Willy Tarreau989539b2020-01-10 17:01:29 +01003730 }
3731
3732 TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
3733}
3734
Willy Tarreau54f4c192023-10-17 08:25:19 +02003735/* removes a stream from the list it may be in. If a stream has recently been
3736 * appended to the send_list, it might have been waiting on this one when
3737 * entering h2_snd_buf() and expecting it to complete before starting to send
3738 * in turn. For this reason we check (and clear) H2_CF_WAIT_INLIST to detect
3739 * this condition, and we try to resume sending streams if it happens. Note
3740 * that we don't need to do it for fctl_list as this list is relevant before
3741 * (only consulted after) a window update on the connection, and not because
3742 * of any competition with other streams.
3743 */
3744static inline void h2_remove_from_list(struct h2s *h2s)
3745{
3746 struct h2c *h2c = h2s->h2c;
3747
3748 if (!LIST_INLIST(&h2s->list))
3749 return;
3750
3751 LIST_DEL_INIT(&h2s->list);
3752 if (h2c->flags & H2_CF_WAIT_INLIST) {
3753 h2c->flags &= ~H2_CF_WAIT_INLIST;
3754 h2_resume_each_sending_h2s(h2c, &h2c->send_list);
3755 }
3756}
3757
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
 * the end (i.e. nothing blocked the connection), 0 when the connection is
 * blocked on a mux condition (e.g. GOAWAY could not be emitted yet).
 */
static int h2_process_mux(struct h2c *h2c)
{
	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);

	/* before FRAME_H the connection is still in its opening sequence:
	 * on the backend side the client preface must be sent first.
	 */
	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
				if (h2c->st0 == H2_CS_ERROR)
					h2c->st0 = H2_CS_ERROR2;
				goto fail;
			}
			h2c->st0 = H2_CS_SETTINGS1;
		}
		/* need to wait for the other side */
		if (h2c->st0 < H2_CS_FRAME_H)
			goto done;
	}

	/* start by sending possibly pending window updates (stream-level
	 * first, then connection-level), unless the mux buffers are full
	 * or unallocated.
	 */
	if (h2c->rcvd_s > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_strm_wu(h2c) < 0)
		goto fail;

	if (h2c->rcvd_c > 0 &&
	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
	    h2c_send_conn_wu(h2c) < 0)
		goto fail;

	/* First we always process the flow control list because the streams
	 * waiting there were already elected for immediate emission but were
	 * blocked just on this.
	 */
	h2c->flags &= ~H2_CF_WAIT_INLIST;
	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
	h2_resume_each_sending_h2s(h2c, &h2c->send_list);

 fail:
	/* on a pending fatal error, try to emit a last GOAWAY before
	 * switching to ERROR2 ("GOAWAY sent or failed hard").
	 */
	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
		if (h2c->st0 == H2_CS_ERROR) {
			if (h2c->max_id >= 0) {
				h2c_send_goaway_error(h2c, NULL);
				/* mux blocked: GOAWAY not sent yet, report blocked */
				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
					goto out0;
			}

			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
		}
	}
 done:
	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
	return 1;
 out0:
	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
	return 0;
}
3818
Willy Tarreau62f52692017-10-08 23:01:42 +02003819
/* Attempt to read data, and subscribe if none available.
 * The function returns 1 if data has been received, otherwise zero.
 * More precisely, 0 is only returned on a demux buffer allocation failure;
 * other exits return whether progress was made or an end/error condition
 * was met (see the individual return statements below).
 */
static int h2_recv(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);

	/* a receive attempt is already pending at the transport layer */
	if (h2c->wait_event.events & SUB_RETRY_RECV) {
		TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
		return (b_data(&h2c->dbuf));
	}

	if (!h2_recv_allowed(h2c)) {
		TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	/* make sure we have a demux buffer; flag the allocation failure so
	 * the demuxer knows why it is stalled.
	 */
	buf = h2_get_buf(h2c, &h2c->dbuf);
	if (!buf) {
		h2c->flags |= H2_CF_DEM_DALLOC;
		TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
		return 0;
	}

	/* a shutdown was already seen, nothing more will come */
	if (h2c->flags & H2_CF_RCVD_SHUT) {
		TRACE_DEVEL("leaving on rcvd_shut", H2_EV_H2C_RECV, h2c->conn);
		return 1;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the frame header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + 9;
		buf->head = sizeof(struct htx) - 9;
	}
	else
		max = b_room(buf);

	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && h2_recv_allowed(h2c)) {
		/* nothing received while room was available: subscribe for
		 * a wakeup once the transport has data again.
		 */
		TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
	} else if (ret) {
		/* progress was made, the previous short read is resolved */
		TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
		h2c->flags &= ~H2_CF_DEM_SHORT_READ;
	}

	/* latch a pending read0 so later checks don't need the transport */
	if (conn_xprt_read0_pending(h2c->conn)) {
		TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
		h2c->flags |= H2_CF_RCVD_SHUT;
	}

	/* still empty: release the buffer, report only error/read0 */
	if (!b_data(buf)) {
		h2_release_buf(h2c, &h2c->dbuf);
		TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
		return (conn->flags & CO_FL_ERROR || conn_xprt_read0_pending(conn));
	}

	/* full demux buffer: let the demuxer know no more can be read */
	if (b_data(buf) == buf->size) {
		h2c->flags |= H2_CF_DEM_DFULL;
		TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
	}

	TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
	return !!ret || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn);
}
3898
Willy Tarreau479998a2018-11-18 06:30:59 +01003899/* Try to send data if possible.
3900 * The function returns 1 if data have been sent, otherwise zero.
3901 */
Olivier Houchardd4dd22d2018-08-17 18:39:46 +02003902static int h2_send(struct h2c *h2c)
Willy Tarreau62f52692017-10-08 23:01:42 +02003903{
Olivier Houchard29fb89d2018-08-02 18:56:36 +02003904 struct connection *conn = h2c->conn;
Willy Tarreaubc933932017-10-09 16:21:43 +02003905 int done;
Olivier Houchardd4dd22d2018-08-17 18:39:46 +02003906 int sent = 0;
Willy Tarreaua2af5122017-10-09 11:56:46 +02003907
Willy Tarreau7838a792019-08-12 18:42:03 +02003908 TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);
Willy Tarreaua2af5122017-10-09 11:56:46 +02003909
Willy Tarreau7838a792019-08-12 18:42:03 +02003910 if (conn->flags & CO_FL_ERROR) {
3911 TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
3912 return 1;
3913 }
Olivier Houchard7505f942018-08-21 18:10:44 +02003914
Willy Tarreau911db9b2020-01-23 16:27:54 +01003915 if (conn->flags & CO_FL_WAIT_XPRT) {
Willy Tarreaua2af5122017-10-09 11:56:46 +02003916 /* a handshake was requested */
Olivier Houchardd4dd22d2018-08-17 18:39:46 +02003917 goto schedule;
Willy Tarreaua2af5122017-10-09 11:56:46 +02003918 }
3919
Willy Tarreaubc933932017-10-09 16:21:43 +02003920 /* This loop is quite simple : it tries to fill as much as it can from
3921 * pending streams into the existing buffer until it's reportedly full
3922 * or the end of send requests is reached. Then it tries to send this
3923 * buffer's contents out, marks it not full if at least one byte could
3924 * be sent, and tries again.
3925 *
3926 * The snd_buf() function normally takes a "flags" argument which may
3927 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
3928 * data immediately comes and CO_SFL_STREAMER to indicate that the
3929 * connection is streaming lots of data (used to increase TLS record
3930 * size at the expense of latency). The former can be sent any time
3931 * there's a buffer full flag, as it indicates at least one stream
3932 * attempted to send and failed so there are pending data. An
3933 * alternative would be to set it as long as there's an active stream
3934 * but that would be problematic for ACKs until we have an absolute
3935 * guarantee that all waiters have at least one byte to send. The
3936 * latter should possibly not be set for now.
3937 */
3938
3939 done = 0;
3940 while (!done) {
3941 unsigned int flags = 0;
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003942 unsigned int released = 0;
3943 struct buffer *buf;
Willy Tarreaubc933932017-10-09 16:21:43 +02003944
3945 /* fill as much as we can into the current buffer */
3946 while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
3947 done = h2_process_mux(h2c);
3948
Olivier Houchard2b094432019-01-29 18:28:36 +01003949 if (h2c->flags & H2_CF_MUX_MALLOC)
Willy Tarreau7f1265a2019-05-29 17:36:37 +02003950 done = 1; // we won't go further without extra buffers
Olivier Houchard2b094432019-01-29 18:28:36 +01003951
Christopher Faulet9a3d3fc2020-10-22 16:24:58 +02003952 if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
Willy Tarreaue90653b2021-10-21 17:30:06 +02003953 (h2c->flags & H2_CF_GOAWAY_FAILED))
Willy Tarreaubc933932017-10-09 16:21:43 +02003954 break;
3955
3956 if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MBUSY | H2_CF_DEM_MROOM))
3957 flags |= CO_SFL_MSG_MORE;
3958
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003959 for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
3960 if (b_data(buf)) {
3961 int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), flags);
Willy Tarreau7f1265a2019-05-29 17:36:37 +02003962 if (!ret) {
3963 done = 1;
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003964 break;
Willy Tarreau7f1265a2019-05-29 17:36:37 +02003965 }
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003966 sent = 1;
Willy Tarreau022e5e52020-09-10 09:33:15 +02003967 TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003968 b_del(buf, ret);
Willy Tarreau7f1265a2019-05-29 17:36:37 +02003969 if (b_data(buf)) {
3970 done = 1;
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003971 break;
Willy Tarreau7f1265a2019-05-29 17:36:37 +02003972 }
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003973 }
3974 b_free(buf);
3975 released++;
Willy Tarreau787db9a2018-06-14 18:31:46 +02003976 }
Willy Tarreaubc933932017-10-09 16:21:43 +02003977
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003978 if (released)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +01003979 offer_buffers(NULL, released);
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02003980
Willy Tarreaubc933932017-10-09 16:21:43 +02003981 /* wrote at least one byte, the buffer is not full anymore */
Christopher Faulet69fe5ce2019-10-24 10:31:01 +02003982 if (sent)
3983 h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
Willy Tarreaubc933932017-10-09 16:21:43 +02003984 }
3985
Willy Tarreaua2af5122017-10-09 11:56:46 +02003986 if (conn->flags & CO_FL_SOCK_WR_SH) {
3987 /* output closed, nothing to send, clear the buffer to release it */
Willy Tarreau51330962019-05-26 09:38:07 +02003988 b_reset(br_tail(h2c->mbuf));
Willy Tarreaua2af5122017-10-09 11:56:46 +02003989 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02003990 /* We're not full anymore, so we can wake any task that are waiting
3991 * for us.
3992 */
Willy Tarreau54f4c192023-10-17 08:25:19 +02003993 if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H) {
3994 h2c->flags &= ~H2_CF_WAIT_INLIST;
Willy Tarreau989539b2020-01-10 17:01:29 +01003995 h2_resume_each_sending_h2s(h2c, &h2c->send_list);
Willy Tarreau54f4c192023-10-17 08:25:19 +02003996 }
Olivier Houchardd360ac62019-03-22 17:37:16 +01003997
Olivier Houchard910b2bc2018-07-17 18:49:38 +02003998 /* We're done, no more to send */
Willy Tarreau7838a792019-08-12 18:42:03 +02003999 if (!br_data(h2c->mbuf)) {
4000 TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
Olivier Houchardd4dd22d2018-08-17 18:39:46 +02004001 return sent;
Willy Tarreau7838a792019-08-12 18:42:03 +02004002 }
Olivier Houchard910b2bc2018-07-17 18:49:38 +02004003schedule:
Willy Tarreau7838a792019-08-12 18:42:03 +02004004 if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
4005 TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
Olivier Houcharde179d0e2019-03-21 18:27:17 +01004006 conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
Willy Tarreau7838a792019-08-12 18:42:03 +02004007 }
Willy Tarreau7f1265a2019-05-29 17:36:37 +02004008
Willy Tarreau7838a792019-08-12 18:42:03 +02004009 TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
Olivier Houchardd4dd22d2018-08-17 18:39:46 +02004010 return sent;
Olivier Houchard29fb89d2018-08-02 18:56:36 +02004011}
4012
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02004013/* this is the tasklet referenced in h2c->wait_event.tasklet */
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004014struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
Olivier Houchard29fb89d2018-08-02 18:56:36 +02004015{
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004016 struct connection *conn;
4017 struct tasklet *tl = (struct tasklet *)t;
4018 int conn_in_list;
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004019 struct h2c *h2c = ctx;
Olivier Houchard7505f942018-08-21 18:10:44 +02004020 int ret = 0;
Olivier Houchard29fb89d2018-08-02 18:56:36 +02004021
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004022 if (state & TASK_F_USR1) {
4023 /* the tasklet was idling on an idle connection, it might have
4024 * been stolen, let's be careful!
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004025 */
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004026 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4027 if (t->context == NULL) {
4028 /* The connection has been taken over by another thread,
4029 * we're no longer responsible for it, so just free the
4030 * tasklet, and do nothing.
4031 */
4032 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4033 tasklet_free(tl);
Willy Tarreau74163142021-03-13 11:30:19 +01004034 t = NULL;
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004035 goto leave;
4036 }
4037 conn = h2c->conn;
4038 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004039
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004040 /* Remove the connection from the list, to be sure nobody attempts
4041 * to use it while we handle the I/O events
4042 */
Christopher Fauletc5fd15d2023-03-16 11:43:05 +01004043 conn_in_list = conn_get_idle_flag(conn);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004044 if (conn_in_list)
4045 conn_delete_from_tree(&conn->hash_node->node);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004046
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004047 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4048 } else {
4049 /* we're certain the connection was not in an idle list */
4050 conn = h2c->conn;
4051 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4052 conn_in_list = 0;
4053 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004054
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004055 if (!(h2c->wait_event.events & SUB_RETRY_SEND))
Olivier Houchard7505f942018-08-21 18:10:44 +02004056 ret = h2_send(h2c);
Willy Tarreau4f6516d2018-12-19 13:59:17 +01004057 if (!(h2c->wait_event.events & SUB_RETRY_RECV))
Olivier Houchard7505f942018-08-21 18:10:44 +02004058 ret |= h2_recv(h2c);
Willy Tarreaucef5c8e2018-12-18 10:29:54 +01004059 if (ret || b_data(&h2c->dbuf))
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004060 ret = h2_process(h2c);
4061
4062 /* If we were in an idle list, we want to add it back into it,
4063 * unless h2_process() returned -1, which mean it has destroyed
4064 * the connection (testing !ret is enough, if h2_process() wasn't
4065 * called then ret will be 0 anyway.
4066 */
Willy Tarreau74163142021-03-13 11:30:19 +01004067 if (ret < 0)
4068 t = NULL;
4069
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004070 if (!ret && conn_in_list) {
4071 struct server *srv = objt_server(conn->target);
4072
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004073 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004074 if (conn_in_list == CO_FL_SAFE_LIST)
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004075 ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004076 else
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004077 ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004078 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004079 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004080
Willy Tarreau38468772020-06-28 00:31:13 +02004081leave:
Willy Tarreau7838a792019-08-12 18:42:03 +02004082 TRACE_LEAVE(H2_EV_H2C_WAKE);
Willy Tarreau74163142021-03-13 11:30:19 +01004083 return t;
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004084}
Willy Tarreaua2af5122017-10-09 11:56:46 +02004085
Willy Tarreau62f52692017-10-08 23:01:42 +02004086/* callback called on any event by the connection handler.
4087 * It applies changes and returns zero, or < 0 if it wants immediate
4088 * destruction of the connection (which normally doesn not happen in h2).
4089 */
Olivier Houchard7505f942018-08-21 18:10:44 +02004090static int h2_process(struct h2c *h2c)
Willy Tarreau62f52692017-10-08 23:01:42 +02004091{
Olivier Houchard7505f942018-08-21 18:10:44 +02004092 struct connection *conn = h2c->conn;
Willy Tarreaua2af5122017-10-09 11:56:46 +02004093
Willy Tarreau7838a792019-08-12 18:42:03 +02004094 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4095
Willy Tarreauf0961222021-02-05 11:41:46 +01004096 if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
4097 (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
Willy Tarreaud13bf272017-12-14 10:34:52 +01004098 h2_process_demux(h2c);
4099
4100 if (h2c->st0 >= H2_CS_ERROR || conn->flags & CO_FL_ERROR)
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004101 b_reset(&h2c->dbuf);
Willy Tarreaud13bf272017-12-14 10:34:52 +01004102
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004103 if (!b_full(&h2c->dbuf))
Willy Tarreaud13bf272017-12-14 10:34:52 +01004104 h2c->flags &= ~H2_CF_DEM_DFULL;
4105 }
Olivier Houchard7505f942018-08-21 18:10:44 +02004106 h2_send(h2c);
Willy Tarreaud13bf272017-12-14 10:34:52 +01004107
Willy Tarreaub1e600c2020-10-13 18:09:15 +02004108 if (unlikely(h2c->proxy->disabled) && !(h2c->flags & H2_CF_IS_BACK)) {
Willy Tarreau8ec14062017-12-30 18:08:13 +01004109 /* frontend is stopping, reload likely in progress, let's try
4110 * to announce a graceful shutdown if not yet done. We don't
4111 * care if it fails, it will be tried again later.
4112 */
Willy Tarreau7838a792019-08-12 18:42:03 +02004113 TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
Willy Tarreau8ec14062017-12-30 18:08:13 +01004114 if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
4115 if (h2c->last_sid < 0)
4116 h2c->last_sid = (1U << 31) - 1;
4117 h2c_send_goaway_error(h2c, NULL);
4118 }
4119 }
4120
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004121 /*
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004122 * If we received early data, and the handshake is done, wake
4123 * any stream that was waiting for it.
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004124 */
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004125 if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
Willy Tarreau911db9b2020-01-23 16:27:54 +01004126 (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004127 struct eb32_node *node;
4128 struct h2s *h2s;
4129
4130 h2c->flags |= H2_CF_WAIT_FOR_HS;
4131 node = eb32_lookup_ge(&h2c->streams_by_id, 1);
4132
4133 while (node) {
4134 h2s = container_of(node, struct h2s, by_id);
Willy Tarreaufde287c2018-12-19 18:33:16 +01004135 if (h2s->cs && h2s->cs->flags & CS_FL_WAIT_FOR_HS)
Willy Tarreau7e094452018-12-19 18:08:52 +01004136 h2s_notify_recv(h2s);
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004137 node = eb32_next(node);
4138 }
Olivier Houchard7fc96d52017-11-23 18:25:47 +01004139 }
Olivier Houchard6fa63d92017-11-27 18:41:32 +01004140
Christopher Fauletaade4ed2020-10-08 15:38:41 +02004141 if (conn->flags & CO_FL_ERROR || h2c_read0_pending(h2c) ||
Willy Tarreau29a98242017-10-31 06:59:15 +01004142 h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
4143 (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
4144 h2c->max_id >= h2c->last_sid)) {
Willy Tarreau23482912019-05-07 15:23:14 +02004145 h2_wake_some_streams(h2c, 0);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004146
4147 if (eb_is_empty(&h2c->streams_by_id)) {
4148 /* no more stream, kill the connection now */
Christopher Faulet73c12072019-04-08 11:23:22 +02004149 h2_release(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004150 TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004151 return -1;
4152 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004153
4154 /* connections in error must be removed from the idle lists */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004155 if (conn->flags & CO_FL_LIST_MASK) {
4156 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004157 conn_delete_from_tree(&conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004158 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4159 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004160 }
4161 else if (h2c->st0 == H2_CS_ERROR) {
4162 /* connections in error must be removed from the idle lists */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004163 if (conn->flags & CO_FL_LIST_MASK) {
4164 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004165 conn_delete_from_tree(&conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004166 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4167 }
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004168 }
4169
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004170 if (!b_data(&h2c->dbuf))
Willy Tarreau44e973f2018-03-01 17:49:30 +01004171 h2_release_buf(h2c, &h2c->dbuf);
Willy Tarreaufbe3b4f2017-10-09 15:14:19 +02004172
Olivier Houchard53216e72018-10-10 15:46:36 +02004173 if ((conn->flags & CO_FL_SOCK_WR_SH) ||
4174 h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
4175 (h2c->st0 != H2_CS_ERROR &&
Willy Tarreau662fafc2019-05-26 09:43:07 +02004176 !br_data(h2c->mbuf) &&
Olivier Houchard53216e72018-10-10 15:46:36 +02004177 (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
4178 ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
Willy Tarreau2e3c0002019-05-26 09:45:23 +02004179 h2_release_mbuf(h2c);
Willy Tarreaua2af5122017-10-09 11:56:46 +02004180
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01004181 h2c_update_timeout(h2c);
Olivier Houchard7505f942018-08-21 18:10:44 +02004182 h2_send(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004183 TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
Willy Tarreau62f52692017-10-08 23:01:42 +02004184 return 0;
4185}
4186
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004187/* wake-up function called by the connection layer (mux_ops.wake) */
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004188static int h2_wake(struct connection *conn)
4189{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004190 struct h2c *h2c = conn->ctx;
Willy Tarreau7838a792019-08-12 18:42:03 +02004191 int ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004192
Willy Tarreau7838a792019-08-12 18:42:03 +02004193 TRACE_ENTER(H2_EV_H2C_WAKE, conn);
4194 ret = h2_process(h2c);
Willy Tarreau508f9892020-02-11 04:38:56 +01004195 if (ret >= 0)
4196 h2_wake_some_streams(h2c, 0);
Willy Tarreau7838a792019-08-12 18:42:03 +02004197 TRACE_LEAVE(H2_EV_H2C_WAKE);
4198 return ret;
Olivier Houchard21df6cc2018-09-14 23:21:44 +02004199}
4200
Willy Tarreauea392822017-10-31 10:02:25 +01004201/* Connection timeout management. The principle is that if there's no receipt
4202 * nor sending for a certain amount of time, the connection is closed. If the
4203 * MUX buffer still has lying data or is not allocatable, the connection is
4204 * immediately killed. If it's allocatable and empty, we attempt to send a
4205 * GOAWAY frame.
4206 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01004207struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
Willy Tarreauea392822017-10-31 10:02:25 +01004208{
Olivier Houchard9f6af332018-05-25 14:04:04 +02004209 struct h2c *h2c = context;
Willy Tarreauea392822017-10-31 10:02:25 +01004210 int expired = tick_is_expired(t->expire, now_ms);
4211
Willy Tarreau7838a792019-08-12 18:42:03 +02004212 TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);
4213
Willy Tarreaubd42e922020-06-30 11:19:23 +02004214 if (h2c) {
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004215 /* Make sure nobody stole the connection from us */
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004216 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004217
4218 /* Somebody already stole the connection from us, so we should not
4219 * free it, we just have to free the task.
4220 */
4221 if (!t->context) {
4222 h2c = NULL;
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004223 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004224 goto do_leave;
4225 }
4226
4227
Willy Tarreaubd42e922020-06-30 11:19:23 +02004228 if (!expired) {
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004229 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004230 TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
4231 return t;
4232 }
Willy Tarreauea392822017-10-31 10:02:25 +01004233
Willy Tarreaubd42e922020-06-30 11:19:23 +02004234 if (!h2c_may_expire(h2c)) {
4235 /* we do still have streams but all of them are idle, waiting
4236 * for the data layer, so we must not enforce the timeout here.
4237 */
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004238 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004239 t->expire = TICK_ETERNITY;
4240 return t;
4241 }
Willy Tarreauc2ea47f2019-10-01 10:12:00 +02004242
Willy Tarreaubd42e922020-06-30 11:19:23 +02004243 /* We're about to destroy the connection, so make sure nobody attempts
4244 * to steal it from us.
4245 */
Christopher Fauletc5fd15d2023-03-16 11:43:05 +01004246 if (h2c->conn->flags & CO_FL_LIST_MASK)
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004247 conn_delete_from_tree(&h2c->conn->hash_node->node);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004248
Amaury Denoyelle5c7086f2021-01-11 09:21:52 +01004249 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Willy Tarreaubd42e922020-06-30 11:19:23 +02004250 }
Olivier Houchardcd4159f2020-03-10 18:39:42 +01004251
Olivier Houchard48ce6a32020-07-02 11:58:05 +02004252do_leave:
Olivier Houchard3f795f72019-04-17 22:51:06 +02004253 task_destroy(t);
Willy Tarreau0975f112018-03-29 15:22:59 +02004254
4255 if (!h2c) {
4256 /* resources were already deleted */
Willy Tarreau7838a792019-08-12 18:42:03 +02004257 TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
Willy Tarreau0975f112018-03-29 15:22:59 +02004258 return NULL;
4259 }
4260
4261 h2c->task = NULL;
Willy Tarreauea392822017-10-31 10:02:25 +01004262 h2c_error(h2c, H2_ERR_NO_ERROR);
Willy Tarreau23482912019-05-07 15:23:14 +02004263 h2_wake_some_streams(h2c, 0);
Willy Tarreauea392822017-10-31 10:02:25 +01004264
Willy Tarreau662fafc2019-05-26 09:43:07 +02004265 if (br_data(h2c->mbuf)) {
Willy Tarreauea392822017-10-31 10:02:25 +01004266 /* don't even try to send a GOAWAY, the buffer is stuck */
4267 h2c->flags |= H2_CF_GOAWAY_FAILED;
4268 }
4269
4270 /* try to send but no need to insist */
Willy Tarreau599391a2017-11-24 10:16:00 +01004271 h2c->last_sid = h2c->max_id;
Willy Tarreauea392822017-10-31 10:02:25 +01004272 if (h2c_send_goaway_error(h2c, NULL) <= 0)
4273 h2c->flags |= H2_CF_GOAWAY_FAILED;
4274
Willy Tarreau662fafc2019-05-26 09:43:07 +02004275 if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02004276 unsigned int released = 0;
4277 struct buffer *buf;
4278
4279 for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
4280 if (b_data(buf)) {
4281 int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
4282 if (!ret)
4283 break;
4284 b_del(buf, ret);
4285 if (b_data(buf))
4286 break;
4287 b_free(buf);
4288 released++;
4289 }
Willy Tarreau787db9a2018-06-14 18:31:46 +02004290 }
Willy Tarreau41c4d6a2019-05-26 09:49:17 +02004291
4292 if (released)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +01004293 offer_buffers(NULL, released);
Willy Tarreau787db9a2018-06-14 18:31:46 +02004294 }
Willy Tarreauea392822017-10-31 10:02:25 +01004295
Willy Tarreau4481e262019-10-31 15:36:30 +01004296 /* in any case this connection must not be considered idle anymore */
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004297 if (h2c->conn->flags & CO_FL_LIST_MASK) {
4298 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004299 conn_delete_from_tree(&h2c->conn->hash_node->node);
Amaury Denoyelle3d752a82021-02-19 15:37:38 +01004300 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
4301 }
Willy Tarreau4481e262019-10-31 15:36:30 +01004302
Willy Tarreau0975f112018-03-29 15:22:59 +02004303 /* either we can release everything now or it will be done later once
4304 * the last stream closes.
4305 */
4306 if (eb_is_empty(&h2c->streams_by_id))
Christopher Faulet73c12072019-04-08 11:23:22 +02004307 h2_release(h2c);
Willy Tarreauea392822017-10-31 10:02:25 +01004308
Willy Tarreau7838a792019-08-12 18:42:03 +02004309 TRACE_LEAVE(H2_EV_H2C_WAKE);
Willy Tarreauea392822017-10-31 10:02:25 +01004310 return NULL;
4311}
4312
4313
Willy Tarreau62f52692017-10-08 23:01:42 +02004314/*******************************************/
4315/* functions below are used by the streams */
4316/*******************************************/
4317
4318/*
4319 * Attach a new stream to a connection
4320 * (Used for outgoing connections)
4321 */
Olivier Houchardf502aca2018-12-14 19:42:40 +01004322static struct conn_stream *h2_attach(struct connection *conn, struct session *sess)
Willy Tarreau62f52692017-10-08 23:01:42 +02004323{
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004324 struct conn_stream *cs;
4325 struct h2s *h2s;
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004326 struct h2c *h2c = conn->ctx;
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004327
Willy Tarreau7838a792019-08-12 18:42:03 +02004328 TRACE_ENTER(H2_EV_H2S_NEW, conn);
Christopher Faulet236c93b2020-07-02 09:19:54 +02004329 cs = cs_new(conn, conn->target);
Willy Tarreau7838a792019-08-12 18:42:03 +02004330 if (!cs) {
4331 TRACE_DEVEL("leaving on CS allocation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004332 return NULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02004333 }
Olivier Houchardf502aca2018-12-14 19:42:40 +01004334 h2s = h2c_bck_stream_new(h2c, cs, sess);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004335 if (!h2s) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004336 TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004337 cs_free(cs);
4338 return NULL;
4339 }
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004340
4341 /* the connection is not idle anymore, let's mark this */
4342 HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004343 xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004344
Willy Tarreau7838a792019-08-12 18:42:03 +02004345 TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
Olivier Houchard7a57e8a2018-11-27 17:36:33 +01004346 return cs;
Willy Tarreau62f52692017-10-08 23:01:42 +02004347}
4348
Willy Tarreaufafd3982018-11-18 21:29:20 +01004349/* Retrieves the first valid conn_stream from this connection, or returns NULL.
4350 * We have to scan because we may have some orphan streams. It might be
4351 * beneficial to scan backwards from the end to reduce the likeliness to find
4352 * orphans.
4353 */
4354static const struct conn_stream *h2_get_first_cs(const struct connection *conn)
4355{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01004356 struct h2c *h2c = conn->ctx;
Willy Tarreaufafd3982018-11-18 21:29:20 +01004357 struct h2s *h2s;
4358 struct eb32_node *node;
4359
4360 node = eb32_first(&h2c->streams_by_id);
4361 while (node) {
4362 h2s = container_of(node, struct h2s, by_id);
4363 if (h2s->cs)
4364 return h2s->cs;
4365 node = eb32_next(node);
4366 }
4367 return NULL;
4368}
4369
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004370static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
4371{
4372 int ret = 0;
4373 struct h2c *h2c = conn->ctx;
4374
4375 switch (mux_ctl) {
4376 case MUX_STATUS:
4377 /* Only consider the mux to be ready if we're done with
4378 * the preface and settings, and we had no error.
4379 */
4380 if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
4381 ret |= MUX_STATUS_READY;
4382 return ret;
Christopher Faulet4c8ad842020-10-06 14:59:17 +02004383 case MUX_EXIT_STATUS:
4384 return MUX_ES_UNKNOWN;
Olivier Houchard9b8e11e2019-10-25 16:19:26 +02004385 default:
4386 return -1;
4387 }
4388}
4389
Willy Tarreau62f52692017-10-08 23:01:42 +02004390/*
Olivier Houchard060ed432018-11-06 16:32:42 +01004391 * Destroy the mux and the associated connection, if it is no longer used
4392 */
Christopher Faulet73c12072019-04-08 11:23:22 +02004393static void h2_destroy(void *ctx)
Olivier Houchard060ed432018-11-06 16:32:42 +01004394{
Christopher Faulet73c12072019-04-08 11:23:22 +02004395 struct h2c *h2c = ctx;
Olivier Houchard060ed432018-11-06 16:32:42 +01004396
Willy Tarreau7838a792019-08-12 18:42:03 +02004397 TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
Christopher Faulet39a96ee2019-04-08 10:52:21 +02004398 if (eb_is_empty(&h2c->streams_by_id) || !h2c->conn || h2c->conn->ctx != h2c)
Christopher Faulet73c12072019-04-08 11:23:22 +02004399 h2_release(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004400 TRACE_LEAVE(H2_EV_H2C_END);
Olivier Houchard060ed432018-11-06 16:32:42 +01004401}
4402
4403/*
Willy Tarreau62f52692017-10-08 23:01:42 +02004404 * Detach the stream from the connection and possibly release the connection.
4405 */
4406static void h2_detach(struct conn_stream *cs)
4407{
Willy Tarreau60935142017-10-16 18:11:19 +02004408 struct h2s *h2s = cs->ctx;
4409 struct h2c *h2c;
Olivier Houchardf502aca2018-12-14 19:42:40 +01004410 struct session *sess;
Willy Tarreau60935142017-10-16 18:11:19 +02004411
Willy Tarreau7838a792019-08-12 18:42:03 +02004412 TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);
4413
Willy Tarreau60935142017-10-16 18:11:19 +02004414 cs->ctx = NULL;
Willy Tarreau7838a792019-08-12 18:42:03 +02004415 if (!h2s) {
4416 TRACE_LEAVE(H2_EV_STRM_END);
Willy Tarreau60935142017-10-16 18:11:19 +02004417 return;
Willy Tarreau7838a792019-08-12 18:42:03 +02004418 }
Willy Tarreau60935142017-10-16 18:11:19 +02004419
Willy Tarreaud9464162020-01-10 18:25:07 +01004420 /* there's no txbuf so we're certain not to be able to send anything */
4421 h2s->flags &= ~H2_SF_NOTIFIED;
Olivier Houchard998410a2019-04-15 19:23:37 +02004422
Olivier Houchardf502aca2018-12-14 19:42:40 +01004423 sess = h2s->sess;
Willy Tarreau60935142017-10-16 18:11:19 +02004424 h2c = h2s->h2c;
4425 h2s->cs = NULL;
Willy Tarreau7ac60e82018-07-19 09:04:05 +02004426 h2c->nb_cs--;
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01004427 if (!h2c->nb_cs)
4428 h2c->idle_start = now_ms;
4429
Willy Tarreaufa1d3572019-01-31 10:31:51 +01004430 if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
4431 !h2_frt_has_too_many_cs(h2c)) {
4432 /* frontend connection was blocking new streams creation */
Willy Tarreauf2101912018-07-19 10:11:38 +02004433 h2c->flags &= ~H2_CF_DEM_TOOMANY;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02004434 h2c_restart_reading(h2c, 1);
Willy Tarreauf2101912018-07-19 10:11:38 +02004435 }
Willy Tarreau60935142017-10-16 18:11:19 +02004436
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004437 /* this stream may be blocked waiting for some data to leave (possibly
4438 * an ES or RST frame), so orphan it in this case.
4439 */
Willy Tarreau3041fcc2018-03-29 15:41:32 +02004440 if (!(cs->conn->flags & CO_FL_ERROR) &&
Willy Tarreaua2b51812018-07-27 09:55:14 +02004441 (h2c->st0 < H2_CS_ERROR) &&
Willy Tarreau5723f292020-01-10 15:16:57 +01004442 (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
Willy Tarreauf96508a2020-01-10 11:12:48 +01004443 ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004444 TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01004445 /* refresh the timeout if none was active, so that the last
4446 * leaving stream may arm it.
4447 */
Willy Tarreau4e91ddb2023-03-16 18:06:19 +01004448 if (h2c->task && !tick_isset(h2c->task->expire))
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01004449 h2c_update_timeout(h2c);
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004450 return;
Willy Tarreau7838a792019-08-12 18:42:03 +02004451 }
Willy Tarreau22cf59b2017-11-10 11:42:33 +01004452
Willy Tarreau45f752e2017-10-30 15:44:59 +01004453 if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) ||
4454 (h2c->flags & H2_CF_MUX_BLOCK_ANY && h2s->id == h2c->msi)) {
4455 /* unblock the connection if it was blocked on this
4456 * stream.
4457 */
4458 h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
4459 h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02004460 h2c_restart_reading(h2c, 1);
Willy Tarreau45f752e2017-10-30 15:44:59 +01004461 }
4462
Willy Tarreau71049cc2018-03-28 13:56:39 +02004463 h2s_destroy(h2s);
Willy Tarreau60935142017-10-16 18:11:19 +02004464
Christopher Faulet9b79a102019-07-15 11:22:56 +02004465 if (h2c->flags & H2_CF_IS_BACK) {
Olivier Houchard8a786902018-12-15 16:05:40 +01004466 if (!(h2c->conn->flags &
4467 (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
Christopher Fauletc5579d12020-07-01 15:45:41 +02004468 if (h2c->conn->flags & CO_FL_PRIVATE) {
Christopher Faulet08016ab2020-07-01 16:10:06 +02004469 /* Add the connection in the session server list, if not already done */
4470 if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
4471 h2c->conn->owner = NULL;
4472 if (eb_is_empty(&h2c->streams_by_id)) {
4473 h2c->conn->mux->destroy(h2c);
4474 TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
4475 return;
Christopher Fauletc5579d12020-07-01 15:45:41 +02004476 }
4477 }
Christopher Faulet08016ab2020-07-01 16:10:06 +02004478 if (eb_is_empty(&h2c->streams_by_id)) {
Christopher Fauletc5579d12020-07-01 15:45:41 +02004479 if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
4480 /* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
4481 TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
Olivier Houchard351411f2018-12-27 17:20:54 +01004482 return;
4483 }
4484 }
Olivier Houchard8a786902018-12-15 16:05:40 +01004485 }
Christopher Fauletc5579d12020-07-01 15:45:41 +02004486 else {
4487 if (eb_is_empty(&h2c->streams_by_id)) {
Amaury Denoyelle6b8daef2020-10-14 18:17:10 +02004488 /* If the connection is owned by the session, first remove it
4489 * from its list
4490 */
4491 if (h2c->conn->owner) {
4492 session_unown_conn(h2c->conn->owner, h2c->conn);
4493 h2c->conn->owner = NULL;
4494 }
4495
Willy Tarreaue388f2f2021-03-02 16:51:09 +01004496 /* mark that the tasklet may lose its context to another thread and
4497 * that the handler needs to check it under the idle conns lock.
4498 */
4499 HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
Willy Tarreau4f8cd432021-03-02 17:27:58 +01004500 xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
4501
Olivier Houcharddc2f2752020-02-13 19:12:07 +01004502 if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
Olivier Houchard2444aa52020-01-20 13:56:01 +01004503 /* The server doesn't want it, let's kill the connection right away */
4504 h2c->conn->mux->destroy(h2c);
4505 TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
4506 return;
4507 }
Olivier Houchard199d4fa2020-03-22 23:25:51 +01004508 /* At this point, the connection has been added to the
4509 * server idle list, so another thread may already have
4510 * hijacked it, so we can't do anything with it.
4511 */
Olivier Houchard2444aa52020-01-20 13:56:01 +01004512 TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
4513 return;
Olivier Houchard8a786902018-12-15 16:05:40 +01004514
Olivier Houchard8a786902018-12-15 16:05:40 +01004515 }
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004516 else if (!h2c->conn->hash_node->node.node.leaf_p &&
Amaury Denoyelle6b8daef2020-10-14 18:17:10 +02004517 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02004518 !LIST_INLIST(&h2c->conn->session_list)) {
Willy Tarreau430bf4a2021-03-04 09:45:32 +01004519 ebmb_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
Amaury Denoyelle8990b012021-02-19 15:29:16 +01004520 &h2c->conn->hash_node->node,
4521 sizeof(h2c->conn->hash_node->hash));
Christopher Fauletc5579d12020-07-01 15:45:41 +02004522 }
Olivier Houchard8a786902018-12-15 16:05:40 +01004523 }
4524 }
4525 }
4526
Willy Tarreaue323f342018-03-28 13:51:45 +02004527 /* We don't want to close right now unless we're removing the
4528 * last stream, and either the connection is in error, or it
4529 * reached the ID already specified in a GOAWAY frame received
4530 * or sent (as seen by last_sid >= 0).
4531 */
Olivier Houchard7a977432019-03-21 15:47:13 +01004532 if (h2c_is_dead(h2c)) {
Willy Tarreaue323f342018-03-28 13:51:45 +02004533 /* no more stream will come, kill it now */
Willy Tarreau7838a792019-08-12 18:42:03 +02004534 TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
Christopher Faulet73c12072019-04-08 11:23:22 +02004535 h2_release(h2c);
Willy Tarreaue323f342018-03-28 13:51:45 +02004536 }
4537 else if (h2c->task) {
Willy Tarreauf5b2c3f2022-03-18 15:57:34 +01004538 h2c_update_timeout(h2c);
Willy Tarreau7838a792019-08-12 18:42:03 +02004539 TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
Willy Tarreau60935142017-10-16 18:11:19 +02004540 }
Willy Tarreau7838a792019-08-12 18:42:03 +02004541 else
4542 TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
Willy Tarreau62f52692017-10-08 23:01:42 +02004543}
4544
/* Performs a synchronous or asynchronous shutr() on stream <h2s>, i.e. closes
 * the read side. A fully closed stream only has its H2_SF_WANT_SHUTR flag
 * cleared. Otherwise the stream is aborted with an appropriate RST_STREAM
 * error code; if the frame (or a preliminary GOAWAY) cannot be emitted right
 * now, the stream queues itself on a connection list and the operation will be
 * retried later by h2_deferred_shut() via the shut_tl tasklet.
 */
static void h2_do_shutr(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	/* nothing left to shut on a fully closed stream */
	if (h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	/* a shutw is already pending on this stream: defer the shutr so both
	 * directions are processed together from the shut_tl tasklet.
	 */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		goto add_to_list;

	/* a connstream may require us to immediately kill the whole connection
	 * for example because of a "tcp-request content reject" rule that is
	 * normally used to limit abuse. In this case we schedule a goaway to
	 * close the connection.
	 */
	if ((h2s->flags & H2_SF_KILL_CONN) &&
	    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
		TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
		h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
	}
	else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
		/* Nothing was ever sent for this stream, so reset with
		 * REFUSED_STREAM error to let the client retry the
		 * request.
		 */
		TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
		h2s_error(h2s, H2_ERR_REFUSED_STREAM);
	}
	else {
		/* a final response was already provided, we don't want this
		 * stream anymore. This may happen when the server responds
		 * before the end of an upload and closes quickly (redirect,
		 * deny, ...)
		 */
		h2s_error(h2s, H2_ERR_CANCEL);
	}

	/* try to emit the RST_STREAM frame now; on failure, defer the whole
	 * operation until there is room again.
	 */
	if (!(h2s->flags & H2_SF_RST_SENT) &&
	    h2s_send_rst_stream(h2c, h2s) <= 0)
		goto add_to_list;

	/* make sure the connection's tasklet will flush the pending frames */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);
	h2s_close(h2s);
 done:
	h2s->flags &= ~H2_SF_WANT_SHUTR;
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
add_to_list:
	/* Let the handler know we want to shutr, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTR;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4613
/* Performs a synchronous or asynchronous shutw() on stream <h2s>, i.e. closes
 * the write side. If headers were already sent and the stream is not in error,
 * the close is done cleanly with an empty DATA frame carrying END_STREAM;
 * otherwise the stream is aborted with RST_STREAM (possibly preceded by a
 * GOAWAY when the stream asked to kill the connection). If the required frame
 * cannot be emitted right now, the stream queues itself on a connection list
 * and the operation will be retried later by h2_deferred_shut() via the
 * shut_tl tasklet.
 */
static void h2_do_shutw(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;

	/* already shut in the local or both directions: nothing to emit */
	if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
		goto done;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->st != H2_SS_ERROR && (h2s->flags & H2_SF_HEADERS_SENT)) {
		/* we can cleanly close using an empty data frame only after headers */

		if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
		    h2_send_empty_data_es(h2s) <= 0)
			goto add_to_list;

		/* move to the next stream state: fully closed if the remote
		 * side was already closed, half-closed (local) otherwise.
		 */
		if (h2s->st == H2_SS_HREM)
			h2s_close(h2s);
		else
			h2s->st = H2_SS_HLOC;
	} else {
		/* a connstream may require us to immediately kill the whole connection
		 * for example because of a "tcp-request content reject" rule that is
		 * normally used to limit abuse. In this case we schedule a goaway to
		 * close the connection.
		 */
		if ((h2s->flags & H2_SF_KILL_CONN) &&
		    !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
			TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
			h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
		}
		else {
			/* Nothing was ever sent for this stream, so reset with
			 * REFUSED_STREAM error to let the client retry the
			 * request.
			 */
			TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_REFUSED_STREAM);
		}

		/* try to emit the RST_STREAM frame now; on failure, defer the
		 * whole operation until there is room again.
		 */
		if (!(h2s->flags & H2_SF_RST_SENT) &&
		    h2s_send_rst_stream(h2c, h2s) <= 0)
			goto add_to_list;

		h2s_close(h2s);
	}

	/* make sure the connection's tasklet will flush the pending frames */
	if (!(h2c->wait_event.events & SUB_RETRY_SEND))
		tasklet_wakeup(h2c->wait_event.tasklet);

	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);

 done:
	h2s->flags &= ~H2_SF_WANT_SHUTW;
	return;

 add_to_list:
	/* Let the handler know we want to shutw, and add ourselves to the
	 * most relevant list if not yet done. h2_deferred_shut() will be
	 * automatically called via the shut_tl tasklet when there's room
	 * again.
	 */
	h2s->flags |= H2_SF_WANT_SHUTW;
	if (!LIST_INLIST(&h2s->list)) {
		if (h2s->flags & H2_SF_BLK_MFCTL)
			LIST_APPEND(&h2c->fctl_list, &h2s->list);
		else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
			LIST_APPEND(&h2c->send_list, &h2s->list);
	}
	TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
	return;
}
4688
/* This is the tasklet referenced in h2s->shut_tl, it is used for
 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
 * and prevented the last frame from being emitted. It retries the pending
 * h2_do_shutw()/h2_do_shutr() and, once both are done, may destroy the stream
 * and even release the whole connection. Returns the input task <t>, or NULL
 * if the connection (and thus the tasklet's context) was released so the
 * scheduler must not touch it anymore.
 */
struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
{
	struct h2s *h2s = ctx;
	struct h2c *h2c = h2s->h2c;

	TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);

	if (h2s->flags & H2_SF_NOTIFIED) {
		/* some data processing remains to be done first */
		goto end;
	}

	/* retry the deferred shutdowns; each handler clears its own
	 * H2_SF_WANT_* flag on success, or leaves it set to retry later.
	 */
	if (h2s->flags & H2_SF_WANT_SHUTW)
		h2_do_shutw(h2s);

	if (h2s->flags & H2_SF_WANT_SHUTR)
		h2_do_shutr(h2s);

	if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
		/* We're done trying to send, remove ourself from the send_list */
		h2_remove_from_list(h2s);

		/* no conn_stream attached anymore: the stream can go away, and
		 * possibly the connection with it if nothing else keeps it up.
		 */
		if (!h2s->cs) {
			h2s_destroy(h2s);
			if (h2c_is_dead(h2c)) {
				h2_release(h2c);
				/* the tasklet was freed with the connection,
				 * tell the scheduler by returning NULL.
				 */
				t = NULL;
			}
		}
	}
 end:
	TRACE_LEAVE(H2_EV_STRM_SHUT);
	return t;
}
4727
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004728/* shutr() called by the conn_stream (mux_ops.shutr) */
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004729static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
4730{
4731 struct h2s *h2s = cs->ctx;
4732
Willy Tarreau7838a792019-08-12 18:42:03 +02004733 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau3cf69fe2019-05-14 10:44:40 +02004734 if (cs->flags & CS_FL_KILL_CONN)
4735 h2s->flags |= H2_SF_KILL_CONN;
4736
Willy Tarreau7838a792019-08-12 18:42:03 +02004737 if (mode)
4738 h2_do_shutr(h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004739
Willy Tarreau7838a792019-08-12 18:42:03 +02004740 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004741}
4742
Willy Tarreau749f5ca2019-03-21 19:19:36 +01004743/* shutw() called by the conn_stream (mux_ops.shutw) */
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004744static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
4745{
4746 struct h2s *h2s = cs->ctx;
4747
Willy Tarreau7838a792019-08-12 18:42:03 +02004748 TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Willy Tarreau3cf69fe2019-05-14 10:44:40 +02004749 if (cs->flags & CS_FL_KILL_CONN)
4750 h2s->flags |= H2_SF_KILL_CONN;
4751
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004752 h2_do_shutw(h2s);
Willy Tarreau7838a792019-08-12 18:42:03 +02004753 TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
Olivier Houchard8ae735d2018-09-11 18:24:28 +02004754}
4755
Christopher Faulet9b79a102019-07-15 11:22:56 +02004756/* Decode the payload of a HEADERS frame and produce the HTX request or response
4757 * depending on the connection's side. Returns a positive value on success, a
4758 * negative value on failure, or 0 if it couldn't proceed. May report connection
4759 * errors in h2c->errcode if the frame is non-decodable and the connection
4760 * unrecoverable. In absence of connection error when a failure is reported, the
4761 * caller must assume a stream error.
Willy Tarreauea18f862018-12-22 20:19:26 +01004762 *
4763 * The function may fold CONTINUATION frames into the initial HEADERS frame
4764 * by removing padding and next frame header, then moving the CONTINUATION
4765 * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
4766 * leaving a hole between the main frame and the beginning of the next one.
4767 * The possibly remaining incomplete or next frame at the end may be moved
4768 * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
4769 * HEADERS frames are unwrapped into a temporary buffer before decoding.
4770 *
4771 * A buffer at the beginning of processing may look like this :
4772 *
4773 * ,---.---------.-----.--------------.--------------.------.---.
4774 * |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
4775 * `---^---------^-----^--------------^--------------^------^---'
4776 * | | <-----> | |
4777 * area | dpl | wrap
4778 * |<--------------> |
4779 * | dfl |
4780 * |<-------------------------------------------------->|
4781 * head data
4782 *
4783 * Padding is automatically overwritten when folding, participating to the
4784 * hole size after dfl :
4785 *
4786 * ,---.------------------------.-----.--------------.------.---.
4787 * |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
4788 * `---^------------------------^-----^--------------^------^---'
4789 * | | <-----> | |
4790 * area | hole | wrap
4791 * |<-----------------------> |
4792 * | dfl |
4793 * |<-------------------------------------------------->|
4794 * head data
4795 *
4796 * Please note that the HEADERS frame is always deprived from its PADLEN byte
4797 * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
4798 * bit.
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004799 *
4800 * The <flags> field must point to either the stream's flags or to a copy of it
4801 * so that the function can update the following flags :
4802 * - H2_SF_DATA_CLEN when content-length is seen
Willy Tarreau6cc85a52019-01-02 15:49:20 +01004803 * - H2_SF_HEADERS_RCVD once the frame is successfully decoded
Willy Tarreau88d138e2019-01-02 19:38:14 +01004804 *
4805 * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
4806 * decoding, in order to detect if we're dealing with a headers or a trailers
4807 * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
Willy Tarreau13278b42017-10-13 19:23:14 +02004808 */
Amaury Denoyelle74162742020-12-11 17:53:05 +01004809static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
Willy Tarreau13278b42017-10-13 19:23:14 +02004810{
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004811 const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
Willy Tarreau83061a82018-07-13 11:56:34 +02004812 struct buffer *tmp = get_trash_chunk();
Christopher Faulete4ab11b2019-06-11 15:05:37 +02004813 struct http_hdr list[global.tune.max_http_hdr * 2];
Willy Tarreau83061a82018-07-13 11:56:34 +02004814 struct buffer *copy = NULL;
Willy Tarreau174b06a2018-04-25 18:13:58 +02004815 unsigned int msgf;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004816 struct htx *htx = NULL;
Willy Tarreauea18f862018-12-22 20:19:26 +01004817 int flen; // header frame len
4818 int hole = 0;
Willy Tarreau86277d42019-01-02 15:36:11 +01004819 int ret = 0;
4820 int outlen;
Willy Tarreau13278b42017-10-13 19:23:14 +02004821 int wrap;
Willy Tarreau13278b42017-10-13 19:23:14 +02004822
Willy Tarreau7838a792019-08-12 18:42:03 +02004823 TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
4824
Willy Tarreauea18f862018-12-22 20:19:26 +01004825next_frame:
4826 if (b_data(&h2c->dbuf) - hole < h2c->dfl)
4827 goto leave; // incomplete input frame
4828
4829 /* No END_HEADERS means there's one or more CONTINUATION frames. In
4830 * this case, we'll try to paste it immediately after the initial
4831 * HEADERS frame payload and kill any possible padding. The initial
4832 * frame's length will be increased to represent the concatenation
4833 * of the two frames. The next frame is read from position <tlen>
4834 * and written at position <flen> (minus padding if some is present).
4835 */
4836 if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
4837 struct h2_fh hdr;
4838 int clen; // CONTINUATION frame's payload length
4839
Willy Tarreau7838a792019-08-12 18:42:03 +02004840 TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004841 if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
4842 /* no more data, the buffer may be full, either due to
4843 * too large a frame or because of too large a hole that
4844 * we're going to compact at the end.
4845 */
4846 goto leave;
4847 }
4848
4849 if (hdr.ft != H2_FT_CONTINUATION) {
4850 /* RFC7540#6.10: frame of unexpected type */
Willy Tarreau7838a792019-08-12 18:42:03 +02004851 TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004852 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004853 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004854 goto fail;
4855 }
4856
4857 if (hdr.sid != h2c->dsi) {
4858 /* RFC7540#6.10: frame of different stream */
Willy Tarreau7838a792019-08-12 18:42:03 +02004859 TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004860 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004861 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreauea18f862018-12-22 20:19:26 +01004862 goto fail;
4863 }
4864
4865 if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
4866 /* RFC7540#4.2: invalid frame length */
Willy Tarreau7838a792019-08-12 18:42:03 +02004867 TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004868 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4869 goto fail;
4870 }
4871
4872 /* detect when we must stop aggragating frames */
4873 h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;
4874
4875 /* Take as much as we can of the CONTINUATION frame's payload */
4876 clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
4877 if (clen > hdr.len)
4878 clen = hdr.len;
4879
4880 /* Move the frame's payload over the padding, hole and frame
4881 * header. At least one of hole or dpl is null (see diagrams
4882 * above). The hole moves after the new aggragated frame.
4883 */
4884 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
Christopher Fauletcb1847c2021-04-21 11:11:21 +02004885 h2c->dfl += hdr.len - h2c->dpl;
Willy Tarreauea18f862018-12-22 20:19:26 +01004886 hole += h2c->dpl + 9;
4887 h2c->dpl = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02004888 TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
Willy Tarreauea18f862018-12-22 20:19:26 +01004889 goto next_frame;
4890 }
4891
4892 flen = h2c->dfl - h2c->dpl;
Willy Tarreau68472622017-12-11 18:36:37 +01004893
Willy Tarreau13278b42017-10-13 19:23:14 +02004894 /* if the input buffer wraps, take a temporary copy of it (rare) */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02004895 wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
Willy Tarreau13278b42017-10-13 19:23:14 +02004896 if (wrap < h2c->dfl) {
Willy Tarreau68dd9852017-07-03 14:44:26 +02004897 copy = alloc_trash_chunk();
4898 if (!copy) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004899 TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
Willy Tarreau68dd9852017-07-03 14:44:26 +02004900 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
4901 goto fail;
4902 }
Willy Tarreau843b7cb2018-07-13 10:54:26 +02004903 memcpy(copy->area, b_head(&h2c->dbuf), wrap);
4904 memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
4905 hdrs = (uint8_t *) copy->area;
Willy Tarreau13278b42017-10-13 19:23:14 +02004906 }
4907
Willy Tarreau13278b42017-10-13 19:23:14 +02004908 /* Skip StreamDep and weight for now (we don't support PRIORITY) */
4909 if (h2c->dff & H2_F_HEADERS_PRIORITY) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004910 if (read_n32(hdrs) == h2c->dsi) {
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004911 /* RFC7540#5.3.1 : stream dep may not depend on itself */
Willy Tarreau7838a792019-08-12 18:42:03 +02004912 TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004913 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02004914 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreaua0d11b62018-09-05 18:30:05 +02004915 goto fail;
Willy Tarreau18b86cd2017-12-03 19:24:50 +01004916 }
4917
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004918 if (flen < 5) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004919 TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreaua01f45e2018-12-31 07:41:24 +01004920 h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
4921 goto fail;
4922 }
4923
Willy Tarreau13278b42017-10-13 19:23:14 +02004924 hdrs += 5; // stream dep = 4, weight = 1
4925 flen -= 5;
4926 }
4927
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004928 if (!h2_get_buf(h2c, rxbuf)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004929 TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Willy Tarreau937f7602018-02-26 15:22:17 +01004930 h2c->flags |= H2_CF_DEM_SALLOC;
Willy Tarreau86277d42019-01-02 15:36:11 +01004931 goto leave;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004932 }
Willy Tarreau13278b42017-10-13 19:23:14 +02004933
Willy Tarreau937f7602018-02-26 15:22:17 +01004934 /* we can't retry a failed decompression operation so we must be very
4935 * careful not to take any risks. In practice the output buffer is
4936 * always empty except maybe for trailers, in which case we simply have
4937 * to wait for the upper layer to finish consuming what is available.
4938 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02004939 htx = htx_from_buf(rxbuf);
4940 if (!htx_is_empty(htx)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004941 TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02004942 h2c->flags |= H2_CF_DEM_SFULL;
4943 goto leave;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004944 }
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004945
Willy Tarreau25919232019-01-03 14:48:18 +01004946 /* past this point we cannot roll back in case of error */
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004947 outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
4948 sizeof(list)/sizeof(list[0]), tmp);
4949 if (outlen < 0) {
Willy Tarreau7838a792019-08-12 18:42:03 +02004950 TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004951 h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
4952 goto fail;
4953 }
4954
Willy Tarreau25919232019-01-03 14:48:18 +01004955 /* The PACK decompressor was updated, let's update the input buffer and
4956 * the parser's state to commit these changes and allow us to later
4957 * fail solely on the stream if needed.
4958 */
4959 b_del(&h2c->dbuf, h2c->dfl + hole);
4960 h2c->dfl = hole = 0;
4961 h2c->st0 = H2_CS_FRAME_H;
4962
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004963 /* OK now we have our header list in <list> */
Willy Tarreau880f5802019-01-03 08:10:14 +01004964 msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
Christopher Fauletd0db4232021-01-22 11:46:30 +01004965 msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
Amaury Denoyelle74162742020-12-11 17:53:05 +01004966 /* If an Extended CONNECT has been sent on this stream, set message flag
Ilya Shipitsinacf84592021-02-06 22:29:08 +05004967 * to convert 200 response to 101 htx response */
Amaury Denoyelle74162742020-12-11 17:53:05 +01004968 msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;
Willy Tarreaubd4a6b62018-11-27 09:29:36 +01004969
Willy Tarreau88d138e2019-01-02 19:38:14 +01004970 if (*flags & H2_SF_HEADERS_RCVD)
4971 goto trailers;
4972
4973 /* This is the first HEADERS frame so it's a headers block */
Christopher Faulet9b79a102019-07-15 11:22:56 +02004974 if (h2c->flags & H2_CF_IS_BACK)
Amaury Denoyelle74162742020-12-11 17:53:05 +01004975 outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
Christopher Faulet9b79a102019-07-15 11:22:56 +02004976 else
Willy Tarreauf86e9942023-08-08 15:38:28 +02004977 outlen = h2_make_htx_request(list, htx, &msgf, body_len,
4978 !!(((const struct session *)h2c->conn->owner)->fe->options2 & PR_O2_REQBUG_OK));
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004979
Christopher Faulet3d875582021-04-26 17:46:13 +02004980 if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
Willy Tarreau25919232019-01-03 14:48:18 +01004981 /* too large headers? this is a stream error only */
Christopher Faulet3d875582021-04-26 17:46:13 +02004982 TRACE_STATE("message headers too large", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
4983 htx->flags |= HTX_FL_PARSING_ERROR;
Willy Tarreau59a10fb2017-11-21 20:03:02 +01004984 goto fail;
4985 }
Willy Tarreau13278b42017-10-13 19:23:14 +02004986
Willy Tarreau174b06a2018-04-25 18:13:58 +02004987 if (msgf & H2_MSGF_BODY) {
4988 /* a payload is present */
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01004989 if (msgf & H2_MSGF_BODY_CL) {
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01004990 *flags |= H2_SF_DATA_CLEN;
Christopher Faulet9b79a102019-07-15 11:22:56 +02004991 htx->extra = *body_len;
Christopher Fauleteaf0d2a2019-02-18 16:04:35 +01004992 }
Willy Tarreau174b06a2018-04-25 18:13:58 +02004993 }
Christopher Faulet7d247f02020-12-02 14:26:36 +01004994 if (msgf & H2_MSGF_BODYLESS_RSP)
4995 *flags |= H2_SF_BODYLESS_RESP;
Willy Tarreau174b06a2018-04-25 18:13:58 +02004996
Christopher Fauletd0db4232021-01-22 11:46:30 +01004997 if (msgf & H2_MSGF_BODY_TUNNEL)
4998 *flags |= H2_SF_BODY_TUNNEL;
4999 else {
5000 /* Abort the tunnel attempt, if any */
5001 if (*flags & H2_SF_BODY_TUNNEL)
5002 *flags |= H2_SF_TUNNEL_ABRT;
5003 *flags &= ~H2_SF_BODY_TUNNEL;
5004 }
5005
Willy Tarreau88d138e2019-01-02 19:38:14 +01005006 done:
Christopher Faulet0b465482019-02-19 15:14:23 +01005007 /* indicate that a HEADERS frame was received for this stream, except
5008 * for 1xx responses. For 1xx responses, another HEADERS frame is
5009 * expected.
5010 */
5011 if (!(msgf & H2_MSGF_RSP_1XX))
5012 *flags |= H2_SF_HEADERS_RCVD;
Willy Tarreau6cc85a52019-01-02 15:49:20 +01005013
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005014 if (h2c->dff & H2_F_HEADERS_END_STREAM) {
Christopher Faulet2c681c62022-12-22 09:47:01 +01005015 if (msgf & H2_MSGF_RSP_1XX) {
5016 /* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
5017 TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
5018 goto fail;
5019 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005020 /* no more data are expected for this message */
5021 htx->flags |= HTX_FL_EOM;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005022 }
Willy Tarreau937f7602018-02-26 15:22:17 +01005023
Amaury Denoyelleefe22762020-12-11 17:53:08 +01005024 if (msgf & H2_MSGF_EXT_CONNECT)
5025 *flags |= H2_SF_EXT_CONNECT_RCVD;
5026
Willy Tarreau86277d42019-01-02 15:36:11 +01005027 /* success */
5028 ret = 1;
5029
Willy Tarreau68dd9852017-07-03 14:44:26 +02005030 leave:
Willy Tarreau86277d42019-01-02 15:36:11 +01005031 /* If there is a hole left and it's not at the end, we are forced to
Willy Tarreauea18f862018-12-22 20:19:26 +01005032 * move the remaining data over it.
5033 */
5034 if (hole) {
5035 if (b_data(&h2c->dbuf) > h2c->dfl + hole)
5036 b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
5037 b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
5038 b_sub(&h2c->dbuf, hole);
5039 }
5040
Christopher Fauletb7c62612023-12-13 15:36:52 +01005041 if (b_full(&h2c->dbuf) && h2c->dfl && (!htx || htx_is_empty(htx))) {
Willy Tarreauea18f862018-12-22 20:19:26 +01005042 /* too large frames */
5043 h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
Willy Tarreau86277d42019-01-02 15:36:11 +01005044 ret = -1;
Willy Tarreauea18f862018-12-22 20:19:26 +01005045 }
5046
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01005047 if (htx)
Willy Tarreau5c8cafa2018-12-23 11:30:42 +01005048 htx_to_buf(htx, rxbuf);
Willy Tarreau68dd9852017-07-03 14:44:26 +02005049 free_trash_chunk(copy);
Willy Tarreau7838a792019-08-12 18:42:03 +02005050 TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
Willy Tarreau86277d42019-01-02 15:36:11 +01005051 return ret;
5052
Willy Tarreau68dd9852017-07-03 14:44:26 +02005053 fail:
Willy Tarreau86277d42019-01-02 15:36:11 +01005054 ret = -1;
Willy Tarreau68dd9852017-07-03 14:44:26 +02005055 goto leave;
Willy Tarreau88d138e2019-01-02 19:38:14 +01005056
5057 trailers:
5058 /* This is the last HEADERS frame hence a trailer */
Willy Tarreau88d138e2019-01-02 19:38:14 +01005059 if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
5060 /* It's a trailer but it's missing ES flag */
Willy Tarreau7838a792019-08-12 18:42:03 +02005061 TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005062 h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
Willy Tarreau4781b152021-04-06 13:53:36 +02005063 HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
Willy Tarreau88d138e2019-01-02 19:38:14 +01005064 goto fail;
5065 }
5066
Christopher Faulet9b79a102019-07-15 11:22:56 +02005067 /* Trailers terminate a DATA sequence */
Willy Tarreau7838a792019-08-12 18:42:03 +02005068 if (h2_make_htx_trailers(list, htx) <= 0) {
5069 TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
Christopher Faulet9b79a102019-07-15 11:22:56 +02005070 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02005071 }
Willy Tarreau88d138e2019-01-02 19:38:14 +01005072 goto done;
Willy Tarreau13278b42017-10-13 19:23:14 +02005073}
5074
/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
 * parser state is automatically updated. Returns > 0 if it could completely
 * send the current frame, 0 if it couldn't complete, in which case
 * CS_FL_RCV_MORE must be checked to know if some data remain pending (an empty
 * DATA frame can return 0 as a valid result). Stream errors are reported in
 * h2s->errcode and connection errors in h2c->errcode. The caller must already
 * have checked the frame header and ensured that the frame was complete or the
 * buffer full. It changes the frame state to FRAME_A once done.
 */
static int h2_frt_transfer_data(struct h2s *h2s)
{
	struct h2c *h2c = h2s->h2c;
	int block;
	unsigned int flen = 0;
	struct htx *htx = NULL;
	struct buffer *csbuf;
	unsigned int sent;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);

	/* clear the "stream rx buffer full" demux blocking flag; it will be
	 * set again below if we cannot make progress.
	 */
	h2c->flags &= ~H2_CF_DEM_SFULL;

	/* allocate (or retrieve) the stream's rx buffer, destination of the
	 * HTX data blocks.
	 */
	csbuf = h2_get_buf(h2c, &h2s->rxbuf);
	if (!csbuf) {
		h2c->flags |= H2_CF_DEM_SALLOC;
		TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	htx = htx_from_buf(csbuf);

try_again:
	/* remaining frame payload to forward; trailing padding (h2c->dpl) is
	 * not copied, it is only accounted for at end_transfer below.
	 */
	flen = h2c->dfl - h2c->dpl;
	if (!flen)
		goto end_transfer;

	/* limit the copy to what is actually present in the demux buffer */
	if (flen > b_data(&h2c->dbuf)) {
		flen = b_data(&h2c->dbuf);
		if (!flen)
			goto fail;
	}

	block = htx_free_data_space(htx);
	if (!block) {
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}
	if (flen > block)
		flen = block;

	/* here, flen is the max we can copy into the output buffer */
	block = b_contig_data(&h2c->dbuf, 0);
	if (flen > block)
		flen = block;

	sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
	TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);

	/* consume from the demux buffer and account the received bytes, which
	 * will later be acknowledged via WINDOW_UPDATE (see FRAME_A below).
	 */
	b_del(&h2c->dbuf, sent);
	h2c->dfl -= sent;
	h2c->rcvd_c += sent;
	h2c->rcvd_s += sent; // warning, this can also affect the closed streams!

	if (h2s->flags & H2_SF_DATA_CLEN) {
		/* content-length announced: keep the remaining body length in
		 * htx->extra so the other side knows what is still expected.
		 */
		h2s->body_len -= sent;
		htx->extra = h2s->body_len;
	}

	if (sent < flen) {
		/* partial copy: destination is full, come back later */
		h2c->flags |= H2_CF_DEM_SFULL;
		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto fail;
	}

	goto try_again;

 end_transfer:
	/* here we're done with the frame, all the payload (except padding) was
	 * transferred.
	 */

	if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
		/* no more data are expected for this message. This add the EOM
		 * flag but only on the response path or if no tunnel attempt
		 * was aborted. Otherwise (request path + tunnel abrted), the
		 * EOM was already reported.
		 */
		if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
			/* If we receive an empty DATA frame with ES flag while the HTX
			 * message is empty, we must be sure to push a block to be sure
			 * the HTX EOM flag will be handled on the other side. It is a
			 * workaround because for now it is not possible to push empty
			 * HTX DATA block. And without this block, there is no way to
			 * "commit" the end of the message.
			 */
			if (htx_is_empty(htx)) {
				if (!htx_add_endof(htx, HTX_BLK_EOT))
					goto fail;
			}
			htx->flags |= HTX_FL_EOM;
		}
	}

	/* padding is part of the frame for flow control purposes, so it must
	 * be acknowledged as well even though it was not forwarded.
	 */
	h2c->rcvd_c += h2c->dpl;
	h2c->rcvd_s += h2c->dpl;
	h2c->dpl = 0;
	h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
	htx_to_buf(htx, csbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 1;
 fail:
	if (htx)
		htx_to_buf(htx, csbuf);
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
	return 0;
}
5191
/* Try to send a HEADERS frame matching HTX response present in HTX message
 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
 * must check the stream's status to detect any error which might have happened
 * subsequently to a successful send. The htx blocks are automatically removed
 * from the message. The htx message is assumed to be valid since produced from
 * the internal code, hence it contains a start line, an optional series of
 * header blocks and an end of header, otherwise an invalid frame could be
 * emitted and the resulting htx message could be left in an inconsistent state.
 */
static size_t h2s_frt_make_resp_headers(struct h2s *h2s, struct htx *htx)
{
	/* VLA sized by the configured max number of headers */
	struct http_hdr list[global.tune.max_http_hdr];
	struct h2c *h2c = h2s->h2c;
	struct htx_blk *blk;
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_sl *sl;
	enum htx_blk_type type;
	int es_now = 0;
	int ret = 0;
	int hdr;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);

	if (h2c_mux_busy(h2c, h2s)) {
		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
		return 0;
	}

	/* get the start line (we do have one) and the rest of the headers,
	 * that we dump starting at header 0 */
	sl = NULL;
	hdr = 0;
	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		type = htx_get_blk_type(blk);

		if (type == HTX_BLK_UNUSED)
			continue;

		if (type == HTX_BLK_EOH)
			break;

		if (type == HTX_BLK_HDR) {
			BUG_ON(!sl); /* The start-line must be defined before any headers */
			/* keep one slot free for the end-of-list marker below */
			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}

			list[hdr].n = htx_get_blk_name(htx, blk);
			list[hdr].v = htx_get_blk_value(htx, blk);
			hdr++;
		}
		else if (type == HTX_BLK_RES_SL) {
			BUG_ON(sl); /* Only one start-line expected */
			sl = htx_get_blk_ptr(htx, blk);
			h2s->status = sl->info.res.status;
			/* 204/304 responses never carry a body */
			if (h2s->status == 204 || h2s->status == 304)
				h2s->flags |= H2_SF_BODYLESS_RESP;
			if (h2s->status < 100 || h2s->status > 999) {
				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
				goto fail;
			}
			else if (h2s->status == 101) {
				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
					h2s->status = 200;
					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
				}
				else {
					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
					goto fail;
				}
			}
			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
				/* Abort the tunnel attempt */
				h2s->flags &= ~H2_SF_BODY_TUNNEL;
				h2s->flags |= H2_SF_TUNNEL_ABRT;
			}
		}
		else {
			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
			goto fail;
		}
	}

	/* The start-line must be defined */
	BUG_ON(!sl);

	/* marker for end of headers */
	list[hdr].n = ist("");

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
		return 0;
	}

	chunk_reset(&outbuf);

	/* make sure at least the 9-byte H2 frame header fits contiguously,
	 * realigning the mux buffer if the free space wraps.
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9)
		goto full;

	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
		 * dynamic table size update so that some clients are not
		 * confused. In practice we only need to send the DTSU when the
		 * advertised size is lower than the current one, and since we
		 * don't use it and don't care about the default 4096 bytes,
		 * we only ack it with a zero size thus we at most have to deal
		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
		 * below for the whole context and interoperability risks:
		 *   https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
		 */
		if (b_room(&outbuf) < 1)
			goto full;
		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes

		/* let's not update the flags now but only once the buffer is
		 * really committed.
		 */
	}

	/* encode status, which necessarily is the first one */
	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
		if (b_space_wraps(mbuf))
			goto realign_again;
		goto full;
	}

	/* encode all headers, stop at empty name */
	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
		/* these ones do not exist in H2 and must be dropped. */
		if (isteq(list[hdr].n, ist("connection")) ||
		    isteq(list[hdr].n, ist("proxy-connection")) ||
		    isteq(list[hdr].n, ist("keep-alive")) ||
		    isteq(list[hdr].n, ist("upgrade")) ||
		    isteq(list[hdr].n, ist("transfer-encoding")))
			continue;

		/* Skip all pseudo-headers */
		if (*(list[hdr].n.ptr) == ':')
			continue;

		if (isteq(list[hdr].n, ist("")))
			break; // end

		if (!hpack_encode_header(&outbuf, list[hdr].n, list[hdr].v)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	/* update the frame's size */
	h2_set_frame_size(outbuf.area, outbuf.data - 9);

	/* split into HEADERS + CONTINUATION frames if larger than the peer's
	 * max frame size.
	 */
	if (outbuf.data > h2c->mfs + 9) {
		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
			/* output full */
			if (b_space_wraps(mbuf))
				goto realign_again;
			goto full;
		}
	}

	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);

	/* remove all header blocks including the EOH and compute the
	 * corresponding size. <ret> (the value we return) is the number of
	 * HTX bytes consumed.
	 */
	ret = 0;
	blk = htx_get_head_blk(htx);
	while (blk) {
		type = htx_get_blk_type(blk);
		ret += htx_get_blksz(blk);
		blk = htx_remove_blk(htx, blk);
		/* The removed block is the EOH */
		if (type == HTX_BLK_EOH)
			break;
	}

	if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
		/* Response already closed: add END_STREAM */
		es_now = 1;
	}
	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
		/* EOM+empty: we may need to add END_STREAM except for 1xx
		 * responses and tunneled response.
		 */
		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
			es_now = 1;
	}

	if (es_now)
		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;

	/* commit the H2 response */
	b_add(mbuf, outbuf.data);

	/* indicates the HEADERS frame was sent, except for 1xx responses. For
	 * 1xx responses, another HEADERS frame is expected.
	 */
	if (h2s->status >= 200)
		h2s->flags |= H2_SF_HEADERS_SENT;

	if (h2c->flags & H2_CF_SHTS_UPDATED) {
		/* was sent above */
		h2c->flags |= H2_CF_DTSU_EMITTED;
		h2c->flags &= ~H2_CF_SHTS_UPDATED;
	}

	if (es_now) {
		/* END_STREAM sent: advance the stream state machine */
		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);
	}

	/* OK we could properly deliver the response */
 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
	return ret;
 full:
	/* try to allocate a new buffer in the mux ring before giving up */
	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
		goto retry;
	h2c->flags |= H2_CF_MUX_MFULL;
	h2s->flags |= H2_SF_BLK_MROOM;
	ret = 0;
	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
	goto end;
 fail:
	/* unparsable HTX messages, too large ones to be produced in the local
	 * list etc go here (unrecoverable errors).
	 */
	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
	ret = 0;
	goto end;
}
5453
Willy Tarreau80739692018-10-05 11:35:57 +02005454/* Try to send a HEADERS frame matching HTX request present in HTX message
5455 * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
5456 * must check the stream's status to detect any error which might have happened
5457 * subsequently to a successful send. The htx blocks are automatically removed
5458 * from the message. The htx message is assumed to be valid since produced from
5459 * the internal code, hence it contains a start line, an optional series of
5460 * header blocks and an end of header, otherwise an invalid frame could be
5461 * emitted and the resulting htx message could be left in an inconsistent state.
5462 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02005463static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx)
Willy Tarreau80739692018-10-05 11:35:57 +02005464{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02005465 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau80739692018-10-05 11:35:57 +02005466 struct h2c *h2c = h2s->h2c;
5467 struct htx_blk *blk;
Willy Tarreau80739692018-10-05 11:35:57 +02005468 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02005469 struct buffer *mbuf;
Willy Tarreau80739692018-10-05 11:35:57 +02005470 struct htx_sl *sl;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005471 struct ist meth, uri, auth, host = IST_NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005472 enum htx_blk_type type;
5473 int es_now = 0;
5474 int ret = 0;
5475 int hdr;
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005476 int extended_connect = 0;
Willy Tarreau80739692018-10-05 11:35:57 +02005477
Willy Tarreau7838a792019-08-12 18:42:03 +02005478 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5479
Willy Tarreau80739692018-10-05 11:35:57 +02005480 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005481 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005482 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02005483 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005484 return 0;
5485 }
5486
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005487 /* get the start line (we do have one) and the rest of the headers,
5488 * that we dump starting at header 0 */
5489 sl = NULL;
Willy Tarreau80739692018-10-05 11:35:57 +02005490 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005491 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005492 type = htx_get_blk_type(blk);
5493
5494 if (type == HTX_BLK_UNUSED)
5495 continue;
5496
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005497 if (type == HTX_BLK_EOH)
Willy Tarreau80739692018-10-05 11:35:57 +02005498 break;
5499
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005500 if (type == HTX_BLK_HDR) {
Christopher Faulet56498132021-01-29 11:39:43 +01005501 BUG_ON(!sl); /* The start-line must be defined before any headers */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005502 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
5503 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5504 goto fail;
5505 }
Willy Tarreau80739692018-10-05 11:35:57 +02005506
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005507 list[hdr].n = htx_get_blk_name(htx, blk);
5508 list[hdr].v = htx_get_blk_value(htx, blk);
Christopher Faulet67d58092019-10-02 10:51:38 +02005509
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005510 /* Skip header if same name is used to add the server name */
5511 if ((h2c->flags & H2_CF_IS_BACK) && h2c->proxy->server_id_hdr_name &&
5512 isteq(list[hdr].n, ist2(h2c->proxy->server_id_hdr_name, h2c->proxy->server_id_hdr_len)))
5513 continue;
Christopher Faulet67d58092019-10-02 10:51:38 +02005514
Ilya Shipitsinacf84592021-02-06 22:29:08 +05005515 /* Convert connection: upgrade to Extended connect from rfc 8441 */
Christopher Faulet673504a2021-09-09 09:52:51 +02005516 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005517 /* rfc 7230 #6.1 Connection = list of tokens */
5518 struct ist connection_ist = list[hdr].v;
5519 do {
5520 if (isteqi(iststop(connection_ist, ','),
5521 ist("upgrade"))) {
Amaury Denoyelle68993a12021-10-18 09:43:29 +02005522 if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
5523 TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
5524 goto fail;
5525 }
5526
Amaury Denoyellea1fa1542021-10-18 10:05:16 +02005527 TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005528 h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
5529 sl->info.req.meth = HTTP_METH_CONNECT;
5530 meth = ist("CONNECT");
5531
5532 extended_connect = 1;
5533 break;
5534 }
5535
5536 connection_ist = istadv(istfind(connection_ist, ','), 1);
5537 } while (istlen(connection_ist));
5538 }
5539
Christopher Faulet673504a2021-09-09 09:52:51 +02005540 if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005541 /* rfc 7230 #6.7 Upgrade = list of protocols
5542 * rfc 8441 #4 Extended connect = :protocol is single-valued
5543 *
5544 * only first HTTP/1 protocol is preserved
5545 */
5546 const struct ist protocol = iststop(list[hdr].v, ',');
5547 /* upgrade_protocol field is 16 bytes long in h2s */
5548 istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
5549 }
5550
5551 if (isteq(list[hdr].n, ist("host")))
5552 host = list[hdr].v;
5553
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005554 hdr++;
5555 }
Christopher Fauletc29b4bf2021-01-29 11:49:16 +01005556 else if (type == HTX_BLK_REQ_SL) {
5557 BUG_ON(sl); /* Only one start-line expected */
5558 sl = htx_get_blk_ptr(htx, blk);
5559 meth = htx_sl_req_meth(sl);
5560 uri = htx_sl_req_uri(sl);
5561 if (sl->info.req.meth == HTTP_METH_HEAD)
5562 h2s->flags |= H2_SF_BODYLESS_RESP;
5563 if (unlikely(uri.len == 0)) {
5564 TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5565 goto fail;
5566 }
5567 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005568 else {
5569 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
5570 goto fail;
5571 }
Willy Tarreau80739692018-10-05 11:35:57 +02005572 }
5573
Christopher Faulet56498132021-01-29 11:39:43 +01005574 /* The start-line must be defined */
5575 BUG_ON(!sl);
5576
Christopher Faulet72ba6cd2019-09-24 16:20:05 +02005577 /* Now add the server name to a header (if requested) */
5578 if ((h2c->flags & H2_CF_IS_BACK) && h2c->proxy->server_id_hdr_name) {
5579 struct server *srv = objt_server(h2c->conn->target);
5580
5581 if (srv) {
5582 list[hdr].n = ist2(h2c->proxy->server_id_hdr_name, h2c->proxy->server_id_hdr_len);
5583 list[hdr].v = ist(srv->id);
5584 hdr++;
5585 }
5586 }
5587
Willy Tarreau80739692018-10-05 11:35:57 +02005588 /* marker for end of headers */
5589 list[hdr].n = ist("");
5590
Willy Tarreau9c218e72019-05-26 10:08:28 +02005591 mbuf = br_tail(h2c->mbuf);
5592 retry:
5593 if (!h2_get_buf(h2c, mbuf)) {
5594 h2c->flags |= H2_CF_MUX_MALLOC;
5595 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02005596 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02005597 return 0;
5598 }
5599
Willy Tarreau80739692018-10-05 11:35:57 +02005600 chunk_reset(&outbuf);
5601
5602 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005603 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
5604 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005605 break;
5606 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02005607 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau80739692018-10-05 11:35:57 +02005608 }
5609
5610 if (outbuf.size < 9)
5611 goto full;
5612
5613 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
5614 memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
5615 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
5616 outbuf.data = 9;
5617
5618 /* encode the method, which necessarily is the first one */
Willy Tarreaubdabc3a2018-12-10 18:25:11 +01005619 if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02005620 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005621 goto realign_again;
5622 goto full;
5623 }
5624
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005625 auth = ist(NULL);
5626
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005627 /* RFC7540 #8.3: the CONNECT method must have :
5628 * - :authority set to the URI part (host:port)
5629 * - :method set to CONNECT
5630 * - :scheme and :path omitted
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005631 *
5632 * Note that this is not applicable in case of the Extended CONNECT
5633 * protocol from rfc 8441.
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005634 */
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005635 if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005636 auth = uri;
5637
5638 if (!hpack_encode_header(&outbuf, ist(":authority"), auth)) {
5639 /* output full */
5640 if (b_space_wraps(mbuf))
5641 goto realign_again;
5642 goto full;
5643 }
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005644 h2s->flags |= H2_SF_BODY_TUNNEL;
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005645 } else {
5646 /* other methods need a :scheme. If an authority is known from
5647 * the request line, it must be sent, otherwise only host is
5648 * sent. Host is never sent as the authority.
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005649 *
5650 * This code is also applicable for Extended CONNECT protocol
5651 * from rfc 8441.
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005652 */
5653 struct ist scheme = { };
Christopher Faulet3b44c542019-06-14 10:46:51 +02005654
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005655 if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
5656 /* the URI seems to start with a scheme */
5657 int len = 1;
5658
5659 while (len < uri.len && uri.ptr[len] != ':')
5660 len++;
5661
5662 if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
5663 /* make the uri start at the authority now */
Tim Duesterhus9f75ed12021-03-02 18:57:26 +01005664 scheme = ist2(uri.ptr, len);
Tim Duesterhus154374c2021-03-02 18:57:27 +01005665 uri = istadv(uri, len + 3);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005666
5667 /* find the auth part of the URI */
Tim Duesterhus92c696e2021-02-28 16:11:36 +01005668 auth = ist2(uri.ptr, 0);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005669 while (auth.len < uri.len && auth.ptr[auth.len] != '/')
5670 auth.len++;
5671
Tim Duesterhus154374c2021-03-02 18:57:27 +01005672 uri = istadv(uri, auth.len);
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005673 }
5674 }
5675
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005676 /* For Extended CONNECT, the :authority must be present.
5677 * Use host value for it.
5678 */
5679 if (unlikely(extended_connect) && isttest(host))
5680 auth = host;
5681
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005682 if (!scheme.len) {
5683 /* no explicit scheme, we're using an origin-form URI,
5684 * probably from an H1 request transcoded to H2 via an
5685 * external layer, then received as H2 without authority.
5686 * So we have to look up the scheme from the HTX flags.
5687 * In such a case only http and https are possible, and
5688 * https is the default (sent by browsers).
5689 */
5690 if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
5691 scheme = ist("http");
5692 else
5693 scheme = ist("https");
5694 }
Christopher Faulet3b44c542019-06-14 10:46:51 +02005695
5696 if (!hpack_encode_scheme(&outbuf, scheme)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005697 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005698 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005699 goto realign_again;
5700 goto full;
5701 }
Willy Tarreau80739692018-10-05 11:35:57 +02005702
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005703 if (auth.len && !hpack_encode_header(&outbuf, ist(":authority"), auth)) {
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005704 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005705 if (b_space_wraps(mbuf))
Willy Tarreau5be92ff2019-02-01 15:51:59 +01005706 goto realign_again;
5707 goto full;
5708 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005709
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005710 /* encode the path. RFC7540#8.1.2.3: if path is empty it must
5711 * be sent as '/' or '*'.
5712 */
5713 if (unlikely(!uri.len)) {
5714 if (sl->info.req.meth == HTTP_METH_OPTIONS)
5715 uri = ist("*");
5716 else
5717 uri = ist("/");
Willy Tarreau053c1572019-02-01 16:13:59 +01005718 }
Willy Tarreau053c1572019-02-01 16:13:59 +01005719
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005720 if (!hpack_encode_path(&outbuf, uri)) {
5721 /* output full */
5722 if (b_space_wraps(mbuf))
5723 goto realign_again;
5724 goto full;
5725 }
Amaury Denoyelle9bf95732020-12-11 17:53:06 +01005726
5727 /* encode the pseudo-header protocol from rfc8441 if using
5728 * Extended CONNECT method.
5729 */
5730 if (unlikely(extended_connect)) {
5731 const struct ist protocol = ist(h2s->upgrade_protocol);
5732 if (isttest(protocol)) {
5733 if (!hpack_encode_header(&outbuf,
5734 ist(":protocol"),
5735 protocol)) {
5736 /* output full */
5737 if (b_space_wraps(mbuf))
5738 goto realign_again;
5739 goto full;
5740 }
5741 }
5742 }
Willy Tarreau80739692018-10-05 11:35:57 +02005743 }
5744
Willy Tarreaub8ce8902019-10-08 18:16:18 +02005745 /* encode all headers, stop at empty name. Host is only sent if we
5746 * do not provide an authority.
5747 */
Willy Tarreau80739692018-10-05 11:35:57 +02005748 for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005749 struct ist n = list[hdr].n;
5750 struct ist v = list[hdr].v;
5751
Willy Tarreau80739692018-10-05 11:35:57 +02005752 /* these ones do not exist in H2 and must be dropped. */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005753 if (isteq(n, ist("connection")) ||
5754 (auth.len && isteq(n, ist("host"))) ||
5755 isteq(n, ist("proxy-connection")) ||
5756 isteq(n, ist("keep-alive")) ||
5757 isteq(n, ist("upgrade")) ||
5758 isteq(n, ist("transfer-encoding")))
Willy Tarreau80739692018-10-05 11:35:57 +02005759 continue;
5760
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005761 if (isteq(n, ist("te"))) {
5762 /* "te" may only be sent with "trailers" if this value
5763 * is present, otherwise it must be deleted.
5764 */
5765 v = istist(v, ist("trailers"));
Tim Duesterhus7b5777d2021-03-02 18:57:28 +01005766 if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005767 continue;
5768 v = ist("trailers");
5769 }
5770
Christopher Faulet86d144c2019-08-14 16:32:25 +02005771 /* Skip all pseudo-headers */
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005772 if (*(n.ptr) == ':')
Christopher Faulet86d144c2019-08-14 16:32:25 +02005773 continue;
5774
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005775 if (isteq(n, ist("")))
Willy Tarreau80739692018-10-05 11:35:57 +02005776 break; // end
5777
Willy Tarreaubb2c4ae2020-01-24 09:07:53 +01005778 if (!hpack_encode_header(&outbuf, n, v)) {
Willy Tarreau80739692018-10-05 11:35:57 +02005779 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005780 if (b_space_wraps(mbuf))
Willy Tarreau80739692018-10-05 11:35:57 +02005781 goto realign_again;
5782 goto full;
5783 }
5784 }
5785
Willy Tarreaucb985a42019-10-07 16:56:34 +02005786 /* update the frame's size */
5787 h2_set_frame_size(outbuf.area, outbuf.data - 9);
5788
5789 if (outbuf.data > h2c->mfs + 9) {
5790 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
5791 /* output full */
5792 if (b_space_wraps(mbuf))
5793 goto realign_again;
5794 goto full;
5795 }
5796 }
5797
Willy Tarreau38b5bec2021-06-17 08:40:04 +02005798 TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
5799
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005800 /* remove all header blocks including the EOH and compute the
5801 * corresponding size.
Willy Tarreau80739692018-10-05 11:35:57 +02005802 */
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005803 ret = 0;
5804 blk = htx_get_head_blk(htx);
5805 while (blk) {
5806 type = htx_get_blk_type(blk);
5807 ret += htx_get_blksz(blk);
5808 blk = htx_remove_blk(htx, blk);
5809 /* The removed block is the EOH */
5810 if (type == HTX_BLK_EOH)
5811 break;
Christopher Fauletd0db4232021-01-22 11:46:30 +01005812 }
Willy Tarreau80739692018-10-05 11:35:57 +02005813
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005814 if (!h2s->cs || h2s->cs->flags & CS_FL_SHW) {
5815 /* Request already closed: add END_STREAM */
Willy Tarreau80739692018-10-05 11:35:57 +02005816 es_now = 1;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01005817 }
5818 if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
5819 /* EOM+empty: we may need to add END_STREAM (except for CONNECT
5820 * request)
5821 */
5822 if (!(h2s->flags & H2_SF_BODY_TUNNEL))
5823 es_now = 1;
5824 }
Willy Tarreau80739692018-10-05 11:35:57 +02005825
Willy Tarreau80739692018-10-05 11:35:57 +02005826 if (es_now)
5827 outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
5828
5829 /* commit the H2 response */
Willy Tarreaubcc45952019-05-26 10:05:50 +02005830 b_add(mbuf, outbuf.data);
Willy Tarreau80739692018-10-05 11:35:57 +02005831 h2s->flags |= H2_SF_HEADERS_SENT;
5832 h2s->st = H2_SS_OPEN;
5833
Willy Tarreau80739692018-10-05 11:35:57 +02005834 if (es_now) {
Willy Tarreau7838a792019-08-12 18:42:03 +02005835 TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02005836 // trim any possibly pending data (eg: inconsistent content-length)
5837 h2s->flags |= H2_SF_ES_SENT;
5838 h2s->st = H2_SS_HLOC;
5839 }
5840
Willy Tarreau80739692018-10-05 11:35:57 +02005841 end:
5842 return ret;
5843 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02005844 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
5845 goto retry;
Willy Tarreau80739692018-10-05 11:35:57 +02005846 h2c->flags |= H2_CF_MUX_MFULL;
5847 h2s->flags |= H2_SF_BLK_MROOM;
5848 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02005849 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau80739692018-10-05 11:35:57 +02005850 goto end;
5851 fail:
5852 /* unparsable HTX messages, too large ones to be produced in the local
5853 * list etc go here (unrecoverable errors).
5854 */
5855 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
5856 ret = 0;
5857 goto end;
5858}
5859
/* Try to send a DATA frame matching the HTTP response present in the HTX
 * structure carried by <buf>, for stream <h2s>, limited to <count> bytes.
 * The caller must check the stream's status to detect any error which might
 * have happened subsequently to a successful send. Returns the number of data
 * bytes consumed, or zero if nothing done.
 */
static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
{
	struct h2c *h2c = h2s->h2c;
	struct htx *htx;
	struct buffer outbuf;
	struct buffer *mbuf;
	size_t total = 0;
	int es_now = 0;
	int bsize; /* htx block size */
	int fsize; /* h2 frame size */
	struct htx_blk *blk;
	enum htx_blk_type type;
	int trunc_out; /* non-zero if truncated on out buf */

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);

	if (h2c_mux_busy(h2c, h2s)) {
		/* mux is busy with another stream, block this one for now */
		TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	htx = htx_from_buf(buf);

	/* We only come here with HTX_BLK_DATA blocks */

 new_frame:
	if (!count || htx_is_empty(htx))
		goto end;

	if ((h2c->flags & H2_CF_IS_BACK) &&
	    (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
		/* The response HEADERS frame was not received yet. Thus the
		 * tunnel is not fully established yet. In this situation, we
		 * block data sending.
		 */
		h2s->flags |= H2_SF_BLK_MBUSY;
		TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}
	else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
		/* a tunnel attempt was aborted but there is pending raw data to xfer to the server.
		 * Thus the stream is closed with the CANCEL error. The error will be reported to
		 * the upper layer as a server abort. But at this stage there is nothing more we can
		 * do. We just wait for the end of the response to be sure to not truncate it.
		 */
		if (!(h2s->flags & H2_SF_ES_RCVD)) {
			TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			h2s->flags |= H2_SF_BLK_MBUSY;
		}
		else {
			TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
			h2s_error(h2s, H2_ERR_CANCEL);
		}
		goto end;
	}

	/* only the head block is considered; the loop is made by jumping
	 * back to new_frame once a whole block was consumed.
	 */
	blk = htx_get_head_blk(htx);
	type = htx_get_blk_type(blk);
	bsize = htx_get_blksz(blk);
	fsize = bsize;
	trunc_out = 0;
	if (type != HTX_BLK_DATA)
		goto end;

	mbuf = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, mbuf)) {
		/* no output buffer available: mark both sides blocked */
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
		goto end;
	}

	/* Perform some optimizations to reduce the number of buffer copies.
	 * First, if the mux's buffer is empty and the htx area contains
	 * exactly one data block of the same size as the requested count, and
	 * this count fits within the frame size, the stream's window size, and
	 * the connection's window size, then it's possible to simply swap the
	 * caller's buffer with the mux's output buffer and adjust offsets and
	 * length to match the entire DATA HTX block in the middle. In this
	 * case we perform a true zero-copy operation from end-to-end. This is
	 * the situation that happens all the time with large files. Second, if
	 * this is not possible, but the mux's output buffer is empty, we still
	 * have an opportunity to avoid the copy to the intermediary buffer, by
	 * making the intermediary buffer's area point to the output buffer's
	 * area. In this case we want to skip the HTX header to make sure that
	 * copies remain aligned and that this operation remains possible all
	 * the time. This goes for headers, data blocks and any data extracted
	 * from the HTX blocks.
	 */
	if (unlikely(fsize == count &&
	             htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
	             fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
		void *old_area = mbuf->area;

		if (b_data(mbuf)) {
			/* Too bad there are data left there. We're willing to memcpy/memmove
			 * up to 1/4 of the buffer, which means that it's OK to copy a large
			 * frame into a buffer containing few data if it needs to be realigned,
			 * and that it's also OK to copy few data without realigning. Otherwise
			 * we'll pretend the mbuf is full and wait for it to become empty.
			 */
			if (fsize + 9 <= b_room(mbuf) &&
			    (b_data(mbuf) <= b_size(mbuf) / 4 ||
			     (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
				TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
				goto copy;
			}

			/* try to allocate a new slot in the mbuf ring */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;

			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}

		if (htx->flags & HTX_FL_EOM) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
		/* map an H2 frame to the HTX block so that we can put the
		 * frame header there.
		 */
		*mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
		outbuf.area = b_head(mbuf);

		/* prepend an H2 DATA frame header just before the DATA block */
		memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
		write_n32(outbuf.area + 5, h2s->id); // 4 bytes
		if (es_now)
			outbuf.area[4] |= H2_F_DATA_END_STREAM;
		h2_set_frame_size(outbuf.area, fsize);

		/* update windows */
		h2s->sws -= fsize;
		h2c->mws -= fsize;

		/* and exchange with our old area */
		buf->area = old_area;
		buf->data = buf->head = 0;
		total += fsize;
		fsize = 0;

		TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto out;
	}

 copy:
	/* for DATA and EOM we'll have to emit a frame, even if empty */

	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
			break;
	realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < 9) {
		/* not even room for a frame header, try the next mbuf slot */
		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
			goto retry;
		h2c->flags |= H2_CF_MUX_MFULL;
		h2s->flags |= H2_SF_BLK_MROOM;
		TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
		goto end;
	}

	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
	memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
	outbuf.data = 9;

	/* we have in <fsize> the exact number of bytes we need to copy from
	 * the HTX buffer. We need to check this against the connection's and
	 * the stream's send windows, and to ensure that this fits in the max
	 * frame size and in the buffer's available space minus 9 bytes (for
	 * the frame header). The connection's flow control is applied last so
	 * that we can use a separate list of streams which are immediately
	 * unblocked on window opening. Note: we don't implement padding.
	 */

	if (!fsize)
		goto send_empty;

	if (h2s_mws(h2s) <= 0) {
		/* stream-level flow control exhausted: park the stream on the
		 * blocked list until a WINDOW_UPDATE arrives.
		 */
		h2s->flags |= H2_SF_BLK_SFCTL;
		if (LIST_INLIST(&h2s->list))
			h2_remove_from_list(h2s);
		LIST_APPEND(&h2c->blocked_list, &h2s->list);
		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > count)
		fsize = count;

	if (fsize > h2s_mws(h2s))
		fsize = h2s_mws(h2s); // >0

	if (h2c->mfs && fsize > h2c->mfs)
		fsize = h2c->mfs; // >0

	if (fsize + 9 > outbuf.size) {
		/* It doesn't fit at once. If it at least fits once split and
		 * the amount of data to move is low, let's defragment the
		 * buffer now.
		 */
		if (b_space_wraps(mbuf) &&
		    (fsize + 9 <= b_room(mbuf)) &&
		    b_data(mbuf) <= MAX_DATA_REALIGN)
			goto realign_again;
		fsize = outbuf.size - 9;
		trunc_out = 1; /* remember we truncated so we retry a frame below */

		if (fsize <= 0) {
			/* no need to send an empty frame here */
			if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2s->flags |= H2_SF_BLK_MROOM;
			TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto end;
		}
	}

	if (h2c->mws <= 0) {
		/* connection-level flow control exhausted */
		h2s->flags |= H2_SF_BLK_MFCTL;
		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
		goto end;
	}

	if (fsize > h2c->mws)
		fsize = h2c->mws;

	/* now let's copy this into the output buffer */
	memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
	h2s->sws -= fsize;
	h2c->mws -= fsize;
	count -= fsize;

 send_empty:
	/* update the frame's size */
	h2_set_frame_size(outbuf.area, fsize);

	/* consume incoming HTX block */
	total += fsize;
	if (fsize == bsize) {
		htx_remove_blk(htx, blk);
		if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
			/* EOM+empty: we may need to add END_STREAM (except for tunneled
			 * message)
			 */
			if (!(h2s->flags & H2_SF_BODY_TUNNEL))
				es_now = 1;
		}
	}
	else {
		/* we've truncated this block */
		htx_cut_data_blk(htx, blk, fsize);
	}

	if (es_now)
		outbuf.area[4] |= H2_F_DATA_END_STREAM;

	/* commit the H2 frame to the output buffer */
	b_add(mbuf, fsize + 9);

 out:
	if (es_now) {
		/* END_STREAM was sent: move to half-closed(local) or close */
		if (h2s->st == H2_SS_OPEN)
			h2s->st = H2_SS_HLOC;
		else
			h2s_close(h2s);

		h2s->flags |= H2_SF_ES_SENT;
		TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
	}
	else if (fsize) {
		if (fsize == bsize) {
			TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
			goto new_frame;
		}
		else if (trunc_out) {
			/* we've truncated this block */
			goto new_frame;
		}
	}

 end:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
	return total;
}
6165
Christopher Faulet991febd2020-12-02 15:17:31 +01006166/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
6167 * ES flag set for stream <h2s>. This function is called for response known to
6168 * have no payload. Only DATA blocks are skipped. This means the trailers are
Ilya Shipitsinacf84592021-02-06 22:29:08 +05006169 * still emitted. The caller must check the stream's status to detect any error
Christopher Faulet991febd2020-12-02 15:17:31 +01006170 * which might have happened subsequently to a successful send. Returns the
6171 * number of data bytes consumed, or zero if nothing done.
6172 */
6173static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
6174{
6175 struct h2c *h2c = h2s->h2c;
6176 struct htx *htx;
6177 int bsize; /* htx block size */
6178 int fsize; /* h2 frame size */
6179 struct htx_blk *blk;
6180 enum htx_blk_type type;
6181 size_t total = 0;
6182
6183 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6184
6185 if (h2c_mux_busy(h2c, h2s)) {
6186 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6187 h2s->flags |= H2_SF_BLK_MBUSY;
6188 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6189 goto end;
6190 }
6191
6192 htx = htx_from_buf(buf);
6193
6194 next_data:
6195 if (!count || htx_is_empty(htx))
6196 goto end;
6197 blk = htx_get_head_blk(htx);
6198 type = htx_get_blk_type(blk);
6199 bsize = htx_get_blksz(blk);
6200 fsize = bsize;
6201 if (type != HTX_BLK_DATA)
6202 goto end;
6203
6204 if (fsize > count)
6205 fsize = count;
6206
6207 if (fsize != bsize)
6208 goto skip_data;
6209
6210 if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
6211 goto skip_data;
6212
6213 /* Here, it is the last block and it is also the end of the message. So
6214 * we can emit an empty DATA frame with the ES flag set
6215 */
6216 if (h2_send_empty_data_es(h2s) <= 0)
6217 goto end;
6218
6219 if (h2s->st == H2_SS_OPEN)
6220 h2s->st = H2_SS_HLOC;
6221 else
6222 h2s_close(h2s);
6223
6224 skip_data:
6225 /* consume incoming HTX block */
6226 total += fsize;
6227 if (fsize == bsize) {
6228 TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6229 htx_remove_blk(htx, blk);
6230 goto next_data;
6231 }
6232 else {
6233 /* we've truncated this block */
6234 htx_cut_data_blk(htx, blk, fsize);
6235 }
6236
6237 end:
6238 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
6239 return total;
6240}
6241
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006242/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
6243 * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
6244 * processed. The caller must check the stream's status to detect any error
6245 * which might have happened subsequently to a successful send. The htx blocks
6246 * are automatically removed from the message. The htx message is assumed to be
6247 * valid since produced from the internal code. Processing stops when meeting
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006248 * the EOT, which *is* removed. All trailers are processed at once and sent as a
6249 * single frame. The ES flag is always set.
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006250 */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006251static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006252{
Christopher Faulete4ab11b2019-06-11 15:05:37 +02006253 struct http_hdr list[global.tune.max_http_hdr];
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006254 struct h2c *h2c = h2s->h2c;
6255 struct htx_blk *blk;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006256 struct buffer outbuf;
Willy Tarreaubcc45952019-05-26 10:05:50 +02006257 struct buffer *mbuf;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006258 enum htx_blk_type type;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006259 int ret = 0;
6260 int hdr;
6261 int idx;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006262
Willy Tarreau7838a792019-08-12 18:42:03 +02006263 TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
6264
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006265 if (h2c_mux_busy(h2c, h2s)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006266 TRACE_STATE("mux output busy", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006267 h2s->flags |= H2_SF_BLK_MBUSY;
Willy Tarreau7838a792019-08-12 18:42:03 +02006268 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006269 goto end;
6270 }
6271
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006272 /* get trailers. */
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006273 hdr = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006274 for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006275 type = htx_get_blk_type(blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006276
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006277 if (type == HTX_BLK_UNUSED)
6278 continue;
6279
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006280 if (type == HTX_BLK_EOT)
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006281 break;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006282 if (type == HTX_BLK_TLR) {
6283 if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
6284 TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
6285 goto fail;
6286 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006287
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006288 list[hdr].n = htx_get_blk_name(htx, blk);
6289 list[hdr].v = htx_get_blk_value(htx, blk);
6290 hdr++;
6291 }
6292 else {
6293 TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006294 goto fail;
Willy Tarreau7838a792019-08-12 18:42:03 +02006295 }
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006296 }
6297
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006298 /* marker for end of trailers */
6299 list[hdr].n = ist("");
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006300
Willy Tarreau9c218e72019-05-26 10:08:28 +02006301 mbuf = br_tail(h2c->mbuf);
6302 retry:
6303 if (!h2_get_buf(h2c, mbuf)) {
6304 h2c->flags |= H2_CF_MUX_MALLOC;
6305 h2s->flags |= H2_SF_BLK_MROOM;
Willy Tarreau7838a792019-08-12 18:42:03 +02006306 TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau9c218e72019-05-26 10:08:28 +02006307 goto end;
6308 }
6309
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006310 chunk_reset(&outbuf);
6311
6312 while (1) {
Willy Tarreaubcc45952019-05-26 10:05:50 +02006313 outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
6314 if (outbuf.size >= 9 || !b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006315 break;
6316 realign_again:
Willy Tarreaubcc45952019-05-26 10:05:50 +02006317 b_slow_realign(mbuf, trash.area, b_data(mbuf));
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006318 }
6319
6320 if (outbuf.size < 9)
6321 goto full;
6322
6323 /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
6324 memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
6325 write_n32(outbuf.area + 5, h2s->id); // 4 bytes
6326 outbuf.data = 9;
6327
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006328 /* encode all headers */
6329 for (idx = 0; idx < hdr; idx++) {
6330 /* these ones do not exist in H2 or must not appear in
6331 * trailers and must be dropped.
6332 */
6333 if (isteq(list[idx].n, ist("host")) ||
6334 isteq(list[idx].n, ist("content-length")) ||
6335 isteq(list[idx].n, ist("connection")) ||
6336 isteq(list[idx].n, ist("proxy-connection")) ||
6337 isteq(list[idx].n, ist("keep-alive")) ||
6338 isteq(list[idx].n, ist("upgrade")) ||
6339 isteq(list[idx].n, ist("te")) ||
6340 isteq(list[idx].n, ist("transfer-encoding")))
6341 continue;
6342
Christopher Faulet86d144c2019-08-14 16:32:25 +02006343 /* Skip all pseudo-headers */
6344 if (*(list[idx].n.ptr) == ':')
6345 continue;
6346
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006347 if (!hpack_encode_header(&outbuf, list[idx].n, list[idx].v)) {
6348 /* output full */
Willy Tarreaubcc45952019-05-26 10:05:50 +02006349 if (b_space_wraps(mbuf))
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006350 goto realign_again;
6351 goto full;
6352 }
6353 }
6354
Willy Tarreau5121e5d2019-05-06 15:13:41 +02006355 if (outbuf.data == 9) {
6356 /* here we have a problem, we have nothing to emit (either we
6357 * received an empty trailers block followed or we removed its
6358 * contents above). Because of this we can't send a HEADERS
6359 * frame, so we have to cheat and instead send an empty DATA
6360 * frame conveying the ES flag.
Willy Tarreau67b8cae2019-02-21 18:16:35 +01006361 */
6362 outbuf.area[3] = H2_FT_DATA;
6363 outbuf.area[4] = H2_F_DATA_END_STREAM;
6364 }
6365
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006366 /* update the frame's size */
6367 h2_set_frame_size(outbuf.area, outbuf.data - 9);
6368
Willy Tarreau572d9f52019-10-11 16:58:37 +02006369 if (outbuf.data > h2c->mfs + 9) {
6370 if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
6371 /* output full */
6372 if (b_space_wraps(mbuf))
6373 goto realign_again;
6374 goto full;
6375 }
6376 }
6377
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006378 /* commit the H2 response */
Willy Tarreau7838a792019-08-12 18:42:03 +02006379 TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006380 b_add(mbuf, outbuf.data);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006381 h2s->flags |= H2_SF_ES_SENT;
6382
6383 if (h2s->st == H2_SS_OPEN)
6384 h2s->st = H2_SS_HLOC;
6385 else
6386 h2s_close(h2s);
6387
6388 /* OK we could properly deliver the response */
6389 done:
Willy Tarreaufb07b3f2019-05-06 11:23:29 +02006390 /* remove all header blocks till the end and compute the corresponding size. */
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006391 ret = 0;
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006392 blk = htx_get_head_blk(htx);
6393 while (blk) {
6394 type = htx_get_blk_type(blk);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006395 ret += htx_get_blksz(blk);
6396 blk = htx_remove_blk(htx, blk);
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006397 /* The removed block is the EOT */
6398 if (type == HTX_BLK_EOT)
6399 break;
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006400 }
6401
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006402 end:
Willy Tarreau7838a792019-08-12 18:42:03 +02006403 TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006404 return ret;
6405 full:
Willy Tarreau9c218e72019-05-26 10:08:28 +02006406 if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
6407 goto retry;
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006408 h2c->flags |= H2_CF_MUX_MFULL;
6409 h2s->flags |= H2_SF_BLK_MROOM;
6410 ret = 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006411 TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006412 goto end;
6413 fail:
6414 /* unparsable HTX messages, too large ones to be produced in the local
6415 * list etc go here (unrecoverable errors).
6416 */
6417 h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
6418 ret = 0;
6419 goto end;
6420}
6421
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006422/* Called from the upper layer, to subscribe <es> to events <event_type>. The
6423 * event subscriber <es> is not allowed to change from a previous call as long
6424 * as at least one event is still subscribed. The <event_type> must only be a
6425 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006426 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006427static int h2_subscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard6ff20392018-07-17 18:46:31 +02006428{
Olivier Houchard6ff20392018-07-17 18:46:31 +02006429 struct h2s *h2s = cs->ctx;
Olivier Houchard4cf7fb12018-08-02 19:23:05 +02006430 struct h2c *h2c = h2s->h2c;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006431
Willy Tarreau7838a792019-08-12 18:42:03 +02006432 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006433
6434 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006435 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006436
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006437 es->events |= event_type;
6438 h2s->subs = es;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006439
6440 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006441 TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006442
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006443 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006444 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
Olivier Houchardf8338152019-05-14 17:50:32 +02006445 if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
Willy Tarreau2b718102021-04-21 07:32:39 +02006446 !LIST_INLIST(&h2s->list)) {
Willy Tarreau54f4c192023-10-17 08:25:19 +02006447 if (h2s->flags & H2_SF_BLK_MFCTL) {
6448 TRACE_DEVEL("Adding to fctl list", H2_EV_STRM_SEND, h2c->conn, h2s);
Willy Tarreau2b718102021-04-21 07:32:39 +02006449 LIST_APPEND(&h2c->fctl_list, &h2s->list);
Willy Tarreau54f4c192023-10-17 08:25:19 +02006450 }
6451 else {
6452 TRACE_DEVEL("Adding to send list", H2_EV_STRM_SEND, h2c->conn, h2s);
Willy Tarreau2b718102021-04-21 07:32:39 +02006453 LIST_APPEND(&h2c->send_list, &h2s->list);
Willy Tarreau54f4c192023-10-17 08:25:19 +02006454 }
Olivier Houcharde1c6dbc2018-08-01 17:06:43 +02006455 }
Olivier Houchard6ff20392018-07-17 18:46:31 +02006456 }
Willy Tarreau7838a792019-08-12 18:42:03 +02006457 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006458 return 0;
Olivier Houchard6ff20392018-07-17 18:46:31 +02006459}
6460
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006461/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
6462 * The <es> pointer is not allowed to differ from the one passed to the
6463 * subscribe() call. It always returns zero.
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006464 */
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006465static int h2_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006466{
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006467 struct h2s *h2s = cs->ctx;
6468
Willy Tarreau7838a792019-08-12 18:42:03 +02006469 TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006470
6471 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006472 BUG_ON(h2s->subs && h2s->subs != es);
Willy Tarreauf96508a2020-01-10 11:12:48 +01006473
Willy Tarreauee1a6fc2020-01-17 07:52:13 +01006474 es->events &= ~event_type;
6475 if (!es->events)
Willy Tarreauf96508a2020-01-10 11:12:48 +01006476 h2s->subs = NULL;
6477
6478 if (event_type & SUB_RETRY_RECV)
Willy Tarreau7838a792019-08-12 18:42:03 +02006479 TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006480
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006481 if (event_type & SUB_RETRY_SEND) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006482 TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreaud9464162020-01-10 18:25:07 +01006483 h2s->flags &= ~H2_SF_NOTIFIED;
Willy Tarreauf96508a2020-01-10 11:12:48 +01006484 if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
Willy Tarreau54f4c192023-10-17 08:25:19 +02006485 h2_remove_from_list(h2s);
Olivier Houchardd846c262018-10-19 17:24:29 +02006486 }
Willy Tarreauf96508a2020-01-10 11:12:48 +01006487
Willy Tarreau7838a792019-08-12 18:42:03 +02006488 TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
Olivier Houchard83a0cd82018-09-28 17:57:58 +02006489 return 0;
6490}
6491
6492
Christopher Faulet93a466b2021-09-21 15:50:55 +02006493/* Called from the upper layer, to receive data
6494 *
6495 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
6496 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
6497 * means the caller wants to flush input data (from the mux buffer and the
6498 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
6499 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
6500 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
6501 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
6502 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
6503 * copy as much data as possible.
6504 */
Olivier Houchard511efea2018-08-16 15:30:32 +02006505static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
6506{
Olivier Houchard638b7992018-08-16 15:41:52 +02006507 struct h2s *h2s = cs->ctx;
Willy Tarreau082f5592018-11-25 08:03:32 +01006508 struct h2c *h2c = h2s->h2c;
Willy Tarreau86724e22018-12-01 23:19:43 +01006509 struct htx *h2s_htx = NULL;
6510 struct htx *buf_htx = NULL;
Olivier Houchard511efea2018-08-16 15:30:32 +02006511 size_t ret = 0;
6512
Willy Tarreau7838a792019-08-12 18:42:03 +02006513 TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);
6514
Olivier Houchard511efea2018-08-16 15:30:32 +02006515 /* transfer possibly pending data to the upper layer */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006516 h2s_htx = htx_from_buf(&h2s->rxbuf);
Christopher Faulet83f3d3d2022-02-21 15:12:54 +01006517 if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
Christopher Faulet9b79a102019-07-15 11:22:56 +02006518 /* Here htx_to_buf() will set buffer data to 0 because
6519 * the HTX is empty.
6520 */
6521 htx_to_buf(h2s_htx, &h2s->rxbuf);
6522 goto end;
6523 }
Willy Tarreau7196dd62019-03-05 10:51:11 +01006524
Christopher Faulet9b79a102019-07-15 11:22:56 +02006525 ret = h2s_htx->data;
6526 buf_htx = htx_from_buf(buf);
Willy Tarreau7196dd62019-03-05 10:51:11 +01006527
Christopher Faulet9b79a102019-07-15 11:22:56 +02006528 /* <buf> is empty and the message is small enough, swap the
6529 * buffers. */
6530 if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
Christopher Faulet27ba2dc2018-12-05 11:53:24 +01006531 htx_to_buf(buf_htx, buf);
6532 htx_to_buf(h2s_htx, &h2s->rxbuf);
Christopher Faulet9b79a102019-07-15 11:22:56 +02006533 b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
6534 goto end;
Willy Tarreau86724e22018-12-01 23:19:43 +01006535 }
Christopher Faulet9b79a102019-07-15 11:22:56 +02006536
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006537 htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);
Christopher Faulet9b79a102019-07-15 11:22:56 +02006538
6539 if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
6540 buf_htx->flags |= HTX_FL_PARSING_ERROR;
6541 if (htx_is_empty(buf_htx))
6542 cs->flags |= CS_FL_EOI;
Willy Tarreau86724e22018-12-01 23:19:43 +01006543 }
Christopher Faulet810df062020-07-22 16:20:34 +02006544 else if (htx_is_empty(h2s_htx))
Christopher Faulet42432f32020-11-20 17:43:16 +01006545 buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
Olivier Houchard511efea2018-08-16 15:30:32 +02006546
Christopher Faulet9b79a102019-07-15 11:22:56 +02006547 buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
6548 htx_to_buf(buf_htx, buf);
6549 htx_to_buf(h2s_htx, &h2s->rxbuf);
6550 ret -= h2s_htx->data;
6551
Christopher Faulet37070b22019-02-14 15:12:14 +01006552 end:
Olivier Houchard638b7992018-08-16 15:41:52 +02006553 if (b_data(&h2s->rxbuf))
Olivier Houchardd247be02018-12-06 16:22:29 +01006554 cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
Olivier Houchard511efea2018-08-16 15:30:32 +02006555 else {
Olivier Houchardd247be02018-12-06 16:22:29 +01006556 cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
Christopher Fauletd0db4232021-01-22 11:46:30 +01006557 if (h2s->flags & H2_SF_ES_RCVD) {
Christopher Fauletfa922f02019-05-07 10:55:17 +02006558 cs->flags |= CS_FL_EOI;
Christopher Fauletd0db4232021-01-22 11:46:30 +01006559 /* Add EOS flag for tunnel */
6560 if (h2s->flags & H2_SF_BODY_TUNNEL)
6561 cs->flags |= CS_FL_EOS;
6562 }
Christopher Fauletaade4ed2020-10-08 15:38:41 +02006563 if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
Olivier Houchard511efea2018-08-16 15:30:32 +02006564 cs->flags |= CS_FL_EOS;
Olivier Houchard71748cb2018-12-17 14:16:46 +01006565 if (cs->flags & CS_FL_ERR_PENDING)
6566 cs->flags |= CS_FL_ERROR;
Olivier Houchard638b7992018-08-16 15:41:52 +02006567 if (b_size(&h2s->rxbuf)) {
6568 b_free(&h2s->rxbuf);
Willy Tarreau4d77bbf2021-02-20 12:02:46 +01006569 offer_buffers(NULL, 1);
Olivier Houchard638b7992018-08-16 15:41:52 +02006570 }
Olivier Houchard511efea2018-08-16 15:30:32 +02006571 }
6572
Willy Tarreau082f5592018-11-25 08:03:32 +01006573 if (ret && h2c->dsi == h2s->id) {
6574 /* demux is blocking on this stream's buffer */
6575 h2c->flags &= ~H2_CF_DEM_SFULL;
Olivier Houchard3ca18bf2019-04-05 15:34:34 +02006576 h2c_restart_reading(h2c, 1);
Willy Tarreau082f5592018-11-25 08:03:32 +01006577 }
Christopher Faulet37070b22019-02-14 15:12:14 +01006578
Willy Tarreau7838a792019-08-12 18:42:03 +02006579 TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
Olivier Houchard511efea2018-08-16 15:30:32 +02006580 return ret;
6581}
6582
Olivier Houchardd846c262018-10-19 17:24:29 +02006583
Willy Tarreau749f5ca2019-03-21 19:19:36 +01006584/* Called from the upper layer, to send data from buffer <buf> for no more than
6585 * <count> bytes. Returns the number of bytes effectively sent. Some status
6586 * flags may be updated on the conn_stream.
6587 */
Christopher Fauletd44a9b32018-07-27 11:59:41 +02006588static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
Willy Tarreau62f52692017-10-08 23:01:42 +02006589{
Willy Tarreau9e5ae1d2017-10-17 19:58:20 +02006590 struct h2s *h2s = cs->ctx;
Willy Tarreau1dc41e72018-06-14 13:21:28 +02006591 size_t total = 0;
Willy Tarreau5dd17352018-06-14 13:33:30 +02006592 size_t ret;
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006593 struct htx *htx;
6594 struct htx_blk *blk;
6595 enum htx_blk_type btype;
6596 uint32_t bsize;
6597 int32_t idx;
Willy Tarreau9e5ae1d2017-10-17 19:58:20 +02006598
Willy Tarreau7838a792019-08-12 18:42:03 +02006599 TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
6600
Olivier Houchardd360ac62019-03-22 17:37:16 +01006601 /* If we were not just woken because we wanted to send but couldn't,
6602 * and there's somebody else that is waiting to send, do nothing,
6603 * we will subscribe later and be put at the end of the list
6604 */
Willy Tarreaud9464162020-01-10 18:25:07 +01006605 if (!(h2s->flags & H2_SF_NOTIFIED) &&
Willy Tarreau7838a792019-08-12 18:42:03 +02006606 (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
Willy Tarreau54f4c192023-10-17 08:25:19 +02006607 if (LIST_INLIST(&h2s->list))
6608 TRACE_DEVEL("stream already waiting, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
6609 else {
6610 TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
6611 h2s->h2c->flags |= H2_CF_WAIT_INLIST;
6612 }
Olivier Houchardd360ac62019-03-22 17:37:16 +01006613 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006614 }
Willy Tarreaud9464162020-01-10 18:25:07 +01006615 h2s->flags &= ~H2_SF_NOTIFIED;
Olivier Houchard998410a2019-04-15 19:23:37 +02006616
Willy Tarreau7838a792019-08-12 18:42:03 +02006617 if (h2s->h2c->st0 < H2_CS_FRAME_H) {
6618 TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
Willy Tarreau6bf641a2018-10-08 09:43:03 +02006619 return 0;
Willy Tarreau7838a792019-08-12 18:42:03 +02006620 }
Willy Tarreau6bf641a2018-10-08 09:43:03 +02006621
Willy Tarreaucab22952019-10-31 15:48:18 +01006622 if (h2s->h2c->st0 >= H2_CS_ERROR) {
6623 cs->flags |= CS_FL_ERROR;
6624 TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
6625 return 0;
6626 }
6627
Christopher Faulet9b79a102019-07-15 11:22:56 +02006628 htx = htx_from_buf(buf);
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006629
Willy Tarreau0bad0432018-06-14 16:54:01 +02006630 if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
Willy Tarreauc4312d32017-11-07 12:01:53 +01006631 h2s->flags |= H2_SF_OUTGOING_DATA;
6632
Willy Tarreau751f2d02018-10-05 09:35:00 +02006633 if (h2s->id == 0) {
6634 int32_t id = h2c_get_next_sid(h2s->h2c);
6635
6636 if (id < 0) {
Willy Tarreau751f2d02018-10-05 09:35:00 +02006637 cs->flags |= CS_FL_ERROR;
Willy Tarreau7838a792019-08-12 18:42:03 +02006638 TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Willy Tarreau751f2d02018-10-05 09:35:00 +02006639 return 0;
6640 }
6641
6642 eb32_delete(&h2s->by_id);
6643 h2s->by_id.key = h2s->id = id;
6644 h2s->h2c->max_id = id;
Willy Tarreaud64a3eb2019-01-23 10:22:21 +01006645 h2s->h2c->nb_reserved--;
Willy Tarreau751f2d02018-10-05 09:35:00 +02006646 eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
6647 }
6648
Christopher Faulet9b79a102019-07-15 11:22:56 +02006649 while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
6650 count && !htx_is_empty(htx)) {
6651 idx = htx_get_head(htx);
6652 blk = htx_get_blk(htx, idx);
6653 btype = htx_get_blk_type(blk);
6654 bsize = htx_get_blksz(blk);
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006655
Christopher Faulet9b79a102019-07-15 11:22:56 +02006656 switch (btype) {
Willy Tarreau80739692018-10-05 11:35:57 +02006657 case HTX_BLK_REQ_SL:
6658 /* start-line before headers */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006659 ret = h2s_bck_make_req_headers(h2s, htx);
Willy Tarreau80739692018-10-05 11:35:57 +02006660 if (ret > 0) {
6661 total += ret;
6662 count -= ret;
6663 if (ret < bsize)
6664 goto done;
6665 }
6666 break;
6667
Willy Tarreau115e83b2018-12-01 19:17:53 +01006668 case HTX_BLK_RES_SL:
6669 /* start-line before headers */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006670 ret = h2s_frt_make_resp_headers(h2s, htx);
Willy Tarreau115e83b2018-12-01 19:17:53 +01006671 if (ret > 0) {
6672 total += ret;
6673 count -= ret;
6674 if (ret < bsize)
6675 goto done;
6676 }
6677 break;
6678
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006679 case HTX_BLK_DATA:
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006680 /* all these cause the emission of a DATA frame (possibly empty) */
Christopher Faulet991febd2020-12-02 15:17:31 +01006681 if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
6682 (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
6683 ret = h2s_skip_data(h2s, buf, count);
6684 else
6685 ret = h2s_make_data(h2s, buf, count);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006686 if (ret > 0) {
Willy Tarreau98de12a2018-12-12 07:03:00 +01006687 htx = htx_from_buf(buf);
Willy Tarreau0c535fd2018-12-01 19:25:56 +01006688 total += ret;
6689 count -= ret;
6690 if (ret < bsize)
6691 goto done;
6692 }
6693 break;
6694
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006695 case HTX_BLK_TLR:
Christopher Faulet2d7c5392019-06-03 10:41:26 +02006696 case HTX_BLK_EOT:
Christopher Fauletd1ac2b92020-12-02 19:12:22 +01006697 /* This is the first trailers block, all the subsequent ones */
Christopher Faulet9b79a102019-07-15 11:22:56 +02006698 ret = h2s_make_trailers(h2s, htx);
Willy Tarreau1bb812f2019-01-04 10:56:26 +01006699 if (ret > 0) {
6700 total += ret;
6701 count -= ret;
6702 if (ret < bsize)
6703 goto done;
6704 }
6705 break;
6706
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006707 default:
6708 htx_remove_blk(htx, blk);
6709 total += bsize;
6710 count -= bsize;
6711 break;
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006712 }
Willy Tarreaubcd3bb32018-12-01 18:59:00 +01006713 }
6714
Christopher Faulet9b79a102019-07-15 11:22:56 +02006715 done:
Willy Tarreau2b778482019-05-06 15:00:22 +02006716 if (h2s->st >= H2_SS_HLOC) {
Willy Tarreau00610962018-07-19 10:58:28 +02006717 /* trim any possibly pending data after we close (extra CR-LF,
6718 * unprocessed trailers, abnormal extra data, ...)
6719 */
Willy Tarreau0bad0432018-06-14 16:54:01 +02006720 total += count;
6721 count = 0;
Willy Tarreau00610962018-07-19 10:58:28 +02006722 }
6723
Willy Tarreauc6795ca2017-11-07 09:43:06 +01006724 /* RST are sent similarly to frame acks */
Willy Tarreau02492192017-12-07 15:59:29 +01006725 if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006726 TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Willy Tarreauec988c72018-12-19 18:00:29 +01006727 cs_set_error(cs);
Willy Tarreau8c0ea7d2017-11-10 10:05:24 +01006728 if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
Willy Tarreau00dd0782018-03-01 16:31:34 +01006729 h2s_close(h2s);
Willy Tarreauc6795ca2017-11-07 09:43:06 +01006730 }
6731
Christopher Faulet9b79a102019-07-15 11:22:56 +02006732 htx_to_buf(htx, buf);
Olivier Houchardd846c262018-10-19 17:24:29 +02006733
Olivier Houchard7505f942018-08-21 18:10:44 +02006734 if (total > 0) {
Tim Duesterhus12a08d82020-12-21 19:40:16 +01006735 if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006736 TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
Willy Tarreau3c39a7d2019-06-14 14:42:29 +02006737 tasklet_wakeup(h2s->h2c->wait_event.tasklet);
Tim Duesterhus12a08d82020-12-21 19:40:16 +01006738 }
Olivier Houchardd846c262018-10-19 17:24:29 +02006739
Olivier Houchard7505f942018-08-21 18:10:44 +02006740 }
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006741 /* If we're waiting for flow control, and we got a shutr on the
6742 * connection, we will never be unlocked, so add an error on
6743 * the conn_stream.
6744 */
6745 if (conn_xprt_read0_pending(h2s->h2c->conn) &&
6746 !b_data(&h2s->h2c->dbuf) &&
6747 (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
Willy Tarreau7838a792019-08-12 18:42:03 +02006748 TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
Olivier Houchard6dea2ee2018-12-19 18:16:17 +01006749 if (cs->flags & CS_FL_EOS)
6750 cs->flags |= CS_FL_ERROR;
6751 else
6752 cs->flags |= CS_FL_ERR_PENDING;
6753 }
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006754
Willy Tarreau5723f292020-01-10 15:16:57 +01006755 if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
6756 !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006757 /* Ok we managed to send something, leave the send_list if we were still there */
Willy Tarreau54f4c192023-10-17 08:25:19 +02006758 h2_remove_from_list(h2s);
6759 TRACE_DEVEL("Removed from h2s list", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
Olivier Houchardd360ac62019-03-22 17:37:16 +01006760 }
Willy Tarreau9edf6db2019-10-02 10:49:59 +02006761
Willy Tarreau7838a792019-08-12 18:42:03 +02006762 TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
Willy Tarreau9e5ae1d2017-10-17 19:58:20 +02006763 return total;
Willy Tarreau62f52692017-10-08 23:01:42 +02006764}
6765
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006766/* for debugging with CLI's "show fd" command */
Willy Tarreau8050efe2021-01-21 08:26:06 +01006767static int h2_show_fd(struct buffer *msg, struct connection *conn)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006768{
Willy Tarreau3d2ee552018-12-19 14:12:10 +01006769 struct h2c *h2c = conn->ctx;
Willy Tarreau987c0632018-12-18 10:32:05 +01006770 struct h2s *h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006771 struct eb32_node *node;
6772 int fctl_cnt = 0;
6773 int send_cnt = 0;
6774 int tree_cnt = 0;
6775 int orph_cnt = 0;
Willy Tarreau60f62682019-05-26 11:32:27 +02006776 struct buffer *hmbuf, *tmbuf;
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006777 int ret = 0;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006778
6779 if (!h2c)
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006780 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006781
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006782 list_for_each_entry(h2s, &h2c->fctl_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006783 fctl_cnt++;
6784
Olivier Houchardfa8aa862018-10-10 18:25:41 +02006785 list_for_each_entry(h2s, &h2c->send_list, list)
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006786 send_cnt++;
6787
Willy Tarreau3af37712018-12-18 14:34:41 +01006788 h2s = NULL;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006789 node = eb32_first(&h2c->streams_by_id);
6790 while (node) {
6791 h2s = container_of(node, struct h2s, by_id);
6792 tree_cnt++;
6793 if (!h2s->cs)
6794 orph_cnt++;
6795 node = eb32_next(node);
6796 }
6797
Willy Tarreau60f62682019-05-26 11:32:27 +02006798 hmbuf = br_head(h2c->mbuf);
Willy Tarreaubcc45952019-05-26 10:05:50 +02006799 tmbuf = br_tail(h2c->mbuf);
Willy Tarreauab2ec452019-08-30 07:07:08 +02006800 chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
Willy Tarreau987c0632018-12-18 10:32:05 +01006801 " .nbst=%u .nbcs=%u .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
Willy Tarreau60f62682019-05-26 11:32:27 +02006802 " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u .msi=%d"
6803 " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006804 h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
Willy Tarreau616ac812018-07-24 14:12:42 +02006805 h2c->nb_streams, h2c->nb_cs, fctl_cnt, send_cnt, tree_cnt, orph_cnt,
Willy Tarreau4f6516d2018-12-19 13:59:17 +01006806 h2c->wait_event.events, h2c->dsi,
Willy Tarreau987c0632018-12-18 10:32:05 +01006807 (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
6808 (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf),
6809 h2c->msi,
Willy Tarreau60f62682019-05-26 11:32:27 +02006810 br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
6811 (unsigned int)b_data(hmbuf), b_orig(hmbuf),
6812 (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
Willy Tarreaubcc45952019-05-26 10:05:50 +02006813 (unsigned int)b_data(tmbuf), b_orig(tmbuf),
6814 (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
Willy Tarreau987c0632018-12-18 10:32:05 +01006815
6816 if (h2s) {
Willy Tarreaued4464e2021-01-20 15:50:03 +01006817 chunk_appendf(msg, " last_h2s=%p .id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u .cs=%p",
Willy Tarreauab2ec452019-08-30 07:07:08 +02006818 h2s, h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
Willy Tarreau987c0632018-12-18 10:32:05 +01006819 (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
6820 (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf),
6821 h2s->cs);
6822 if (h2s->cs)
Willy Tarreau98e40b92021-01-20 16:27:01 +01006823 chunk_appendf(msg, "(.flg=0x%08x .data=%p)",
Willy Tarreau987c0632018-12-18 10:32:05 +01006824 h2s->cs->flags, h2s->cs->data);
Willy Tarreau98e40b92021-01-20 16:27:01 +01006825
Willy Tarreaue972a432022-09-01 18:02:15 +02006826 chunk_appendf(msg, " .subs=%p", h2s->subs);
Willy Tarreau98e40b92021-01-20 16:27:01 +01006827 if (h2s->subs) {
Willy Tarreaue972a432022-09-01 18:02:15 +02006828 chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
6829 chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
Christopher Faulet6c93c4e2021-02-25 10:06:29 +01006830 h2s->subs->tasklet->calls,
6831 h2s->subs->tasklet->context);
6832 if (h2s->subs->tasklet->calls >= 1000000)
6833 ret = 1;
Willy Tarreaue972a432022-09-01 18:02:15 +02006834 resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
6835 chunk_appendf(msg, ")");
Willy Tarreau98e40b92021-01-20 16:27:01 +01006836 }
Willy Tarreau987c0632018-12-18 10:32:05 +01006837 }
Willy Tarreau06bf83e2021-01-21 09:13:35 +01006838 return ret;
Willy Tarreaue3f36cd2018-03-30 14:43:13 +02006839}
Willy Tarreau62f52692017-10-08 23:01:42 +02006840
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006841/* Migrate the the connection to the current thread.
6842 * Return 0 if successful, non-zero otherwise.
6843 * Expected to be called with the old thread lock held.
6844 */
Olivier Houchard1662cdb2020-07-03 14:04:37 +02006845static int h2_takeover(struct connection *conn, int orig_tid)
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006846{
6847 struct h2c *h2c = conn->ctx;
Willy Tarreau617e80f2020-07-01 16:39:33 +02006848 struct task *task;
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006849 struct task *new_task;
6850 struct tasklet *new_tasklet;
6851
6852 /* Pre-allocate tasks so that we don't have to roll back after the xprt
6853 * has been migrated.
6854 */
6855 new_task = task_new(tid_bit);
6856 new_tasklet = tasklet_new();
6857 if (!new_task || !new_tasklet)
6858 goto fail;
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006859
6860 if (fd_takeover(conn->handle.fd, conn) != 0)
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006861 goto fail;
Olivier Houcharda74bb7e2020-07-03 14:01:21 +02006862
6863 if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
6864 /* We failed to takeover the xprt, even if the connection may
6865 * still be valid, flag it as error'd, as we have already
6866 * taken over the fd, and wake the tasklet, so that it will
6867 * destroy it.
6868 */
6869 conn->flags |= CO_FL_ERROR;
6870 tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006871 goto fail;
Olivier Houcharda74bb7e2020-07-03 14:01:21 +02006872 }
6873
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006874 if (h2c->wait_event.events)
6875 h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
6876 h2c->wait_event.events, &h2c->wait_event);
Willy Tarreau617e80f2020-07-01 16:39:33 +02006877
6878 task = h2c->task;
6879 if (task) {
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006880 /* only assign a task if there was already one, otherwise
6881 * the preallocated new task will be released.
6882 */
Willy Tarreau617e80f2020-07-01 16:39:33 +02006883 task->context = NULL;
6884 h2c->task = NULL;
6885 __ha_barrier_store();
6886 task_kill(task);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006887
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006888 h2c->task = new_task;
6889 new_task = NULL;
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006890 h2c->task->process = h2_timeout_task;
6891 h2c->task->context = h2c;
6892 }
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006893
6894 /* To let the tasklet know it should free itself, and do nothing else,
6895 * set its context to NULL.
6896 */
6897 h2c->wait_event.tasklet->context = NULL;
6898 tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
6899
6900 h2c->wait_event.tasklet = new_tasklet;
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006901 h2c->wait_event.tasklet->process = h2_io_cb;
6902 h2c->wait_event.tasklet->context = h2c;
6903 h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
6904 SUB_RETRY_RECV, &h2c->wait_event);
6905
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006906 if (new_task)
6907 __task_free(new_task);
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006908 return 0;
Willy Tarreau237e9fe2023-11-17 10:56:33 +01006909 fail:
6910 if (new_task)
6911 __task_free(new_task);
6912 tasklet_free(new_tasklet);
6913 return -1;
Olivier Houchardcd4159f2020-03-10 18:39:42 +01006914}
6915
Willy Tarreau62f52692017-10-08 23:01:42 +02006916/*******************************************************/
6917/* functions below are dedicated to the config parsers */
6918/*******************************************************/
6919
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006920/* config parser for global "tune.h2.header-table-size" */
6921static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006922 const struct proxy *defpx, const char *file, int line,
Willy Tarreaufe20e5b2017-07-27 11:42:14 +02006923 char **err)
6924{
6925 if (too_many_args(1, args, err, NULL))
6926 return -1;
6927
6928 h2_settings_header_table_size = atoi(args[1]);
6929 if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
6930 memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
6931 return -1;
6932 }
6933 return 0;
6934}
Willy Tarreau62f52692017-10-08 23:01:42 +02006935
Willy Tarreaue6baec02017-07-27 11:45:11 +02006936/* config parser for global "tune.h2.initial-window-size" */
6937static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006938 const struct proxy *defpx, const char *file, int line,
Willy Tarreaue6baec02017-07-27 11:45:11 +02006939 char **err)
6940{
6941 if (too_many_args(1, args, err, NULL))
6942 return -1;
6943
6944 h2_settings_initial_window_size = atoi(args[1]);
6945 if (h2_settings_initial_window_size < 0) {
6946 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6947 return -1;
6948 }
6949 return 0;
6950}
6951
Willy Tarreau5242ef82017-07-27 11:47:28 +02006952/* config parser for global "tune.h2.max-concurrent-streams" */
6953static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006954 const struct proxy *defpx, const char *file, int line,
Willy Tarreau5242ef82017-07-27 11:47:28 +02006955 char **err)
6956{
6957 if (too_many_args(1, args, err, NULL))
6958 return -1;
6959
6960 h2_settings_max_concurrent_streams = atoi(args[1]);
Willy Tarreau5a490b62019-01-31 10:39:51 +01006961 if ((int)h2_settings_max_concurrent_streams < 0) {
Willy Tarreau5242ef82017-07-27 11:47:28 +02006962 memprintf(err, "'%s' expects a positive numeric value.", args[0]);
6963 return -1;
6964 }
6965 return 0;
6966}
6967
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006968/* config parser for global "tune.h2.max-frame-size" */
6969static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +01006970 const struct proxy *defpx, const char *file, int line,
Willy Tarreaua24b35c2019-02-21 13:24:36 +01006971 char **err)
6972{
6973 if (too_many_args(1, args, err, NULL))
6974 return -1;
6975
6976 h2_settings_max_frame_size = atoi(args[1]);
6977 if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
6978 memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
6979 return -1;
6980 }
6981 return 0;
6982}
6983
Willy Tarreau62f52692017-10-08 23:01:42 +02006984
/****************************************/
/* MUX initialization and instantiation */
/****************************************/
6988
/* The mux operations: callbacks exposed to the connection layer for the
 * HTTP/2 multiplexer.
 */
static const struct mux_ops h2_ops = {
	.init = h2_init,
	.wake = h2_wake,
	/* data path */
	.snd_buf = h2_snd_buf,
	.rcv_buf = h2_rcv_buf,
	.subscribe = h2_subscribe,
	.unsubscribe = h2_unsubscribe,
	/* stream attachment / lifecycle */
	.attach = h2_attach,
	.get_first_cs = h2_get_first_cs,
	.detach = h2_detach,
	.destroy = h2_destroy,
	.avail_streams = h2_avail_streams,
	.used_streams = h2_used_streams,
	.shutr = h2_shutr,
	.shutw = h2_shutw,
	.ctl = h2_ctl,
	.show_fd = h2_show_fd,
	/* cross-thread connection migration (idle conn reuse) */
	.takeover = h2_takeover,
	.flags = MX_FL_CLEAN_ABRT|MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
	.name = "H2",
};
7011
/* Registration entry binding the "h2" ALPN/proto token to h2_ops, on both
 * the frontend and backend sides, in HTTP mode.
 */
static struct mux_proto_list mux_proto_h2 =
	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };

/* register the mux protocol at startup, before config parsing */
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
7016
/* config keyword parsers: global "tune.h2.*" settings handled by the
 * parser functions above. The list is NULL-terminated.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.h2.header-table-size",      h2_parse_header_table_size      },
	{ CFG_GLOBAL, "tune.h2.initial-window-size",    h2_parse_initial_window_size    },
	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams", h2_parse_max_concurrent_streams },
	{ CFG_GLOBAL, "tune.h2.max-frame-size",         h2_parse_max_frame_size         },
	{ 0, NULL, NULL }
}};

/* register the keyword list at startup */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
Willy Tarreau2bdcc702020-05-19 11:31:11 +02007027
7028/* initialize internal structs after the config is parsed.
7029 * Returns zero on success, non-zero on error.
7030 */
7031static int init_h2()
7032{
7033 pool_head_hpack_tbl = create_pool("hpack_tbl",
7034 h2_settings_header_table_size,
7035 MEM_F_SHARED|MEM_F_EXACT);
Christopher Faulet52140992020-11-06 15:23:39 +01007036 if (!pool_head_hpack_tbl) {
7037 ha_alert("failed to allocate hpack_tbl memory pool\n");
7038 return (ERR_ALERT | ERR_FATAL);
7039 }
7040 return ERR_NONE;
Willy Tarreau2bdcc702020-05-19 11:31:11 +02007041}
7042
7043REGISTER_POST_CHECK(init_h2);