#include <haproxy/mux_quic.h>

#include <import/eb64tree.h>

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/dynbuf.h>
#include <haproxy/htx.h>
#include <haproxy/pool.h>
#include <haproxy/sink.h>
#include <haproxy/ssl_sock-t.h>
#include <haproxy/trace.h>
#include <haproxy/xprt_quic.h>

DECLARE_POOL(pool_head_qcc, "qcc", sizeof(struct qcc));
DECLARE_POOL(pool_head_qcs, "qcs", sizeof(struct qcs));

/* trace source and events */
static void qmux_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4);

static const struct trace_event qmux_trace_events[] = {
#define           QMUX_EV_QCC_RECV     (1ULL << 1)
        { .mask = QMUX_EV_QCC_RECV,     .name = "qcc_recv",     .desc = "Rx on QUIC connection" },
#define           QMUX_EV_QCC_SEND     (1ULL << 2)
        { .mask = QMUX_EV_QCC_SEND,     .name = "qcc_send",     .desc = "Tx on QUIC connection" },
#define           QMUX_EV_QCC_WAKE     (1ULL << 3)
        { .mask = QMUX_EV_QCC_WAKE,     .name = "qcc_wake",     .desc = "QUIC connection woken up" },
#define           QMUX_EV_QCC_END      (1ULL << 4)
        { .mask = QMUX_EV_QCC_END,      .name = "qcc_end",      .desc = "QUIC connection terminated" },
#define           QMUX_EV_QCC_NQCS     (1ULL << 5)
        { .mask = QMUX_EV_QCC_NQCS,     .name = "qcc_no_qcs",   .desc = "QUIC stream not found" },
#define           QMUX_EV_QCS_NEW      (1ULL << 6)
        { .mask = QMUX_EV_QCS_NEW,      .name = "qcs_new",      .desc = "new QUIC stream" },
#define           QMUX_EV_QCS_RECV     (1ULL << 7)
        { .mask = QMUX_EV_QCS_RECV,     .name = "qcs_recv",     .desc = "Rx on QUIC stream" },
#define           QMUX_EV_QCS_SEND     (1ULL << 8)
        { .mask = QMUX_EV_QCS_SEND,     .name = "qcs_send",     .desc = "Tx on QUIC stream" },
#define           QMUX_EV_QCS_END      (1ULL << 9)
        { .mask = QMUX_EV_QCS_END,      .name = "qcs_end",      .desc = "QUIC stream terminated" },
#define           QMUX_EV_STRM_RECV    (1ULL << 10)
        { .mask = QMUX_EV_STRM_RECV,    .name = "strm_recv",    .desc = "receiving data for stream" },
#define           QMUX_EV_STRM_SEND    (1ULL << 11)
        { .mask = QMUX_EV_STRM_SEND,    .name = "strm_send",    .desc = "sending data for stream" },
#define           QMUX_EV_STRM_END     (1ULL << 12)
        { .mask = QMUX_EV_STRM_END,     .name = "strm_end",     .desc = "detaching app-layer stream" },
#define           QMUX_EV_SEND_FRM     (1ULL << 13)
        { .mask = QMUX_EV_SEND_FRM,     .name = "send_frm",     .desc = "sending QUIC frame" },
/* special event dedicated to qcs_push_frame */
#define           QMUX_EV_QCS_PUSH_FRM (1ULL << 14)
        { .mask = QMUX_EV_QCS_PUSH_FRM, .name = "qcs_push_frm", .desc = "qcs_push_frame" },
        { }
};

/* custom arg for QMUX_EV_QCS_PUSH_FRM */
struct qcs_push_frm_trace_arg {
        size_t sent;
        int xfer;
        char fin;
        uint64_t offset;
};

static const struct name_desc qmux_trace_lockon_args[4] = {
        /* arg1 */ { /* already used by the connection */ },
        /* arg2 */ { .name="qcs", .desc="QUIC stream" },
        /* arg3 */ { },
        /* arg4 */ { }
};

static const struct name_desc qmux_trace_decoding[] = {
#define QMUX_VERB_CLEAN    1
        { .name="clean",   .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define QMUX_VERB_MINIMAL  2
        { .name="minimal", .desc="report only qcc/qcs state and flags, no real decoding" },
        { /* end */ }
};

struct trace_source trace_qmux = {
        .name = IST("qmux"),
        .desc = "QUIC multiplexer",
        .arg_def = TRC_ARG1_CONN,  /* TRACE()'s first argument is always a connection */
        .default_cb = qmux_trace,
        .known_events = qmux_trace_events,
        .lockon_args = qmux_trace_lockon_args,
        .decoding = qmux_trace_decoding,
        .report_events = ~0,  /* report everything by default */
};

#define TRACE_SOURCE &trace_qmux
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
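
/* As a usage sketch (hedged: the exact directives belong to HAProxy's
 * generic "trace" CLI machinery, not to this file), the events above can
 * typically be inspected at runtime with something like:
 *   trace qmux sink stdout
 *   trace qmux level developer
 *   trace qmux start now
 */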

/* Allocate a new QUIC stream with id <id> and type <type>. */
struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
{
        struct qcs *qcs;

        TRACE_ENTER(QMUX_EV_QCS_NEW, qcc->conn);

        qcs = pool_alloc(pool_head_qcs);
        if (!qcs)
                goto out;

        qcs->qcc = qcc;
        qcs->cs = NULL;
        qcs->flags = QC_SF_NONE;

        qcs->by_id.key = id;
        eb64_insert(&qcc->streams_by_id, &qcs->by_id);
        qcc->strms[type].nb_streams++;

        /* If the stream is local, use the peer's remote limit; otherwise its local one. */
        /* TODO use uni limit for unidirectional streams */
        qcs->tx.msd = quic_stream_is_local(qcc, id) ? qcc->rfctl.msd_bidi_r :
                                                      qcc->rfctl.msd_bidi_l;

        qcs->rx.buf = BUF_NULL;
        qcs->rx.app_buf = BUF_NULL;
        qcs->rx.offset = 0;
        qcs->rx.frms = EB_ROOT_UNIQUE;

        qcs->tx.buf = BUF_NULL;
        qcs->tx.xprt_buf = BUF_NULL;
        qcs->tx.offset = 0;
        qcs->tx.sent_offset = 0;
        qcs->tx.ack_offset = 0;
        qcs->tx.acked_frms = EB_ROOT;

        qcs->wait_event.tasklet = NULL;
        qcs->wait_event.events = 0;
        qcs->subs = NULL;

 out:
        TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn, qcs);
        return qcs;
}

/* Free a qcs. This function must only be used for unidirectional streams.
 * Bidirectional streams are released by the upper layer through qc_detach().
 */
void uni_qcs_free(struct qcs *qcs)
{
        eb64_delete(&qcs->by_id);
        pool_free(pool_head_qcs, qcs);
}

struct buffer *qc_get_buf(struct qcs *qcs, struct buffer *bptr)
{
        struct buffer *buf = b_alloc(bptr);
        BUG_ON(!buf);
        return buf;
}

int qcs_subscribe(struct qcs *qcs, int event_type, struct wait_event *es)
{
        struct qcc *qcc = qcs->qcc;

        TRACE_ENTER(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);

        BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
        BUG_ON(qcs->subs && qcs->subs != es);

        es->events |= event_type;
        qcs->subs = es;

        if (event_type & SUB_RETRY_RECV)
                TRACE_DEVEL("subscribe(recv)", QMUX_EV_STRM_RECV, qcc->conn, qcs);

        if (event_type & SUB_RETRY_SEND)
                TRACE_DEVEL("subscribe(send)", QMUX_EV_STRM_SEND, qcc->conn, qcs);

        TRACE_LEAVE(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);

        return 0;
}

void qcs_notify_recv(struct qcs *qcs)
{
        if (qcs->subs && qcs->subs->events & SUB_RETRY_RECV) {
                tasklet_wakeup(qcs->subs->tasklet);
                qcs->subs->events &= ~SUB_RETRY_RECV;
                if (!qcs->subs->events)
                        qcs->subs = NULL;
        }
}

void qcs_notify_send(struct qcs *qcs)
{
        if (qcs->subs && qcs->subs->events & SUB_RETRY_SEND) {
                tasklet_wakeup(qcs->subs->tasklet);
                qcs->subs->events &= ~SUB_RETRY_SEND;
                if (!qcs->subs->events)
                        qcs->subs = NULL;
        }
}

/* Retrieve the stream instance with <id> as ID. This may allocate several
 * streams, depending on the ones already opened.
 * Returns the stream instance on success, NULL otherwise.
 */
struct qcs *qcc_get_qcs(struct qcc *qcc, uint64_t id)
{
        unsigned int strm_type;
        int64_t sub_id;
        struct eb64_node *strm_node;
        struct qcs *qcs = NULL;

        strm_type = id & QCS_ID_TYPE_MASK;
        sub_id = id >> QCS_ID_TYPE_SHIFT;
        strm_node = NULL;
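        /* A note on the decomposition above: per RFC 9000, the two
         * low-order bits of a stream ID encode its type (initiator in bit
         * 0x1, directionality in bit 0x2). Assuming QCS_ID_TYPE_SHIFT == 2,
         * a sketch of the mapping:
         *   id 0x0 -> client bidi, sub_id 0;  id 0x2 -> client uni, sub_id 0
         *   id 0x5 -> server bidi, sub_id 1;  id 0xb -> server uni, sub_id 2
         */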
        if (quic_stream_is_local(qcc, id)) {
                /* Local streams: this stream must already be opened. */
                strm_node = eb64_lookup(&qcc->streams_by_id, id);
                if (!strm_node) {
                        /* unknown stream id */
                        goto out;
                }
                qcs = eb64_entry(strm_node, struct qcs, by_id);
        }
        else {
                /* Remote streams. */
                struct eb_root *strms;
                uint64_t largest_id;
                enum qcs_type qcs_type;

                strms = &qcc->streams_by_id;
                qcs_type = qcs_id_type(id);

                /* TODO also check max-streams for uni streams */
                if (quic_stream_is_bidi(id)) {
                        if (sub_id + 1 > qcc->lfctl.ms_bidi) {
                                /* streams limit reached */
                                goto out;
                        }
                }

                /* Note: ->largest_id was initialized with (uint64_t)-1 as value, 0 being a
                 * correct value.
                 */
                largest_id = qcc->strms[qcs_type].largest_id;
                if (sub_id > (int64_t)largest_id) {
                        /* RFC: "A stream ID that is used out of order results in all streams
                         * of that type with lower-numbered stream IDs also being opened".
                         * So, let's "open" these streams.
                         */
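                        /* For instance, with largest_id == 0, receiving a
                         * frame for client bidi stream id 12 (sub_id 3)
                         * implicitly opens sub_ids 1, 2 and 3: the loop
                         * below instantiates a qcs for each of them.
                         */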
                        int64_t i;
                        struct qcs *tmp_qcs;

                        tmp_qcs = NULL;
                        for (i = largest_id + 1; i <= sub_id; i++) {
                                uint64_t id = (i << QCS_ID_TYPE_SHIFT) | strm_type;
                                enum qcs_type type = id & QCS_ID_DIR_BIT ? QCS_CLT_UNI : QCS_CLT_BIDI;
                                tmp_qcs = qcs_new(qcc, id, type);
                                if (!tmp_qcs) {
                                        /* allocation failure */
                                        goto out;
                                }

                                qcc->strms[qcs_type].largest_id = i;
                        }
                        if (tmp_qcs)
                                qcs = tmp_qcs;
                }
                else {
                        strm_node = eb64_lookup(strms, id);
                        if (strm_node)
                                qcs = eb64_entry(strm_node, struct qcs, by_id);
                }
        }

        return qcs;

 out:
        return NULL;
}

/* Handle a new STREAM frame <strm_frm>. The frame content will be copied in
 * the buffer of the stream instance. The stream instance will be stored in
 * <out_qcs>. In case of success, the caller can immediately call
 * qcc_decode_qcs to process the frame content.
 *
 * Returns 0 on success. On error, two codes are possible:
 * - 1 is returned if the frame cannot be decoded and must be discarded.
 * - 2 is returned if the stream cannot handle the frame at the moment. The
 *   frame should be buffered to be handled later.
 */
int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
             char fin, char *data, struct qcs **out_qcs)
{
        struct qcs *qcs;
        size_t total, diff;

        TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);

        qcs = qcc_get_qcs(qcc, id);
        if (!qcs) {
                TRACE_DEVEL("leaving on stream not found", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS, qcc->conn, NULL, &id);
                return 1;
        }

        *out_qcs = qcs;

        if (offset > qcs->rx.offset)
                return 2;

        if (offset + len <= qcs->rx.offset) {
                TRACE_DEVEL("leaving on already received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
                return 1;
        }

        /* Last frame already handled for this stream. */
        BUG_ON(qcs->flags & QC_SF_FIN_RECV);

        if (!qc_get_buf(qcs, &qcs->rx.buf)) {
                /* TODO should mark qcs as full */
                return 2;
        }

        TRACE_DEVEL("newly received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
        diff = qcs->rx.offset - offset;
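        /* A sketch of the overlap arithmetic: a frame at offset 100 of
         * length 50, received while rx.offset is already 120, gives
         * diff = 20; the first 20 bytes are duplicates, so only the 30
         * remaining bytes starting at data + 20 are copied below.
         */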

        /* TODO do not partially copy a frame if there is not enough room
         * left. Maybe this can be optimized.
         */
        if (len > b_room(&qcs->rx.buf)) {
                /* TODO handle STREAM frames larger than RX buffer. */
                BUG_ON(len > b_size(&qcs->rx.buf));
                return 2;
        }

        len -= diff;
        data += diff;

        total = b_putblk(&qcs->rx.buf, data, len);
        /* TODO handle partial copy of a STREAM frame. */
        BUG_ON(len != total);

        qcs->rx.offset += total;

        if (fin)
                qcs->flags |= QC_SF_FIN_RECV;

 out:
        TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
        return 0;
}

/* Handle a new MAX_DATA frame. <max> must contain the maximum data field of
 * the frame.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_recv_max_data(struct qcc *qcc, uint64_t max)
{
        if (qcc->rfctl.md < max) {
                qcc->rfctl.md = max;

                if (qcc->flags & QC_CF_BLK_MFCTL) {
                        qcc->flags &= ~QC_CF_BLK_MFCTL;
                        tasklet_wakeup(qcc->wait_event.tasklet);
                }
        }
        return 0;
}

/* Handle a new MAX_STREAM_DATA frame. <max> must contain the maximum data
 * field of the frame and <id> is the identifier of the QUIC stream.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max)
{
        struct qcs *qcs;
        struct eb64_node *node;

        node = eb64_lookup(&qcc->streams_by_id, id);
        if (node) {
                qcs = eb64_entry(&node->node, struct qcs, by_id);
                if (max > qcs->tx.msd) {
                        qcs->tx.msd = max;

                        if (qcs->flags & QC_SF_BLK_SFCTL) {
                                qcs->flags &= ~QC_SF_BLK_SFCTL;
                                tasklet_wakeup(qcc->wait_event.tasklet);
                        }
                }
        }

        return 0;
}

/* Decode the content of STREAM frames already received on the stream instance
 * <qcs>.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_decode_qcs(struct qcc *qcc, struct qcs *qcs)
{
        TRACE_ENTER(QMUX_EV_QCS_RECV, qcc->conn, qcs);

        if (qcc->app_ops->decode_qcs(qcs, qcs->flags & QC_SF_FIN_RECV, qcc->ctx) < 0) {
                TRACE_DEVEL("leaving on decoding error", QMUX_EV_QCS_RECV, qcc->conn, qcs);
                return 1;
        }

        TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);

        return 0;
}

/* Return true if a MAX_STREAMS frame should be emitted to release the credit
 * of remote bidirectional streams closed since the last update.
 */
static int qc_is_max_streams_needed(struct qcc *qcc)
{
        return qcc->lfctl.cl_bidi_r > qcc->lfctl.ms_bidi_init / 2;
}
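
/* A worked example of the heuristic above (illustrative numbers): with an
 * initial limit of 100 bidi streams (ms_bidi_init == 100), a MAX_STREAMS
 * frame is scheduled once more than 50 remotely-opened bidi streams have
 * been closed (cl_bidi_r), releasing all the accumulated credit at once.
 */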

/* detaches the QUIC stream from its QCC and releases it to the QCS pool. */
static void qcs_destroy(struct qcs *qcs)
{
        struct connection *conn = qcs->qcc->conn;
        const uint64_t id = qcs->by_id.key;

        TRACE_ENTER(QMUX_EV_QCS_END, conn, qcs);

        if (quic_stream_is_remote(qcs->qcc, id)) {
                if (quic_stream_is_bidi(id)) {
                        ++qcs->qcc->lfctl.cl_bidi_r;
                        if (qc_is_max_streams_needed(qcs->qcc))
                                tasklet_wakeup(qcs->qcc->wait_event.tasklet);
                }
        }

        eb64_delete(&qcs->by_id);

        b_free(&qcs->rx.buf);
        b_free(&qcs->tx.buf);
        b_free(&qcs->tx.xprt_buf);

        --qcs->qcc->strms[qcs_id_type(qcs->by_id.key)].nb_streams;

        pool_free(pool_head_qcs, qcs);

        TRACE_LEAVE(QMUX_EV_QCS_END, conn);
}

static inline int qcc_is_dead(const struct qcc *qcc)
{
        if (!qcc->strms[QCS_CLT_BIDI].nb_streams && !qcc->task)
                return 1;

        return 0;
}

/* Return true if the mux timeout should be armed. */
static inline int qcc_may_expire(struct qcc *qcc)
{
        /* Consider that the timeout must be set if no bidirectional streams
         * are open.
         */
        if (!qcc->strms[QCS_CLT_BIDI].nb_streams)
                return 1;

        return 0;
}

/* release function. This one should be called to free all resources allocated
 * to the mux.
 */
static void qc_release(struct qcc *qcc)
{
        struct connection *conn = NULL;

        TRACE_ENTER(QMUX_EV_QCC_END);

        if (qcc) {
                /* The connection must be attached to this mux to be released. */
                if (qcc->conn && qcc->conn->ctx == qcc)
                        conn = qcc->conn;

                TRACE_DEVEL("freeing qcc", QMUX_EV_QCC_END, conn);

                if (qcc->wait_event.tasklet)
                        tasklet_free(qcc->wait_event.tasklet);

                pool_free(pool_head_qcc, qcc);
        }

        if (conn) {
                LIST_DEL_INIT(&conn->stopping_list);

                conn->qc->conn = NULL;
                conn->mux = NULL;
                conn->ctx = NULL;

                TRACE_DEVEL("freeing conn", QMUX_EV_QCC_END, conn);

                conn_stop_tracking(conn);
                conn_full_close(conn);
                if (conn->destroy_cb)
                        conn->destroy_cb(conn);
                conn_free(conn);
        }

        TRACE_LEAVE(QMUX_EV_QCC_END);
}

/* Prepare a STREAM frame for the <qcs> instance. First, transfer data from
 * <payload> to the <out> buffer. The STREAM frame payload points to the
 * <out> buffer. The frame is then pushed to <frm_list>. If <fin> is set,
 * and the <payload> buf is emptied after transfer, the FIN bit is set on
 * the STREAM frame. Transfer is automatically adjusted to not exceed the
 * stream flow-control limit. <max_data> must contain the current sum of
 * offsets sent on the connection. This is useful to not exceed the
 * connection flow-control limit when calling this function repeatedly on
 * multiple streams before passing the data to the lower layer.
 *
 * Returns the total bytes of newly transferred data or a negative error code.
 */
static int qcs_push_frame(struct qcs *qcs, struct buffer *out,
                          struct buffer *payload, int fin,
                          struct list *frm_list, uint64_t max_data)
{
        struct qcc *qcc = qcs->qcc;
        struct quic_frame *frm;
        int head, left, to_xfer;
        int total = 0;

        TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);

        qc_get_buf(qcs, out);

        /*
         * QCS out buffer diagram
         *        head            left        to_xfer
         *   ------------->  ---------->  ----->
         * --------------------------------------------------
         *  |...............|xxxxxxxxxxx|<<<<<
         * --------------------------------------------------
         *  ^ ack-off        ^ sent-off  ^ off
         *
         * STREAM frame
         *                   ^                 ^
         *                   |xxxxxxxxxxxxxxxxx|
         */
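
        /* A worked example of the bookkeeping below (illustrative numbers):
         * with ack_offset = 0, sent_offset = 10, offset = 30, 50 bytes of
         * payload and 25 bytes of room in <out>, this gives head = 10,
         * left = 20 and to_xfer = QUIC_MIN(50, 25) = 25, before <to_xfer>
         * is further clamped by the stream (msd) and connection (md)
         * flow-control limits.
         */
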
        BUG_ON_HOT(qcs->tx.sent_offset < qcs->tx.ack_offset);
        BUG_ON_HOT(qcs->tx.offset < qcs->tx.sent_offset);

        head = qcs->tx.sent_offset - qcs->tx.ack_offset;
        left = qcs->tx.offset - qcs->tx.sent_offset;
        to_xfer = QUIC_MIN(b_data(payload), b_room(out));

        BUG_ON_HOT(qcs->tx.offset > qcs->tx.msd);
        /* do not exceed the stream flow-control limit */
        if (qcs->tx.offset + to_xfer > qcs->tx.msd)
                to_xfer = qcs->tx.msd - qcs->tx.offset;

        BUG_ON_HOT(max_data > qcc->rfctl.md);
        /* do not exceed the connection flow-control limit */
        if (max_data + to_xfer > qcc->rfctl.md)
                to_xfer = qcc->rfctl.md - max_data;

        if (!left && !to_xfer)
                goto out;

        frm = pool_zalloc(pool_head_quic_frame);
        if (!frm)
                goto err;

        total = b_force_xfer(out, payload, to_xfer);

        frm->type = QUIC_FT_STREAM_8;
        frm->stream.qcs = (struct qcs *)qcs;
        frm->stream.id = qcs->by_id.key;
        frm->stream.buf = out;
        frm->stream.data = (unsigned char *)b_peek(out, head);

        /* FIN is set only when the buffer has been totally emptied. */
        fin = fin && !b_data(payload);
        if (fin)
                frm->type |= QUIC_STREAM_FRAME_TYPE_FIN_BIT;

        if (qcs->tx.sent_offset) {
                frm->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
                frm->stream.offset.key = qcs->tx.sent_offset;
        }

        if (left + total) {
                frm->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
                frm->stream.len = left + total;
        }

        LIST_APPEND(frm_list, &frm->list);

 out:
        {
                struct qcs_push_frm_trace_arg arg = {
                        .sent = b_data(out), .xfer = total, .fin = fin,
                        .offset = qcs->tx.sent_offset
                };
                TRACE_LEAVE(QMUX_EV_QCS_SEND|QMUX_EV_QCS_PUSH_FRM,
                            qcc->conn, qcs, &arg);
        }

        return total;

 err:
        TRACE_DEVEL("leaving in error", QMUX_EV_QCS_SEND, qcc->conn, qcs);
        return -1;
}

/* This function must be called by the upper layer to inform about the sending
 * of a STREAM frame for the <qcs> instance. The frame is of <data> length and
 * starts at <offset>.
 */
void qcc_streams_sent_done(struct qcs *qcs, uint64_t data, uint64_t offset)
{
        struct qcc *qcc = qcs->qcc;
        uint64_t diff;

        BUG_ON(offset > qcs->tx.sent_offset);

        /* check if the STREAM frame has already been notified. It can happen
         * for retransmissions.
         */
        if (offset + data <= qcs->tx.sent_offset)
                return;
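
        /* A sketch of the deduplication above (illustrative numbers): with
         * sent_offset = 8, a retransmitted frame at offset 5 of length 10
         * is not skipped since 5 + 10 > 8; diff below is then 7, so only
         * bytes [8, 15) are accounted as newly sent.
         */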
        diff = offset + data - qcs->tx.sent_offset;

        /* increase the offset sum on the connection */
        qcc->tx.sent_offsets += diff;
        BUG_ON_HOT(qcc->tx.sent_offsets > qcc->rfctl.md);
        if (qcc->tx.sent_offsets == qcc->rfctl.md)
                qcc->flags |= QC_CF_BLK_MFCTL;

        /* increase the offset on the stream */
        qcs->tx.sent_offset += diff;
        BUG_ON_HOT(qcs->tx.sent_offset > qcs->tx.msd);
        if (qcs->tx.sent_offset == qcs->tx.msd)
                qcs->flags |= QC_SF_BLK_SFCTL;
}

/* Wrapper for send on transport layer. Send a list of frames <frms> for the
 * connection <qcc>.
 *
 * Returns 0 if all data were sent successfully, else non-zero.
 */
static int qc_send_frames(struct qcc *qcc, struct list *frms)
{
        /* TODO implement an opportunistic retry mechanism. This is needed
         * because qc_send_app_pkts is not complete. It will only prepare data
         * up to its Tx buffer. The remaining frames are not sent even if the
         * Tx buffer is emptied by the sendto call.
         *
         * To overcome this, we call qc_send_app_pkts repeatedly until we
         * detect that the transport layer has sent nothing. This can happen
         * on congestion or on a sendto syscall error.
         *
         * When qc_send_app_pkts is improved to handle retries by itself, we
         * can remove the looping from the MUX.
         */
        struct quic_frame *first_frm;
        uint64_t first_offset = 0;
        char first_stream_frame_type;

        TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);

        if (LIST_ISEMPTY(frms)) {
                TRACE_DEVEL("leaving with no frames to send", QMUX_EV_QCC_SEND, qcc->conn);
                return 0;
        }

 retry_send:
        first_frm = LIST_ELEM(frms->n, struct quic_frame *, list);
        if ((first_frm->type & QUIC_FT_STREAM_8) == QUIC_FT_STREAM_8) {
                first_offset = first_frm->stream.offset.key;
                first_stream_frame_type = 1;
        }
        else {
                first_stream_frame_type = 0;
        }

        if (!LIST_ISEMPTY(frms))
                qc_send_app_pkts(qcc->conn->qc, frms);

        /* If there are frames left, check whether the transport layer has
         * sent some data or is blocked.
         */
        if (!LIST_ISEMPTY(frms)) {
                if (first_frm != LIST_ELEM(frms->n, struct quic_frame *, list))
                        goto retry_send;

                /* If the first frame is STREAM, check if its offset has
                 * changed.
                 */
                if (first_stream_frame_type &&
                    first_offset != LIST_ELEM(frms->n, struct quic_frame *, list)->stream.offset.key) {
                        goto retry_send;
                }
        }

        /* If there are frames left at this stage, the transport layer is
         * blocked. Subscribe to it to retry later.
         */
        if (!LIST_ISEMPTY(frms)) {
                TRACE_DEVEL("leaving with remaining frames to send, subscribing", QMUX_EV_QCC_SEND, qcc->conn);
                qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
                                           SUB_RETRY_SEND, &qcc->wait_event);
                return 1;
        }

        TRACE_LEAVE(QMUX_EV_QCC_SEND);

        return 0;
}

/* Proceed to sending. Loop through all available streams for the <qcc>
 * instance and try to send as much as possible.
 *
 * Returns the total of bytes sent to the transport layer.
 */
static int qc_send(struct qcc *qcc)
{
        struct list frms = LIST_HEAD_INIT(frms);
        struct eb64_node *node;
        int total = 0;

        TRACE_ENTER(QMUX_EV_QCC_SEND);

        if (qcc->flags & QC_CF_BLK_MFCTL)
                return 0;

        /* loop through all streams, construct STREAM frames if data available.
         * TODO optimize the loop to favor streams which are not too heavy.
         */
        node = eb64_first(&qcc->streams_by_id);
        while (node) {
                struct qcs *qcs = container_of(node, struct qcs, by_id);
                struct buffer *buf = &qcs->tx.buf;
                struct buffer *out = &qcs->tx.xprt_buf;

                /* TODO
                 * for the moment, unidirectional streams have their own
                 * mechanism for sending. This should be unified in the
                 * future, in which case the next check will be removed.
                 */
                if (quic_stream_is_uni(qcs->by_id.key)) {
                        node = eb64_next(node);
                        continue;
                }

                if (qcs->flags & QC_SF_BLK_SFCTL) {
                        node = eb64_next(node);
                        continue;
                }

                if (b_data(buf) || b_data(out)) {
                        int ret;
                        char fin = !!(qcs->flags & QC_SF_FIN_STREAM);

                        ret = qcs_push_frame(qcs, out, buf, fin, &frms,
                                             qcc->tx.sent_offsets + total);
                        BUG_ON(ret < 0); /* TODO handle this properly */

                        if (ret > 0) {
                                qcs_notify_send(qcs);
                                if (qcs->flags & QC_SF_BLK_MROOM)
                                        qcs->flags &= ~QC_SF_BLK_MROOM;
                        }

                        qcs->tx.offset += ret;
                        total += ret;

                        /* Subscribe if not all data could be sent. */
                        if (b_data(buf)) {
                                qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
                                                           SUB_RETRY_SEND, &qcc->wait_event);
                        }
                }
                node = eb64_next(node);
        }

        qc_send_frames(qcc, &frms);

        TRACE_LEAVE(QMUX_EV_QCC_SEND);

        return total;
}

/* Release all streams that are already marked as detached. This is only done
 * if their TX buffers are empty or if a CONNECTION_CLOSE has been received.
 *
 * Return the number of released streams.
 */
static int qc_release_detached_streams(struct qcc *qcc)
{
        struct eb64_node *node;
        int release = 0;

        node = eb64_first(&qcc->streams_by_id);
        while (node) {
                struct qcs *qcs = container_of(node, struct qcs, by_id);
                node = eb64_next(node);

                if (qcs->flags & QC_SF_DETACH) {
                        if ((!b_data(&qcs->tx.buf) && !b_data(&qcs->tx.xprt_buf))) {
                                qcs_destroy(qcs);
                                release = 1;
                        }
                        else {
                                qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
                                                           SUB_RETRY_SEND, &qcc->wait_event);
                        }
                }
        }

        return release;
}

/* Send a MAX_STREAMS_BIDI frame to update the limit of bidirectional streams
 * allowed to be opened by the peer. The caller should have first checked if
 * this is required with qc_is_max_streams_needed.
 *
 * Returns 0 on success else non-zero.
 */
static int qc_send_max_streams(struct qcc *qcc)
{
        struct list frms = LIST_HEAD_INIT(frms);
        struct quic_frame *frm;

        frm = pool_zalloc(pool_head_quic_frame);
        BUG_ON(!frm); /* TODO handle this properly */

        frm->type = QUIC_FT_MAX_STREAMS_BIDI;
        frm->max_streams_bidi.max_streams = qcc->lfctl.ms_bidi +
                                            qcc->lfctl.cl_bidi_r;
        TRACE_DEVEL("sending MAX_STREAMS frame", QMUX_EV_SEND_FRM, qcc->conn, NULL, frm);
        LIST_APPEND(&frms, &frm->list);

        if (qc_send_frames(qcc, &frms))
                return 1;

        /* save the new limit if the frame has been sent. */
        qcc->lfctl.ms_bidi += qcc->lfctl.cl_bidi_r;
        qcc->lfctl.cl_bidi_r = 0;

        return 0;
}

static struct task *qc_io_cb(struct task *t, void *ctx, unsigned int status)
{
        struct qcc *qcc = ctx;

        TRACE_ENTER(QMUX_EV_QCC_WAKE);

        if (qc_is_max_streams_needed(qcc))
                qc_send_max_streams(qcc);

        qc_send(qcc);

        if (qc_release_detached_streams(qcc)) {
                /* Schedule the mux timeout if no bidirectional streams left. */
                if (qcc_may_expire(qcc)) {
                        qcc->task->expire = tick_add(now_ms, qcc->timeout);
                        task_queue(qcc->task);
                }
        }

        TRACE_LEAVE(QMUX_EV_QCC_WAKE);

        return NULL;
}

static struct task *qc_timeout_task(struct task *t, void *ctx, unsigned int state)
{
        struct qcc *qcc = ctx;
        int expired = tick_is_expired(t->expire, now_ms);

        TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc ? qcc->conn : NULL);

        if (qcc) {
                if (!expired) {
                        TRACE_DEVEL("leaving (not expired)", QMUX_EV_QCC_WAKE, qcc->conn);
                        return t;
                }

                if (!qcc_may_expire(qcc)) {
                        TRACE_DEVEL("leaving (cannot expire)", QMUX_EV_QCC_WAKE, qcc->conn);
                        t->expire = TICK_ETERNITY;
                        return t;
                }
        }

        task_destroy(t);

        if (!qcc) {
                TRACE_DEVEL("leaving (no more qcc)", QMUX_EV_QCC_WAKE);
                return NULL;
        }

        qcc->task = NULL;

        if (qcc_is_dead(qcc))
                qc_release(qcc);

        TRACE_LEAVE(QMUX_EV_QCC_WAKE);

        return NULL;
}

static int qc_init(struct connection *conn, struct proxy *prx,
                   struct session *sess, struct buffer *input)
{
        struct qcc *qcc;
        struct quic_transport_params *lparams, *rparams;

        qcc = pool_alloc(pool_head_qcc);
        if (!qcc)
                goto fail_no_qcc;

        qcc->conn = conn;
        conn->ctx = qcc;
        qcc->flags = 0;

        qcc->app_ops = NULL;

        qcc->streams_by_id = EB_ROOT_UNIQUE;

        /* Server parameters, params used for RX flow control. */
        lparams = &conn->qc->rx.params;

        qcc->rx.max_data = lparams->initial_max_data;
        qcc->tx.sent_offsets = 0;

        /* Client initiated streams must respect the server flow control. */
        qcc->strms[QCS_CLT_BIDI].max_streams = lparams->initial_max_streams_bidi;
        qcc->strms[QCS_CLT_BIDI].nb_streams = 0;
        qcc->strms[QCS_CLT_BIDI].largest_id = -1;
        qcc->strms[QCS_CLT_BIDI].rx.max_data = 0;
        qcc->strms[QCS_CLT_BIDI].tx.max_data = lparams->initial_max_stream_data_bidi_remote;

        qcc->strms[QCS_CLT_UNI].max_streams = lparams->initial_max_streams_uni;
        qcc->strms[QCS_CLT_UNI].nb_streams = 0;
        qcc->strms[QCS_CLT_UNI].largest_id = -1;
        qcc->strms[QCS_CLT_UNI].rx.max_data = 0;
        qcc->strms[QCS_CLT_UNI].tx.max_data = lparams->initial_max_stream_data_uni;

        /* Server initiated streams must respect the server flow control. */
        qcc->strms[QCS_SRV_BIDI].max_streams = 0;
        qcc->strms[QCS_SRV_BIDI].nb_streams = 0;
        qcc->strms[QCS_SRV_BIDI].largest_id = -1;
        qcc->strms[QCS_SRV_BIDI].rx.max_data = lparams->initial_max_stream_data_bidi_local;
        qcc->strms[QCS_SRV_BIDI].tx.max_data = 0;

        qcc->strms[QCS_SRV_UNI].max_streams = 0;
        qcc->strms[QCS_SRV_UNI].nb_streams = 0;
        qcc->strms[QCS_SRV_UNI].largest_id = -1;
        qcc->strms[QCS_SRV_UNI].rx.max_data = lparams->initial_max_stream_data_uni;
        qcc->strms[QCS_SRV_UNI].tx.max_data = 0;

        qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = lparams->initial_max_streams_bidi;
        qcc->lfctl.cl_bidi_r = 0;

        rparams = &conn->qc->tx.params;
        qcc->rfctl.md = rparams->initial_max_data;
        qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local;
        qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote;
        qcc->wait_event.tasklet = tasklet_new();
        if (!qcc->wait_event.tasklet)
                goto fail_no_tasklet;

        qcc->subs = NULL;
        qcc->wait_event.tasklet->process = qc_io_cb;
        qcc->wait_event.tasklet->context = qcc;
        qcc->wait_event.events = 0;

        /* haproxy timeouts */
        qcc->timeout = prx->timeout.client;
        qcc->task = task_new_here();
        if (!qcc->task)
                goto fail_no_timeout_task;
        qcc->task->process = qc_timeout_task;
        qcc->task->context = qcc;
        qcc->task->expire = tick_add(now_ms, qcc->timeout);

        if (!conn_is_back(conn)) {
                if (!LIST_INLIST(&conn->stopping_list)) {
                        LIST_APPEND(&mux_stopping_data[tid].list,
                                    &conn->stopping_list);
                }
        }

        HA_ATOMIC_STORE(&conn->qc->qcc, qcc);
        /* init read cycle */
        tasklet_wakeup(qcc->wait_event.tasklet);

        return 0;

 fail_no_timeout_task:
        tasklet_free(qcc->wait_event.tasklet);
 fail_no_tasklet:
        pool_free(pool_head_qcc, qcc);
 fail_no_qcc:
        return -1;
}

static void qc_detach(struct conn_stream *cs)
{
        struct qcs *qcs = cs->ctx;
        struct qcc *qcc = qcs->qcc;

        TRACE_ENTER(QMUX_EV_STRM_END, qcc->conn, qcs);

        /* TODO on CONNECTION_CLOSE reception, it should be possible to free
         * qcs instances. This should be done once the buffering and ACK
         * management between xprt and mux is reorganized.
         */

        if ((b_data(&qcs->tx.buf) || b_data(&qcs->tx.xprt_buf))) {
                TRACE_DEVEL("leaving with remaining data, detaching qcs", QMUX_EV_STRM_END, qcc->conn, qcs);
                qcs->flags |= QC_SF_DETACH;
                return;
        }

        qcs_destroy(qcs);

        /* Schedule the mux timeout if no bidirectional streams left. */
        if (qcc_may_expire(qcc)) {
                qcc->task->expire = tick_add(now_ms, qcc->timeout);
                task_queue(qcc->task);
        }

        TRACE_LEAVE(QMUX_EV_STRM_END);
}

/* Called from the upper layer, to receive data */
static size_t qc_rcv_buf(struct conn_stream *cs, struct buffer *buf,
                         size_t count, int flags)
{
        struct qcs *qcs = cs->ctx;
        struct htx *qcs_htx = NULL;
        struct htx *cs_htx = NULL;
        size_t ret = 0;
        char fin = 0;

        TRACE_ENTER(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs);

        qcs_htx = htx_from_buf(&qcs->rx.app_buf);
        if (htx_is_empty(qcs_htx)) {
                /* Set buffer data to 0 as HTX is empty. */
                htx_to_buf(qcs_htx, &qcs->rx.app_buf);
                goto end;
        }

        ret = qcs_htx->data;

        cs_htx = htx_from_buf(buf);
        if (htx_is_empty(cs_htx) && htx_used_space(qcs_htx) <= count) {
                htx_to_buf(cs_htx, buf);
                htx_to_buf(qcs_htx, &qcs->rx.app_buf);
                b_xfer(buf, &qcs->rx.app_buf, b_data(&qcs->rx.app_buf));
                goto end;
        }

        htx_xfer_blks(cs_htx, qcs_htx, count, HTX_BLK_UNUSED);
        BUG_ON(qcs_htx->flags & HTX_FL_PARSING_ERROR);

        /* Copy EOM from src to dst buffer if all data copied. */
        if (htx_is_empty(qcs_htx) && (qcs_htx->flags & HTX_FL_EOM)) {
                cs_htx->flags |= HTX_FL_EOM;
                fin = 1;
        }

        cs_htx->extra = qcs_htx->extra ? (qcs_htx->data + qcs_htx->extra) : 0;
        htx_to_buf(cs_htx, buf);
        htx_to_buf(qcs_htx, &qcs->rx.app_buf);
        ret -= qcs_htx->data;

 end:
        if (b_data(&qcs->rx.app_buf)) {
                cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
        }
        else {
                cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
                if (cs->flags & CS_FL_ERR_PENDING)
                        cs->flags |= CS_FL_ERROR;

                if (fin)
                        cs->flags |= (CS_FL_EOI|CS_FL_EOS);

                if (b_size(&qcs->rx.app_buf)) {
                        b_free(&qcs->rx.app_buf);
                        offer_buffers(NULL, 1);
                }
        }

        if (ret)
                tasklet_wakeup(qcs->qcc->wait_event.tasklet);

        TRACE_LEAVE(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs);

        return ret;
}

static size_t qc_snd_buf(struct conn_stream *cs, struct buffer *buf,
                         size_t count, int flags)
{
        struct qcs *qcs = cs->ctx;
        size_t ret;

        TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);

        ret = qcs->qcc->app_ops->snd_buf(cs, buf, count, flags);

        TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);

        return ret;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int qc_subscribe(struct conn_stream *cs, int event_type,
                        struct wait_event *es)
{
        return qcs_subscribe(cs->ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int qc_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
        struct qcs *qcs = cs->ctx;

        BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
        BUG_ON(qcs->subs && qcs->subs != es);

        es->events &= ~event_type;
        if (!es->events)
                qcs->subs = NULL;

        return 0;
}

static int qc_wake(struct connection *conn)
{
        struct qcc *qcc = conn->ctx;

        /* Check if a soft-stop is in progress.
         * Release the idle front connection if this is the case.
         */
        if (unlikely(conn->qc->li->bind_conf->frontend->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
                qc_release(qcc);
        }

        return 1;
}


static void qmux_trace_frm(const struct quic_frame *frm)
{
        switch (frm->type) {
        case QUIC_FT_MAX_STREAMS_BIDI:
                chunk_appendf(&trace_buf, " max_streams=%lu",
                              frm->max_streams_bidi.max_streams);
                break;

        case QUIC_FT_MAX_STREAMS_UNI:
                chunk_appendf(&trace_buf, " max_streams=%lu",
                              frm->max_streams_uni.max_streams);
                break;

        default:
                break;
        }
}

/* quic-mux trace handler */
static void qmux_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4)
{
        const struct connection *conn = a1;
        const struct qcc *qcc = conn ? conn->ctx : NULL;
        const struct qcs *qcs = a2;

        if (!qcc)
                return;

        if (src->verbosity > QMUX_VERB_CLEAN) {
                chunk_appendf(&trace_buf, " : qcc=%p(F)", qcc);

                if (qcs)
                        chunk_appendf(&trace_buf, " qcs=%p(%llu)", qcs, qcs->by_id.key);

                if (mask & QMUX_EV_QCC_NQCS) {
                        const uint64_t *id = a3;
                        chunk_appendf(&trace_buf, " id=%lu", *id);
                }

                if (mask & QMUX_EV_SEND_FRM)
                        qmux_trace_frm(a3);

                if (mask & QMUX_EV_QCS_PUSH_FRM) {
                        const struct qcs_push_frm_trace_arg *arg = a3;
                        chunk_appendf(&trace_buf, " sent=%lu xfer=%d fin=%d offset=%lu",
                                      arg->sent, arg->xfer, arg->fin, arg->offset);
                }
        }
}

/* Function to automatically activate QUIC MUX traces on stdout.
 * Activated via the compilation flag -DENABLE_QUIC_STDOUT_TRACES.
 * Main use for now is in the docker image for QUIC interop testing.
 */
static void qmux_init_stdout_traces(void)
{
#ifdef ENABLE_QUIC_STDOUT_TRACES
        trace_qmux.sink = sink_find("stdout");
        trace_qmux.level = TRACE_LEVEL_DEVELOPER;
        trace_qmux.state = TRACE_STATE_RUNNING;
        trace_qmux.verbosity = QMUX_VERB_MINIMAL;
#endif
}
INITCALL0(STG_INIT, qmux_init_stdout_traces);
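
/* As a build sketch (the variable name is an assumption about the HAProxy
 * Makefile conventions, not verified here), this can be enabled with
 * something like:
 *   make TARGET=linux-glibc USE_QUIC=1 DEFINE=-DENABLE_QUIC_STDOUT_TRACES
 */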


static const struct mux_ops qc_ops = {
        .init = qc_init,
        .detach = qc_detach,
        .rcv_buf = qc_rcv_buf,
        .snd_buf = qc_snd_buf,
        .subscribe = qc_subscribe,
        .unsubscribe = qc_unsubscribe,
        .wake = qc_wake,
};

static struct mux_proto_list mux_proto_quic =
        { .token = IST("quic"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_FE, .mux = &qc_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_quic);