#include <haproxy/mux_quic.h>

#include <import/eb64tree.h>

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/dynbuf.h>
#include <haproxy/htx.h>
#include <haproxy/pool.h>
#include <haproxy/ssl_sock-t.h>
#include <haproxy/xprt_quic.h>

DECLARE_POOL(pool_head_qcc, "qcc", sizeof(struct qcc));
DECLARE_POOL(pool_head_qcs, "qcs", sizeof(struct qcs));

/* Allocate a new QUIC stream with id <id> and type <type>. */
struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
{
	struct qcs *qcs;

	qcs = pool_alloc(pool_head_qcs);
	if (!qcs)
		goto out;

	fprintf(stderr, "%s: stream ID %lu\n", __func__, id);

	qcs->qcc = qcc;
	qcs->cs = NULL;
	qcs->flags = QC_SF_NONE;

	qcs->by_id.key = id;
	eb64_insert(&qcc->streams_by_id, &qcs->by_id);
	qcc->strms[type].nb_streams++;

	qcs->rx.buf = BUF_NULL;
	qcs->rx.app_buf = BUF_NULL;
	qcs->rx.offset = 0;
	qcs->rx.frms = EB_ROOT_UNIQUE;

	qcs->tx.buf = BUF_NULL;
	qcs->tx.xprt_buf = BUF_NULL;
	qcs->tx.offset = 0;
	qcs->tx.sent_offset = 0;
	qcs->tx.ack_offset = 0;
	qcs->tx.acked_frms = EB_ROOT;

	qcs->wait_event.tasklet = NULL;
	qcs->wait_event.events = 0;
	qcs->subs = NULL;

 out:
	return qcs;
}

/* Free a qcs. This function must only be used for unidirectional streams.
 * Bidirectional streams are released by the upper layer through qc_detach().
 */
void uni_qcs_free(struct qcs *qcs)
{
	eb64_delete(&qcs->by_id);
	pool_free(pool_head_qcs, qcs);
}

struct buffer *qc_get_buf(struct qcs *qcs, struct buffer *bptr)
{
	struct buffer *buf = b_alloc(bptr);
	BUG_ON(!buf);
	return buf;
}

int qcs_subscribe(struct qcs *qcs, int event_type, struct wait_event *es)
{
	fprintf(stderr, "%s\n", __func__);

	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(qcs->subs && qcs->subs != es);

	es->events |= event_type;
	qcs->subs = es;

	return 0;
}

void qcs_notify_recv(struct qcs *qcs)
{
	if (qcs->subs && qcs->subs->events & SUB_RETRY_RECV) {
		tasklet_wakeup(qcs->subs->tasklet);
		qcs->subs->events &= ~SUB_RETRY_RECV;
		if (!qcs->subs->events)
			qcs->subs = NULL;
	}
}

void qcs_notify_send(struct qcs *qcs)
{
	if (qcs->subs && qcs->subs->events & SUB_RETRY_SEND) {
		tasklet_wakeup(qcs->subs->tasklet);
		qcs->subs->events &= ~SUB_RETRY_SEND;
		if (!qcs->subs->events)
			qcs->subs = NULL;
	}
}

/* Retrieve the stream whose ID is <id> as an ebtree node. Depending on the
 * streams already open, this may implicitly allocate several streams.
 * Returns the node on success, NULL on failure.
 */
struct eb64_node *qcc_get_qcs(struct qcc *qcc, uint64_t id)
{
	unsigned int strm_type;
	int64_t sub_id;
	struct eb64_node *strm_node;

	strm_type = id & QCS_ID_TYPE_MASK;
	sub_id = id >> QCS_ID_TYPE_SHIFT;
	strm_node = NULL;
	if (quic_stream_is_local(qcc, id)) {
		/* Local streams: this stream must already be opened. */
		strm_node = eb64_lookup(&qcc->streams_by_id, id);
		if (!strm_node) {
			/* unknown stream id */
			goto out;
		}
	}
	else {
		/* Remote streams. */
		struct eb_root *strms;
		uint64_t largest_id;
		enum qcs_type qcs_type;

		strms = &qcc->streams_by_id;
		qcs_type = qcs_id_type(id);

		/* TODO also check max-streams for uni streams */
		if (quic_stream_is_bidi(id)) {
			if (sub_id + 1 > qcc->lfctl.max_bidi_streams) {
				/* streams limit reached */
				goto out;
			}
		}

		/* Note: ->largest_id was initialized to (uint64_t)-1 because 0
		 * is a valid ID value.
		 */
		largest_id = qcc->strms[qcs_type].largest_id;
		if (sub_id > (int64_t)largest_id) {
			/* RFC: "A stream ID that is used out of order results in all streams
			 * of that type with lower-numbered stream IDs also being opened".
			 * So, let's "open" these streams.
			 */
			int64_t i;
			struct qcs *qcs;

			qcs = NULL;
			for (i = largest_id + 1; i <= sub_id; i++) {
				uint64_t id = (i << QCS_ID_TYPE_SHIFT) | strm_type;
				enum qcs_type type = id & QCS_ID_DIR_BIT ? QCS_CLT_UNI : QCS_CLT_BIDI;
				qcs = qcs_new(qcc, id, type);
				if (!qcs) {
					/* allocation failure */
					goto out;
				}

				qcc->strms[qcs_type].largest_id = i;
			}
			if (qcs)
				strm_node = &qcs->by_id;
		}
		else {
			strm_node = eb64_lookup(strms, id);
		}
	}

	return strm_node;

 out:
	return NULL;
}

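/* Worked example for qcc_get_qcs() (informal; assumes the RFC 9000 stream ID
 * encoding, i.e. QCS_ID_TYPE_SHIFT is 2 and the two low bits carry the
 * initiator and direction): receiving id 0x10 on a fresh connection gives
 * strm_type 0x0 (client bidi) and sub_id 4. As largest_id is still
 * (uint64_t)-1, the loop above "opens" the client bidi streams 0x0, 0x4,
 * 0x8, 0xc and 0x10 in a single call and returns the node of stream 0x10.
 */
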
/* Handle a new STREAM frame. The frame content will be copied in the buffer
 * of the stream instance. The stream instance will be stored in <out_qcs>.
 * In case of success, the caller can immediately call qcc_decode_qcs to
 * process the frame content.
 *
 * Returns 0 on success. On errors, two codes are possible.
 * - 1 is returned if the frame cannot be decoded and must be discarded.
 * - 2 is returned if the stream cannot decode the frame at the moment. The
 *   frame should be buffered to be handled later.
 */
int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
             char fin, char *data, struct qcs **out_qcs)
{
	struct qcs *qcs;
	struct eb64_node *strm_node;
	size_t total, diff;

	strm_node = qcc_get_qcs(qcc, id);
	if (!strm_node) {
		fprintf(stderr, "%s: stream not found\n", __func__);
		return 1;
	}

	qcs = eb64_entry(&strm_node->node, struct qcs, by_id);
	*out_qcs = qcs;

	if (offset > qcs->rx.offset)
		return 2;

	if (offset + len <= qcs->rx.offset) {
		fprintf(stderr, "%s: already received STREAM data\n", __func__);
		return 1;
	}

	/* Last frame already handled for this stream. */
	BUG_ON(qcs->flags & QC_SF_FIN_RECV);

	if (!qc_get_buf(qcs, &qcs->rx.buf)) {
		/* TODO should mark qcs as full */
		return 2;
	}

	fprintf(stderr, "%s: new STREAM data\n", __func__);
	diff = qcs->rx.offset - offset;

	/* TODO do not partially copy a frame if not enough size left. Maybe
	 * this can be optimized.
	 */
	if (len > b_room(&qcs->rx.buf)) {
		/* TODO handle STREAM frames larger than RX buffer. */
		BUG_ON(len > b_size(&qcs->rx.buf));
		return 2;
	}

	len -= diff;
	data += diff;

	total = b_putblk(&qcs->rx.buf, data, len);
	/* TODO handle partial copy of a STREAM frame. */
	BUG_ON(len != total);

	qcs->rx.offset += total;

	if (fin)
		qcs->flags |= QC_SF_FIN_RECV;

	return 0;
}

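/* Worked example of the trimming above (hypothetical values): if
 * qcs->rx.offset is 100 and a frame arrives with offset 90 and len 30, the
 * checks pass (90 <= 100 < 120), diff is 10, so only the 20 new bytes are
 * copied and rx.offset advances to 120. A frame at offset 110 would instead
 * return 2 until the 100-110 gap is filled.
 */
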
/* Decode the content of STREAM frames already received on the stream instance
 * <qcs>.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_decode_qcs(struct qcc *qcc, struct qcs *qcs)
{
	if (qcc->app_ops->decode_qcs(qcs, qcs->flags & QC_SF_FIN_RECV, qcc->ctx) < 0) {
		fprintf(stderr, "%s: decoding error\n", __func__);
		return 1;
	}

	return 0;
}

static int qc_is_max_streams_needed(struct qcc *qcc)
{
	return qcc->lfctl.closed_bidi_streams > qcc->lfctl.initial_max_bidi_streams / 2;
}

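/* Example (hypothetical transport parameters): with
 * initial_max_bidi_streams = 100, the check above becomes true once 51 bidi
 * streams have been closed; qc_send_max_streams() below then advertises
 * max_bidi_streams + closed_bidi_streams = 151 via MAX_STREAMS_BIDI,
 * letting the peer replace the closed streams with new ones.
 */
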
/* detaches the QUIC stream from its QCC and releases it to the QCS pool. */
static void qcs_destroy(struct qcs *qcs)
{
	const uint64_t id = qcs->by_id.key;

	fprintf(stderr, "%s: release stream %llu\n", __func__, qcs->by_id.key);

	if (quic_stream_is_remote(qcs->qcc, id)) {
		if (quic_stream_is_bidi(id)) {
			++qcs->qcc->lfctl.closed_bidi_streams;
			if (qc_is_max_streams_needed(qcs->qcc))
				tasklet_wakeup(qcs->qcc->wait_event.tasklet);
		}
	}

	eb64_delete(&qcs->by_id);

	b_free(&qcs->rx.buf);
	b_free(&qcs->tx.buf);
	b_free(&qcs->tx.xprt_buf);

	--qcs->qcc->strms[qcs_id_type(qcs->by_id.key)].nb_streams;

	pool_free(pool_head_qcs, qcs);
}

static inline int qcc_is_dead(const struct qcc *qcc)
{
	fprintf(stderr, "%s: %lu\n", __func__, qcc->strms[QCS_CLT_BIDI].nb_streams);

	if (!qcc->strms[QCS_CLT_BIDI].nb_streams && !qcc->task)
		return 1;

	return 0;
}

/* Return true if the mux timeout should be armed. */
static inline int qcc_may_expire(struct qcc *qcc)
{
	/* Consider that the timeout must be set if no bidirectional streams
	 * are opened.
	 */
	if (!qcc->strms[QCS_CLT_BIDI].nb_streams)
		return 1;

	return 0;
}

/* Release function. This one should be called to free all resources
 * allocated to the mux.
 */
static void qc_release(struct qcc *qcc)
{
	struct connection *conn = NULL;

	if (qcc) {
		/* The connection must be attached to this mux to be released. */
		if (qcc->conn && qcc->conn->ctx == qcc)
			conn = qcc->conn;

		if (qcc->wait_event.tasklet)
			tasklet_free(qcc->wait_event.tasklet);

		pool_free(pool_head_qcc, qcc);
	}

	if (conn) {
		LIST_DEL_INIT(&conn->stopping_list);

		conn->qc->conn = NULL;
		conn->mux = NULL;
		conn->ctx = NULL;

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		fprintf(stderr, "conn@%p released\n", conn);
		conn_free(conn);
	}
}

static int qcs_push_frame(struct qcs *qcs, struct buffer *out,
                          struct buffer *payload, int fin,
                          struct list *frm_list)
{
	struct quic_frame *frm;
	int head, left, to_xfer;
	int total = 0;

	fprintf(stderr, "%s\n", __func__);

	qc_get_buf(qcs, out);

	/* QCS out buffer diagram
	 *       head            left        to_xfer
	 *  ---------------> ------------> ----->
	 * ==================================================
	 *  |...............|xxxxxxxxxxx|<<<<<
	 * ==================================================
	 *  ^ ack-off        ^ sent-off  ^ off
	 *
	 *                   STREAM frame
	 *                   ^               ^
	 *                   |xxxxxxxxxxxxxxx|
	 */

	BUG_ON_HOT(qcs->tx.sent_offset < qcs->tx.ack_offset);
	BUG_ON_HOT(qcs->tx.offset < qcs->tx.sent_offset);

	head = qcs->tx.sent_offset - qcs->tx.ack_offset;
	left = qcs->tx.offset - qcs->tx.sent_offset;
	to_xfer = QUIC_MIN(b_data(payload), b_room(out));
	if (!left && !to_xfer)
		goto out;

	frm = pool_zalloc(pool_head_quic_frame);
	if (!frm)
		goto err;

	total = b_force_xfer(out, payload, to_xfer);

	frm->type = QUIC_FT_STREAM_8;
	frm->stream.qcs = (struct qcs *)qcs;
	frm->stream.id = qcs->by_id.key;
	frm->stream.buf = out;
	frm->stream.data = (unsigned char *)b_peek(out, head);

	/* FIN is positioned only when the buffer has been totally emptied. */
	fin = fin && !b_data(payload);
	if (fin)
		frm->type |= QUIC_STREAM_FRAME_TYPE_FIN_BIT;

	if (qcs->tx.sent_offset) {
		frm->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
		frm->stream.offset.key = qcs->tx.sent_offset;
	}

	if (left + total) {
		frm->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
		frm->stream.len = left + total;
	}

	LIST_APPEND(frm_list, &frm->list);
 out:
	fprintf(stderr, "%s: sent=%lu total=%d fin=%d id=%llu offset=%lu\n",
	        __func__, (long unsigned)b_data(out), total, fin, (ull)qcs->by_id.key, qcs->tx.sent_offset);
	return total;

 err:
	return -1;
}

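/* Worked example for qcs_push_frame() (hypothetical values): with
 * ack_offset = 0, sent_offset = 10, offset = 30, 5 bytes in <payload> and
 * enough room in <out>: head = 10, left = 20, to_xfer = 5. The built frame
 * covers the 25 bytes starting at sent_offset, so stream.len = left + total
 * = 25, stream.data points at b_peek(out, 10) and the OFF bit is set with
 * offset 10.
 */
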
/* This function must be called by the upper layer to inform about the sending
 * of a STREAM frame for <qcs> instance. The frame is of <data> length and
 * starts at <offset>.
 */
void qcc_streams_sent_done(struct qcs *qcs, uint64_t data, uint64_t offset)
{
	uint64_t diff;

	BUG_ON(offset > qcs->tx.sent_offset);

	/* check if the STREAM frame has already been notified. It can happen
	 * for retransmission.
	 */
	if (offset + data <= qcs->tx.sent_offset)
		return;

	diff = offset + data - qcs->tx.sent_offset;

	/* increase offset on stream */
	qcs->tx.sent_offset += diff;
}

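/* Example (hypothetical values): with sent_offset = 50, a retransmitted
 * frame at offset 40 with data = 10 is ignored (40 + 10 <= 50), while a
 * partially new frame at offset 45 with data = 10 advances sent_offset by
 * diff = 45 + 10 - 50 = 5, up to 55.
 */
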
/* Wrapper for send on transport layer. Send a list of frames <frms> for the
 * connection <qcc>.
 *
 * Returns 0 if all data were sent with success, else non-zero.
 */
static int qc_send_frames(struct qcc *qcc, struct list *frms)
{
	/* TODO implement an opportunistic retry mechanism. This is needed
	 * because qc_send_app_pkts is not completed. It will only prepare data
	 * up to its Tx buffer. The frames left are not sent even if the Tx
	 * buffer is emptied by the sendto call.
	 *
	 * To overcome this, we call repeatedly qc_send_app_pkts until we
	 * detect that the transport layer has sent nothing. This could happen
	 * on congestion or sendto syscall error.
	 *
	 * When qc_send_app_pkts is improved to handle retry by itself, we can
	 * remove the looping from the MUX.
	 */
	struct quic_frame *first_frm;
	uint64_t first_offset = 0;
	char first_stream_frame_type;

 retry_send:
	/* Nothing to send: do not inspect the first element of an empty list. */
	if (LIST_ISEMPTY(frms))
		return 0;

	first_frm = LIST_ELEM(frms->n, struct quic_frame *, list);
	if ((first_frm->type & QUIC_FT_STREAM_8) == QUIC_FT_STREAM_8) {
		first_offset = first_frm->stream.offset.key;
		first_stream_frame_type = 1;
	}
	else {
		first_stream_frame_type = 0;
	}

	qc_send_app_pkts(qcc->conn->qc, frms);

	/* If there are frames left, check if the transport layer has sent some
	 * data or is blocked.
	 */
	if (!LIST_ISEMPTY(frms)) {
		if (first_frm != LIST_ELEM(frms->n, struct quic_frame *, list))
			goto retry_send;

		/* If the first frame is STREAM, check if its offset has
		 * changed.
		 */
		if (first_stream_frame_type &&
		    first_offset != LIST_ELEM(frms->n, struct quic_frame *, list)->stream.offset.key) {
			goto retry_send;
		}
	}

	/* If there are frames left at this stage, the transport layer is
	 * blocked. Subscribe on it to retry later.
	 */
	if (!LIST_ISEMPTY(frms)) {
		fprintf(stderr, "%s: remaining frames to send\n", __func__);
		qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
		                           SUB_RETRY_SEND, &qcc->wait_event);
		return 1;
	}

	return 0;
}

static int qc_send(struct qcc *qcc)
{
	struct list frms = LIST_HEAD_INIT(frms);
	struct eb64_node *node;
	int ret = 0;

	fprintf(stderr, "%s\n", __func__);

	/* loop through all streams, construct STREAM frames if data available.
	 * TODO optimize the loop to favor streams which are not too heavy.
	 */
	node = eb64_first(&qcc->streams_by_id);
	while (node) {
		struct qcs *qcs = container_of(node, struct qcs, by_id);
		struct buffer *buf = &qcs->tx.buf;
		struct buffer *out = &qcs->tx.xprt_buf;

		/* TODO
		 * for the moment, unidirectional streams have their own
		 * mechanism for sending. This should be unified in the future,
		 * in this case the next check will be removed.
		 */
		if (quic_stream_is_uni(qcs->by_id.key)) {
			node = eb64_next(node);
			continue;
		}

		if (b_data(buf) || b_data(out)) {
			char fin = qcs->flags & QC_SF_FIN_STREAM;
			ret = qcs_push_frame(qcs, out, buf, fin, &frms);
			BUG_ON(ret < 0); /* TODO handle this properly */

			if (ret > 0) {
				qcs_notify_send(qcs);
				if (qcs->flags & QC_SF_BLK_MROOM)
					qcs->flags &= ~QC_SF_BLK_MROOM;
			}

			fprintf(stderr, "%s ret=%d\n", __func__, ret);
			qcs->tx.offset += ret;

			/* Subscribe if not all data can be sent. */
			if (b_data(buf)) {
				qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
				                           SUB_RETRY_SEND, &qcc->wait_event);
			}
		}
		node = eb64_next(node);
	}

	qc_send_frames(qcc, &frms);
	/* TODO adjust ret if not all frames are sent. */

	return ret;
}

/* Release all streams that are already marked as detached. This is only done
 * if their TX buffers are empty or if a CONNECTION_CLOSE has been received.
 *
 * Returns non-zero if at least one stream has been released.
 */
static int qc_release_detached_streams(struct qcc *qcc)
{
	struct eb64_node *node;
	int release = 0;

	node = eb64_first(&qcc->streams_by_id);
	while (node) {
		struct qcs *qcs = container_of(node, struct qcs, by_id);
		node = eb64_next(node);

		if (qcs->flags & QC_SF_DETACH) {
			if (!b_data(&qcs->tx.buf) && !b_data(&qcs->tx.xprt_buf)) {
				qcs_destroy(qcs);
				release = 1;
			}
			else {
				qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
				                           SUB_RETRY_SEND, &qcc->wait_event);
			}
		}
	}

	return release;
}

/* Send a MAX_STREAMS_BIDI frame to update the limit of bidirectional streams
 * allowed to be opened by the peer. The caller should have first checked if
 * this is required with qc_is_max_streams_needed.
 *
 * Returns 0 on success else non-zero.
 */
static int qc_send_max_streams(struct qcc *qcc)
{
	struct list frms = LIST_HEAD_INIT(frms);
	struct quic_frame *frm;

	frm = pool_zalloc(pool_head_quic_frame);
	BUG_ON(!frm); /* TODO handle this properly */

	frm->type = QUIC_FT_MAX_STREAMS_BIDI;
	frm->max_streams_bidi.max_streams = qcc->lfctl.max_bidi_streams +
	                                    qcc->lfctl.closed_bidi_streams;
	fprintf(stderr, "SET MAX_STREAMS %lu\n", frm->max_streams_bidi.max_streams);
	LIST_APPEND(&frms, &frm->list);

	if (qc_send_frames(qcc, &frms))
		return 1;

	/* save the new limit if the frame has been sent. */
	qcc->lfctl.max_bidi_streams += qcc->lfctl.closed_bidi_streams;
	qcc->lfctl.closed_bidi_streams = 0;

	return 0;
}

static struct task *qc_io_cb(struct task *t, void *ctx, unsigned int status)
{
	struct qcc *qcc = ctx;

	fprintf(stderr, "%s\n", __func__);

	if (qc_is_max_streams_needed(qcc))
		qc_send_max_streams(qcc);

	qc_send(qcc);

	if (qc_release_detached_streams(qcc)) {
		/* Schedule the mux timeout if no bidirectional streams left. */
		if (qcc_may_expire(qcc)) {
			qcc->task->expire = tick_add(now_ms, qcc->timeout);
			task_queue(qcc->task);
		}
	}

	return NULL;
}

static struct task *qc_timeout_task(struct task *t, void *ctx, unsigned int state)
{
	struct qcc *qcc = ctx;
	int expired = tick_is_expired(t->expire, now_ms);

	fprintf(stderr, "%s\n", __func__);

	if (qcc) {
		if (!expired) {
			fprintf(stderr, "%s: not expired\n", __func__);
			return t;
		}

		if (!qcc_may_expire(qcc)) {
			fprintf(stderr, "%s: cannot expire\n", __func__);
			t->expire = TICK_ETERNITY;
			return t;
		}
	}

	fprintf(stderr, "%s: timeout\n", __func__);
	task_destroy(t);

	if (!qcc)
		return NULL;

	qcc->task = NULL;

	if (qcc_is_dead(qcc))
		qc_release(qcc);

	return NULL;
}

static int qc_init(struct connection *conn, struct proxy *prx,
                   struct session *sess, struct buffer *input)
{
	struct qcc *qcc;
	struct quic_transport_params *lparams;

	qcc = pool_alloc(pool_head_qcc);
	if (!qcc)
		goto fail_no_qcc;

	qcc->conn = conn;
	conn->ctx = qcc;
	qcc->flags = 0;

	qcc->app_ops = NULL;

	qcc->streams_by_id = EB_ROOT_UNIQUE;

	/* Server parameters, params used for RX flow control. */
	lparams = &conn->qc->rx.params;

	qcc->rx.max_data = lparams->initial_max_data;
	qcc->tx.max_data = 0;

	/* Client initiated streams must respect the server flow control. */
	qcc->strms[QCS_CLT_BIDI].max_streams = lparams->initial_max_streams_bidi;
	qcc->strms[QCS_CLT_BIDI].nb_streams = 0;
	qcc->strms[QCS_CLT_BIDI].largest_id = -1;
	qcc->strms[QCS_CLT_BIDI].rx.max_data = 0;
	qcc->strms[QCS_CLT_BIDI].tx.max_data = lparams->initial_max_stream_data_bidi_remote;

	qcc->strms[QCS_CLT_UNI].max_streams = lparams->initial_max_streams_uni;
	qcc->strms[QCS_CLT_UNI].nb_streams = 0;
	qcc->strms[QCS_CLT_UNI].largest_id = -1;
	qcc->strms[QCS_CLT_UNI].rx.max_data = 0;
	qcc->strms[QCS_CLT_UNI].tx.max_data = lparams->initial_max_stream_data_uni;

	/* Server initiated streams must respect the server flow control. */
	qcc->strms[QCS_SRV_BIDI].max_streams = 0;
	qcc->strms[QCS_SRV_BIDI].nb_streams = 0;
	qcc->strms[QCS_SRV_BIDI].largest_id = -1;
	qcc->strms[QCS_SRV_BIDI].rx.max_data = lparams->initial_max_stream_data_bidi_local;
	qcc->strms[QCS_SRV_BIDI].tx.max_data = 0;

	qcc->strms[QCS_SRV_UNI].max_streams = 0;
	qcc->strms[QCS_SRV_UNI].nb_streams = 0;
	qcc->strms[QCS_SRV_UNI].largest_id = -1;
	qcc->strms[QCS_SRV_UNI].rx.max_data = lparams->initial_max_stream_data_uni;
	qcc->strms[QCS_SRV_UNI].tx.max_data = 0;

	qcc->lfctl.max_bidi_streams = qcc->lfctl.initial_max_bidi_streams = lparams->initial_max_streams_bidi;
	qcc->lfctl.closed_bidi_streams = 0;

	qcc->wait_event.tasklet = tasklet_new();
	if (!qcc->wait_event.tasklet)
		goto fail_no_tasklet;

	qcc->subs = NULL;
	qcc->wait_event.tasklet->process = qc_io_cb;
	qcc->wait_event.tasklet->context = qcc;

	/* haproxy timeouts */
	qcc->timeout = prx->timeout.client;
	qcc->task = task_new_here();
	if (!qcc->task)
		goto fail_no_timeout_task;
	qcc->task->process = qc_timeout_task;
	qcc->task->context = qcc;
	qcc->task->expire = tick_add(now_ms, qcc->timeout);

	if (!conn_is_back(conn)) {
		if (!LIST_INLIST(&conn->stopping_list)) {
			LIST_APPEND(&mux_stopping_data[tid].list,
			            &conn->stopping_list);
		}
	}

	HA_ATOMIC_STORE(&conn->qc->qcc, qcc);
	/* init read cycle */
	tasklet_wakeup(qcc->wait_event.tasklet);

	return 0;

 fail_no_timeout_task:
	tasklet_free(qcc->wait_event.tasklet);
 fail_no_tasklet:
	pool_free(pool_head_qcc, qcc);
 fail_no_qcc:
	return -1;
}

static void qc_detach(struct conn_stream *cs)
{
	struct qcs *qcs = cs->ctx;
	struct qcc *qcc = qcs->qcc;

	fprintf(stderr, "%s: leaving with tx.buf.data=%lu, tx.xprt_buf.data=%lu\n",
	        __func__, b_data(&qcs->tx.buf), b_data(&qcs->tx.xprt_buf));

	/* TODO on CONNECTION_CLOSE reception, it should be possible to free
	 * qcs instances. This should be done once the buffering and ACK
	 * management between xprt and mux is reorganized.
	 */

	if (b_data(&qcs->tx.buf) || b_data(&qcs->tx.xprt_buf)) {
		qcs->flags |= QC_SF_DETACH;
		return;
	}

	qcs_destroy(qcs);

	/* Schedule the mux timeout if no bidirectional streams left. */
	if (qcc_may_expire(qcc)) {
		qcc->task->expire = tick_add(now_ms, qcc->timeout);
		task_queue(qcc->task);
	}
}

/* Called from the upper layer, to receive data */
static size_t qc_rcv_buf(struct conn_stream *cs, struct buffer *buf,
                         size_t count, int flags)
{
	struct qcs *qcs = cs->ctx;
	struct htx *qcs_htx = NULL;
	struct htx *cs_htx = NULL;
	size_t ret = 0;
	char fin = 0;

	fprintf(stderr, "%s\n", __func__);

	qcs_htx = htx_from_buf(&qcs->rx.app_buf);
	if (htx_is_empty(qcs_htx)) {
		/* Set buffer data to 0 as HTX is empty. */
		htx_to_buf(qcs_htx, &qcs->rx.app_buf);
		goto end;
	}

	ret = qcs_htx->data;

	cs_htx = htx_from_buf(buf);
	if (htx_is_empty(cs_htx) && htx_used_space(qcs_htx) <= count) {
		htx_to_buf(cs_htx, buf);
		htx_to_buf(qcs_htx, &qcs->rx.app_buf);
		b_xfer(buf, &qcs->rx.app_buf, b_data(&qcs->rx.app_buf));
		goto end;
	}

	htx_xfer_blks(cs_htx, qcs_htx, count, HTX_BLK_UNUSED);
	BUG_ON(qcs_htx->flags & HTX_FL_PARSING_ERROR);

	/* Copy EOM from src to dst buffer if all data copied. */
	if (htx_is_empty(qcs_htx) && (qcs_htx->flags & HTX_FL_EOM)) {
		cs_htx->flags |= HTX_FL_EOM;
		fin = 1;
	}

	cs_htx->extra = qcs_htx->extra ? (qcs_htx->data + qcs_htx->extra) : 0;
	htx_to_buf(cs_htx, buf);
	htx_to_buf(qcs_htx, &qcs->rx.app_buf);
	ret -= qcs_htx->data;

 end:
	if (b_data(&qcs->rx.app_buf)) {
		cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
	}
	else {
		cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
		if (cs->flags & CS_FL_ERR_PENDING)
			cs->flags |= CS_FL_ERROR;

		if (fin)
			cs->flags |= (CS_FL_EOI|CS_FL_EOS);

		if (b_size(&qcs->rx.app_buf)) {
			b_free(&qcs->rx.app_buf);
			offer_buffers(NULL, 1);
		}
	}

	if (ret)
		tasklet_wakeup(qcs->qcc->wait_event.tasklet);

	return ret;
}

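/* Note on the two transfer paths above (informal example): with 10 kB of
 * HTX data in app_buf, an empty destination and count >= 10 kB, the whole
 * buffer is swapped via b_xfer() in one call; with count = 4 kB the
 * block-by-block htx_xfer_blks() path runs instead and ret ends up as the
 * initial data minus whatever is left in qcs_htx.
 */
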
static size_t qc_snd_buf(struct conn_stream *cs, struct buffer *buf,
                         size_t count, int flags)
{
	struct qcs *qcs = cs->ctx;

	fprintf(stderr, "%s\n", __func__);

	return qcs->qcc->app_ops->snd_buf(cs, buf, count, flags);
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int qc_subscribe(struct conn_stream *cs, int event_type,
                        struct wait_event *es)
{
	return qcs_subscribe(cs->ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int qc_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
	struct qcs *qcs = cs->ctx;

	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(qcs->subs && qcs->subs != es);

	es->events &= ~event_type;
	if (!es->events)
		qcs->subs = NULL;

	return 0;
}

static int qc_wake(struct connection *conn)
{
	struct qcc *qcc = conn->ctx;

	/* Check if a soft-stop is in progress.
	 * Release idling front connection if this is the case.
	 */
	if (unlikely(conn->qc->li->bind_conf->frontend->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		qc_release(qcc);
	}

	return 1;
}

924 .init = qc_init,
Amaury Denoyelledeed7772021-12-03 11:36:46 +0100925 .detach = qc_detach,
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100926 .rcv_buf = qc_rcv_buf,
Amaury Denoyelledeed7772021-12-03 11:36:46 +0100927 .snd_buf = qc_snd_buf,
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100928 .subscribe = qc_subscribe,
929 .unsubscribe = qc_unsubscribe,
Amaury Denoyelle0e0969d2022-01-31 15:41:14 +0100930 .wake = qc_wake,
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100931};
932
933static struct mux_proto_list mux_proto_quic =
Amaury Denoyelledeed7772021-12-03 11:36:46 +0100934 { .token = IST("quic"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_FE, .mux = &qc_ops };
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100935
936INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_quic);