Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001/*
2 * QUIC transport layer over SOCK_DGRAM sockets.
3 *
4 * Copyright 2020 HAProxy Technologies, Frédéric Lécaille <flecaille@haproxy.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#define _GNU_SOURCE
14#include <errno.h>
15#include <fcntl.h>
16#include <stdio.h>
17#include <stdlib.h>
18
19#include <sys/socket.h>
20#include <sys/stat.h>
21#include <sys/types.h>
22
23#include <netinet/tcp.h>
24
25#include <haproxy/buf-t.h>
26#include <haproxy/compat.h>
27#include <haproxy/api.h>
28#include <haproxy/debug.h>
29#include <haproxy/tools.h>
30#include <haproxy/ticks.h>
31#include <haproxy/time.h>
32
33#include <haproxy/connection.h>
34#include <haproxy/fd.h>
35#include <haproxy/freq_ctr.h>
36#include <haproxy/global.h>
Frédéric Lécailledfbae762021-02-18 09:59:01 +010037#include <haproxy/h3.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010038#include <haproxy/log.h>
Frédéric Lécailledfbae762021-02-18 09:59:01 +010039#include <haproxy/mux_quic.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010040#include <haproxy/pipe.h>
41#include <haproxy/proxy.h>
42#include <haproxy/quic_cc.h>
43#include <haproxy/quic_frame.h>
44#include <haproxy/quic_loss.h>
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +020045#include <haproxy/cbuf.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010046#include <haproxy/quic_tls.h>
47#include <haproxy/ssl_sock.h>
48#include <haproxy/stream_interface.h>
49#include <haproxy/task.h>
50#include <haproxy/trace.h>
51#include <haproxy/xprt_quic.h>
52
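/* Default transport parameters, used when the peer does not advertise a value
 * of its own. The constants below are expected to match the RFC 9000 (section
 * 18.2) defaults, i.e. a 65527-byte max_udp_payload_size, an ack_delay_exponent
 * of 3 and a max_ack_delay of 25 ms; this is an assumption based on the macro
 * names, their definitions are not shown in this file.
 */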
53struct quic_transport_params quic_dflt_transport_params = {
Frédéric Lécaille785c9c92021-05-17 16:42:21 +020054 .max_udp_payload_size = QUIC_DFLT_MAX_UDP_PAYLOAD_SIZE,
55 .ack_delay_exponent = QUIC_DFLT_ACK_DELAY_COMPONENT,
56 .max_ack_delay = QUIC_DFLT_MAX_ACK_DELAY,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010057};
58
59/* trace source and events */
60static void quic_trace(enum trace_level level, uint64_t mask, \
61 const struct trace_source *src,
62 const struct ist where, const struct ist func,
63 const void *a1, const void *a2, const void *a3, const void *a4);
64
65static const struct trace_event quic_trace_events[] = {
66 { .mask = QUIC_EV_CONN_NEW, .name = "new_conn", .desc = "new QUIC connection" },
67 { .mask = QUIC_EV_CONN_INIT, .name = "new_conn_init", .desc = "new QUIC connection initialization" },
68 { .mask = QUIC_EV_CONN_ISEC, .name = "init_secs", .desc = "initial secrets derivation" },
69 { .mask = QUIC_EV_CONN_RSEC, .name = "read_secs", .desc = "read secrets derivation" },
70 { .mask = QUIC_EV_CONN_WSEC, .name = "write_secs", .desc = "write secrets derivation" },
71 { .mask = QUIC_EV_CONN_LPKT, .name = "lstnr_packet", .desc = "new listener received packet" },
72 { .mask = QUIC_EV_CONN_SPKT, .name = "srv_packet", .desc = "new server received packet" },
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +050073 { .mask = QUIC_EV_CONN_ENCPKT, .name = "enc_hdshk_pkt", .desc = "handshake packet encryption" },
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010074 { .mask = QUIC_EV_CONN_HPKT, .name = "hdshk_pkt", .desc = "handshake packet building" },
 75 { .mask = QUIC_EV_CONN_PAPKT, .name = "phdshk_apkt", .desc = "post handshake application packet preparation" },
 76 { .mask = QUIC_EV_CONN_PAPKTS, .name = "phdshk_apkts", .desc = "post handshake application packets preparation" },
 77 { .mask = QUIC_EV_CONN_HDSHK, .name = "hdshk", .desc = "SSL handshake processing" },
78 { .mask = QUIC_EV_CONN_RMHP, .name = "rm_hp", .desc = "Remove header protection" },
79 { .mask = QUIC_EV_CONN_PRSHPKT, .name = "parse_hpkt", .desc = "parse handshake packet" },
80 { .mask = QUIC_EV_CONN_PRSAPKT, .name = "parse_apkt", .desc = "parse application packet" },
81 { .mask = QUIC_EV_CONN_PRSFRM, .name = "parse_frm", .desc = "parse frame" },
82 { .mask = QUIC_EV_CONN_PRSAFRM, .name = "parse_ack_frm", .desc = "parse ACK frame" },
83 { .mask = QUIC_EV_CONN_BFRM, .name = "build_frm", .desc = "build frame" },
 84 { .mask = QUIC_EV_CONN_PHPKTS, .name = "phdshk_pkts", .desc = "handshake packets preparation" },
85 { .mask = QUIC_EV_CONN_TRMHP, .name = "rm_hp_try", .desc = "header protection removing try" },
86 { .mask = QUIC_EV_CONN_ELRMHP, .name = "el_rm_hp", .desc = "handshake enc. level header protection removing" },
87 { .mask = QUIC_EV_CONN_ELRXPKTS, .name = "el_treat_rx_pkts", .desc = "handshake enc. level rx packets treatment" },
88 { .mask = QUIC_EV_CONN_SSLDATA, .name = "ssl_provide_data", .desc = "CRYPTO data provision to TLS stack" },
89 { .mask = QUIC_EV_CONN_RXCDATA, .name = "el_treat_rx_cfrms",.desc = "enc. level RX CRYPTO frames processing"},
90 { .mask = QUIC_EV_CONN_ADDDATA, .name = "add_hdshk_data", .desc = "TLS stack ->add_handshake_data() call"},
91 { .mask = QUIC_EV_CONN_FFLIGHT, .name = "flush_flight", .desc = "TLS stack ->flush_flight() call"},
92 { .mask = QUIC_EV_CONN_SSLALERT, .name = "send_alert", .desc = "TLS stack ->send_alert() call"},
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010093 { .mask = QUIC_EV_CONN_RTTUPDT, .name = "rtt_updt", .desc = "RTT sampling" },
94 { .mask = QUIC_EV_CONN_SPPKTS, .name = "sppkts", .desc = "send prepared packets" },
95 { .mask = QUIC_EV_CONN_PKTLOSS, .name = "pktloss", .desc = "detect packet loss" },
96 { .mask = QUIC_EV_CONN_STIMER, .name = "stimer", .desc = "set timer" },
97 { .mask = QUIC_EV_CONN_PTIMER, .name = "ptimer", .desc = "process timer" },
98 { .mask = QUIC_EV_CONN_SPTO, .name = "spto", .desc = "set PTO" },
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +010099 { .mask = QUIC_EV_CONN_BCFRMS, .name = "bcfrms", .desc = "build CRYPTO data frames" },
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100100 { /* end */ }
101};
102
103static const struct name_desc quic_trace_lockon_args[4] = {
104 /* arg1 */ { /* already used by the connection */ },
105 /* arg2 */ { .name="quic", .desc="QUIC transport" },
106 /* arg3 */ { },
107 /* arg4 */ { }
108};
109
110static const struct name_desc quic_trace_decoding[] = {
111#define QUIC_VERB_CLEAN 1
112 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
113 { /* end */ }
114};
115
116
117struct trace_source trace_quic = {
118 .name = IST("quic"),
119 .desc = "QUIC xprt",
120 .arg_def = TRC_ARG1_CONN, /* TRACE()'s first argument is always a connection */
121 .default_cb = quic_trace,
122 .known_events = quic_trace_events,
123 .lockon_args = quic_trace_lockon_args,
124 .decoding = quic_trace_decoding,
125 .report_events = ~0, /* report everything by default */
126};
127
128#define TRACE_SOURCE &trace_quic
129INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
130
131static BIO_METHOD *ha_quic_meth;
132
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100133DECLARE_STATIC_POOL(pool_head_quic_conn_ctx,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +0200134 "quic_conn_ctx_pool", sizeof(struct ssl_sock_ctx));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100135DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100136DECLARE_POOL(pool_head_quic_connection_id,
137 "quic_connnection_id_pool", sizeof(struct quic_connection_id));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100138DECLARE_POOL(pool_head_quic_rx_packet, "quic_rx_packet_pool", sizeof(struct quic_rx_packet));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100139DECLARE_POOL(pool_head_quic_tx_packet, "quic_tx_packet_pool", sizeof(struct quic_tx_packet));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100140DECLARE_STATIC_POOL(pool_head_quic_rx_crypto_frm, "quic_rx_crypto_frm_pool", sizeof(struct quic_rx_crypto_frm));
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100141DECLARE_POOL(pool_head_quic_rx_strm_frm, "quic_rx_strm_frm", sizeof(struct quic_rx_strm_frm));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100142DECLARE_STATIC_POOL(pool_head_quic_crypto_buf, "quic_crypto_buf_pool", sizeof(struct quic_crypto_buf));
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200143DECLARE_POOL(pool_head_quic_frame, "quic_frame_pool", sizeof(struct quic_frame));
Frédéric Lécaille8090b512020-11-30 16:19:22 +0100144DECLARE_STATIC_POOL(pool_head_quic_arng, "quic_arng_pool", sizeof(struct quic_arng_node));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100145
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +0200146static struct quic_tx_packet *qc_build_hdshk_pkt(unsigned char **pos, const unsigned char *buf_end,
147 struct quic_conn *qc, int pkt_type,
148 struct quic_enc_level *qel, int *err);
149int qc_prep_phdshk_pkts(struct qring *qr, struct quic_conn *qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100150
151/* Add traces to <buf> depending on <frm> TX frame type. */
152static inline void chunk_tx_frm_appendf(struct buffer *buf,
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200153 const struct quic_frame *frm)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100154{
155 switch (frm->type) {
156 case QUIC_FT_CRYPTO:
157 chunk_appendf(buf, " cfoff=%llu cflen=%llu",
158 (unsigned long long)frm->crypto.offset,
159 (unsigned long long)frm->crypto.len);
160 break;
161 default:
162 chunk_appendf(buf, " %s", quic_frame_type_string(frm->type));
163 }
164}
165
Frédéric Lécaillef63921f2020-12-18 09:48:20 +0100166/* Only for debugging purposes */
167struct enc_debug_info {
168 unsigned char *payload;
169 size_t payload_len;
170 unsigned char *aad;
171 size_t aad_len;
172 uint64_t pn;
173};
174
 175/* Initializes an enc_debug_info struct (only for debugging purposes) */
176static inline void enc_debug_info_init(struct enc_debug_info *edi,
177 unsigned char *payload, size_t payload_len,
178 unsigned char *aad, size_t aad_len, uint64_t pn)
179{
180 edi->payload = payload;
181 edi->payload_len = payload_len;
182 edi->aad = aad;
183 edi->aad_len = aad_len;
184 edi->pn = pn;
185}
186
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100187/* Trace callback for QUIC.
188 * These traces always expect that arg1, if non-null, is of type connection.
189 */
190static void quic_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
191 const struct ist where, const struct ist func,
192 const void *a1, const void *a2, const void *a3, const void *a4)
193{
194 const struct connection *conn = a1;
195
196 if (conn) {
197 struct quic_tls_secrets *secs;
198 struct quic_conn *qc;
199
200 qc = conn->qc;
201 chunk_appendf(&trace_buf, " : conn@%p", conn);
202 if ((mask & QUIC_EV_CONN_INIT) && qc) {
203 chunk_appendf(&trace_buf, "\n odcid");
204 quic_cid_dump(&trace_buf, &qc->odcid);
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100205 chunk_appendf(&trace_buf, "\n dcid");
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100206 quic_cid_dump(&trace_buf, &qc->dcid);
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100207 chunk_appendf(&trace_buf, "\n scid");
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100208 quic_cid_dump(&trace_buf, &qc->scid);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100209 }
210
211 if (mask & QUIC_EV_CONN_ADDDATA) {
212 const enum ssl_encryption_level_t *level = a2;
213 const size_t *len = a3;
214
215 if (level) {
216 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
217
218 chunk_appendf(&trace_buf, " el=%c(%d)", quic_enc_level_char(lvl), lvl);
219 }
220 if (len)
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100221 chunk_appendf(&trace_buf, " len=%llu", (unsigned long long)*len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100222 }
223 if ((mask & QUIC_EV_CONN_ISEC) && qc) {
224 /* Initial read & write secrets. */
225 enum quic_tls_enc_level level = QUIC_TLS_ENC_LEVEL_INITIAL;
226 const unsigned char *rx_sec = a2;
227 const unsigned char *tx_sec = a3;
228
229 secs = &qc->els[level].tls_ctx.rx;
230 if (secs->flags & QUIC_FL_TLS_SECRETS_SET) {
231 chunk_appendf(&trace_buf, "\n RX el=%c", quic_enc_level_char(level));
232 if (rx_sec)
233 quic_tls_secret_hexdump(&trace_buf, rx_sec, 32);
234 quic_tls_keys_hexdump(&trace_buf, secs);
235 }
236 secs = &qc->els[level].tls_ctx.tx;
237 if (secs->flags & QUIC_FL_TLS_SECRETS_SET) {
238 chunk_appendf(&trace_buf, "\n TX el=%c", quic_enc_level_char(level));
239 if (tx_sec)
240 quic_tls_secret_hexdump(&trace_buf, tx_sec, 32);
241 quic_tls_keys_hexdump(&trace_buf, secs);
242 }
243 }
244 if (mask & (QUIC_EV_CONN_RSEC|QUIC_EV_CONN_RWSEC)) {
245 const enum ssl_encryption_level_t *level = a2;
246 const unsigned char *secret = a3;
247 const size_t *secret_len = a4;
248
249 if (level) {
250 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
251
252 chunk_appendf(&trace_buf, "\n RX el=%c", quic_enc_level_char(lvl));
253 if (secret && secret_len)
254 quic_tls_secret_hexdump(&trace_buf, secret, *secret_len);
255 secs = &qc->els[lvl].tls_ctx.rx;
256 if (secs->flags & QUIC_FL_TLS_SECRETS_SET)
257 quic_tls_keys_hexdump(&trace_buf, secs);
258 }
259 }
260
261 if (mask & (QUIC_EV_CONN_WSEC|QUIC_EV_CONN_RWSEC)) {
262 const enum ssl_encryption_level_t *level = a2;
263 const unsigned char *secret = a3;
264 const size_t *secret_len = a4;
265
266 if (level) {
267 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
268
269 chunk_appendf(&trace_buf, "\n TX el=%c", quic_enc_level_char(lvl));
270 if (secret && secret_len)
271 quic_tls_secret_hexdump(&trace_buf, secret, *secret_len);
272 secs = &qc->els[lvl].tls_ctx.tx;
273 if (secs->flags & QUIC_FL_TLS_SECRETS_SET)
274 quic_tls_keys_hexdump(&trace_buf, secs);
275 }
276
277 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100278
Frédéric Lécaille133e8a72020-12-18 09:33:27 +0100279 if (mask & (QUIC_EV_CONN_HPKT|QUIC_EV_CONN_PAPKT)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100280 const struct quic_tx_packet *pkt = a2;
281 const struct quic_enc_level *qel = a3;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100282 const ssize_t *room = a4;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100283
284 if (qel) {
285 struct quic_pktns *pktns;
286
287 pktns = qc->pktns;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100288 chunk_appendf(&trace_buf, " qel=%c cwnd=%llu ppif=%lld pif=%llu "
289 "if=%llu pp=%u pdg=%d",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100290 quic_enc_level_char_from_qel(qel, qc),
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100291 (unsigned long long)qc->path->cwnd,
292 (unsigned long long)qc->path->prep_in_flight,
293 (unsigned long long)qc->path->in_flight,
294 (unsigned long long)pktns->tx.in_flight,
295 pktns->tx.pto_probe, qc->tx.nb_pto_dgrams);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100296 }
297 if (pkt) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200298 const struct quic_frame *frm;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100299 chunk_appendf(&trace_buf, " pn=%llu cdlen=%u",
300 (unsigned long long)pkt->pn_node.key, pkt->cdata_len);
301 list_for_each_entry(frm, &pkt->frms, list)
302 chunk_tx_frm_appendf(&trace_buf, frm);
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100303 chunk_appendf(&trace_buf, " tx.bytes=%llu", (unsigned long long)qc->tx.bytes);
304 }
305
306 if (room) {
307 chunk_appendf(&trace_buf, " room=%lld", (long long)*room);
308 chunk_appendf(&trace_buf, " dcid.len=%llu scid.len=%llu",
309 (unsigned long long)qc->dcid.len, (unsigned long long)qc->scid.len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100310 }
311 }
312
313 if (mask & QUIC_EV_CONN_HDSHK) {
314 const enum quic_handshake_state *state = a2;
315 const int *err = a3;
316
317 if (state)
318 chunk_appendf(&trace_buf, " state=%s", quic_hdshk_state_str(*state));
319 if (err)
320 chunk_appendf(&trace_buf, " err=%s", ssl_error_str(*err));
321 }
322
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +0100323 if (mask & (QUIC_EV_CONN_TRMHP|QUIC_EV_CONN_ELRMHP|QUIC_EV_CONN_SPKT)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100324 const struct quic_rx_packet *pkt = a2;
325 const unsigned long *pktlen = a3;
326 const SSL *ssl = a4;
327
328 if (pkt) {
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100329 chunk_appendf(&trace_buf, " pkt@%p el=%c",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100330 pkt, quic_packet_type_enc_level_char(pkt->type));
331 if (pkt->pnl)
332 chunk_appendf(&trace_buf, " pnl=%u pn=%llu", pkt->pnl,
333 (unsigned long long)pkt->pn);
334 if (pkt->token_len)
335 chunk_appendf(&trace_buf, " toklen=%llu",
336 (unsigned long long)pkt->token_len);
337 if (pkt->aad_len)
338 chunk_appendf(&trace_buf, " aadlen=%llu",
339 (unsigned long long)pkt->aad_len);
340 chunk_appendf(&trace_buf, " flags=0x%x len=%llu",
341 pkt->flags, (unsigned long long)pkt->len);
342 }
343 if (pktlen)
344 chunk_appendf(&trace_buf, " (%ld)", *pktlen);
345 if (ssl) {
346 enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
347 chunk_appendf(&trace_buf, " el=%c",
348 quic_enc_level_char(ssl_to_quic_enc_level(level)));
349 }
350 }
351
352 if (mask & (QUIC_EV_CONN_ELRXPKTS|QUIC_EV_CONN_PRSHPKT|QUIC_EV_CONN_SSLDATA)) {
353 const struct quic_rx_packet *pkt = a2;
354 const struct quic_rx_crypto_frm *cf = a3;
355 const SSL *ssl = a4;
356
357 if (pkt)
358 chunk_appendf(&trace_buf, " pkt@%p el=%c pn=%llu", pkt,
359 quic_packet_type_enc_level_char(pkt->type),
360 (unsigned long long)pkt->pn);
361 if (cf)
362 chunk_appendf(&trace_buf, " cfoff=%llu cflen=%llu",
363 (unsigned long long)cf->offset_node.key,
364 (unsigned long long)cf->len);
365 if (ssl) {
366 enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
367 chunk_appendf(&trace_buf, " el=%c",
368 quic_enc_level_char(ssl_to_quic_enc_level(level)));
369 }
370 }
371
372 if (mask & (QUIC_EV_CONN_PRSFRM|QUIC_EV_CONN_BFRM)) {
373 const struct quic_frame *frm = a2;
374
375 if (frm)
376 chunk_appendf(&trace_buf, " %s", quic_frame_type_string(frm->type));
377 }
378
379 if (mask & QUIC_EV_CONN_PHPKTS) {
380 const struct quic_enc_level *qel = a2;
381
382 if (qel) {
383 struct quic_pktns *pktns;
384
385 pktns = qc->pktns;
386 chunk_appendf(&trace_buf,
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100387 " qel=%c ack?%d cwnd=%llu ppif=%lld pif=%llu if=%llu pp=%u pdg=%llu",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100388 quic_enc_level_char_from_qel(qel, qc),
389 !!(pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED),
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100390 (unsigned long long)qc->path->cwnd,
391 (unsigned long long)qc->path->prep_in_flight,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100392 (unsigned long long)qc->path->in_flight,
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100393 (unsigned long long)pktns->tx.in_flight, pktns->tx.pto_probe,
394 (unsigned long long)qc->tx.nb_pto_dgrams);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100395 }
396 }
397
Frédéric Lécaillef63921f2020-12-18 09:48:20 +0100398 if (mask & QUIC_EV_CONN_ENCPKT) {
399 const struct enc_debug_info *edi = a2;
400
401 if (edi)
402 chunk_appendf(&trace_buf,
403 " payload=@%p payload_len=%llu"
404 " aad=@%p aad_len=%llu pn=%llu",
405 edi->payload, (unsigned long long)edi->payload_len,
406 edi->aad, (unsigned long long)edi->aad_len,
407 (unsigned long long)edi->pn);
408 }
409
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100410 if (mask & QUIC_EV_CONN_RMHP) {
411 const struct quic_rx_packet *pkt = a2;
412
413 if (pkt) {
414 const int *ret = a3;
415
416 chunk_appendf(&trace_buf, " pkt@%p", pkt);
417 if (ret && *ret)
418 chunk_appendf(&trace_buf, " pnl=%u pn=%llu",
419 pkt->pnl, (unsigned long long)pkt->pn);
420 }
421 }
422
423 if (mask & QUIC_EV_CONN_PRSAFRM) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200424 const struct quic_frame *frm = a2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100425 const unsigned long *val1 = a3;
426 const unsigned long *val2 = a4;
427
428 if (frm)
429 chunk_tx_frm_appendf(&trace_buf, frm);
430 if (val1)
431 chunk_appendf(&trace_buf, " %lu", *val1);
432 if (val2)
433 chunk_appendf(&trace_buf, "..%lu", *val2);
434 }
435
436 if (mask & QUIC_EV_CONN_RTTUPDT) {
437 const unsigned int *rtt_sample = a2;
438 const unsigned int *ack_delay = a3;
439 const struct quic_loss *ql = a4;
440
441 if (rtt_sample)
442 chunk_appendf(&trace_buf, " rtt_sample=%ums", *rtt_sample);
443 if (ack_delay)
444 chunk_appendf(&trace_buf, " ack_delay=%ums", *ack_delay);
445 if (ql)
446 chunk_appendf(&trace_buf,
447 " srtt=%ums rttvar=%ums min_rtt=%ums",
448 ql->srtt >> 3, ql->rtt_var >> 2, ql->rtt_min);
449 }
450 if (mask & QUIC_EV_CONN_CC) {
451 const struct quic_cc_event *ev = a2;
452 const struct quic_cc *cc = a3;
453
454 if (a2)
455 quic_cc_event_trace(&trace_buf, ev);
456 if (a3)
457 quic_cc_state_trace(&trace_buf, cc);
458 }
459
460 if (mask & QUIC_EV_CONN_PKTLOSS) {
461 const struct quic_pktns *pktns = a2;
462 const struct list *lost_pkts = a3;
463 struct quic_conn *qc = conn->qc;
464
465 if (pktns) {
466 chunk_appendf(&trace_buf, " pktns=%s",
467 pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
468 pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H");
469 if (pktns->tx.loss_time)
470 chunk_appendf(&trace_buf, " loss_time=%dms",
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100471 TICKS_TO_MS(tick_remain(now_ms, pktns->tx.loss_time)));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100472 }
473 if (lost_pkts && !LIST_ISEMPTY(lost_pkts)) {
474 struct quic_tx_packet *pkt;
475
476 chunk_appendf(&trace_buf, " lost_pkts:");
477 list_for_each_entry(pkt, lost_pkts, list)
478 chunk_appendf(&trace_buf, " %lu", (unsigned long)pkt->pn_node.key);
479 }
480 }
481
482 if (mask & (QUIC_EV_CONN_STIMER|QUIC_EV_CONN_PTIMER|QUIC_EV_CONN_SPTO)) {
483 struct quic_conn *qc = conn->qc;
484 const struct quic_pktns *pktns = a2;
485 const int *duration = a3;
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100486 const uint64_t *ifae_pkts = a4;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100487
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100488 if (ifae_pkts)
489 chunk_appendf(&trace_buf, " ifae_pkts=%llu",
490 (unsigned long long)*ifae_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100491 if (pktns) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100492 chunk_appendf(&trace_buf, " pktns=%s pp=%d",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100493 pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100494 pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H",
495 pktns->tx.pto_probe);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100496 if (mask & QUIC_EV_CONN_STIMER) {
497 if (pktns->tx.loss_time)
498 chunk_appendf(&trace_buf, " loss_time=%dms",
499 TICKS_TO_MS(pktns->tx.loss_time - now_ms));
500 }
501 if (mask & QUIC_EV_CONN_SPTO) {
502 if (pktns->tx.time_of_last_eliciting)
503 chunk_appendf(&trace_buf, " tole=%dms",
504 TICKS_TO_MS(pktns->tx.time_of_last_eliciting - now_ms));
505 if (duration)
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100506 chunk_appendf(&trace_buf, " dur=%dms", TICKS_TO_MS(*duration));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100507 }
508 }
509
510 if (!(mask & QUIC_EV_CONN_SPTO) && qc->timer_task) {
511 chunk_appendf(&trace_buf,
512 " expire=%dms", TICKS_TO_MS(qc->timer - now_ms));
513 }
514 }
515
516 if (mask & QUIC_EV_CONN_SPPKTS) {
517 const struct quic_tx_packet *pkt = a2;
518
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100519 chunk_appendf(&trace_buf, " cwnd=%llu ppif=%llu pif=%llu",
520 (unsigned long long)qc->path->cwnd,
521 (unsigned long long)qc->path->prep_in_flight,
522 (unsigned long long)qc->path->in_flight);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100523 if (pkt) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100524 chunk_appendf(&trace_buf, " pn=%lu(%s) iflen=%llu cdlen=%llu",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100525 (unsigned long)pkt->pn_node.key,
526 pkt->pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
527 pkt->pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H",
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100528 (unsigned long long)pkt->in_flight_len,
529 (unsigned long long)pkt->cdata_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100530 }
531 }
Frédéric Lécaille47c433f2020-12-10 17:03:11 +0100532
533 if (mask & QUIC_EV_CONN_SSLALERT) {
534 const uint8_t *alert = a2;
535 const enum ssl_encryption_level_t *level = a3;
536
537 if (alert)
538 chunk_appendf(&trace_buf, " alert=0x%02x", *alert);
539 if (level)
540 chunk_appendf(&trace_buf, " el=%c",
541 quic_enc_level_char(ssl_to_quic_enc_level(*level)));
542 }
Frédéric Lécailleea604992020-12-24 13:01:37 +0100543
544 if (mask & QUIC_EV_CONN_BCFRMS) {
545 const size_t *sz1 = a2;
546 const size_t *sz2 = a3;
547 const size_t *sz3 = a4;
548
549 if (sz1)
550 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz1);
551 if (sz2)
552 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz2);
553 if (sz3)
554 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz3);
555 }
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +0100556
557 if (mask & QUIC_EV_CONN_PSTRM) {
558 const struct quic_frame *frm = a2;
Frédéric Lécaille577fe482021-01-11 15:10:06 +0100559
560 if (a2) {
561 const struct quic_stream *s = &frm->stream;
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +0100562
Frédéric Lécaille577fe482021-01-11 15:10:06 +0100563 chunk_appendf(&trace_buf, " uni=%d fin=%d id=%llu off=%llu len=%llu",
564 !!(s->id & QUIC_STREAM_FRAME_ID_DIR_BIT),
565 !!(frm->type & QUIC_STREAM_FRAME_TYPE_FIN_BIT),
566 (unsigned long long)s->id,
567 (unsigned long long)s->offset,
568 (unsigned long long)s->len);
569 }
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +0100570 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100571 }
572 if (mask & QUIC_EV_CONN_LPKT) {
573 const struct quic_rx_packet *pkt = a2;
574
575 if (conn)
Frédéric Lécaille2e7ffc92021-06-10 08:18:45 +0200576 chunk_appendf(&trace_buf, " xprt_ctx@%p qc@%p", conn->xprt_ctx, conn->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100577 if (pkt)
Frédéric Lécaille2e7ffc92021-06-10 08:18:45 +0200578 chunk_appendf(&trace_buf, " pkt@%p type=0x%02x %s pkt->qc@%p",
579 pkt, pkt->type, qc_pkt_long(pkt) ? "long" : "short", pkt->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100580 }
581
582}
583
584/* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */
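/* In practice, for a QUIC server, this is RFC 9002/9000 address validation:
 * once an ACK has been received in the Handshake or 1-RTT packet number space,
 * or the handshake has completed, the peer address is considered validated.
 * qc_set_timer() below relies on this to decide whether the PTO timer may be
 * cancelled.
 */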
Frédéric Lécaille1eaec332021-06-04 14:59:59 +0200585static inline int quic_peer_validated_addr(struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100586{
587 struct quic_conn *qc;
588
589 qc = ctx->conn->qc;
590 if (objt_server(qc->conn->target))
591 return 1;
592
593 if ((qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns->flags & QUIC_FL_PKTNS_ACK_RECEIVED) ||
594 (qc->els[QUIC_TLS_ENC_LEVEL_APP].pktns->flags & QUIC_FL_PKTNS_ACK_RECEIVED) ||
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +0200595 (qc->state & QUIC_HS_ST_COMPLETE))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100596 return 1;
597
598 return 0;
599}
600
601/* Set the timer attached to the QUIC connection with <ctx> as I/O handler and used for
 602 * both loss detection and PTO, and schedule the task associated with this timer if needed.
603 */
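/* Selection order, as implemented below: the earliest packet number space
 * loss_time takes precedence; otherwise the PTO expiry is used, and the timer
 * is cancelled when nothing ack-eliciting is in flight while the peer address
 * has been validated, in line with RFC 9002 section 6.2.1.
 */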
Frédéric Lécaille1eaec332021-06-04 14:59:59 +0200604static inline void qc_set_timer(struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100605{
606 struct quic_conn *qc;
607 struct quic_pktns *pktns;
608 unsigned int pto;
609
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100610 TRACE_ENTER(QUIC_EV_CONN_STIMER, ctx->conn,
611 NULL, NULL, &ctx->conn->qc->path->ifae_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100612 qc = ctx->conn->qc;
613 pktns = quic_loss_pktns(qc);
614 if (tick_isset(pktns->tx.loss_time)) {
615 qc->timer = pktns->tx.loss_time;
616 goto out;
617 }
618
619 /* XXX TODO: anti-amplification: the timer must be
620 * cancelled for a server which reached the anti-amplification limit.
621 */
622
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100623 if (!qc->path->ifae_pkts && quic_peer_validated_addr(ctx)) {
624 TRACE_PROTO("timer cancellation", QUIC_EV_CONN_STIMER, ctx->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100625 /* Timer cancellation. */
626 qc->timer = TICK_ETERNITY;
627 goto out;
628 }
629
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +0200630 pktns = quic_pto_pktns(qc, qc->state & QUIC_HS_ST_COMPLETE, &pto);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100631 if (tick_isset(pto))
632 qc->timer = pto;
633 out:
634 task_schedule(qc->timer_task, qc->timer);
635 TRACE_LEAVE(QUIC_EV_CONN_STIMER, ctx->conn, pktns);
636}
637
638#ifndef OPENSSL_IS_BORINGSSL
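/* ->set_encryption_secrets callback: called by the TLS stack each time new
 * read/write secrets become available at <level>. For each secret, the packet
 * protection key, IV and header protection key are derived, presumably with
 * the HKDF-Expand-Label "quic key"/"quic iv"/"quic hp" labels of RFC 9001
 * section 5.1 inside quic_tls_derive_keys(), whose definition is not shown
 * here.
 */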
639int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t level,
640 const uint8_t *read_secret,
641 const uint8_t *write_secret, size_t secret_len)
642{
643 struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
644 struct quic_tls_ctx *tls_ctx =
645 &conn->qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
646 const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
647
648 TRACE_ENTER(QUIC_EV_CONN_RWSEC, conn);
649 tls_ctx->rx.aead = tls_ctx->tx.aead = tls_aead(cipher);
650 tls_ctx->rx.md = tls_ctx->tx.md = tls_md(cipher);
651 tls_ctx->rx.hp = tls_ctx->tx.hp = tls_hp(cipher);
652
653 if (!quic_tls_derive_keys(tls_ctx->rx.aead, tls_ctx->rx.hp, tls_ctx->rx.md,
654 tls_ctx->rx.key, sizeof tls_ctx->rx.key,
655 tls_ctx->rx.iv, sizeof tls_ctx->rx.iv,
656 tls_ctx->rx.hp_key, sizeof tls_ctx->rx.hp_key,
657 read_secret, secret_len)) {
658 TRACE_DEVEL("RX key derivation failed", QUIC_EV_CONN_RWSEC, conn);
659 return 0;
660 }
661
662 tls_ctx->rx.flags |= QUIC_FL_TLS_SECRETS_SET;
663 if (!quic_tls_derive_keys(tls_ctx->tx.aead, tls_ctx->tx.hp, tls_ctx->tx.md,
664 tls_ctx->tx.key, sizeof tls_ctx->tx.key,
665 tls_ctx->tx.iv, sizeof tls_ctx->tx.iv,
666 tls_ctx->tx.hp_key, sizeof tls_ctx->tx.hp_key,
667 write_secret, secret_len)) {
668 TRACE_DEVEL("TX key derivation failed", QUIC_EV_CONN_RWSEC, conn);
669 return 0;
670 }
671
672 tls_ctx->tx.flags |= QUIC_FL_TLS_SECRETS_SET;
673 if (objt_server(conn->target) && level == ssl_encryption_application) {
674 const unsigned char *buf;
675 size_t buflen;
676
677 SSL_get_peer_quic_transport_params(ssl, &buf, &buflen);
678 if (!buflen)
679 return 0;
680
681 if (!quic_transport_params_store(conn->qc, 1, buf, buf + buflen))
682 return 0;
683 }
684 TRACE_LEAVE(QUIC_EV_CONN_RWSEC, conn, &level);
685
686 return 1;
687}
688#else
689/* ->set_read_secret callback to derive the RX secrets at <level> encryption
690 * level.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +0500691 * Returns 1 if succeeded, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100692 */
693int ha_set_rsec(SSL *ssl, enum ssl_encryption_level_t level,
694 const SSL_CIPHER *cipher,
695 const uint8_t *secret, size_t secret_len)
696{
697 struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
698 struct quic_tls_ctx *tls_ctx =
699 &conn->qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
700
701 TRACE_ENTER(QUIC_EV_CONN_RSEC, conn);
702 tls_ctx->rx.aead = tls_aead(cipher);
703 tls_ctx->rx.md = tls_md(cipher);
704 tls_ctx->rx.hp = tls_hp(cipher);
705
706 if (!quic_tls_derive_keys(tls_ctx->rx.aead, tls_ctx->rx.hp, tls_ctx->rx.md,
707 tls_ctx->rx.key, sizeof tls_ctx->rx.key,
708 tls_ctx->rx.iv, sizeof tls_ctx->rx.iv,
709 tls_ctx->rx.hp_key, sizeof tls_ctx->rx.hp_key,
710 secret, secret_len)) {
711 TRACE_DEVEL("RX key derivation failed", QUIC_EV_CONN_RSEC, conn);
712 goto err;
713 }
714
715 if (objt_server(conn->target) && level == ssl_encryption_application) {
716 const unsigned char *buf;
717 size_t buflen;
718
719 SSL_get_peer_quic_transport_params(ssl, &buf, &buflen);
720 if (!buflen)
721 goto err;
722
723 if (!quic_transport_params_store(conn->qc, 1, buf, buf + buflen))
724 goto err;
725 }
726
727 tls_ctx->rx.flags |= QUIC_FL_TLS_SECRETS_SET;
728 TRACE_LEAVE(QUIC_EV_CONN_RSEC, conn, &level, secret, &secret_len);
729
730 return 1;
731
732 err:
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +0100733 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_RSEC, conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100734 return 0;
735}
736
737/* ->set_write_secret callback to derive the TX secrets at <level>
738 * encryption level.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +0500739 * Returns 1 if succeeded, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100740 */
741int ha_set_wsec(SSL *ssl, enum ssl_encryption_level_t level,
742 const SSL_CIPHER *cipher,
743 const uint8_t *secret, size_t secret_len)
744{
745 struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
746 struct quic_tls_ctx *tls_ctx =
747 &conn->qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
748
749 TRACE_ENTER(QUIC_EV_CONN_WSEC, conn);
750 tls_ctx->tx.aead = tls_aead(cipher);
751 tls_ctx->tx.md = tls_md(cipher);
752 tls_ctx->tx.hp = tls_hp(cipher);
753
754 if (!quic_tls_derive_keys(tls_ctx->tx.aead, tls_ctx->tx.hp, tls_ctx->tx.md,
755 tls_ctx->tx.key, sizeof tls_ctx->tx.key,
756 tls_ctx->tx.iv, sizeof tls_ctx->tx.iv,
757 tls_ctx->tx.hp_key, sizeof tls_ctx->tx.hp_key,
758 secret, secret_len)) {
759 TRACE_DEVEL("TX key derivation failed", QUIC_EV_CONN_WSEC, conn);
760 goto err;
761 }
762
763 tls_ctx->tx.flags |= QUIC_FL_TLS_SECRETS_SET;
764 TRACE_LEAVE(QUIC_EV_CONN_WSEC, conn, &level, secret, &secret_len);
765
766 return 1;
767
768 err:
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +0100769 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_WSEC, conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100770 return 0;
771}
772#endif
773
774/* This function copies the CRYPTO data provided by the TLS stack found at <data>
775 * with <len> as size in CRYPTO buffers dedicated to store the information about
 776 * outgoing CRYPTO frames so as to be able to replay the CRYPTO data streams.
 777 * It fails only if it could not manage to allocate enough CRYPTO buffers to
778 * store all the data.
779 * Note that CRYPTO data may exist at any encryption level except at 0-RTT.
780 */
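/* Sketch of the buffering arithmetic, assuming for illustration that
 * QUIC_CRYPTO_BUF_SZ is 4096 (its real value may differ): if buffer #0 already
 * holds 1000 bytes and 5000 new bytes arrive, cf_offset is 0 * 4096 + 1000 = 1000;
 * the first 3096 bytes fill buffer #0, buffer #1 is then allocated for the
 * remaining 1904 bytes, and a single CRYPTO frame with offset 1000 and length
 * 5000 is queued once everything has been copied.
 */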
781static int quic_crypto_data_cpy(struct quic_enc_level *qel,
782 const unsigned char *data, size_t len)
783{
784 struct quic_crypto_buf **qcb;
 785 /* The remaining bytes to store in CRYPTO buffers. */
786 size_t cf_offset, cf_len, *nb_buf;
787 unsigned char *pos;
788
789 nb_buf = &qel->tx.crypto.nb_buf;
790 qcb = &qel->tx.crypto.bufs[*nb_buf - 1];
791 cf_offset = (*nb_buf - 1) * QUIC_CRYPTO_BUF_SZ + (*qcb)->sz;
792 cf_len = len;
793
794 while (len) {
795 size_t to_copy, room;
796
797 pos = (*qcb)->data + (*qcb)->sz;
798 room = QUIC_CRYPTO_BUF_SZ - (*qcb)->sz;
799 to_copy = len > room ? room : len;
800 if (to_copy) {
801 memcpy(pos, data, to_copy);
802 /* Increment the total size of this CRYPTO buffers by <to_copy>. */
803 qel->tx.crypto.sz += to_copy;
804 (*qcb)->sz += to_copy;
805 pos += to_copy;
806 len -= to_copy;
807 data += to_copy;
808 }
809 else {
810 struct quic_crypto_buf **tmp;
811
812 tmp = realloc(qel->tx.crypto.bufs,
813 (*nb_buf + 1) * sizeof *qel->tx.crypto.bufs);
814 if (tmp) {
815 qel->tx.crypto.bufs = tmp;
816 qcb = &qel->tx.crypto.bufs[*nb_buf];
817 *qcb = pool_alloc(pool_head_quic_crypto_buf);
818 if (!*qcb)
819 return 0;
820
821 (*qcb)->sz = 0;
822 ++*nb_buf;
823 }
824 else {
825 break;
826 }
827 }
828 }
829
830 /* Allocate a TX CRYPTO frame only if all the CRYPTO data
831 * have been buffered.
832 */
833 if (!len) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200834 struct quic_frame *frm;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100835
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200836 frm = pool_alloc(pool_head_quic_frame);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100837 if (!frm)
838 return 0;
839
840 frm->type = QUIC_FT_CRYPTO;
841 frm->crypto.offset = cf_offset;
842 frm->crypto.len = cf_len;
Frédéric Lécaillec88df072021-07-27 11:43:11 +0200843 MT_LIST_APPEND(&qel->pktns->tx.frms, &frm->mt_list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100844 }
845
846 return len == 0;
847}
848
849
850/* ->add_handshake_data QUIC TLS callback used by the QUIC TLS stack when it
851 * wants to provide the QUIC layer with CRYPTO data.
852 * Returns 1 if succeeded, 0 if not.
853 */
854int ha_quic_add_handshake_data(SSL *ssl, enum ssl_encryption_level_t level,
855 const uint8_t *data, size_t len)
856{
857 struct connection *conn;
858 enum quic_tls_enc_level tel;
859 struct quic_enc_level *qel;
860
861 conn = SSL_get_ex_data(ssl, ssl_app_data_index);
862 TRACE_ENTER(QUIC_EV_CONN_ADDDATA, conn);
863 tel = ssl_to_quic_enc_level(level);
864 qel = &conn->qc->els[tel];
865
866 if (tel == -1) {
867 TRACE_PROTO("Wrong encryption level", QUIC_EV_CONN_ADDDATA, conn);
868 goto err;
869 }
870
871 if (!quic_crypto_data_cpy(qel, data, len)) {
872 TRACE_PROTO("Could not bufferize", QUIC_EV_CONN_ADDDATA, conn);
873 goto err;
874 }
875
876 TRACE_PROTO("CRYPTO data buffered", QUIC_EV_CONN_ADDDATA,
877 conn, &level, &len);
878
879 TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, conn);
880 return 1;
881
882 err:
883 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ADDDATA, conn);
884 return 0;
885}
886
887int ha_quic_flush_flight(SSL *ssl)
888{
889 struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
890
891 TRACE_ENTER(QUIC_EV_CONN_FFLIGHT, conn);
892 TRACE_LEAVE(QUIC_EV_CONN_FFLIGHT, conn);
893
894 return 1;
895}
896
897int ha_quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert)
898{
899 struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
900
Frédéric Lécaille47c433f2020-12-10 17:03:11 +0100901 TRACE_DEVEL("SSL alert", QUIC_EV_CONN_SSLALERT, conn, &alert, &level);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100902 return 1;
903}
904
905/* QUIC TLS methods */
906static SSL_QUIC_METHOD ha_quic_method = {
907#ifdef OPENSSL_IS_BORINGSSL
908 .set_read_secret = ha_set_rsec,
909 .set_write_secret = ha_set_wsec,
910#else
911 .set_encryption_secrets = ha_quic_set_encryption_secrets,
912#endif
913 .add_handshake_data = ha_quic_add_handshake_data,
914 .flush_flight = ha_quic_flush_flight,
915 .send_alert = ha_quic_send_alert,
916};
917
918/* Initialize the TLS context of a listener with <bind_conf> as configuration.
919 * Returns an error count.
920 */
921int ssl_quic_initial_ctx(struct bind_conf *bind_conf)
922{
923 struct proxy *curproxy = bind_conf->frontend;
924 struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
925 int cfgerr = 0;
926
927#if 0
928 /* XXX Did not manage to use this. */
929 const char *ciphers =
930 "TLS_AES_128_GCM_SHA256:"
931 "TLS_AES_256_GCM_SHA384:"
932 "TLS_CHACHA20_POLY1305_SHA256:"
933 "TLS_AES_128_CCM_SHA256";
934#endif
Frédéric Lécaille4b1fddc2021-07-01 17:09:05 +0200935 const char *groups = "X25519:P-256:P-384:P-521";
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100936 long options =
937 (SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) |
938 SSL_OP_SINGLE_ECDH_USE |
939 SSL_OP_CIPHER_SERVER_PREFERENCE;
940 SSL_CTX *ctx;
941
942 ctx = SSL_CTX_new(TLS_server_method());
943 bind_conf->initial_ctx = ctx;
944
945 SSL_CTX_set_options(ctx, options);
946#if 0
947 if (SSL_CTX_set_cipher_list(ctx, ciphers) != 1) {
948 ha_alert("Proxy '%s': unable to set TLS 1.3 cipher list to '%s' "
949 "for bind '%s' at [%s:%d].\n",
950 curproxy->id, ciphers,
951 bind_conf->arg, bind_conf->file, bind_conf->line);
952 cfgerr++;
953 }
954#endif
955
956 if (SSL_CTX_set1_curves_list(ctx, groups) != 1) {
957 ha_alert("Proxy '%s': unable to set TLS 1.3 curves list to '%s' "
958 "for bind '%s' at [%s:%d].\n",
959 curproxy->id, groups,
960 bind_conf->arg, bind_conf->file, bind_conf->line);
961 cfgerr++;
962 }
963
964 SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
965 SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
966 SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
967 SSL_CTX_set_default_verify_paths(ctx);
968
969#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
970#ifdef OPENSSL_IS_BORINGSSL
971 SSL_CTX_set_select_certificate_cb(ctx, ssl_sock_switchctx_cbk);
972 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
973#elif (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
974 if (bind_conf->ssl_conf.early_data) {
975 SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
976 SSL_CTX_set_max_early_data(ctx, global.tune.bufsize - global.tune.maxrewrite);
977 }
978 SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
979 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
980#else
981 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
982#endif
983 SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
984#endif
985 SSL_CTX_set_quic_method(ctx, &ha_quic_method);
986
987 return cfgerr;
988}
989
 990/* Decode an expected packet number from <truncated_pn> its truncated value,
991 * depending on <largest_pn> the largest received packet number, and <pn_nbits>
992 * the number of bits used to encode this packet number (its length in bytes * 8).
993 * See https://quicwg.org/base-drafts/draft-ietf-quic-transport.html#packet-encoding
994 */
995static uint64_t decode_packet_number(uint64_t largest_pn,
996 uint32_t truncated_pn, unsigned int pn_nbits)
997{
998 uint64_t expected_pn = largest_pn + 1;
999 uint64_t pn_win = (uint64_t)1 << pn_nbits;
1000 uint64_t pn_hwin = pn_win / 2;
1001 uint64_t pn_mask = pn_win - 1;
1002 uint64_t candidate_pn;
1003
1004
1005 candidate_pn = (expected_pn & ~pn_mask) | truncated_pn;
1006 /* Note that <pn_win> > <pn_hwin>. */
1007 if (candidate_pn < QUIC_MAX_PACKET_NUM - pn_win &&
1008 candidate_pn + pn_hwin <= expected_pn)
1009 return candidate_pn + pn_win;
1010
1011 if (candidate_pn > expected_pn + pn_hwin && candidate_pn >= pn_win)
1012 return candidate_pn - pn_win;
1013
1014 return candidate_pn;
1015}
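/* Worked example, taken from RFC 9000 appendix A.3: with largest_pn 0xa82f30ea,
 * truncated_pn 0x9b32 and pn_nbits 16, expected_pn is 0xa82f30eb, pn_win is
 * 0x10000 and pn_mask is 0xffff, so candidate_pn is 0xa82f9b32; neither window
 * correction applies and 0xa82f9b32 is returned.
 */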
1016
1017/* Remove the header protection of <pkt> QUIC packet using <tls_ctx> as QUIC TLS
1018 * cryptographic context.
1019 * <largest_pn> is the largest received packet number and <pn> the address of
1020 * the packet number field for this packet with <byte0> address of its first byte.
1021 * <end> points to one byte past the end of this packet.
1022 * Returns 1 if succeeded, 0 if not.
1023 */
1024static int qc_do_rm_hp(struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
1025 int64_t largest_pn, unsigned char *pn,
1026 unsigned char *byte0, const unsigned char *end,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001027 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001028{
1029 int ret, outlen, i, pnlen;
1030 uint64_t packet_number;
1031 uint32_t truncated_pn = 0;
1032 unsigned char mask[5] = {0};
1033 unsigned char *sample;
1034 EVP_CIPHER_CTX *cctx;
1035 unsigned char *hp_key;
1036
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001037 /* Check there is enough data in this packet. */
1038 if (end - pn < QUIC_PACKET_PN_MAXLEN + sizeof mask) {
1039 TRACE_DEVEL("too short packet", QUIC_EV_CONN_RMHP, ctx->conn, pkt);
1040 return 0;
1041 }
1042
1043 cctx = EVP_CIPHER_CTX_new();
1044 if (!cctx) {
1045 TRACE_DEVEL("memory allocation failed", QUIC_EV_CONN_RMHP, ctx->conn, pkt);
1046 return 0;
1047 }
1048
1049 ret = 0;
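	/* The header protection sample starts 4 bytes past the first byte of the
	 * packet number field, i.e. the packet number is assumed to take its
	 * maximum length (RFC 9001, section 5.4.2).
	 */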
1050 sample = pn + QUIC_PACKET_PN_MAXLEN;
1051
1052 hp_key = tls_ctx->rx.hp_key;
1053 if (!EVP_DecryptInit_ex(cctx, tls_ctx->rx.hp, NULL, hp_key, sample) ||
1054 !EVP_DecryptUpdate(cctx, mask, &outlen, mask, sizeof mask) ||
1055 !EVP_DecryptFinal_ex(cctx, mask, &outlen)) {
1056 TRACE_DEVEL("decryption failed", QUIC_EV_CONN_RMHP, ctx->conn, pkt);
1057 goto out;
1058 }
1059
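	/* Only the low-order bits of the first byte are protected: 4 bits for long
	 * headers, 5 bits for short headers; the following mask bytes then
	 * unprotect the packet number itself (RFC 9001, section 5.4.1).
	 */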
1060 *byte0 ^= mask[0] & (*byte0 & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
1061 pnlen = (*byte0 & QUIC_PACKET_PNL_BITMASK) + 1;
1062 for (i = 0; i < pnlen; i++) {
1063 pn[i] ^= mask[i + 1];
1064 truncated_pn = (truncated_pn << 8) | pn[i];
1065 }
1066
1067 packet_number = decode_packet_number(largest_pn, truncated_pn, pnlen * 8);
1068 /* Store remaining information for this unprotected header */
1069 pkt->pn = packet_number;
1070 pkt->pnl = pnlen;
1071
1072 ret = 1;
1073
1074 out:
1075 EVP_CIPHER_CTX_free(cctx);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001076
1077 return ret;
1078}
1079
1080/* Encrypt the payload of a QUIC packet with <pn> as number found at <payload>
1081 * address, with <payload_len> as payload length, <aad> as address of
1082 * the ADD and <aad_len> as AAD length depending on the <tls_ctx> QUIC TLS
1083 * context.
1084 * Returns 1 if succeeded, 0 if not.
1085 */
1086static int quic_packet_encrypt(unsigned char *payload, size_t payload_len,
1087 unsigned char *aad, size_t aad_len, uint64_t pn,
1088 struct quic_tls_ctx *tls_ctx, struct connection *conn)
1089{
1090 unsigned char iv[12];
1091 unsigned char *tx_iv = tls_ctx->tx.iv;
1092 size_t tx_iv_sz = sizeof tls_ctx->tx.iv;
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001093 struct enc_debug_info edi;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001094
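	/* The per-packet AEAD nonce is presumably built by XOR'ing the packet
	 * number <pn> into the TX IV as described in RFC 9001 section 5.3;
	 * quic_aead_iv_build(), whose definition is not shown here, is expected
	 * to implement this.
	 */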
1095 if (!quic_aead_iv_build(iv, sizeof iv, tx_iv, tx_iv_sz, pn)) {
1096 TRACE_DEVEL("AEAD IV building for encryption failed", QUIC_EV_CONN_HPKT, conn);
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001097 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001098 }
1099
1100 if (!quic_tls_encrypt(payload, payload_len, aad, aad_len,
1101 tls_ctx->tx.aead, tls_ctx->tx.key, iv)) {
1102 TRACE_DEVEL("QUIC packet encryption failed", QUIC_EV_CONN_HPKT, conn);
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001103 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001104 }
1105
1106 return 1;
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001107
1108 err:
1109 enc_debug_info_init(&edi, payload, payload_len, aad, aad_len, pn);
1110 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ENCPKT, conn, &edi);
1111 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001112}
1113
1114/* Decrypt <pkt> QUIC packet with <tls_ctx> as QUIC TLS cryptographic context.
1115 * Returns 1 if succeeded, 0 if not.
1116 */
1117static int qc_pkt_decrypt(struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx)
1118{
1119 int ret;
1120 unsigned char iv[12];
1121 unsigned char *rx_iv = tls_ctx->rx.iv;
1122 size_t rx_iv_sz = sizeof tls_ctx->rx.iv;
1123
1124 if (!quic_aead_iv_build(iv, sizeof iv, rx_iv, rx_iv_sz, pkt->pn))
1125 return 0;
1126
1127 ret = quic_tls_decrypt(pkt->data + pkt->aad_len, pkt->len - pkt->aad_len,
1128 pkt->data, pkt->aad_len,
1129 tls_ctx->rx.aead, tls_ctx->rx.key, iv);
1130 if (!ret)
1131 return 0;
1132
1133 /* Update the packet length (required to parse the frames). */
1134 pkt->len = pkt->aad_len + ret;
1135
1136 return 1;
1137}
1138
 1139/* Treat <frm> frame, the packet it is attached to having just been acknowledged. */
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001140static inline void qc_treat_acked_tx_frm(struct quic_frame *frm,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001141 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001142{
1143 TRACE_PROTO("Removing frame", QUIC_EV_CONN_PRSAFRM, ctx->conn, frm);
Willy Tarreau2b718102021-04-21 07:32:39 +02001144 LIST_DELETE(&frm->list);
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001145 pool_free(pool_head_quic_frame, frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001146}
1147
 1148/* Remove <largest> down to <smallest> node entries from <pkts> tree of TX packets,
1149 * deallocating them, and their TX frames.
1150 * Returns the last node reached to be used for the next range.
1151 * May be NULL if <largest> node could not be found.
1152 */
1153static inline struct eb64_node *qc_ackrng_pkts(struct eb_root *pkts, unsigned int *pkt_flags,
1154 struct list *newly_acked_pkts,
1155 struct eb64_node *largest_node,
1156 uint64_t largest, uint64_t smallest,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001157 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001158{
1159 struct eb64_node *node;
1160 struct quic_tx_packet *pkt;
1161
1162 if (largest_node)
1163 node = largest_node;
1164 else {
1165 node = eb64_lookup(pkts, largest);
1166 while (!node && largest > smallest) {
1167 node = eb64_lookup(pkts, --largest);
1168 }
1169 }
1170
1171 while (node && node->key >= smallest) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001172 struct quic_frame *frm, *frmbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001173
1174 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
1175 *pkt_flags |= pkt->flags;
Willy Tarreau2b718102021-04-21 07:32:39 +02001176 LIST_INSERT(newly_acked_pkts, &pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001177 TRACE_PROTO("Removing packet #", QUIC_EV_CONN_PRSAFRM, ctx->conn,, &pkt->pn_node.key);
1178 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
1179 qc_treat_acked_tx_frm(frm, ctx);
1180 node = eb64_prev(node);
1181 eb64_delete(&pkt->pn_node);
1182 }
1183
1184 return node;
1185}
1186
 1187/* Treat <frm> frame, the packet it is attached to having just been detected as not
 1188 * acknowledged.
1189 */
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001190static inline void qc_treat_nacked_tx_frm(struct quic_frame *frm,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001191 struct quic_pktns *pktns,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001192 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001193{
1194 TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, ctx->conn, frm);
Willy Tarreau2b718102021-04-21 07:32:39 +02001195 LIST_DELETE(&frm->list);
Frédéric Lécaillec88df072021-07-27 11:43:11 +02001196 MT_LIST_INSERT(&pktns->tx.frms, &frm->mt_list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001197}
1198
1199
1200/* Free the TX packets of <pkts> list */
1201static inline void free_quic_tx_pkts(struct list *pkts)
1202{
1203 struct quic_tx_packet *pkt, *tmp;
1204
1205 list_for_each_entry_safe(pkt, tmp, pkts, list) {
Willy Tarreau2b718102021-04-21 07:32:39 +02001206 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001207 eb64_delete(&pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001208 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001209 }
1210}
1211
 1212/* Send a packet loss event notification to the congestion controller
1213 * attached to <qc> connection with <lost_bytes> the number of lost bytes,
1214 * <oldest_lost>, <newest_lost> the oldest lost packet and newest lost packet
1215 * at <now_us> current time.
1216 * Always succeeds.
1217 */
1218static inline void qc_cc_loss_event(struct quic_conn *qc,
1219 unsigned int lost_bytes,
1220 unsigned int newest_time_sent,
1221 unsigned int period,
1222 unsigned int now_us)
1223{
1224 struct quic_cc_event ev = {
1225 .type = QUIC_CC_EVT_LOSS,
1226 .loss.now_ms = now_ms,
1227 .loss.max_ack_delay = qc->max_ack_delay,
1228 .loss.lost_bytes = lost_bytes,
1229 .loss.newest_time_sent = newest_time_sent,
1230 .loss.period = period,
1231 };
1232
1233 quic_cc_event(&qc->path->cc, &ev);
1234}
1235
 1236/* Send a packet ack event notification for each newly acked packet of
1237 * <newly_acked_pkts> list and free them.
1238 * Always succeeds.
1239 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001240static inline void qc_treat_newly_acked_pkts(struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001241 struct list *newly_acked_pkts)
1242{
1243 struct quic_conn *qc = ctx->conn->qc;
1244 struct quic_tx_packet *pkt, *tmp;
1245 struct quic_cc_event ev = { .type = QUIC_CC_EVT_ACK, };
1246
1247 list_for_each_entry_safe(pkt, tmp, newly_acked_pkts, list) {
1248 pkt->pktns->tx.in_flight -= pkt->in_flight_len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01001249 qc->path->prep_in_flight -= pkt->in_flight_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001250 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01001251 qc->path->ifae_pkts--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001252 ev.ack.acked = pkt->in_flight_len;
1253 ev.ack.time_sent = pkt->time_sent;
1254 quic_cc_event(&qc->path->cc, &ev);
Willy Tarreau2b718102021-04-21 07:32:39 +02001255 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001256 eb64_delete(&pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001257 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001258 }
1259
1260}
1261
 1262/* Handle <pkts> list of lost packets detected at <now_us>, handling
 1263 * their TX frames.
 1264 * Send a packet loss event to the congestion controller if
 1265 * in-flight packets have been lost.
 1266 * Also frees the packets in <pkts> list.
1267 * Never fails.
1268 */
1269static inline void qc_release_lost_pkts(struct quic_pktns *pktns,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001270 struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001271 struct list *pkts,
1272 uint64_t now_us)
1273{
1274 struct quic_conn *qc = ctx->conn->qc;
1275 struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001276 struct quic_frame *frm, *frmbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001277 uint64_t lost_bytes;
1278
1279 lost_bytes = 0;
1280 oldest_lost = newest_lost = NULL;
1281 list_for_each_entry_safe(pkt, tmp, pkts, list) {
1282 lost_bytes += pkt->in_flight_len;
1283 pkt->pktns->tx.in_flight -= pkt->in_flight_len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01001284 qc->path->prep_in_flight -= pkt->in_flight_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001285 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01001286 qc->path->ifae_pkts--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001287 /* Treat the frames of this lost packet. */
1288 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
1289 qc_treat_nacked_tx_frm(frm, pktns, ctx);
Willy Tarreau2b718102021-04-21 07:32:39 +02001290 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001291 if (!oldest_lost) {
1292 oldest_lost = newest_lost = pkt;
1293 }
1294 else {
1295 if (newest_lost != oldest_lost)
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001296 quic_tx_packet_refdec(newest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001297 newest_lost = pkt;
1298 }
1299 }
1300
1301 if (lost_bytes) {
 1302 /* Send a packet loss event to the congestion controller. */
1303 qc_cc_loss_event(ctx->conn->qc, lost_bytes, newest_lost->time_sent,
1304 newest_lost->time_sent - oldest_lost->time_sent, now_us);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001305 quic_tx_packet_refdec(oldest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001306 if (newest_lost != oldest_lost)
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001307 quic_tx_packet_refdec(newest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001308 }
1309}
1310
 1311/* Look for packet loss among the sent packets of <pktns> packet number space
 1312 * of <qc> connection. Packets deemed lost are removed from their tree and
 1313 * appended to the <lost_pkts> list; for the others, the <loss_time> value of
 1314 * the packet number space is updated.
 1315 * Should be called after having received an ACK frame with newly acknowledged
 1316 * packets or when the loss detection timer has expired.
1317 * Always succeeds.
1318 */
1319static void qc_packet_loss_lookup(struct quic_pktns *pktns,
1320 struct quic_conn *qc,
1321 struct list *lost_pkts)
1322{
1323 struct eb_root *pkts;
1324 struct eb64_node *node;
1325 struct quic_loss *ql;
1326 unsigned int loss_delay;
1327
1328 TRACE_ENTER(QUIC_EV_CONN_PKTLOSS, qc->conn, pktns);
1329 pkts = &pktns->tx.pkts;
1330 pktns->tx.loss_time = TICK_ETERNITY;
1331 if (eb_is_empty(pkts))
1332 goto out;
1333
1334 ql = &qc->path->loss;
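	/* Time threshold loss detection (RFC 9002, section 6.1.2): the loss delay
	 * is 9/8 of max(latest_rtt, smoothed_rtt), floored at the timer
	 * granularity. ql->srtt appears to be stored left-shifted by 3, hence the
	 * '>> 3' below; this is an assumption consistent with the trace code above.
	 */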
1335 loss_delay = QUIC_MAX(ql->latest_rtt, ql->srtt >> 3);
1336 loss_delay += loss_delay >> 3;
1337 loss_delay = QUIC_MAX(loss_delay, MS_TO_TICKS(QUIC_TIMER_GRANULARITY));
1338
1339 node = eb64_first(pkts);
1340 while (node) {
1341 struct quic_tx_packet *pkt;
1342 int64_t largest_acked_pn;
1343 unsigned int loss_time_limit, time_sent;
1344
1345 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
1346 largest_acked_pn = pktns->tx.largest_acked_pn;
1347 node = eb64_next(node);
1348 if ((int64_t)pkt->pn_node.key > largest_acked_pn)
1349 break;
1350
1351 time_sent = pkt->time_sent;
1352 loss_time_limit = tick_add(time_sent, loss_delay);
1353 if (tick_is_le(time_sent, now_ms) ||
1354 (int64_t)largest_acked_pn >= pkt->pn_node.key + QUIC_LOSS_PACKET_THRESHOLD) {
1355 eb64_delete(&pkt->pn_node);
Willy Tarreau2b718102021-04-21 07:32:39 +02001356 LIST_APPEND(lost_pkts, &pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001357 }
1358 else {
1359 pktns->tx.loss_time = tick_first(pktns->tx.loss_time, loss_time_limit);
1360 }
1361 }
1362
1363 out:
1364 TRACE_LEAVE(QUIC_EV_CONN_PKTLOSS, qc->conn, pktns, lost_pkts);
1365}
1366
1367/* Parse ACK frame into <frm> from a buffer at <buf> address with <end> being at
1368 * one byte past the end of this buffer. Also update <rtt_sample> if needed, i.e.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05001369 * if the largest acked packet was newly acked and if there was at least one newly
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001370 * acked ack-eliciting packet.
1371 * Return 1, if succeeded, 0 if not.
1372 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001373static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001374 struct quic_enc_level *qel,
1375 unsigned int *rtt_sample,
1376 const unsigned char **pos, const unsigned char *end)
1377{
1378 struct quic_ack *ack = &frm->ack;
1379 uint64_t smallest, largest;
1380 struct eb_root *pkts;
1381 struct eb64_node *largest_node;
1382 unsigned int time_sent, pkt_flags;
1383 struct list newly_acked_pkts = LIST_HEAD_INIT(newly_acked_pkts);
1384 struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
1385
1386 if (ack->largest_ack > qel->pktns->tx.next_pn) {
1387 TRACE_DEVEL("ACK for not sent packet", QUIC_EV_CONN_PRSAFRM,
1388 ctx->conn,, &ack->largest_ack);
1389 goto err;
1390 }
1391
1392 if (ack->first_ack_range > ack->largest_ack) {
1393 TRACE_DEVEL("too big first ACK range", QUIC_EV_CONN_PRSAFRM,
1394 ctx->conn,, &ack->first_ack_range);
1395 goto err;
1396 }
1397
1398 largest = ack->largest_ack;
1399 smallest = largest - ack->first_ack_range;
1400 pkts = &qel->pktns->tx.pkts;
1401 pkt_flags = 0;
1402 largest_node = NULL;
1403 time_sent = 0;
1404
1405 if ((int64_t)ack->largest_ack > qel->pktns->tx.largest_acked_pn) {
1406 largest_node = eb64_lookup(pkts, largest);
1407 if (!largest_node) {
1408 TRACE_DEVEL("Largest acked packet not found",
1409 QUIC_EV_CONN_PRSAFRM, ctx->conn);
1410 goto err;
1411 }
1412
1413 time_sent = eb64_entry(&largest_node->node,
1414 struct quic_tx_packet, pn_node)->time_sent;
1415 }
1416
1417 TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
1418 ctx->conn,, &largest, &smallest);
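 /* Walk the ACK ranges in descending packet number order. After the first
  * range, each additional range is encoded as a Gap and an ACK Range length
  * (RFC 9000 19.3.1): the largest packet number of the next range is
  * <smallest> - gap - 2.
  */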
1419 do {
1420 uint64_t gap, ack_range;
1421
1422 qc_ackrng_pkts(pkts, &pkt_flags, &newly_acked_pkts,
1423 largest_node, largest, smallest, ctx);
1424 if (!ack->ack_range_num--)
1425 break;
1426
1427 if (!quic_dec_int(&gap, pos, end))
1428 goto err;
1429
1430 if (smallest < gap + 2) {
1431 TRACE_DEVEL("wrong gap value", QUIC_EV_CONN_PRSAFRM,
1432 ctx->conn,, &gap, &smallest);
1433 goto err;
1434 }
1435
1436 largest = smallest - gap - 2;
1437 if (!quic_dec_int(&ack_range, pos, end))
1438 goto err;
1439
1440 if (largest < ack_range) {
1441 TRACE_DEVEL("wrong ack range value", QUIC_EV_CONN_PRSAFRM,
1442 ctx->conn,, &largest, &ack_range);
1443 goto err;
1444 }
1445
1446 /* Do not use this node anymore. */
1447 largest_node = NULL;
1448 /* Next range */
1449 smallest = largest - ack_range;
1450
1451 TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
1452 ctx->conn,, &largest, &smallest);
1453 } while (1);
1454
1455 /* Flag this packet number space as having received an ACK. */
1456 qel->pktns->flags |= QUIC_FL_PKTNS_ACK_RECEIVED;
1457
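 /* An RTT sample is taken from the send time of the largest newly acknowledged
  * packet, provided at least one newly acknowledged packet was ack-eliciting.
  * The peer ACK delay is taken into account later by the caller
  * (see qc_parse_pkt_frms()).
  */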
1458 if (time_sent && (pkt_flags & QUIC_FL_TX_PACKET_ACK_ELICITING)) {
1459 *rtt_sample = tick_remain(time_sent, now_ms);
1460 qel->pktns->tx.largest_acked_pn = ack->largest_ack;
1461 }
1462
1463 if (!LIST_ISEMPTY(&newly_acked_pkts)) {
1464 if (!eb_is_empty(&qel->pktns->tx.pkts)) {
1465 qc_packet_loss_lookup(qel->pktns, ctx->conn->qc, &lost_pkts);
1466 if (!LIST_ISEMPTY(&lost_pkts))
1467 qc_release_lost_pkts(qel->pktns, ctx, &lost_pkts, now_ms);
1468 }
1469 qc_treat_newly_acked_pkts(ctx, &newly_acked_pkts);
1470 if (quic_peer_validated_addr(ctx))
1471 ctx->conn->qc->path->loss.pto_count = 0;
1472 qc_set_timer(ctx);
1473 }
1474
1475
1476 return 1;
1477
1478 err:
1479 free_quic_tx_pkts(&newly_acked_pkts);
1480 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSAFRM, ctx->conn);
1481 return 0;
1482}
1483
1484/* Provide CRYPTO data to the TLS stack found at <data> with <len> as length
1485 * from <el> encryption level with <ctx> as connection I/O handler context.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05001486 * Remaining parameters are there for debugging purposes.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001487 * Return 1 if succeeded, 0 if not.
1488 */
1489static inline int qc_provide_cdata(struct quic_enc_level *el,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001490 struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001491 const unsigned char *data, size_t len,
1492 struct quic_rx_packet *pkt,
1493 struct quic_rx_crypto_frm *cf)
1494{
1495 int ssl_err;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001496 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001497
1498 TRACE_ENTER(QUIC_EV_CONN_SSLDATA, ctx->conn);
1499 ssl_err = SSL_ERROR_NONE;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001500 qc = ctx->conn->qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001501 if (SSL_provide_quic_data(ctx->ssl, el->level, data, len) != 1) {
1502 TRACE_PROTO("SSL_provide_quic_data() error",
1503 QUIC_EV_CONN_SSLDATA, ctx->conn, pkt, cf, ctx->ssl);
1504 goto err;
1505 }
1506
1507 el->rx.crypto.offset += len;
1508 TRACE_PROTO("in order CRYPTO data",
1509 QUIC_EV_CONN_SSLDATA, ctx->conn,, cf, ctx->ssl);
1510
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001511 if (qc->state < QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001512 ssl_err = SSL_do_handshake(ctx->ssl);
1513 if (ssl_err != 1) {
1514 ssl_err = SSL_get_error(ctx->ssl, ssl_err);
1515 if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
1516 TRACE_PROTO("SSL handshake",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001517 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001518 goto out;
1519 }
1520
1521 TRACE_DEVEL("SSL handshake error",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001522 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001523 goto err;
1524 }
1525
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001526 TRACE_PROTO("SSL handshake OK", QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state);
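 /* On the listener (server) side, completing the handshake also confirms it.
  * A client only reaches the COMPLETE state here and waits for a
  * HANDSHAKE_DONE frame to switch to CONFIRMED.
  */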
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001527 if (objt_listener(ctx->conn->target))
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001528 qc->state = QUIC_HS_ST_CONFIRMED;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001529 else
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001530 qc->state = QUIC_HS_ST_COMPLETE;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001531 } else {
1532 ssl_err = SSL_process_quic_post_handshake(ctx->ssl);
1533 if (ssl_err != 1) {
1534 ssl_err = SSL_get_error(ctx->ssl, ssl_err);
1535 if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
1536 TRACE_DEVEL("SSL post handshake",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001537 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001538 goto out;
1539 }
1540
1541 TRACE_DEVEL("SSL post handshake error",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001542 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001543 goto err;
1544 }
1545
1546 TRACE_PROTO("SSL post handshake succeeded",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001547 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001548 }
1549
1550 out:
1551 TRACE_LEAVE(QUIC_EV_CONN_SSLDATA, ctx->conn);
1552 return 1;
1553
1554 err:
1555 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_SSLDATA, ctx->conn);
1556 return 0;
1557}
1558
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001559/* Allocate a new STREAM RX frame from <stream_frm> STREAM frame attached to
1560 * <pkt> RX packet.
1561 * Return it if succeeded, NULL if not.
1562 */
1563static inline
1564struct quic_rx_strm_frm *new_quic_rx_strm_frm(struct quic_stream *stream_frm,
1565 struct quic_rx_packet *pkt)
1566{
1567 struct quic_rx_strm_frm *frm;
1568
1569 frm = pool_alloc(pool_head_quic_rx_strm_frm);
1570 if (frm) {
1571 frm->offset_node.key = stream_frm->offset;
1572 frm->len = stream_frm->len;
1573 frm->data = stream_frm->data;
1574 frm->pkt = pkt;
1575 }
1576
1577 return frm;
1578}
1579
1580/* Retrieve as an ebtree node the stream with <id> as ID, possibly allocates
1581 * several streams, depending on the already open onces.
1582 * Return this node if succeeded, NULL if not.
1583 */
1584static struct eb64_node *qcc_get_qcs(struct qcc *qcc, uint64_t id)
1585{
1586 unsigned int strm_type;
1587 int64_t sub_id;
1588 struct eb64_node *strm_node;
1589
1590 TRACE_ENTER(QUIC_EV_CONN_PSTRM, qcc->conn);
1591
1592 strm_type = id & QCS_ID_TYPE_MASK;
1593 sub_id = id >> QCS_ID_TYPE_SHIFT;
1594 strm_node = NULL;
1595 if (qc_local_stream_id(qcc, id)) {
1596 /* Local streams: this stream must be already opened. */
1597 strm_node = eb64_lookup(&qcc->streams_by_id, id);
1598 if (!strm_node) {
1599 TRACE_PROTO("Unknown stream ID", QUIC_EV_CONN_PSTRM, qcc->conn);
1600 goto out;
1601 }
1602 }
1603 else {
1604 /* Remote streams. */
1605 struct eb_root *strms;
1606 uint64_t largest_id;
1607 enum qcs_type qcs_type;
1608
1609 strms = &qcc->streams_by_id;
1610 qcs_type = qcs_id_type(id);
1611 if (sub_id + 1 > qcc->strms[qcs_type].max_streams) {
1612 TRACE_PROTO("Streams limit reached", QUIC_EV_CONN_PSTRM, qcc->conn);
1613 goto out;
1614 }
1615
1616 /* Note: ->largest_id was initialized with (uint64_t)-1 as value, 0 being a
1617 * correct value.
1618 */
1619 largest_id = qcc->strms[qcs_type].largest_id;
1620 if (sub_id > (int64_t)largest_id) {
1621 /* RFC: "A stream ID that is used out of order results in all streams
1622 * of that type with lower-numbered stream IDs also being opened".
1623 * So, let's "open" these streams.
1624 */
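 /* For example, if <largest_id> is 1 and a frame for sub-ID 3 arrives,
  * the loop below creates the streams with sub-IDs 2 and 3 of this type.
  */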
1625 int64_t i;
1626 struct qcs *qcs;
1627
1628 qcs = NULL;
1629 for (i = largest_id + 1; i <= sub_id; i++) {
1630 qcs = qcs_new(qcc, (i << QCS_ID_TYPE_SHIFT) | strm_type);
1631 if (!qcs) {
1632 TRACE_PROTO("Could not allocate a new stream",
1633 QUIC_EV_CONN_PSTRM, qcc->conn);
1634 goto out;
1635 }
1636
1637 qcc->strms[qcs_type].largest_id = i;
1638 }
1639 if (qcs)
1640 strm_node = &qcs->by_id;
1641 }
1642 else {
1643 strm_node = eb64_lookup(strms, id);
1644 }
1645 }
1646
1647 TRACE_LEAVE(QUIC_EV_CONN_PSTRM, qcc->conn);
1648 return strm_node;
1649
1650 out:
1651 TRACE_LEAVE(QUIC_EV_CONN_PSTRM, qcc->conn);
1652 return NULL;
1653}
1654
1655/* Copy as much STREAM data as possible from <strm_frm> into <buf> buffer.
1656 * Returns the number of bytes copied. Also updates <strm_frm> frame
1657 * to reflect the data which have been consumed.
1658 */
1659static size_t qc_strm_cpy(struct buffer *buf, struct quic_stream *strm_frm)
1660{
1661 size_t ret;
1662
1663 ret = 0;
1664 while (strm_frm->len) {
1665 size_t try;
1666
1667 try = b_contig_space(buf);
1668 if (!try)
1669 break;
1670
1671 if (try > strm_frm->len)
1672 try = strm_frm->len;
1673 memcpy(b_tail(buf), strm_frm->data, try);
1674 strm_frm->len -= try;
1675 strm_frm->offset += try;
1676 b_add(buf, try);
1677 ret += try;
1678 }
1679
1680 return ret;
1681}
1682
1683/* Handle <strm_frm> bidirectional STREAM frame. Depending on its ID, several
1684 * streams may be open. The data are copied to the stream RX buffer if possible.
1685 * If not, the STREAM frame is stored to be treated again later.
1686 * We rely on the flow control in order not to store too many STREAM frames.
1687 * Return 1 if succeeded, 0 if not.
1688 */
1689static int qc_handle_bidi_strm_frm(struct quic_rx_packet *pkt,
1690 struct quic_stream *strm_frm,
1691 struct quic_conn *qc)
1692{
1693 struct qcs *strm;
1694 struct eb64_node *strm_node, *frm_node;
1695 struct quic_rx_strm_frm *frm;
1696
1697 strm_node = qcc_get_qcs(qc->qcc, strm_frm->id);
1698 if (!strm_node) {
1699 TRACE_PROTO("Stream not found", QUIC_EV_CONN_PSTRM, qc->conn);
1700 return 0;
1701 }
1702
1703 strm = eb64_entry(&strm_node->node, struct qcs, by_id);
1704 frm_node = eb64_lookup(&strm->frms, strm_frm->offset);
1705 /* FIXME: handle the case where this frame overlap others */
1706 if (frm_node) {
1707 TRACE_PROTO("Already existing stream data",
1708 QUIC_EV_CONN_PSTRM, qc->conn);
1709 goto out;
1710 }
1711
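 /* Data received at the expected offset are copied into the stream RX buffer
  * and passed to the application layer right away. Out of order data fall
  * through and are stored below, keyed by offset, until the gap is filled.
  */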
1712 if (strm_frm->offset == strm->rx.offset) {
1713 int ret;
1714
1715 if (!qc_get_buf(qc->qcc, &strm->rx.buf))
1716 goto store_frm;
1717
1718 ret = qc_strm_cpy(&strm->rx.buf, strm_frm);
1719 if (ret && qc->qcc->app_ops->decode_qcs(strm, qc->qcc->ctx) == -1) {
1720 TRACE_PROTO("Decoding error", QUIC_EV_CONN_PSTRM);
1721 return 0;
1722 }
1723
1724 strm->rx.offset += ret;
1725 }
1726
1727 if (!strm_frm->len)
1728 goto out;
1729
1730 store_frm:
1731 frm = new_quic_rx_strm_frm(strm_frm, pkt);
1732 if (!frm) {
1733 TRACE_PROTO("Could not alloc RX STREAM frame",
1734 QUIC_EV_CONN_PSTRM, qc->conn);
1735 return 0;
1736 }
1737
1738 eb64_insert(&strm->frms, &frm->offset_node);
1739 quic_rx_packet_refinc(pkt);
1740
1741 out:
1742 return 1;
1743}
1744
1745/* Handle <strm_frm> unidirectional STREAM frame. Depending on its ID, several
1746 * streams may be open. The data are copied to the stream RX buffer if possible.
1747 * If not, the STREAM frame is stored to be treated again later.
1748 * We rely on the flow control in order not to store too many STREAM frames.
1749 * Return 1 if succeeded, 0 if not.
1750 */
1751static int qc_handle_uni_strm_frm(struct quic_rx_packet *pkt,
1752 struct quic_stream *strm_frm,
1753 struct quic_conn *qc)
1754{
1755 struct qcs *strm;
1756 struct eb64_node *strm_node, *frm_node;
1757 struct quic_rx_strm_frm *frm;
1758 size_t strm_frm_len;
1759
1760 strm_node = qcc_get_qcs(qc->qcc, strm_frm->id);
1761 if (!strm_node) {
1762 TRACE_PROTO("Stream not found", QUIC_EV_CONN_PSTRM, qc->conn);
1763 return 0;
1764 }
1765
1766 strm = eb64_entry(&strm_node->node, struct qcs, by_id);
1767 frm_node = eb64_lookup(&strm->frms, strm_frm->offset);
1768 /* FIXME: handle the case where this frame overlap others */
1769 if (frm_node) {
1770 TRACE_PROTO("Already existing stream data",
1771 QUIC_EV_CONN_PSTRM, qc->conn);
1772 goto out;
1773 }
1774
1775 strm_frm_len = strm_frm->len;
1776 if (strm_frm->offset == strm->rx.offset) {
1777 int ret;
1778
1779 if (!qc_get_buf(qc->qcc, &strm->rx.buf))
1780 goto store_frm;
1781
1782 /* qc_strm_cpy() will modify the offset, depending on the number
1783 * of bytes copied.
1784 */
1785 ret = qc_strm_cpy(&strm->rx.buf, strm_frm);
1786 /* Inform the application of the arrival of this new stream */
1787 if (!strm->rx.offset && !qc->qcc->app_ops->attach_ruqs(strm, qc->qcc->ctx)) {
1788 TRACE_PROTO("Could not set an uni-stream", QUIC_EV_CONN_PSTRM, qc->conn);
1789 return 0;
1790 }
1791
1792 if (ret)
1793 ruqs_notify_recv(strm);
1794
1795 strm_frm->offset += ret;
1796 }
1797 /* Take this frame into an account for the stream flow control */
1798 strm->rx.offset += strm_frm_len;
1799 /* If all the data were provided to the application, there is no need to
1800 * store any more information about it.
1801 */
1802 if (!strm_frm->len)
1803 goto out;
1804
1805 store_frm:
1806 frm = new_quic_rx_strm_frm(strm_frm, pkt);
1807 if (!frm) {
1808 TRACE_PROTO("Could not alloc RX STREAM frame",
1809 QUIC_EV_CONN_PSTRM, qc->conn);
1810 return 0;
1811 }
1812
1813 eb64_insert(&strm->frms, &frm->offset_node);
1814 quic_rx_packet_refinc(pkt);
1815
1816 out:
1817 return 1;
1818}
1819
1820static inline int qc_handle_strm_frm(struct quic_rx_packet *pkt,
1821 struct quic_stream *strm_frm,
1822 struct quic_conn *qc)
1823{
1824 if (strm_frm->id & QCS_ID_DIR_BIT)
1825 return qc_handle_uni_strm_frm(pkt, strm_frm, qc);
1826 else
1827 return qc_handle_bidi_strm_frm(pkt, strm_frm, qc);
1828}
1829
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001830/* Parse all the frames of <pkt> QUIC packet for QUIC connection with <ctx>
1831 * as I/O handler context and <qel> as encryption level.
1832 * Returns 1 if succeeded, 0 if failed.
1833 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001834static int qc_parse_pkt_frms(struct quic_rx_packet *pkt, struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001835 struct quic_enc_level *qel)
1836{
1837 struct quic_frame frm;
1838 const unsigned char *pos, *end;
1839 struct quic_conn *conn = ctx->conn->qc;
1840
1841 TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, ctx->conn);
1842 /* Skip the AAD */
1843 pos = pkt->data + pkt->aad_len;
1844 end = pkt->data + pkt->len;
1845
1846 while (pos < end) {
1847 if (!qc_parse_frm(&frm, pkt, &pos, end, conn))
1848 goto err;
1849
1850 switch (frm.type) {
Frédéric Lécaille0c140202020-12-09 15:56:48 +01001851 case QUIC_FT_PADDING:
1852 if (pos != end) {
1853 TRACE_DEVEL("wrong frame", QUIC_EV_CONN_PRSHPKT, ctx->conn, pkt);
1854 goto err;
1855 }
1856 break;
1857 case QUIC_FT_PING:
1858 break;
1859 case QUIC_FT_ACK:
1860 {
1861 unsigned int rtt_sample;
1862
1863 rtt_sample = 0;
1864 if (!qc_parse_ack_frm(&frm, ctx, qel, &rtt_sample, &pos, end))
1865 goto err;
1866
1867 if (rtt_sample) {
1868 unsigned int ack_delay;
1869
1870 ack_delay = !quic_application_pktns(qel->pktns, conn) ? 0 :
1871 MS_TO_TICKS(QUIC_MIN(quic_ack_delay_ms(&frm.ack, conn), conn->max_ack_delay));
1872 quic_loss_srtt_update(&conn->path->loss, rtt_sample, ack_delay, conn);
1873 }
Frédéric Lécaille0c140202020-12-09 15:56:48 +01001874 break;
1875 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001876 case QUIC_FT_CRYPTO:
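 /* Out of order CRYPTO data are buffered into the <qel> tree keyed by offset;
  * in order data are provided to the TLS stack as they arrive.
  */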
1877 if (frm.crypto.offset != qel->rx.crypto.offset) {
1878 struct quic_rx_crypto_frm *cf;
1879
1880 cf = pool_alloc(pool_head_quic_rx_crypto_frm);
1881 if (!cf) {
1882 TRACE_DEVEL("CRYPTO frame allocation failed",
1883 QUIC_EV_CONN_PRSHPKT, ctx->conn);
1884 goto err;
1885 }
1886
1887 cf->offset_node.key = frm.crypto.offset;
1888 cf->len = frm.crypto.len;
1889 cf->data = frm.crypto.data;
1890 cf->pkt = pkt;
1891 eb64_insert(&qel->rx.crypto.frms, &cf->offset_node);
1892 quic_rx_packet_refinc(pkt);
1893 }
1894 else {
1895 /* XXX TO DO: <cf> is used only for the traces. */
1896 struct quic_rx_crypto_frm cf = { };
1897
1898 cf.offset_node.key = frm.crypto.offset;
1899 cf.len = frm.crypto.len;
1900 if (!qc_provide_cdata(qel, ctx,
1901 frm.crypto.data, frm.crypto.len,
1902 pkt, &cf))
1903 goto err;
1904 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001905 break;
Frédéric Lécaille0c140202020-12-09 15:56:48 +01001906 case QUIC_FT_STREAM_8:
1907 case QUIC_FT_STREAM_9:
1908 case QUIC_FT_STREAM_A:
1909 case QUIC_FT_STREAM_B:
1910 case QUIC_FT_STREAM_C:
1911 case QUIC_FT_STREAM_D:
1912 case QUIC_FT_STREAM_E:
1913 case QUIC_FT_STREAM_F:
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +01001914 {
1915 struct quic_stream *stream = &frm.stream;
1916
1917 TRACE_PROTO("STREAM frame", QUIC_EV_CONN_PSTRM, ctx->conn, &frm);
1918 if (objt_listener(ctx->conn->target)) {
1919 if (stream->id & QUIC_STREAM_FRAME_ID_INITIATOR_BIT)
1920 goto err;
1921 } else if (!(stream->id & QUIC_STREAM_FRAME_ID_INITIATOR_BIT))
1922 goto err;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001923
1924 if (!qc_handle_strm_frm(pkt, stream, ctx->conn->qc))
1925 goto err;
1926
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +01001927 break;
1928 }
Frédéric Lécaille0c140202020-12-09 15:56:48 +01001929 case QUIC_FT_NEW_CONNECTION_ID:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001930 break;
1931 case QUIC_FT_CONNECTION_CLOSE:
1932 case QUIC_FT_CONNECTION_CLOSE_APP:
1933 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001934 case QUIC_FT_HANDSHAKE_DONE:
1935 if (objt_listener(ctx->conn->target))
1936 goto err;
1937
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001938 conn->state = QUIC_HS_ST_CONFIRMED;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001939 break;
1940 default:
1941 goto err;
1942 }
1943 }
1944
1945 /* The server must switch from INITIAL to HANDSHAKE handshake state when it
1946 * has successfully parsed a Handshake packet. The Initial encryption keys must also
1947 * be discarded.
1948 */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001949 if (conn->state == QUIC_HS_ST_SERVER_INITIAL &&
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001950 pkt->type == QUIC_PACKET_TYPE_HANDSHAKE) {
1951 quic_tls_discard_keys(&conn->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
1952 quic_pktns_discard(conn->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, conn);
1953 qc_set_timer(ctx);
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001954 conn->state = QUIC_HS_ST_SERVER_HANDSHAKE;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001955 }
1956
1957 TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, ctx->conn);
1958 return 1;
1959
1960 err:
1961 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSHPKT, ctx->conn);
1962 return 0;
1963}
1964
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001965/* Write <dglen> datagram length and <pkt> first packet address into <cbuf> ring
1966 * buffer. It is the responsibility of the caller to check there is enough
1967 * room in <cbuf>. Also increases the <cbuf> write index accordingly.
1968 * This function must be called only after having built a correct datagram.
1969 * Always succeeds.
1970 */
1971static inline void qc_set_dg(struct cbuf *cbuf,
1972 uint16_t dglen, struct quic_tx_packet *pkt)
1973{
1974 write_u16(cb_wr(cbuf), dglen);
1975 write_ptr(cb_wr(cbuf) + sizeof dglen, pkt);
1976 cb_add(cbuf, dglen + sizeof dglen + sizeof pkt);
1977}
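
/* Layout of a prepared datagram entry inside the ring buffer, as written by
 * qc_set_dg() and consumed by qc_send_ppkts():
 *
 *   +-------------+------------------+-------------------------+
 *   | dglen (u16) | first packet ptr | dglen bytes of packets  |
 *   +-------------+------------------+-------------------------+
 *
 * qc_prep_hdshk_pkts() below reserves the header, builds the coalesced
 * packets just after it, then calls qc_set_dg(); qc_send_ppkts() reads the
 * entries back in the same order.
 */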
1978
1979/* Prepare as many handshake packets as possible into <qr> ring buffer for
1980 * the QUIC connection with <ctx> as I/O handler context, possibly concatenating
1981 * several packets in the same datagram. A header made of two fields is added
1982 * to each datagram: the datagram length followed by the address of the first
1983 * packet in this datagram.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001984 * Returns the total length of the prepared packets (possibly 0), or -1 if something wrong happened.
1985 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001986static int qc_prep_hdshk_pkts(struct qring *qr, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001987{
1988 struct quic_conn *qc;
1989 enum quic_tls_enc_level tel, next_tel;
1990 struct quic_enc_level *qel;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001991 struct cbuf *cbuf;
1992 unsigned char *end_buf, *end, *pos, *spos;
1993 struct quic_tx_packet *first_pkt, *cur_pkt, *prv_pkt;
1994 /* length of datagrams */
1995 uint16_t dglen;
1996 size_t total;
1997 /* Each datagram is prepended with its length followed by the
1998 * address of the first packet in the datagram.
1999 */
2000 size_t dg_headlen = sizeof dglen + sizeof first_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002001
2002 TRACE_ENTER(QUIC_EV_CONN_PHPKTS, ctx->conn);
2003 qc = ctx->conn->qc;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002004 if (!quic_get_tls_enc_levels(&tel, &next_tel, qc->state)) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002005 TRACE_DEVEL("unknown enc. levels", QUIC_EV_CONN_PHPKTS, ctx->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002006 goto err;
2007 }
2008
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002009 start:
2010 total = 0;
2011 dglen = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002012 qel = &qc->els[tel];
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002013 cbuf = qr->cbuf;
2014 spos = pos = cb_wr(cbuf);
2015 /* Leave at least <dglen> bytes at the end of this buffer
2016 * to ensure there is enough room to mark the end of prepared
2017 * contiguous data with a zero length.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002018 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002019 end_buf = pos + cb_contig_space(cbuf) - sizeof dglen;
2020 first_pkt = prv_pkt = NULL;
2021 while (end_buf - pos >= (int)qc->path->mtu + dg_headlen || prv_pkt) {
2022 int err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002023 enum quic_pkt_type pkt_type;
2024
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +01002025 TRACE_POINT(QUIC_EV_CONN_PHPKTS, ctx->conn, qel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002026 /* Do not build any more packets if the TX secrets are not available, or
2027 * if there is nothing to send, i.e. if no ACK is required,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002028 * there are no more packets to send upon PTO expiration,
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002029 * and either there is no more CRYPTO data available or the in-flight
2030 * congestion control limit is reached for prepared data.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002031 */
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002032 if (!(qel->tls_ctx.tx.flags & QUIC_FL_TLS_SECRETS_SET) ||
2033 (!(qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) &&
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002034 !qc->tx.nb_pto_dgrams &&
Frédéric Lécaillec88df072021-07-27 11:43:11 +02002035 (MT_LIST_ISEMPTY(&qel->pktns->tx.frms) ||
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002036 qc->path->prep_in_flight >= qc->path->cwnd))) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002037 TRACE_DEVEL("nothing more to do", QUIC_EV_CONN_PHPKTS, ctx->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002038 /* Set the current datagram as prepared into <cbuf> if
2039 * there was already a correct packet previously written.
2040 */
2041 if (prv_pkt)
2042 qc_set_dg(cbuf, dglen, first_pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002043 break;
2044 }
2045
2046 pkt_type = quic_tls_level_pkt_type(tel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002047 if (!prv_pkt) {
2048 /* Leave room for the datagram header */
2049 pos += dg_headlen;
2050 end = pos + qc->path->mtu;
2051 }
2052
2053 cur_pkt = qc_build_hdshk_pkt(&pos, end, qc, pkt_type, qel, &err);
2054 switch (err) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002055 case -2:
2056 goto err;
2057 case -1:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002058 /* If there was already a correct packet present, set the
2059 * current datagram as prepared into <cbuf>.
2060 */
2061 if (prv_pkt) {
2062 qc_set_dg(cbuf, dglen, first_pkt);
2063 goto stop_build;
2064 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002065 goto out;
2066 default:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002067 total += cur_pkt->len;
2068 /* keep track of the first packet in the datagram */
2069 if (!first_pkt)
2070 first_pkt = cur_pkt;
2071 /* Attach the current one to the previous one */
2072 if (prv_pkt)
2073 prv_pkt->next = cur_pkt;
2074 /* Let's say we have to build a new dgram */
2075 prv_pkt = NULL;
2076 dglen += cur_pkt->len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002077 /* Discard the Initial encryption keys as soon as
2078 * a handshake packet could be built.
2079 */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002080 if (qc->state == QUIC_HS_ST_CLIENT_INITIAL &&
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002081 pkt_type == QUIC_PACKET_TYPE_HANDSHAKE) {
2082 quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
2083 quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, qc);
2084 qc_set_timer(ctx);
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002085 qc->state = QUIC_HS_ST_CLIENT_HANDSHAKE;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002086 }
2087 /* Special case for Initial packets: when they have all
2088 * been sent, select the next level.
2089 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002090 if (tel == QUIC_TLS_ENC_LEVEL_INITIAL &&
Frédéric Lécaillec88df072021-07-27 11:43:11 +02002091 (MT_LIST_ISEMPTY(&qel->pktns->tx.frms) || qc->els[next_tel].pktns->tx.in_flight)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002092 tel = next_tel;
2093 qel = &qc->els[tel];
Frédéric Lécaillec88df072021-07-27 11:43:11 +02002094 if (!MT_LIST_ISEMPTY(&qel->pktns->tx.frms)) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002095 /* If there is data for the next level, do not
2096 * consume a datagram. This is the case for a client
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002097 * which sends only one Initial packet, then waits
2098 * for additional CRYPTO data from the server to enter the
2099 * next level.
2100 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002101 prv_pkt = cur_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002102 }
2103 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002104 }
2105
2106 /* If we have to build a new datagram, set the current datagram as
2107 * prepared into <cbuf>.
2108 */
2109 if (!prv_pkt) {
2110 qc_set_dg(cbuf, dglen, first_pkt);
2111 first_pkt = NULL;
2112 dglen = 0;
2113 }
2114 }
2115
2116 stop_build:
2117 /* Reset <wr> writer index if in front of <rd> index */
2118 if (end_buf - pos < (int)qc->path->mtu + dg_headlen) {
2119 int rd = HA_ATOMIC_LOAD(&cbuf->rd);
2120
2121 TRACE_DEVEL("buffer full", QUIC_EV_CONN_PHPKTS, ctx->conn);
2122 if (cb_contig_space(cbuf) >= sizeof(uint16_t)) {
2123 if ((pos != spos && cbuf->wr > rd) || (pos == spos && rd <= cbuf->wr)) {
2124 /* Mark the end of contiguous data for the reader */
2125 write_u16(cb_wr(cbuf), 0);
2126 cb_add(cbuf, sizeof(uint16_t));
2127 }
2128 }
2129
2130 if (rd && rd <= cbuf->wr) {
2131 cb_wr_reset(cbuf);
2132 if (pos == spos) {
2133 /* Reuse the same buffer if nothing was built. */
2134 goto start;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002135 }
2136 }
2137 }
2138
2139 out:
2140 TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, ctx->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002141 return total;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002142
2143 err:
2144 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PHPKTS, ctx->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002145 return -1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002146}
2147
2148/* Send the QUIC packets which have been prepared for QUIC connections
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002149 * from <qr> ring buffer with <ctx> as I/O handler context.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002150 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002151int qc_send_ppkts(struct qring *qr, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002152{
2153 struct quic_conn *qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002154 struct cbuf *cbuf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002155
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002156 qc = ctx->conn->qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002157 cbuf = qr->cbuf;
2158 while (cb_contig_data(cbuf)) {
2159 unsigned char *pos;
2160 struct buffer tmpbuf = { };
2161 struct quic_tx_packet *first_pkt, *pkt, *next_pkt;
2162 uint16_t dglen;
2163 size_t headlen = sizeof dglen + sizeof first_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002164 unsigned int time_sent;
2165
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002166 pos = cb_rd(cbuf);
2167 dglen = read_u16(pos);
2168 /* End of prepared datagrams.
2169 * Reset the reader index only if in front of the writer index.
2170 */
2171 if (!dglen) {
2172 int wr = HA_ATOMIC_LOAD(&cbuf->wr);
2173
2174 if (wr && wr < cbuf->rd) {
2175 cb_rd_reset(cbuf);
2176 continue;
2177 }
2178 break;
2179 }
2180
2181 pos += sizeof dglen;
2182 first_pkt = read_ptr(pos);
2183 pos += sizeof first_pkt;
2184 tmpbuf.area = (char *)pos;
2185 tmpbuf.size = tmpbuf.data = dglen;
2186
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002187 TRACE_PROTO("to send", QUIC_EV_CONN_SPPKTS, ctx->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002188 for (pkt = first_pkt; pkt; pkt = pkt->next)
2189 quic_tx_packet_refinc(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002190 if (ctx->xprt->snd_buf(qc->conn, qc->conn->xprt_ctx,
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002191 &tmpbuf, tmpbuf.data, 0) <= 0) {
2192 for (pkt = first_pkt; pkt; pkt = pkt->next)
2193 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002194 break;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002195 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002196
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002197 cb_del(cbuf, dglen + headlen);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002198 qc->tx.bytes += tmpbuf.data;
2199 time_sent = now_ms;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002200
2201 for (pkt = first_pkt; pkt; pkt = next_pkt) {
2202 pkt->time_sent = time_sent;
2203 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING) {
2204 pkt->pktns->tx.time_of_last_eliciting = time_sent;
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01002205 qc->path->ifae_pkts++;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002206 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002207 qc->path->in_flight += pkt->in_flight_len;
2208 pkt->pktns->tx.in_flight += pkt->in_flight_len;
2209 if (pkt->in_flight_len)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002210 qc_set_timer(ctx);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002211 TRACE_PROTO("sent pkt", QUIC_EV_CONN_SPPKTS, ctx->conn, pkt);
2212 next_pkt = pkt->next;
Frédéric Lécaille0eb60c52021-07-19 14:48:36 +02002213 eb64_insert(&pkt->pktns->tx.pkts, &pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002214 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002215 }
2216 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002217
2218 return 1;
2219}
2220
2221/* Build all the frames which must be sent just after the handshake has succeeded.
2222 * This is essentially NEW_CONNECTION_ID frames. A QUIC server must also send
2223 * a HANDSHAKE_DONE frame.
2224 * Return 1 if succeeded, 0 if not.
2225 */
2226static int quic_build_post_handshake_frames(struct quic_conn *conn)
2227{
2228 int i;
2229 struct quic_frame *frm;
2230
2231 /* Only servers must send a HANDSHAKE_DONE frame. */
2232 if (!objt_server(conn->conn->target)) {
2233 frm = pool_alloc(pool_head_quic_frame);
Frédéric Lécaille153d4a82021-01-06 12:12:39 +01002234 if (!frm)
2235 return 0;
2236
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002237 frm->type = QUIC_FT_HANDSHAKE_DONE;
Willy Tarreau2b718102021-04-21 07:32:39 +02002238 LIST_APPEND(&conn->tx.frms_to_send, &frm->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002239 }
2240
Frédéric Lécaille5aa41432021-01-28 16:22:52 +01002241 for (i = 1; i < conn->tx.params.active_connection_id_limit; i++) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002242 struct quic_connection_id *cid;
2243
2244 frm = pool_alloc(pool_head_quic_frame);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002245 cid = new_quic_cid(&conn->cids, i);
2246 if (!frm || !cid)
2247 goto err;
2248
2249 quic_connection_id_to_frm_cpy(frm, cid);
Willy Tarreau2b718102021-04-21 07:32:39 +02002250 LIST_APPEND(&conn->tx.frms_to_send, &frm->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002251 }
2252
2253 return 1;
2254
2255 err:
2256 free_quic_conn_cids(conn);
2257 return 0;
2258}
2259
2260/* Deallocate all the ACK ranges attached to <arngs>. */
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002261void free_quic_arngs(struct quic_arngs *arngs)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002262{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002263 struct eb64_node *n;
2264 struct quic_arng_node *ar;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002265
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002266 n = eb64_first(&arngs->root);
2267 while (n) {
2268 struct eb64_node *next;
2269
2270 ar = eb64_entry(&n->node, struct quic_arng_node, first);
2271 next = eb64_next(n);
2272 eb64_delete(n);
2273 free(ar);
2274 n = next;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002275 }
2276}
2277
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002278/* Return the gap value between <p> and <q> ACK ranges where <q> follows <p> in
2279 * descending order.
2280 */
2281static inline size_t sack_gap(struct quic_arng_node *p,
2282 struct quic_arng_node *q)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002283{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002284 return p->first.key - q->last - 2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002285}
2286
2287
2288/* Remove the last elements of <arngs> tree of ACK ranges, updating its
2289 * encoded size until it goes below <limit>.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05002290 * Returns 1 if succeeded, 0 if not (no more elements to remove).
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002291 */
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002292static int quic_rm_last_ack_ranges(struct quic_arngs *arngs, size_t limit)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002293{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002294 struct eb64_node *last, *prev;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002295
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002296 last = eb64_last(&arngs->root);
2297 while (last && arngs->enc_sz > limit) {
2298 struct quic_arng_node *last_node, *prev_node;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002299
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002300 prev = eb64_prev(last);
2301 if (!prev)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002302 return 0;
2303
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002304 last_node = eb64_entry(&last->node, struct quic_arng_node, first);
2305 prev_node = eb64_entry(&prev->node, struct quic_arng_node, first);
2306 arngs->enc_sz -= quic_int_getsize(last_node->last - last_node->first.key);
2307 arngs->enc_sz -= quic_int_getsize(sack_gap(prev_node, last_node));
2308 arngs->enc_sz -= quic_decint_size_diff(arngs->sz);
2309 --arngs->sz;
2310 eb64_delete(last);
2311 pool_free(pool_head_quic_arng, last);
2312 last = prev;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002313 }
2314
2315 return 1;
2316}
2317
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002318/* Set the encoded size of <arngs> QUIC ack ranges. */
2319static void quic_arngs_set_enc_sz(struct quic_arngs *arngs)
2320{
2321 struct eb64_node *node, *next;
2322 struct quic_arng_node *ar, *ar_next;
2323
2324 node = eb64_last(&arngs->root);
2325 if (!node)
2326 return;
2327
2328 ar = eb64_entry(&node->node, struct quic_arng_node, first);
2329 arngs->enc_sz = quic_int_getsize(ar->last) +
2330 quic_int_getsize(ar->last - ar->first.key) + quic_int_getsize(arngs->sz - 1);
2331
2332 while ((next = eb64_prev(node))) {
2333 ar_next = eb64_entry(&next->node, struct quic_arng_node, first);
2334 arngs->enc_sz += quic_int_getsize(sack_gap(ar, ar_next)) +
2335 quic_int_getsize(ar_next->last - ar_next->first.key);
2336 node = next;
2337 ar = eb64_entry(&node->node, struct quic_arng_node, first);
2338 }
2339}
2340
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002341/* Insert <ar> ack range into <arngs> tree of ack ranges.
2342 * Returns the ack range node which has been inserted if succeeded, NULL if not.
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002343 */
2344static inline
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002345struct quic_arng_node *quic_insert_new_range(struct quic_arngs *arngs,
2346 struct quic_arng *ar)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002347{
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002348 struct quic_arng_node *new_ar;
2349
2350 new_ar = pool_alloc(pool_head_quic_arng);
2351 if (new_ar) {
2352 new_ar->first.key = ar->first;
2353 new_ar->last = ar->last;
2354 eb64_insert(&arngs->root, &new_ar->first);
2355 arngs->sz++;
2356 }
2357
2358 return new_ar;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002359}
2360
2361/* Update <arngs> tree of ACK ranges with <ar> as new ACK range value.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002362 * Note that this function computes the number of bytes required to encode
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002363 * this tree of ACK ranges in descending order.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002364 *
2365 * Descending order
2366 * ------------->
2367 *             range1                  range2
2368 * ..........|--------|..............|--------|
2369 *           ^        ^              ^        ^
2370 *           |        |              |        |
2371 *         last1   first1          last2    first2
2372 * ..........+--------+--------------+--------+......
2373 *             diff1       gap12        diff2
2374 *
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002375 * To encode the previous list of ranges we must encode integers as follows in
2376 * descending order:
2377 * enc(last2),enc(diff2),enc(gap12),enc(diff1)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002378 * with diff1 = last1 - first1
2379 * diff2 = last2 - first2
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002380 * gap12 = first1 - last2 - 2 (>= 0)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002381 *
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002382 */
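/* As a concrete example of the gap/diff arithmetic above, with
 * range1 = [12..15] (the higher packet numbers) and range2 = [7..9]:
 * diff1 = 15 - 12 = 3, diff2 = 9 - 7 = 2 and gap12 = 12 - 9 - 2 = 1.
 */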
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002383int quic_update_ack_ranges_list(struct quic_arngs *arngs,
2384 struct quic_arng *ar)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002385{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002386 struct eb64_node *le;
2387 struct quic_arng_node *new_node;
2388 struct eb64_node *new;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002389
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002390 new = NULL;
2391 if (eb_is_empty(&arngs->root)) {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002392 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002393 if (!new_node)
2394 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002395
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002396 goto out;
2397 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002398
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002399 le = eb64_lookup_le(&arngs->root, ar->first);
2400 if (!le) {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002401 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002402 if (!new_node)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002403 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002404 }
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002405 else {
2406 struct quic_arng_node *le_ar =
2407 eb64_entry(&le->node, struct quic_arng_node, first);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002408
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002409 /* Already existing range */
Frédéric Lécailled3f4dd82021-06-02 15:36:12 +02002410 if (le_ar->last >= ar->last)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002411 return 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002412
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002413 if (le_ar->last + 1 >= ar->first) {
2414 le_ar->last = ar->last;
2415 new = le;
2416 new_node = le_ar;
2417 }
2418 else {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002419 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002420 if (!new_node)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002421 return 0;
Frédéric Lécaille8ba42762021-06-02 17:40:09 +02002422
2423 new = &new_node->first;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002424 }
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002425 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002426
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002427 /* Verify that the newly inserted node does not overlap the nodes
2428 * which follow it.
2429 */
2430 if (new) {
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002431 struct eb64_node *next;
2432 struct quic_arng_node *next_node;
2433
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002434 while ((next = eb64_next(new))) {
2435 next_node =
2436 eb64_entry(&next->node, struct quic_arng_node, first);
Frédéric Lécaillec825eba2021-06-02 17:38:13 +02002437 if (new_node->last + 1 < next_node->first.key)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002438 break;
2439
2440 if (next_node->last > new_node->last)
2441 new_node->last = next_node->last;
2442 eb64_delete(next);
Frédéric Lécaillebaea2842021-06-02 15:04:03 +02002443 pool_free(pool_head_quic_arng, next_node);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002444 /* Decrement the size of these ranges. */
2445 arngs->sz--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002446 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002447 }
2448
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002449 quic_arngs_set_enc_sz(arngs);
2450
2451 out:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002452 return 1;
2453}
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002454/* Remove the header protection of packets at <el> encryption level.
2455 * Always succeeds.
2456 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002457static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002458{
2459 struct quic_tls_ctx *tls_ctx;
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002460 struct quic_rx_packet *pqpkt;
2461 struct mt_list *pkttmp1, pkttmp2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002462 struct quic_enc_level *app_qel;
2463
2464 TRACE_ENTER(QUIC_EV_CONN_ELRMHP, ctx->conn);
2465 app_qel = &ctx->conn->qc->els[QUIC_TLS_ENC_LEVEL_APP];
2466 /* A server must not process incoming 1-RTT packets before the handshake is complete. */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002467 if (el == app_qel && objt_listener(ctx->conn->target) &&
2468 ctx->conn->qc->state < QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002469 TRACE_PROTO("hp not removed (handshake not completed)",
2470 QUIC_EV_CONN_ELRMHP, ctx->conn);
2471 goto out;
2472 }
2473 tls_ctx = &el->tls_ctx;
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002474 mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002475 if (!qc_do_rm_hp(pqpkt, tls_ctx, el->pktns->rx.largest_pn,
2476 pqpkt->data + pqpkt->pn_offset,
2477 pqpkt->data, pqpkt->data + pqpkt->len, ctx)) {
2478 TRACE_PROTO("hp removing error", QUIC_EV_CONN_ELRMHP, ctx->conn);
2479 /* XXX TO DO XXX */
2480 }
2481 else {
2482 /* The AAD includes the packet number field */
2483 pqpkt->aad_len = pqpkt->pn_offset + pqpkt->pnl;
2484 /* Store the packet into the tree of packets to decrypt. */
2485 pqpkt->pn_node.key = pqpkt->pn;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02002486 HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002487 quic_rx_packet_eb64_insert(&el->rx.pkts, &pqpkt->pn_node);
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02002488 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002489 TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, ctx->conn, pqpkt);
2490 }
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002491 MT_LIST_DELETE_SAFE(pkttmp1);
2492 quic_rx_packet_refdec(pqpkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002493 }
2494
2495 out:
2496 TRACE_LEAVE(QUIC_EV_CONN_ELRMHP, ctx->conn);
2497}
2498
2499/* Process all the CRYPTO frames at <el> encryption level.
2500 * Return 1 if succeeded, 0 if not.
2501 */
2502static inline int qc_treat_rx_crypto_frms(struct quic_enc_level *el,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002503 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002504{
2505 struct eb64_node *node;
2506
2507 TRACE_ENTER(QUIC_EV_CONN_RXCDATA, ctx->conn);
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002508 HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.crypto.frms_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002509 node = eb64_first(&el->rx.crypto.frms);
2510 while (node) {
2511 struct quic_rx_crypto_frm *cf;
2512
2513 cf = eb64_entry(&node->node, struct quic_rx_crypto_frm, offset_node);
2514 if (cf->offset_node.key != el->rx.crypto.offset)
2515 break;
2516
2517 if (!qc_provide_cdata(el, ctx, cf->data, cf->len, cf->pkt, cf))
2518 goto err;
2519
2520 node = eb64_next(node);
2521 quic_rx_packet_refdec(cf->pkt);
2522 eb64_delete(&cf->offset_node);
2523 pool_free(pool_head_quic_rx_crypto_frm, cf);
2524 }
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002525 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.crypto.frms_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002526 TRACE_LEAVE(QUIC_EV_CONN_RXCDATA, ctx->conn);
2527 return 1;
2528
2529 err:
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002530 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.crypto.frms_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002531 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_RXCDATA, ctx->conn);
2532 return 0;
2533}
2534
2535/* Process all the packets at <el> encryption level.
2536 * Return 1 if succeeded, 0 if not.
2537 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002538int qc_treat_rx_pkts(struct quic_enc_level *el, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002539{
2540 struct quic_tls_ctx *tls_ctx;
2541 struct eb64_node *node;
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002542 int64_t largest_pn = -1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002543
2544 TRACE_ENTER(QUIC_EV_CONN_ELRXPKTS, ctx->conn);
2545 tls_ctx = &el->tls_ctx;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02002546 HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002547 node = eb64_first(&el->rx.pkts);
2548 while (node) {
2549 struct quic_rx_packet *pkt;
2550
2551 pkt = eb64_entry(&node->node, struct quic_rx_packet, pn_node);
2552 if (!qc_pkt_decrypt(pkt, tls_ctx)) {
2553 /* Drop the packet */
2554 TRACE_PROTO("packet decryption failed -> dropped",
2555 QUIC_EV_CONN_ELRXPKTS, ctx->conn, pkt);
2556 }
2557 else {
Frédéric Lécaillec4b93ea2021-06-04 10:12:43 +02002558 if (!qc_parse_pkt_frms(pkt, ctx, el)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002559 /* Drop the packet */
2560 TRACE_PROTO("packet parsing failed -> dropped",
2561 QUIC_EV_CONN_ELRXPKTS, ctx->conn, pkt);
2562 }
2563 else {
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002564 struct quic_arng ar = { .first = pkt->pn, .last = pkt->pn };
2565
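 /* Request that an ACK be sent for this packet number space once every two
  * ack-eliciting packets received (the flag is set each time the counter
  * becomes even).
  */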
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002566 if (pkt->flags & QUIC_FL_RX_PACKET_ACK_ELICITING &&
2567 !(HA_ATOMIC_ADD_FETCH(&el->pktns->rx.nb_ack_eliciting, 1) & 1))
2568 HA_ATOMIC_OR(&el->pktns->flags, QUIC_FL_PKTNS_ACK_REQUIRED);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002569
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002570 if (pkt->pn > largest_pn)
2571 largest_pn = pkt->pn;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002572 /* Update the list of ranges to acknowledge. */
Frédéric Lécaille654c6912021-06-04 10:27:23 +02002573 if (!quic_update_ack_ranges_list(&el->pktns->rx.arngs, &ar))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002574 TRACE_DEVEL("Could not update ack range list",
2575 QUIC_EV_CONN_ELRXPKTS, ctx->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002576 }
2577 }
2578 node = eb64_next(node);
2579 quic_rx_packet_eb64_delete(&pkt->pn_node);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002580 }
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02002581 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002582
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002583 /* Update the largest packet number. */
2584 if (largest_pn != -1)
2585 HA_ATOMIC_UPDATE_MAX(&el->pktns->rx.largest_pn, largest_pn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002586 if (!qc_treat_rx_crypto_frms(el, ctx))
2587 goto err;
2588
2589 TRACE_LEAVE(QUIC_EV_CONN_ELRXPKTS, ctx->conn);
2590 return 1;
2591
2592 err:
2593 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ELRXPKTS, ctx->conn);
2594 return 0;
2595}
2596
2597/* Called during handshakes to parse and build Initial and Handshake packets for QUIC
2598 * connections with <ctx> as I/O handler context.
2599 * Returns 1 if succeeded, 0 if not.
2600 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002601int qc_do_hdshk(struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002602{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002603 int ret, ssl_err;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002604 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002605 enum quic_tls_enc_level tel, next_tel;
2606 struct quic_enc_level *qel, *next_qel;
2607 struct quic_tls_ctx *tls_ctx;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002608 struct qring *qr; // Tx ring
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002609
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002610 qc = ctx->conn->qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002611 qr = NULL;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002612 TRACE_ENTER(QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002613 ssl_err = SSL_ERROR_NONE;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002614 if (!quic_get_tls_enc_levels(&tel, &next_tel, qc->state))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002615 goto err;
2616
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002617 qel = &qc->els[tel];
2618 next_qel = &qc->els[next_tel];
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002619
2620 next_level:
2621 tls_ctx = &qel->tls_ctx;
2622
2623 /* If the header protection key for this level has been derived,
2624 * remove the packet header protections.
2625 */
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002626 if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) &&
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002627 (tls_ctx->rx.flags & QUIC_FL_TLS_SECRETS_SET))
2628 qc_rm_hp_pkts(qel, ctx);
2629
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02002630 if (!qc_treat_rx_pkts(qel, ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002631 goto err;
2632
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002633 if (!qr)
2634 qr = MT_LIST_POP(qc->tx.qring_list, typeof(qr), mt_list);
2635 ret = qc_prep_hdshk_pkts(qr, ctx);
2636 if (ret == -1)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002637 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002638 else if (ret == 0)
2639 goto skip_send;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002640
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002641 if (!qc_send_ppkts(qr, ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002642 goto err;
2643
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002644 skip_send:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002645 /* Check if there is something to do for the next level.
2646 */
2647 if ((next_qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET) &&
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002648 (!MT_LIST_ISEMPTY(&next_qel->rx.pqpkts) || !eb_is_empty(&next_qel->rx.pkts))) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002649 qel = next_qel;
2650 goto next_level;
2651 }
2652
2653 /* If the handshake has not been completed -> out! */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002654 if (qc->state < QUIC_HS_ST_COMPLETE)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002655 goto out;
2656
2657 /* Discard the Handshake keys. */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002658 quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE]);
2659 quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002660 qc_set_timer(ctx);
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002661 if (!quic_build_post_handshake_frames(qc) ||
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002662 !qc_prep_phdshk_pkts(qr, qc) ||
2663 !qc_send_ppkts(qr, ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002664 goto err;
2665
2666 out:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002667 MT_LIST_APPEND(qc->tx.qring_list, &qr->mt_list);
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002668 TRACE_LEAVE(QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002669 return 1;
2670
2671 err:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002672 if (qr)
2673 MT_LIST_APPEND(qc->tx.qring_list, &qr->mt_list);
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002674 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002675 return 0;
2676}
2677
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05002678/* Uninitialize <qel> QUIC encryption level. Never fails. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002679static void quic_conn_enc_level_uninit(struct quic_enc_level *qel)
2680{
2681 int i;
2682
2683 for (i = 0; i < qel->tx.crypto.nb_buf; i++) {
2684 if (qel->tx.crypto.bufs[i]) {
2685 pool_free(pool_head_quic_crypto_buf, qel->tx.crypto.bufs[i]);
2686 qel->tx.crypto.bufs[i] = NULL;
2687 }
2688 }
Willy Tarreau61cfdf42021-02-20 10:46:51 +01002689 ha_free(&qel->tx.crypto.bufs);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002690}
2691
2692/* Initialize QUIC TLS encryption level with <level> as level for <qc> QUIC
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05002693 * connection allocating everything needed.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002694 * Returns 1 if succeeded, 0 if not.
2695 */
2696static int quic_conn_enc_level_init(struct quic_conn *qc,
2697 enum quic_tls_enc_level level)
2698{
2699 struct quic_enc_level *qel;
2700
2701 qel = &qc->els[level];
2702 qel->level = quic_to_ssl_enc_level(level);
2703 qel->tls_ctx.rx.aead = qel->tls_ctx.tx.aead = NULL;
2704 qel->tls_ctx.rx.md = qel->tls_ctx.tx.md = NULL;
2705 qel->tls_ctx.rx.hp = qel->tls_ctx.tx.hp = NULL;
2706 qel->tls_ctx.rx.flags = 0;
2707 qel->tls_ctx.tx.flags = 0;
2708
2709 qel->rx.pkts = EB_ROOT;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02002710 HA_RWLOCK_INIT(&qel->rx.pkts_rwlock);
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02002711 MT_LIST_INIT(&qel->rx.pqpkts);
Frédéric Lécaille9054d1b2021-07-26 16:23:53 +02002712 qel->rx.crypto.offset = 0;
2713 qel->rx.crypto.frms = EB_ROOT_UNIQUE;
2714 HA_RWLOCK_INIT(&qel->rx.crypto.frms_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002715
2716 /* Allocate only one buffer. */
2717 qel->tx.crypto.bufs = malloc(sizeof *qel->tx.crypto.bufs);
2718 if (!qel->tx.crypto.bufs)
2719 goto err;
2720
2721 qel->tx.crypto.bufs[0] = pool_alloc(pool_head_quic_crypto_buf);
2722 if (!qel->tx.crypto.bufs[0])
2723 goto err;
2724
2725 qel->tx.crypto.bufs[0]->sz = 0;
2726 qel->tx.crypto.nb_buf = 1;
2727
2728 qel->tx.crypto.sz = 0;
2729 qel->tx.crypto.offset = 0;
2730
2731 return 1;
2732
2733 err:
Willy Tarreau61cfdf42021-02-20 10:46:51 +01002734 ha_free(&qel->tx.crypto.bufs);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002735 return 0;
2736}
2737
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002738/* Release all the memory allocated for <conn> QUIC connection. */
2739static void quic_conn_free(struct quic_conn *conn)
2740{
2741 int i;
2742
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002743 if (!conn)
2744 return;
2745
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002746 free_quic_conn_cids(conn);
2747 for (i = 0; i < QUIC_TLS_ENC_LEVEL_MAX; i++)
2748 quic_conn_enc_level_uninit(&conn->els[i]);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002749 if (conn->timer_task)
2750 task_destroy(conn->timer_task);
2751 pool_free(pool_head_quic_conn, conn);
2752}
2753
2754/* Callback called upon loss detection and PTO timer expirations. */
Willy Tarreau144f84a2021-03-02 16:09:26 +01002755static struct task *process_timer(struct task *task, void *ctx, unsigned int state)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002756{
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002757 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002758 struct quic_conn *qc;
2759 struct quic_pktns *pktns;
2760
2761
2762 conn_ctx = task->context;
2763 qc = conn_ctx->conn->qc;
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01002764 TRACE_ENTER(QUIC_EV_CONN_PTIMER, conn_ctx->conn,
2765 NULL, NULL, &qc->path->ifae_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002766 task->expire = TICK_ETERNITY;
2767 pktns = quic_loss_pktns(qc);
2768 if (tick_isset(pktns->tx.loss_time)) {
2769 struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
2770
2771 qc_packet_loss_lookup(pktns, qc, &lost_pkts);
2772 if (!LIST_ISEMPTY(&lost_pkts))
2773 qc_release_lost_pkts(pktns, ctx, &lost_pkts, now_ms);
2774 qc_set_timer(conn_ctx);
2775 goto out;
2776 }
2777
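	/* No packet was detected as lost: the timer fired because of a PTO.
	 * Select the packet number space(s) on which probe packets will be sent.
	 */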
2778 if (qc->path->in_flight) {
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002779 pktns = quic_pto_pktns(qc, qc->state >= QUIC_HS_ST_COMPLETE, NULL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002780 pktns->tx.pto_probe = 1;
2781 }
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002782 else if (objt_server(qc->conn->target) && qc->state <= QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002783 struct quic_enc_level *iel = &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL];
2784 struct quic_enc_level *hel = &qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE];
2785
2786 if (hel->tls_ctx.rx.flags == QUIC_FL_TLS_SECRETS_SET)
2787 hel->pktns->tx.pto_probe = 1;
2788 if (iel->tls_ctx.rx.flags == QUIC_FL_TLS_SECRETS_SET)
2789 iel->pktns->tx.pto_probe = 1;
2790 }
2791 qc->tx.nb_pto_dgrams = QUIC_MAX_NB_PTO_DGRAMS;
2792 tasklet_wakeup(conn_ctx->wait_event.tasklet);
2793 qc->path->loss.pto_count++;
2794
2795 out:
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002796 TRACE_LEAVE(QUIC_EV_CONN_PTIMER, conn_ctx->conn, pktns);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002797
2798 return task;
2799}
2800
2801/* Initialize a new QUIC connection with <version> as QUIC version, <ipv4> as
2802 * boolean to tell whether an IPv4 path is used, <dcid> as destination connection
2803 * ID with <dcid_len> as length, <scid> as source connection ID with <scid_len>
2804 * as length, <server> as boolean to tell whether the connection is attached to
2805 * a listener, and <owner> as this listener when <server> is not null.
2806 * Returns the new connection if succeeded, NULL if not.
2807 */
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002808static struct quic_conn *qc_new_conn(unsigned int version, int ipv4,
2809 unsigned char *dcid, size_t dcid_len,
Frédéric Lécaille6b197642021-07-06 16:25:08 +02002810 unsigned char *scid, size_t scid_len, int server, void *owner)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002811{
2812 int i;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002813 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002814 /* Initial CID. */
2815 struct quic_connection_id *icid;
2816
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02002817 TRACE_ENTER(QUIC_EV_CONN_INIT);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002818 qc = pool_zalloc(pool_head_quic_conn);
2819 if (!qc) {
2820 TRACE_PROTO("Could not allocate a new connection", QUIC_EV_CONN_INIT);
2821 goto err;
2822 }
2823
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002824 qc->cids = EB_ROOT;
2825 /* QUIC Server (or listener). */
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02002826 if (server) {
Frédéric Lécaille6b197642021-07-06 16:25:08 +02002827 struct listener *l = owner;
2828
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002829 qc->state = QUIC_HS_ST_SERVER_INITIAL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002830 /* Copy the initial DCID. */
2831 qc->odcid.len = dcid_len;
2832 if (qc->odcid.len)
2833 memcpy(qc->odcid.data, dcid, dcid_len);
2834
2835 /* Copy the SCID as our DCID for this connection. */
2836 if (scid_len)
2837 memcpy(qc->dcid.data, scid, scid_len);
2838 qc->dcid.len = scid_len;
Frédéric Lécaille6b197642021-07-06 16:25:08 +02002839 qc->tx.qring_list = &l->rx.tx_qrings;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002840 }
2841 /* QUIC Client (outgoing connection to servers) */
2842 else {
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002843 qc->state = QUIC_HS_ST_CLIENT_INITIAL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002844 if (dcid_len)
2845 memcpy(qc->dcid.data, dcid, dcid_len);
2846 qc->dcid.len = dcid_len;
2847 }
2848
2849 /* Initialize the output buffer */
2850 qc->obuf.pos = qc->obuf.data;
2851
2852 icid = new_quic_cid(&qc->cids, 0);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002853 if (!icid) {
2854 TRACE_PROTO("Could not allocate a new connection ID", QUIC_EV_CONN_INIT);
2855 goto err;
2856 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002857
2858 /* Select our SCID which is the first CID with 0 as sequence number. */
2859 qc->scid = icid->cid;
2860
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002861 /* Packet number spaces initialization. */
2862 for (i = 0; i < QUIC_TLS_PKTNS_MAX; i++)
2863 quic_pktns_init(&qc->pktns[i]);
2864 /* QUIC encryption level context initialization. */
2865 for (i = 0; i < QUIC_TLS_ENC_LEVEL_MAX; i++) {
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002866 if (!quic_conn_enc_level_init(qc, i)) {
2867 TRACE_PROTO("Could not initialize an encryption level", QUIC_EV_CONN_INIT);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002868 goto err;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002869 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002870 /* Initialize the packet number space. */
2871 qc->els[i].pktns = &qc->pktns[quic_tls_pktns(i)];
2872 }
2873
Frédéric Lécaillec8d3f872021-07-06 17:19:44 +02002874 qc->version = version;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002875 /* TX part. */
2876 LIST_INIT(&qc->tx.frms_to_send);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002877 qc->tx.nb_buf = QUIC_CONN_TX_BUFS_NB;
2878 qc->tx.wbuf = qc->tx.rbuf = 0;
2879 qc->tx.bytes = 0;
2880 qc->tx.nb_pto_dgrams = 0;
2881 /* RX part. */
2882 qc->rx.bytes = 0;
2883
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002884 /* XXX TO DO: Only one path at this time. */
2885 qc->path = &qc->paths[0];
2886 quic_path_init(qc->path, ipv4, default_quic_cc_algo, qc);
2887
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02002888 TRACE_LEAVE(QUIC_EV_CONN_INIT);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002889
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002890 return qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002891
2892 err:
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02002893 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_INIT);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002894 quic_conn_free(qc);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02002895 return NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002896}
2897
2898/* Initialize the timer task of <qc> QUIC connection.
2899 * Returns 1 if succeeded, 0 if not.
2900 */
2901static int quic_conn_init_timer(struct quic_conn *qc)
2902{
2903 qc->timer_task = task_new(MAX_THREADS_MASK);
2904 if (!qc->timer_task)
2905 return 0;
2906
2907 qc->timer = TICK_ETERNITY;
2908 qc->timer_task->process = process_timer;
2909 qc->timer_task->context = qc->conn->xprt_ctx;
2910
2911 return 1;
2912}
2913
2914/* Parse into <pkt> a long header located at <*buf> buffer, <end> being a pointer
2915 * to one byte past the end of this buffer.
2916 */
2917static inline int quic_packet_read_long_header(unsigned char **buf, const unsigned char *end,
2918 struct quic_rx_packet *pkt)
2919{
2920 unsigned char dcid_len, scid_len;
2921
2922 /* Version */
2923 if (!quic_read_uint32(&pkt->version, (const unsigned char **)buf, end))
2924 return 0;
2925
2926 if (!pkt->version) { /* XXX TO DO XXX Version negotiation packet */ }
2927
2928 /* Destination Connection ID Length */
2929 dcid_len = *(*buf)++;
2930 /* We want to be sure we can read <dcid_len> bytes and one more for <scid_len> value */
2931 if (dcid_len > QUIC_CID_MAXLEN || end - *buf < dcid_len + 1)
2932 /* XXX MUST BE DROPPED */
2933 return 0;
2934
2935 if (dcid_len) {
2936 /* Check that the length of this received DCID matches the CID lengths
2937 * of our implementation for non-Initial packets only.
2938 */
2939 if (pkt->type != QUIC_PACKET_TYPE_INITIAL && dcid_len != QUIC_CID_LEN)
2940 return 0;
2941
2942 memcpy(pkt->dcid.data, *buf, dcid_len);
2943 }
2944
2945 pkt->dcid.len = dcid_len;
2946 *buf += dcid_len;
2947
2948 /* Source Connection ID Length */
2949 scid_len = *(*buf)++;
2950 if (scid_len > QUIC_CID_MAXLEN || end - *buf < scid_len)
2951 /* XXX MUST BE DROPPED */
2952 return 0;
2953
2954 if (scid_len)
2955 memcpy(pkt->scid.data, *buf, scid_len);
2956 pkt->scid.len = scid_len;
2957 *buf += scid_len;
2958
2959 return 1;
2960}
2961
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02002962/* If the header protection of <pkt> packet attached to <qc> connection with <ctx>
2963 * as context may be removed, return 1, 0 if not. Also set <*qel> to the associated
2964 * encryption level matching with the packet type. <*qel> may be null if not found.
2965 * Note that <ctx> may be null (for Initial packets).
2966 */
2967static int qc_pkt_may_rm_hp(struct quic_rx_packet *pkt,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002968 struct quic_conn *qc, struct ssl_sock_ctx *ctx,
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02002969 struct quic_enc_level **qel)
2970{
2971 enum quic_tls_enc_level tel;
2972
2973 /* Special case without connection context (first Initial packets) */
2974 if (!ctx) {
2975 *qel = &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL];
2976 return 1;
2977 }
2978
2979 tel = quic_packet_type_enc_level(pkt->type);
2980 if (tel == QUIC_TLS_ENC_LEVEL_NONE) {
2981 *qel = NULL;
2982 return 0;
2983 }
2984
2985 *qel = &qc->els[tel];
2986 if ((*qel)->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD) {
2987 TRACE_DEVEL("Discarded keys", QUIC_EV_CONN_TRMHP, ctx->conn);
2988 return 0;
2989 }
2990
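	/* The RX secrets must have been installed, and Application level packets
	 * must additionally wait for the handshake to be completed before their
	 * header protection may be removed.
	 */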
2991 if (((*qel)->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET) &&
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02002992 (tel != QUIC_TLS_ENC_LEVEL_APP || ctx->conn->qc->state >= QUIC_HS_ST_COMPLETE))
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02002993 return 1;
2994
2995 return 0;
2996}
2997
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002998/* Try to remove the header protection of <pkt> QUIC packet attached to <conn>
2999 * QUIC connection with <buf> as packet number field address, <end> a pointer to one
3000 * byte past the end of the buffer containing this packet and <beg> the address of
3001 * the packet first byte.
3002 * If succeeded, this function updates <*buf> to point to the next packet in the buffer.
3003 * Returns 1 if succeeded, 0 if not.
3004 */
3005static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
3006 unsigned char **buf, unsigned char *beg,
3007 const unsigned char *end,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003008 struct quic_conn *qc, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003009{
3010 unsigned char *pn = NULL; /* Packet number field */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003011 struct quic_enc_level *qel;
3012 /* Only for traces. */
3013 struct quic_rx_packet *qpkt_trace;
3014
3015 qpkt_trace = NULL;
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003016 TRACE_ENTER(QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003017 /* The packet number is here. This is also the start minus
3018 * QUIC_PACKET_PN_MAXLEN of the sample used to add/remove the header
3019 * protection.
3020 */
3021 pn = *buf;
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003022 if (qc_pkt_may_rm_hp(pkt, qc, ctx, &qel)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003023 /* Note that the following function enables us to unprotect the packet
3024 * number and its length subsequently used to decrypt the entire
3025 * packets.
3026 */
3027 if (!qc_do_rm_hp(pkt, &qel->tls_ctx,
3028 qel->pktns->rx.largest_pn, pn, beg, end, ctx)) {
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003029 TRACE_PROTO("hp error", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003030 goto err;
3031 }
3032
3033 /* The AAD includes the packet number field found at <pn>. */
3034 pkt->aad_len = pn - beg + pkt->pnl;
3035 qpkt_trace = pkt;
3036 /* Store the packet */
3037 pkt->pn_node.key = pkt->pn;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02003038 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003039 quic_rx_packet_eb64_insert(&qel->rx.pkts, &pkt->pn_node);
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02003040 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003041 }
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003042 else if (qel) {
3043 TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003044 pkt->pn_offset = pn - beg;
3045 quic_rx_packet_list_addq(&qel->rx.pqpkts, pkt);
3046 }
3047
3048 memcpy(pkt->data, beg, pkt->len);
3049 /* Update the offset of <*buf> for the next QUIC packet. */
3050 *buf = beg + pkt->len;
3051
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003052 TRACE_LEAVE(QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, qpkt_trace);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003053 return 1;
3054
3055 err:
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003056 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, qpkt_trace);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003057 return 0;
3058}
3059
3060/* Parse the header form from <byte0> first byte of <pkt> packet to set its type.
3061 * Also set <*long_header> to 1 if this form is long, 0 if not.
3062 */
3063static inline void qc_parse_hd_form(struct quic_rx_packet *pkt,
3064 unsigned char byte0, int *long_header)
3065{
3066 if (byte0 & QUIC_PACKET_LONG_HEADER_BIT) {
3067 pkt->type =
3068 (byte0 >> QUIC_PACKET_TYPE_SHIFT) & QUIC_PACKET_TYPE_BITMASK;
3069 *long_header = 1;
3070 }
3071 else {
3072 pkt->type = QUIC_PACKET_TYPE_SHORT;
3073 *long_header = 0;
3074 }
3075}
3076
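/* Parse a QUIC packet received from a server (haproxy acting as a QUIC client)
 * into <pkt>, reading from <*buf> with <end> as pointer to one byte past the end
 * of this buffer, <dgram_ctx> being the UDP datagram context and <saddr> the
 * source address of the datagram. Remove the header protection when possible and
 * wake up the connection I/O handler.
 * Returns the length of the parsed packet if succeeded, -1 if not.
 */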
3077static ssize_t qc_srv_pkt_rcv(unsigned char **buf, const unsigned char *end,
3078 struct quic_rx_packet *pkt,
3079 struct quic_dgram_ctx *dgram_ctx,
3080 struct sockaddr_storage *saddr)
3081{
3082 unsigned char *beg;
3083 uint64_t len;
3084 struct quic_conn *qc;
3085 struct eb_root *cids;
3086 struct ebmb_node *node;
3087 struct connection *srv_conn;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003088 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003089 int long_header;
3090
3091 qc = NULL;
3092 TRACE_ENTER(QUIC_EV_CONN_SPKT);
3093 if (end <= *buf)
3094 goto err;
3095
3096 /* Fixed bit */
3097 if (!(**buf & QUIC_PACKET_FIXED_BIT))
3098 /* XXX TO BE DISCARDED */
3099 goto err;
3100
3101 srv_conn = dgram_ctx->owner;
3102 beg = *buf;
3103 /* Header form */
3104 qc_parse_hd_form(pkt, *(*buf)++, &long_header);
3105 if (long_header) {
3106 size_t cid_lookup_len;
3107
3108 if (!quic_packet_read_long_header(buf, end, pkt))
3109 goto err;
3110
3111 /* For Initial packets, and for servers (QUIC client connections),
3112 * there is no Initial connection ID storage.
3113 */
3114 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3115 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
3116 cid_lookup_len = pkt->dcid.len;
3117 }
3118 else {
3119 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
3120 cid_lookup_len = QUIC_CID_LEN;
3121 }
3122
3123 node = ebmb_lookup(cids, pkt->dcid.data, cid_lookup_len);
3124 if (!node)
3125 goto err;
3126
3127 qc = ebmb_entry(node, struct quic_conn, scid_node);
3128
3129 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3130 qc->dcid.len = pkt->scid.len;
3131 if (pkt->scid.len)
3132 memcpy(qc->dcid.data, pkt->scid.data, pkt->scid.len);
3133 }
3134
3135 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3136 uint64_t token_len;
3137
3138 if (!quic_dec_int(&token_len, (const unsigned char **)buf, end) || end - *buf < token_len)
3139 goto err;
3140
3141 /* XXX TO DO XXX 0 value means "the token is not present".
3142 * A server which sends an Initial packet must not set the token.
3143 * So, a client which receives an Initial packet with a token
3144 * MUST discard the packet or generate a connection error with
3145 * PROTOCOL_VIOLATION as type.
3146 * The token must be provided in a Retry packet or NEW_TOKEN frame.
3147 */
3148 pkt->token_len = token_len;
3149 }
3150 }
3151 else {
3152 /* XXX TO DO: Short header XXX */
3153 if (end - *buf < QUIC_CID_LEN)
3154 goto err;
3155
3156 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
3157 node = ebmb_lookup(cids, *buf, QUIC_CID_LEN);
3158 if (!node)
3159 goto err;
3160
3161 qc = ebmb_entry(node, struct quic_conn, scid_node);
3162 *buf += QUIC_CID_LEN;
3163 }
3164 /* Store the DCID used for this packet to check that the packets which
3165 * come in this UDP datagram match with it.
3166 */
3167 if (!dgram_ctx->dcid_node)
3168 dgram_ctx->dcid_node = node;
3169 /* Only packets with long headers and not RETRY or VERSION as type
3170 * have a length field.
3171 */
3172 if (long_header && pkt->type != QUIC_PACKET_TYPE_RETRY && pkt->version) {
3173 if (!quic_dec_int(&len, (const unsigned char **)buf, end) || end - *buf < len)
3174 goto err;
3175
3176 pkt->len = len;
3177 }
3178 else if (!long_header) {
3179 /* A short packet is the last one of a UDP datagram. */
3180 pkt->len = end - *buf;
3181 }
3182
3183 conn_ctx = qc->conn->xprt_ctx;
3184
3185 /* Increase the total length of this packet by the header length. */
3186 pkt->len += *buf - beg;
3187 /* Do not check the DCID node before the length. */
3188 if (dgram_ctx->dcid_node != node) {
3189 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_SPKT, qc->conn);
3190 goto err;
3191 }
3192
3193 if (pkt->len > sizeof pkt->data) {
3194 TRACE_PROTO("Too big packet", QUIC_EV_CONN_SPKT, qc->conn, pkt, &pkt->len);
3195 goto err;
3196 }
3197
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003198 if (!qc_try_rm_hp(pkt, buf, beg, end, qc, conn_ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003199 goto err;
3200
3201 /* Wake the tasklet of the QUIC connection packet handler. */
3202 if (conn_ctx)
3203 tasklet_wakeup(conn_ctx->wait_event.tasklet);
3204
3205 TRACE_LEAVE(QUIC_EV_CONN_SPKT, qc->conn);
3206
3207 return pkt->len;
3208
3209 err:
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +01003210 TRACE_DEVEL("Leaving in error", QUIC_EV_CONN_SPKT, qc ? qc->conn : NULL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003211 return -1;
3212}
3213
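/* Parse a QUIC packet received by a listener into <pkt>, reading from <*buf> with
 * <end> as pointer to one byte past the end of this buffer, <dgram_ctx> being the
 * UDP datagram context and <saddr> the source address of the datagram. A new
 * connection is allocated for the first Initial packet of a new client. The header
 * protection is removed when possible and the connection I/O handler is woken up.
 * Returns the length of the parsed packet if succeeded, -1 if not.
 */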
3214static ssize_t qc_lstnr_pkt_rcv(unsigned char **buf, const unsigned char *end,
3215 struct quic_rx_packet *pkt,
3216 struct quic_dgram_ctx *dgram_ctx,
3217 struct sockaddr_storage *saddr)
3218{
3219 unsigned char *beg;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003220 struct quic_conn *qc;
3221 struct eb_root *cids;
3222 struct ebmb_node *node;
3223 struct listener *l;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003224 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003225 int long_header = 0;
3226
3227 qc = NULL;
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02003228 conn_ctx = NULL;
Frédéric Lécaille2e7ffc92021-06-10 08:18:45 +02003229 TRACE_ENTER(QUIC_EV_CONN_LPKT, NULL, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003230 if (end <= *buf)
3231 goto err;
3232
3233 /* Fixed bit */
3234 if (!(**buf & QUIC_PACKET_FIXED_BIT)) {
3235 /* XXX TO BE DISCARDED */
3236 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3237 goto err;
3238 }
3239
3240 l = dgram_ctx->owner;
3241 beg = *buf;
3242 /* Header form */
3243 qc_parse_hd_form(pkt, *(*buf)++, &long_header);
3244 if (long_header) {
3245 unsigned char dcid_len;
3246
3247 if (!quic_packet_read_long_header(buf, end, pkt)) {
3248 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3249 goto err;
3250 }
3251
3252 dcid_len = pkt->dcid.len;
3253 /* For Initial packets, and for servers (QUIC clients connections),
3254 * there is no Initial connection IDs storage.
3255 */
3256 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003257 uint64_t token_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003258 /* DCIDs of first packets coming from clients may have the same values.
3259 * Let's distinguish them by concatenating the socket addresses to the DCIDs.
3260 */
3261 quic_cid_saddr_cat(&pkt->dcid, saddr);
3262 cids = &l->rx.odcids;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003263
3264 if (!quic_dec_int(&token_len, (const unsigned char **)buf, end) ||
3265 end - *buf < token_len) {
3266 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3267 goto err;
3268 }
3269
3270 /* XXX TO DO XXX 0 value means "the token is not present".
3271 * A server which sends an Initial packet must not set the token.
3272 * So, a client which receives an Initial packet with a token
3273 * MUST discard the packet or generate a connection error with
3274 * PROTOCOL_VIOLATION as type.
3275 * The token must be provided in a Retry packet or NEW_TOKEN frame.
3276 */
3277 pkt->token_len = token_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003278 }
3279 else {
3280 if (pkt->dcid.len != QUIC_CID_LEN) {
3281 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3282 goto err;
3283 }
3284
3285 cids = &l->rx.cids;
3286 }
3287
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003288 /* Only packets with long headers and not RETRY or VERSION as type
3289 * have a length field.
3290 */
3291 if (pkt->type != QUIC_PACKET_TYPE_RETRY && pkt->version) {
3292 uint64_t len;
3293
3294 if (!quic_dec_int(&len, (const unsigned char **)buf, end) ||
3295 end - *buf < len) {
3296 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3297 goto err;
3298 }
3299
3300 pkt->len = len;
3301 }
3302
3303
3304 HA_RWLOCK_RDLOCK(OTHER_LOCK, &l->rx.cids_lock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003305 node = ebmb_lookup(cids, pkt->dcid.data, pkt->dcid.len);
3306 if (!node && pkt->type == QUIC_PACKET_TYPE_INITIAL && dcid_len == QUIC_CID_LEN &&
3307 cids == &l->rx.odcids) {
3308 /* Switch to the definitive tree ->cids containing the final CIDs. */
3309 node = ebmb_lookup(&l->rx.cids, pkt->dcid.data, dcid_len);
3310 if (node) {
3311 /* If found, signal this with NULL as special value for <cids>. */
3312 pkt->dcid.len = dcid_len;
3313 cids = NULL;
3314 }
3315 }
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003316 HA_RWLOCK_RDUNLOCK(OTHER_LOCK, &l->rx.cids_lock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003317
3318 if (!node) {
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003319 int ipv4;
3320 struct quic_cid *odcid;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003321 struct ebmb_node *n = NULL;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003322
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003323 if (pkt->type != QUIC_PACKET_TYPE_INITIAL) {
3324 TRACE_PROTO("Non Initial packet", QUIC_EV_CONN_LPKT);
3325 goto err;
3326 }
3327
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003328 pkt->saddr = *saddr;
3329 /* Note that here, odcid_len equals pkt->dcid.len minus the length
3330 * of <saddr>.
3331 */
3332 pkt->odcid_len = dcid_len;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003333 ipv4 = saddr->ss_family == AF_INET;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003334 qc = qc_new_conn(pkt->version, ipv4, pkt->dcid.data, pkt->dcid.len,
Frédéric Lécaille6b197642021-07-06 16:25:08 +02003335 pkt->scid.data, pkt->scid.len, 1, l);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003336 if (qc == NULL)
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003337 goto err;
3338
3339 odcid = &qc->rx.params.original_destination_connection_id;
3340 /* Copy the transport parameters. */
3341 qc->rx.params = l->bind_conf->quic_params;
3342 /* Copy original_destination_connection_id transport parameter. */
3343 memcpy(odcid->data, &pkt->dcid, pkt->odcid_len);
3344 odcid->len = pkt->odcid_len;
3345 /* Copy the initial source connection ID. */
3346 quic_cid_cpy(&qc->rx.params.initial_source_connection_id, &qc->scid);
3347 qc->enc_params_len =
3348 quic_transport_params_encode(qc->enc_params,
3349 qc->enc_params + sizeof qc->enc_params,
3350 &qc->rx.params, 1);
3351 if (!qc->enc_params_len)
3352 goto err;
3353
Frédéric Lécaille497fa782021-05-31 15:16:13 +02003354 /* NOTE: the socket address has been concatenated to the destination ID
3355 * chosen by the client for Initial packets.
3356 */
3357 if (!qc_new_isecs(qc, pkt->dcid.data, pkt->odcid_len, 1)) {
3358 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc->conn);
3359 goto err;
3360 }
3361
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003362 pkt->qc = qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003363 /* This is the DCID node sent in this packet by the client. */
3364 node = &qc->odcid_node;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003365 /* Enqueue this packet. */
3366 MT_LIST_APPEND(&l->rx.pkts, &pkt->rx_list);
3367 /* Try to accept a new connection. */
3368 listener_accept(l);
3369
3370 HA_RWLOCK_WRLOCK(OTHER_LOCK, &l->rx.cids_lock);
3371 /* Insert the DCID the QUIC client has chosen (only for listeners) */
3372 ebmb_insert(&l->rx.odcids, &qc->odcid_node, qc->odcid.len);
3373 /* Insert our SCID, the connection ID for the QUIC client. */
3374 n = ebmb_insert(&l->rx.cids, &qc->scid_node, qc->scid.len);
3375 HA_RWLOCK_WRUNLOCK(OTHER_LOCK, &l->rx.cids_lock);
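	/* If the insertion of our SCID returned another node, a connection with
	 * this SCID already exists (it may have been inserted concurrently):
	 * release the connection we just allocated and use the existing one.
	 */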
3376 if (n != &qc->scid_node) {
3377 quic_conn_free(qc);
3378 qc = ebmb_entry(n, struct quic_conn, scid_node);
3379 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003380 }
3381 else {
3382 if (pkt->type == QUIC_PACKET_TYPE_INITIAL && cids == &l->rx.odcids)
3383 qc = ebmb_entry(node, struct quic_conn, odcid_node);
3384 else
3385 qc = ebmb_entry(node, struct quic_conn, scid_node);
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02003386 conn_ctx = qc->conn->xprt_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003387 }
3388 }
3389 else {
3390 if (end - *buf < QUIC_CID_LEN) {
3391 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3392 goto err;
3393 }
3394
3395 cids = &l->rx.cids;
3396 node = ebmb_lookup(cids, *buf, QUIC_CID_LEN);
3397 if (!node) {
3398 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
3399 goto err;
3400 }
3401
3402 qc = ebmb_entry(node, struct quic_conn, scid_node);
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02003403 conn_ctx = qc->conn->xprt_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003404 *buf += QUIC_CID_LEN;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02003405 /* A short packet is the last one of a UDP datagram. */
3406 pkt->len = end - *buf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003407 }
3408
3409 /* Store the DCID used for this packet to check that the packets which
3410 * come in this UDP datagram match with it.
3411 */
3412 if (!dgram_ctx->dcid_node) {
3413 dgram_ctx->dcid_node = node;
3414 dgram_ctx->qc = qc;
3415 }
3416
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003417 /* Increase the total length of this packet by the header length. */
3418 pkt->len += *buf - beg;
3419 /* Do not check the DCID node before the length. */
3420 if (dgram_ctx->dcid_node != node) {
3421 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc->conn);
3422 goto err;
3423 }
3424
3425 if (pkt->len > sizeof pkt->data) {
3426 TRACE_PROTO("Too big packet", QUIC_EV_CONN_LPKT, qc->conn, pkt, &pkt->len);
3427 goto err;
3428 }
3429
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003430 if (!qc_try_rm_hp(pkt, buf, beg, end, qc, conn_ctx)) {
3431 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc->conn);
3432 goto err;
3433 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003434
Frédéric Lécaille2e7ffc92021-06-10 08:18:45 +02003435
3436 TRACE_PROTO("New packet", QUIC_EV_CONN_LPKT, qc->conn, pkt);
Frédéric Lécaille01abc462021-07-21 09:34:27 +02003437 /* Wake up the connection packet handler task from here only if all
3438 * the contexts have been initialized, especially the mux context
3439 * conn_ctx->conn->ctx. Note that it is the ->start xprt callback which
3440 * will start it if these contexts for the connection are not already
3441 * initialized.
3442 */
3443 if (conn_ctx && HA_ATOMIC_LOAD(&conn_ctx->conn->ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003444 tasklet_wakeup(conn_ctx->wait_event.tasklet);
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02003445
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003446 TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc->conn, pkt);
3447
3448 return pkt->len;
3449
3450 err:
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +01003451 TRACE_DEVEL("Leaving in error", QUIC_EV_CONN_LPKT,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003452 qc ? qc->conn : NULL, pkt);
3453 return -1;
3454}
3455
3456/* This function builds into <buf> buffer a QUIC long packet header whose size may be computed
3457 * in advance. It is the responsibility of the caller to check there is enough room in this
3458 * buffer to build a long header.
3459 * Returns 0 if <type> QUIC packet type is not supported by long header, or 1 if succeeded.
3460 */
3461static int quic_build_packet_long_header(unsigned char **buf, const unsigned char *end,
3462 int type, size_t pn_len, struct quic_conn *conn)
3463{
3464 if (type > QUIC_PACKET_TYPE_RETRY)
3465 return 0;
3466
3467 /* #0 byte flags */
3468 *(*buf)++ = QUIC_PACKET_FIXED_BIT | QUIC_PACKET_LONG_HEADER_BIT |
3469 (type << QUIC_PACKET_TYPE_SHIFT) | (pn_len - 1);
3470 /* Version */
3471 quic_write_uint32(buf, end, conn->version);
3472 *(*buf)++ = conn->dcid.len;
3473 /* Destination connection ID */
3474 if (conn->dcid.len) {
3475 memcpy(*buf, conn->dcid.data, conn->dcid.len);
3476 *buf += conn->dcid.len;
3477 }
3478 /* Source connection ID */
3479 *(*buf)++ = conn->scid.len;
3480 if (conn->scid.len) {
3481 memcpy(*buf, conn->scid.data, conn->scid.len);
3482 *buf += conn->scid.len;
3483 }
3484
3485 return 1;
3486}
3487
3488/* This function builds into <buf> buffer a QUIC short packet header whose size may be computed
3489 * in advance. It is the responsibility of the caller to check there is enough room in this
3490 * buffer to build a short header.
3491 * Always returns 1.
3492 */
3493static int quic_build_packet_short_header(unsigned char **buf, const unsigned char *end,
3494 size_t pn_len, struct quic_conn *conn)
3495{
3496 /* #0 byte flags */
3497 *(*buf)++ = QUIC_PACKET_FIXED_BIT | (pn_len - 1);
3498 /* Destination connection ID */
3499 if (conn->dcid.len) {
3500 memcpy(*buf, conn->dcid.data, conn->dcid.len);
3501 *buf += conn->dcid.len;
3502 }
3503
3504 return 1;
3505}
3506
3507/* Apply QUIC header protection to the packet with <buf> as first byte address,
3508 * <pn> as address of the Packet number field, <pnlen> being this field length
3509 * with <aead> as AEAD cipher and <key> as secret key.
3510 * Returns 1 if succeeded or 0 if failed.
3511 */
3512static int quic_apply_header_protection(unsigned char *buf, unsigned char *pn, size_t pnlen,
3513 const EVP_CIPHER *aead, const unsigned char *key)
3514{
3515 int i, ret, outlen;
3516 EVP_CIPHER_CTX *ctx;
3517 /* We need a mask of at least 5 bytes: one byte for byte #0
3518 * and at most 4 bytes for the packet number.
3519 */
3520 unsigned char mask[5] = {0};
3521
3522 ret = 0;
3523 ctx = EVP_CIPHER_CTX_new();
3524 if (!ctx)
3525 return 0;
3526
3527 if (!EVP_EncryptInit_ex(ctx, aead, NULL, key, pn + QUIC_PACKET_PN_MAXLEN) ||
3528 !EVP_EncryptUpdate(ctx, mask, &outlen, mask, sizeof mask) ||
3529 !EVP_EncryptFinal_ex(ctx, mask, &outlen))
3530 goto out;
3531
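	/* XOR the mask into the protected fields: the low 4 bits of the first byte
	 * for long headers (low 5 bits for short ones) and the packet number bytes.
	 * The mask was derived above by encrypting the ciphertext sample starting
	 * QUIC_PACKET_PN_MAXLEN bytes past the packet number field.
	 */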
3532 *buf ^= mask[0] & (*buf & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
3533 for (i = 0; i < pnlen; i++)
3534 pn[i] ^= mask[i + 1];
3535
3536 ret = 1;
3537
3538 out:
3539 EVP_CIPHER_CTX_free(ctx);
3540
3541 return ret;
3542}
3543
3544/* Reduce the encoded size of <ack_frm> ACK frame, removing the last
3545 * ACK ranges if needed, to a value below <limit> in bytes.
3546 * Return the size of the reduced ACK frame if succeeded, 0 if not.
3547 */
3548static int quic_ack_frm_reduce_sz(struct quic_frame *ack_frm, size_t limit)
3549{
3550 size_t room, ack_delay_sz;
3551
3552 ack_delay_sz = quic_int_getsize(ack_frm->tx_ack.ack_delay);
3553 /* A frame is made of 1 byte for the frame type. */
3554 room = limit - ack_delay_sz - 1;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003555 if (!quic_rm_last_ack_ranges(ack_frm->tx_ack.arngs, room))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003556 return 0;
3557
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003558 return 1 + ack_delay_sz + ack_frm->tx_ack.arngs->enc_sz;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003559}
3560
3561/* Prepare as many CRYPTO frames as possible from prebuilt CRYPTO frames for <qel>
3562 * encryption level to be encoded in a buffer with <room> as available room,
Frédéric Lécailleea604992020-12-24 13:01:37 +01003563 * and <*len> the packet Length field initialized with the number of bytes already present
3564 * in this buffer which must be taken into account for the Length packet field value.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003565 * <headlen> is the number of bytes already present in this packet before building
Frédéric Lécailleea604992020-12-24 13:01:37 +01003566 * CRYPTO frames.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003567 * It is the responsibility of the caller to check that <*len> < <room>, as it is
3568 * also its responsibility to check that <headlen> < quic_path_prep_data(conn->path).
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003569 * Update consequently <*len> to reflect the size of these CRYPTO frames built
3570 * by this function. Also attach these CRYPTO frames to <pkt> QUIC packet.
3571 * Return 1 if succeeded, 0 if not.
3572 */
3573static inline int qc_build_cfrms(struct quic_tx_packet *pkt,
Frédéric Lécailleea604992020-12-24 13:01:37 +01003574 size_t room, size_t *len, size_t headlen,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003575 struct quic_enc_level *qel,
3576 struct quic_conn *conn)
3577{
Frédéric Lécailleea604992020-12-24 13:01:37 +01003578 int ret;
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003579 struct quic_frame *cf;
Frédéric Lécaillec88df072021-07-27 11:43:11 +02003580 struct mt_list *tmp1, tmp2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003581
Frédéric Lécailleea604992020-12-24 13:01:37 +01003582 ret = 0;
3583 /* If we are not probing we must take into account the congestion
3584 * control window.
3585 */
3586 if (!conn->tx.nb_pto_dgrams)
3587 room = QUIC_MIN(room, quic_path_prep_data(conn->path) - headlen);
3588 TRACE_PROTO("************** CRYPTO frames build (headlen)",
3589 QUIC_EV_CONN_BCFRMS, conn->conn, &headlen);
Frédéric Lécaillec88df072021-07-27 11:43:11 +02003590 mt_list_for_each_entry_safe(cf, &qel->pktns->tx.frms, mt_list, tmp1, tmp2) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003591 /* header length, data length, frame length. */
3592 size_t hlen, dlen, cflen;
3593
Frédéric Lécailleea604992020-12-24 13:01:37 +01003594 TRACE_PROTO(" New CRYPTO frame build (room, len)",
3595 QUIC_EV_CONN_BCFRMS, conn->conn, &room, len);
3596 if (!room)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003597 break;
3598
3599 /* Compute the length of this CRYPTO frame header */
3600 hlen = 1 + quic_int_getsize(cf->crypto.offset);
3601 /* Compute the data length of this CRYPTO frame. */
3602 dlen = max_stream_data_size(room, *len + hlen, cf->crypto.len);
Frédéric Lécailleea604992020-12-24 13:01:37 +01003603 TRACE_PROTO(" CRYPTO data length (hlen, crypto.len, dlen)",
3604 QUIC_EV_CONN_BCFRMS, conn->conn, &hlen, &cf->crypto.len, &dlen);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003605 if (!dlen)
3606 break;
3607
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003608 pkt->cdata_len += dlen;
3609 /* CRYPTO frame length. */
3610 cflen = hlen + quic_int_getsize(dlen) + dlen;
Frédéric Lécailleea604992020-12-24 13:01:37 +01003611 TRACE_PROTO(" CRYPTO frame length (cflen)",
3612 QUIC_EV_CONN_BCFRMS, conn->conn, &cflen);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003613 /* Add the CRYPTO data length and its encoded length to the packet
3614 * length and the length of this length.
3615 */
3616 *len += cflen;
Frédéric Lécailleea604992020-12-24 13:01:37 +01003617 room -= cflen;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003618 if (dlen == cf->crypto.len) {
3619 /* <cf> CRYPTO data have been consumed. */
Frédéric Lécaillec88df072021-07-27 11:43:11 +02003620 MT_LIST_DELETE_SAFE(tmp1);
Willy Tarreau2b718102021-04-21 07:32:39 +02003621 LIST_APPEND(&pkt->frms, &cf->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003622 }
3623 else {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003624 struct quic_frame *new_cf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003625
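			/* Only part of the CRYPTO data fits into this packet:
			 * allocate a new frame covering the first <dlen> bytes and
			 * leave the remaining bytes queued for a future packet.
			 */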
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003626 new_cf = pool_alloc(pool_head_quic_frame);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003627 if (!new_cf) {
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +01003628 TRACE_PROTO("No memory for new crypto frame", QUIC_EV_CONN_BCFRMS, conn->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003629 return 0;
3630 }
3631
3632 new_cf->type = QUIC_FT_CRYPTO;
3633 new_cf->crypto.len = dlen;
3634 new_cf->crypto.offset = cf->crypto.offset;
Willy Tarreau2b718102021-04-21 07:32:39 +02003635 LIST_APPEND(&pkt->frms, &new_cf->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003636 /* Consume <dlen> bytes of the current frame. */
3637 cf->crypto.len -= dlen;
3638 cf->crypto.offset += dlen;
3639 }
Frédéric Lécailleea604992020-12-24 13:01:37 +01003640 ret = 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003641 }
3642
Frédéric Lécailleea604992020-12-24 13:01:37 +01003643 return ret;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003644}
3645
3646/* This function builds a clear handshake packet used during a QUIC TLS handshake
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003647 * into a buffer with <pos> as position pointer and <qel> as QUIC TLS encryption level
3648 * for <conn> QUIC connection, filling the buffer
3649 * with as much CRYPTO data as possible.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003650 * The trailing QUIC_TLS_TAG_LEN bytes of this packet are not built. But they are
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003651 * reserved to ensure there is enough room to build this AEAD TAG after
3652 * having successfully returned from this function and to ensure the position
3653 * pointer <pos> may be safely incremented by QUIC_TLS_TAG_LEN.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003654 * This function also updates the value of <buf_pn> pointer to point to the packet
3655 * number field in this packet. <pn_len> will also have the packet number
3656 * length as value.
3657 *
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003658 * Return 1 if succeeded or 0 if failed (not enough room in the buffer to build
3659 * this packet, QUIC_TLS_TAG_LEN bytes for the encryption TAG included).
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003660 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003661static int qc_do_build_hdshk_pkt(unsigned char *pos, const unsigned char *end,
3662 struct quic_tx_packet *pkt, int pkt_type,
3663 int64_t pn, size_t *pn_len,
3664 unsigned char **buf_pn,
3665 struct quic_enc_level *qel,
3666 struct quic_conn *conn)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003667{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003668 unsigned char *beg;
Frédéric Lécailleea604992020-12-24 13:01:37 +01003669 size_t len, len_frms, token_fields_len, padding_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003670 struct quic_frame frm = { .type = QUIC_FT_CRYPTO, };
3671 struct quic_frame ack_frm = { .type = QUIC_FT_ACK, };
3672 struct quic_crypto *crypto = &frm.crypto;
3673 size_t ack_frm_len;
3674 int64_t largest_acked_pn;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003675 int add_ping_frm;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003676
Frédéric Lécailleea604992020-12-24 13:01:37 +01003677 /* Length field value with CRYPTO frames if present. */
3678 len_frms = 0;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003679 beg = pos;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003680 /* When not probing and not acking, reduce the size of this buffer to respect
3681 * the congestion control window.
3682 */
3683 if (!conn->tx.nb_pto_dgrams && !(qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED)) {
3684 size_t path_room;
3685
3686 path_room = quic_path_prep_data(conn->path);
3687 if (end - beg > path_room)
3688 end = beg + path_room;
3689 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003690
3691 /* For a server, the token field of an Initial packet is empty. */
3692 token_fields_len = pkt_type == QUIC_PACKET_TYPE_INITIAL ? 1 : 0;
3693
3694 /* Check there is enough room to build the header followed by a token. */
3695 if (end - pos < QUIC_LONG_PACKET_MINLEN + conn->dcid.len +
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003696 conn->scid.len + token_fields_len + QUIC_TLS_TAG_LEN) {
3697 ssize_t room = end - pos;
3698 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3699 conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003700 goto err;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003701 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003702
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003703 largest_acked_pn = qel->pktns->tx.largest_acked_pn;
3704 /* packet number length */
3705 *pn_len = quic_packet_number_length(pn, largest_acked_pn);
3706
3707 quic_build_packet_long_header(&pos, end, pkt_type, *pn_len, conn);
3708
3709 /* Encode the token length (0) for an Initial packet. */
3710 if (pkt_type == QUIC_PACKET_TYPE_INITIAL)
3711 *pos++ = 0;
3712
3713 /* Build an ACK frame if required. */
3714 ack_frm_len = 0;
3715 if ((qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) &&
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003716 !eb_is_empty(&qel->pktns->rx.arngs.root)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003717 ack_frm.tx_ack.ack_delay = 0;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003718 ack_frm.tx_ack.arngs = &qel->pktns->rx.arngs;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003719 ack_frm_len = quic_ack_frm_reduce_sz(&ack_frm, end - pos);
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003720 if (!ack_frm_len) {
3721 ssize_t room = end - pos;
3722 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3723 conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003724 goto err;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003725 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003726
3727 qel->pktns->flags &= ~QUIC_FL_PKTNS_ACK_REQUIRED;
3728 }
3729
3730 /* Length field value without the CRYPTO frames data length. */
3731 len = ack_frm_len + *pn_len;
Frédéric Lécaillec88df072021-07-27 11:43:11 +02003732 if (!MT_LIST_ISEMPTY(&qel->pktns->tx.frms)) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003733 ssize_t room = end - pos;
Frédéric Lécailleea604992020-12-24 13:01:37 +01003734
3735 len_frms = len + QUIC_TLS_TAG_LEN;
3736 if (!qc_build_cfrms(pkt, end - pos, &len_frms, pos - beg, qel, conn)) {
3737 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3738 conn->conn, NULL, NULL, &room);
3739 goto err;
3740 }
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003741 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003742
3743 add_ping_frm = 0;
3744 padding_len = 0;
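	/* As a client, Initial packets must be padded: expand <len> up to
	 * QUIC_INITIAL_PACKET_MINLEN, as required by the QUIC specification for
	 * client Initial datagrams.
	 */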
3745 if (objt_server(conn->conn->target) &&
3746 pkt_type == QUIC_PACKET_TYPE_INITIAL &&
3747 len < QUIC_INITIAL_PACKET_MINLEN) {
3748 len += padding_len = QUIC_INITIAL_PACKET_MINLEN - len;
3749 }
3750 else if (LIST_ISEMPTY(&pkt->frms)) {
3751 if (qel->pktns->tx.pto_probe) {
3752 /* If we cannot send a CRYPTO frame, we send a PING frame. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003753 add_ping_frm = 1;
3754 len += 1;
3755 }
3756 /* If there is no frame at all to follow, add at least a PADDING frame. */
3757 if (!ack_frm_len)
3758 len += padding_len = QUIC_PACKET_PN_MAXLEN - *pn_len;
3759 }
3760
3761 /* Length (of the remaining data). Must not fail because the buffer size
3762 * has been checked above. Note that we have reserved QUIC_TLS_TAG_LEN bytes
3763 * for the encryption TAG. It must be taken into account for the length
3764 * of this packet.
3765 */
Frédéric Lécailleea604992020-12-24 13:01:37 +01003766 if (len_frms)
3767 len = len_frms;
3768 else
3769 len += QUIC_TLS_TAG_LEN;
3770 quic_enc_int(&pos, end, len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003771
3772 /* Packet number field address. */
3773 *buf_pn = pos;
3774
3775 /* Packet number encoding. */
3776 quic_packet_number_encode(&pos, end, pn, *pn_len);
3777
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003778 if (ack_frm_len && !qc_build_frm(&pos, end, &ack_frm, pkt, conn)) {
3779 ssize_t room = end - pos;
3780 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3781 conn->conn, NULL, NULL, &room);
3782 goto err;
3783 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003784
3785 /* Crypto frame */
3786 if (!LIST_ISEMPTY(&pkt->frms)) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003787 struct quic_frame *cf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003788
3789 list_for_each_entry(cf, &pkt->frms, list) {
3790 crypto->offset = cf->crypto.offset;
3791 crypto->len = cf->crypto.len;
3792 crypto->qel = qel;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003793 if (!qc_build_frm(&pos, end, &frm, pkt, conn)) {
3794 ssize_t room = end - pos;
3795 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3796 conn->conn, NULL, NULL, &room);
3797 goto err;
3798 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003799 }
3800 }
3801
3802 /* Build a PING frame if needed. */
3803 if (add_ping_frm) {
3804 frm.type = QUIC_FT_PING;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003805 if (!qc_build_frm(&pos, end, &frm, pkt, conn)) {
3806 ssize_t room = end - pos;
3807 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3808 conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003809 goto err;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003810 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003811 }
3812
3813 /* Build a PADDING frame if needed. */
3814 if (padding_len) {
3815 frm.type = QUIC_FT_PADDING;
3816 frm.padding.len = padding_len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003817 if (!qc_build_frm(&pos, end, &frm, pkt, conn)) {
3818 ssize_t room = end - pos;
3819 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
3820 conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003821 goto err;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003822 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003823 }
3824
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003825 /* Always reset this variable as this function has no idea
3826 * if it was set. It is handled by the loss detection timer.
3827 */
3828 qel->pktns->tx.pto_probe = 0;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003829 pkt->len = pos - beg;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003830
3831 out:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003832 return 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003833
3834 err:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003835 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003836}
3837
3838static inline void quic_tx_packet_init(struct quic_tx_packet *pkt)
3839{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003840 pkt->len = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003841 pkt->cdata_len = 0;
3842 pkt->in_flight_len = 0;
3843 LIST_INIT(&pkt->frms);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003844 pkt->next = NULL;
3845 pkt->refcnt = 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003846}
3847
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003848/* Free <pkt> TX packet which has not already been attached to any tree. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003849static inline void free_quic_tx_packet(struct quic_tx_packet *pkt)
3850{
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003851 struct quic_frame *frm, *frmbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003852
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003853 if (!pkt)
3854 return;
3855
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003856 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
Willy Tarreau2b718102021-04-21 07:32:39 +02003857 LIST_DELETE(&frm->list);
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02003858 pool_free(pool_head_quic_frame, frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003859 }
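	/* Drop the reference taken at initialization time; quic_tx_packet_refdec()
	 * is expected to release the packet memory once its reference count
	 * reaches zero.
	 */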
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003860 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003861}
3862
3863/* Build a handshake packet with <pkt_type> as packet type for <qc> QUIC connection
3864 * from CRYPTO data to be encrypted at <qel> encryption level, into the buffer
3865 * pointed to by <*pos>, with <buf_end> as pointer to the end of this buffer.
3866 * Return the built packet if succeeded, or NULL if failed, with <*err> set to -2
3867 * if the packet could not be allocated or encrypted for any reason, and to -1 if
3868 * there was not enough room to build the packet.
3869 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003870static struct quic_tx_packet *qc_build_hdshk_pkt(unsigned char **pos,
3871 const unsigned char *buf_end,
3872 struct quic_conn *qc, int pkt_type,
3873 struct quic_enc_level *qel, int *err)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003874{
3875 /* The pointer to the packet number field. */
3876 unsigned char *buf_pn;
3877 unsigned char *beg, *end, *payload;
3878 int64_t pn;
3879 size_t pn_len, payload_len, aad_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003880 struct quic_tls_ctx *tls_ctx;
3881 struct quic_tx_packet *pkt;
3882
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003883 TRACE_ENTER(QUIC_EV_CONN_HPKT, qc->conn, NULL, qel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003884 *err = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003885 pkt = pool_alloc(pool_head_quic_tx_packet);
3886 if (!pkt) {
3887 TRACE_DEVEL("Not enough memory for a new packet", QUIC_EV_CONN_HPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003888 *err = -2;
3889 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003890 }
3891
3892 quic_tx_packet_init(pkt);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003893 beg = *pos;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003894 pn_len = 0;
3895 buf_pn = NULL;
3896 pn = qel->pktns->tx.next_pn + 1;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003897 if (!qc_do_build_hdshk_pkt(*pos, buf_end, pkt, pkt_type, pn, &pn_len, &buf_pn, qel, qc)) {
3898 *err = -1;
3899 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003900 }
3901
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003902 end = beg + pkt->len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003903 payload = buf_pn + pn_len;
3904 payload_len = end - payload;
3905 aad_len = payload - beg;
3906
3907 tls_ctx = &qel->tls_ctx;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003908 if (!quic_packet_encrypt(payload, payload_len, beg, aad_len, pn, tls_ctx, qc->conn)) {
3909 *err = -2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003910 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003911 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003912
3913 end += QUIC_TLS_TAG_LEN;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003914 pkt->len += QUIC_TLS_TAG_LEN;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003915 if (!quic_apply_header_protection(beg, buf_pn, pn_len,
3916 tls_ctx->tx.hp, tls_ctx->tx.hp_key)) {
3917 TRACE_DEVEL("Could not apply the header protection", QUIC_EV_CONN_HPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003918 *err = -2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003919 goto err;
3920 }
3921
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003922 /* Now that a correct packet is built, let us consume <*pos> buffer. */
3923 *pos = end;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003924 /* Consume a packet number. */
3925 ++qel->pktns->tx.next_pn;
3926 /* Attach the built packet to its tree. */
3927 pkt->pn_node.key = qel->pktns->tx.next_pn;
3928 /* Set the packet in flight length for in flight packets only. */
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003929 if (pkt->flags & QUIC_FL_TX_PACKET_IN_FLIGHT) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003930 pkt->in_flight_len = pkt->len;
3931 qc->path->prep_in_flight += pkt->len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003932 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003933 pkt->pktns = qel->pktns;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003934 TRACE_LEAVE(QUIC_EV_CONN_HPKT, qc->conn, pkt);
3935
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003936 return pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003937
3938 err:
3939 free_quic_tx_packet(pkt);
3940 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_HPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003941 return NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003942}
3943
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003944/* Prepare a clear post handshake packet for <conn> QUIC connection.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003945 * Return 1 if succeeded, 0 if there was not enough room in the buffer to build it.
3946 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003947static int qc_do_build_phdshk_apkt(unsigned char *pos, const unsigned char *end,
3948 struct quic_tx_packet *pkt,
3949 int64_t pn, size_t *pn_len,
3950 unsigned char **buf_pn,
3951 struct quic_enc_level *qel,
3952 struct quic_conn *conn)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003953{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003954 const unsigned char *beg;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003955 struct quic_frame *frm, *sfrm;
3956 struct quic_frame ack_frm = { .type = QUIC_FT_ACK, };
3957 size_t fake_len, ack_frm_len;
3958 int64_t largest_acked_pn;
3959
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003960 TRACE_ENTER(QUIC_EV_CONN_PAPKT, conn->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003961 beg = pos;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01003962 /* When not probing and not acking, reduce the size of this buffer to respect
3963 * the congestion control window.
3964 */
3965 if (!conn->tx.nb_pto_dgrams && !(qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED)) {
3966 size_t path_room;
3967
3968 path_room = quic_path_prep_data(conn->path);
3969 if (end - beg > path_room)
3970 end = beg + path_room;
3971 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003972 largest_acked_pn = qel->pktns->tx.largest_acked_pn;
3973 /* Packet number length */
3974 *pn_len = quic_packet_number_length(pn, largest_acked_pn);
3975 /* Check there is enough room to build this packet (without payload). */
3976 if (end - pos < QUIC_SHORT_PACKET_MINLEN + sizeof_quic_cid(&conn->dcid) +
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003977 *pn_len + QUIC_TLS_TAG_LEN) {
3978 ssize_t room = end - pos;
3979 TRACE_PROTO("Not enough room", QUIC_EV_CONN_PAPKT,
3980 conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003981 goto err;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01003982 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003983
3984 /* Reserve enough room at the end of the packet for the AEAD TAG. */
3985 end -= QUIC_TLS_TAG_LEN;
3986 quic_build_packet_short_header(&pos, end, *pn_len, conn);
3987 /* Packet number field. */
3988 *buf_pn = pos;
3989 /* Packet number encoding. */
3990 quic_packet_number_encode(&pos, end, pn, *pn_len);
3991
3992 /* Build an ACK frame if required. */
3993 ack_frm_len = 0;
3994 if ((qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) &&
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003995 !eb_is_empty(&qel->pktns->rx.arngs.root)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003996 ack_frm.tx_ack.ack_delay = 0;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003997 ack_frm.tx_ack.arngs = &qel->pktns->rx.arngs;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003998 ack_frm_len = quic_ack_frm_reduce_sz(&ack_frm, end - pos);
3999 if (!ack_frm_len)
4000 goto err;
4001
4002 qel->pktns->flags &= ~QUIC_FL_PKTNS_ACK_REQUIRED;
4003 }
4004
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004005 if (ack_frm_len && !qc_build_frm(&pos, end, &ack_frm, pkt, conn)) {
4006 ssize_t room = end - pos;
4007 TRACE_PROTO("Not enough room", QUIC_EV_CONN_PAPKT,
4008 conn->conn, NULL, NULL, &room);
4009 goto err;
4010 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004011
4012 fake_len = ack_frm_len;
Frédéric Lécaillec88df072021-07-27 11:43:11 +02004013 if (!MT_LIST_ISEMPTY(&qel->pktns->tx.frms) &&
Frédéric Lécailleea604992020-12-24 13:01:37 +01004014 !qc_build_cfrms(pkt, end - pos, &fake_len, pos - beg, qel, conn)) {
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004015 ssize_t room = end - pos;
4016 TRACE_PROTO("some CRYPTO frames could not be built",
4017 QUIC_EV_CONN_PAPKT, conn->conn, NULL, NULL, &room);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004018 goto err;
4019 }
4020
4021 /* Crypto frame */
4022 if (!LIST_ISEMPTY(&pkt->frms)) {
4023 struct quic_frame frm = { .type = QUIC_FT_CRYPTO, };
4024 struct quic_crypto *crypto = &frm.crypto;
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02004025 struct quic_frame *cf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004026
4027 list_for_each_entry(cf, &pkt->frms, list) {
4028 crypto->offset = cf->crypto.offset;
4029 crypto->len = cf->crypto.len;
4030 crypto->qel = qel;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004031 if (!qc_build_frm(&pos, end, &frm, pkt, conn)) {
4032 ssize_t room = end - pos;
4033 TRACE_PROTO("Not enough room", QUIC_EV_CONN_PAPKT,
4034 conn->conn, NULL, NULL, &room);
4035 goto err;
4036 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004037 }
4038 }
4039
4040	/* Encode as many frames as possible. */
4041 list_for_each_entry_safe(frm, sfrm, &conn->tx.frms_to_send, list) {
4042 unsigned char *ppos;
4043
4044 ppos = pos;
4045 if (!qc_build_frm(&ppos, end, frm, pkt, conn)) {
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004046 TRACE_DEVEL("Frames not built", QUIC_EV_CONN_PAPKT, conn->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004047 break;
4048 }
4049
Willy Tarreau2b718102021-04-21 07:32:39 +02004050 LIST_DELETE(&frm->list);
4051 LIST_APPEND(&pkt->frms, &frm->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004052 pos = ppos;
4053 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004054 pkt->len = pos - beg;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004055
4056 out:
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004057 TRACE_LEAVE(QUIC_EV_CONN_PAPKT, conn->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004058 return 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004059
4060 err:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004061 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004062}
4063
4064/* Prepare a post-handshake packet at Application encryption level for <qc>
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05004065 * QUIC connection.
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004066 * Return the built packet if succeeded, or NULL with <err> set to -1 if the output buffer was full,
4067 * or to -2 in case of major error (allocation or encryption failures).
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004068 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004069static struct quic_tx_packet *qc_build_phdshk_apkt(unsigned char **pos,
4070 const unsigned char *buf_end,
4071 struct quic_conn *qc, int *err)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004072{
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05004073	/* A pointer to the packet number field within the packet being built */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004074 unsigned char *buf_pn;
4075 unsigned char *beg, *end, *payload;
4076 int64_t pn;
4077 size_t pn_len, aad_len, payload_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004078 struct quic_tls_ctx *tls_ctx;
4079 struct quic_enc_level *qel;
4080 struct quic_tx_packet *pkt;
4081
4082 TRACE_ENTER(QUIC_EV_CONN_PAPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004083 *err = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004084 pkt = pool_alloc(pool_head_quic_tx_packet);
4085 if (!pkt) {
4086 TRACE_DEVEL("Not enough memory for a new packet", QUIC_EV_CONN_PAPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004087 *err = -2;
4088 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004089 }
4090
4091 quic_tx_packet_init(pkt);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004092 beg = *pos;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004093 qel = &qc->els[QUIC_TLS_ENC_LEVEL_APP];
4094 pn_len = 0;
4095 buf_pn = NULL;
4096 pn = qel->pktns->tx.next_pn + 1;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004097 if (!qc_do_build_phdshk_apkt(*pos, buf_end, pkt, pn, &pn_len, &buf_pn, qel, qc)) {
4098 *err = -1;
4099 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004100 }
4101
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004102 end = beg + pkt->len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004103 payload = buf_pn + pn_len;
4104 payload_len = end - payload;
4105 aad_len = payload - beg;
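	/* For QUIC AEAD, the additional authenticated data covers the whole header,
	 * from the first byte up to and including the packet number field; only the
	 * bytes following the packet number are encrypted.
	 */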
4106
4107 tls_ctx = &qel->tls_ctx;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004108 if (!quic_packet_encrypt(payload, payload_len, beg, aad_len, pn, tls_ctx, qc->conn)) {
4109 *err = -2;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004110 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004111 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004112
4113 end += QUIC_TLS_TAG_LEN;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004114 pkt->len += QUIC_TLS_TAG_LEN;
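	/* Header protection masks the protected bits of the first byte and the
	 * packet number bytes with a mask derived from a sample of the ciphertext,
	 * so that on-path observers cannot track packet numbers.
	 */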
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004115 if (!quic_apply_header_protection(beg, buf_pn, pn_len,
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004116 tls_ctx->tx.hp, tls_ctx->tx.hp_key)) {
4117 TRACE_DEVEL("Could not apply the header protection", QUIC_EV_CONN_PAPKT, qc->conn);
4118 *err = -2;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004119 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004120 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004121
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004122	/* Now that a correct packet is built, consume <*pos> buffer by advancing it to the packet end. */
4123 *pos = end;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004124 /* Consume a packet number. */
4125 ++qel->pktns->tx.next_pn;
4126 /* Attach the built packet to its tree. */
4127 pkt->pn_node.key = qel->pktns->tx.next_pn;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004128	/* Set the packet in-flight length, for in-flight packets only. */
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01004129 if (pkt->flags & QUIC_FL_TX_PACKET_IN_FLIGHT) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004130 pkt->in_flight_len = pkt->len;
4131 qc->path->prep_in_flight += pkt->len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01004132 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004133 pkt->pktns = qel->pktns;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004134 TRACE_LEAVE(QUIC_EV_CONN_PAPKT, qc->conn, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004135
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004136 return pkt;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01004137
4138 err:
4139 free_quic_tx_packet(pkt);
4140 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PAPKT, qc->conn);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004141 return NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004142}
4143
4144/* Prepare as many QUIC Application level packets as possible for <qc> QUIC
4145 * connection, queueing them into the <qr> Tx ring buffer.
4146 * Returns 1 if succeeded, 0 if not.
4147 */
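/* Note on the ring buffer layout: for each datagram, room is first reserved for
 * a length field and a first-packet pointer, i.e.
 *   [dglen (uint16_t)][struct quic_tx_packet *][packet bytes...]
 * qc_set_dg() fills these two fields once a packet has been built; the sending
 * path (see qc_send_ppkts()) then consumes one such entry per datagram.
 */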
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004148int qc_prep_phdshk_pkts(struct qring *qr, struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004149{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004150 struct cbuf *cbuf;
4151 unsigned char *end_buf, *end, *pos;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004152 struct quic_enc_level *qel;
4153
4154 TRACE_ENTER(QUIC_EV_CONN_PAPKTS, qc->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004155 qel = &qc->els[QUIC_TLS_ENC_LEVEL_APP];
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004156 cbuf = qr->cbuf;
4157 pos = cb_wr(cbuf);
4158 end = end_buf = pos + cb_contig_space(cbuf);
4159 while (pos < end_buf) {
4160 int err;
4161 uint16_t dglen;
4162 struct quic_tx_packet *pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004163
4164 if (!(qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) &&
Frédéric Lécaillec88df072021-07-27 11:43:11 +02004165 (MT_LIST_ISEMPTY(&qel->pktns->tx.frms) ||
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01004166 qc->path->prep_in_flight >= qc->path->cwnd)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004167 TRACE_DEVEL("nothing more to do",
4168 QUIC_EV_CONN_PAPKTS, qc->conn);
4169 break;
4170 }
4171
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004172 /* Leave room for the datagram header */
4173 pos += sizeof dglen + sizeof pkt;
4174 if (end - pos > qc->path->mtu)
4175 end = pos + qc->path->mtu;
4176 pkt = qc_build_phdshk_apkt(&pos, end, qc, &err);
4177 switch (err) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004178 case -1:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004179 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004180 case -2:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004181 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004182 default:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004183 dglen = pkt->len;
4184 qc_set_dg(cbuf, dglen, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004185 }
4186 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004187 out:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004188 TRACE_LEAVE(QUIC_EV_CONN_PAPKTS, qc->conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004189 return 1;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004190
4191 err:
4192 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PAPKTS, qc->conn);
4193 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004194}
4195
4196/* QUIC connection packet handler task. */
Willy Tarreau144f84a2021-03-02 16:09:26 +01004197struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004198{
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02004199 struct ssl_sock_ctx *ctx = context;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004200
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004201 if (ctx->conn->qc->state < QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004202 qc_do_hdshk(ctx);
4203 }
4204 else {
4205 struct quic_conn *qc = ctx->conn->qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004206 struct qring *qr;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004207
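		/* Post-handshake I/O: borrow a Tx ring from the connection, process the
		 * packets received at the Application encryption level, then prepare and
		 * send as many Application packets as possible before handing the ring
		 * back.
		 */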
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004208 qr = MT_LIST_POP(qc->tx.qring_list, typeof(qr), mt_list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004209 /* XXX TO DO: may fail!!! XXX */
4210 qc_treat_rx_pkts(&qc->els[QUIC_TLS_ENC_LEVEL_APP], ctx);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004211 qc_prep_phdshk_pkts(qr, qc);
4212 qc_send_ppkts(qr, ctx);
4213 MT_LIST_APPEND(qc->tx.qring_list, &qr->mt_list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004214 }
4215
Willy Tarreau74163142021-03-13 11:30:19 +01004216 return t;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004217}
4218
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01004219/* Copy up to <count> bytes from connection <conn> internal stream storage into buffer <buf>.
4220 * Return the number of bytes which have been copied.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004221 */
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01004222static size_t quic_conn_to_buf(struct connection *conn, void *xprt_ctx,
4223 struct buffer *buf, size_t count, int flags)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004224{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004225 size_t try, done = 0;
4226
4227 if (!conn_ctrl_ready(conn))
4228 return 0;
4229
4230 if (!fd_recv_ready(conn->handle.fd))
4231 return 0;
4232
4233 conn->flags &= ~CO_FL_WAIT_ROOM;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004234
4235 /* read the largest possible block. For this, we perform only one call
4236 * to recv() unless the buffer wraps and we exactly fill the first hunk,
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01004237 * in which case we accept to do it once again.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004238 */
4239 while (count > 0) {
4240 try = b_contig_space(buf);
4241 if (!try)
4242 break;
4243
4244 if (try > count)
4245 try = count;
4246
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01004247 b_add(buf, try);
4248 done += try;
4249 count -= try;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004250 }
4251
4252 if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
4253 conn->flags &= ~CO_FL_WAIT_L4_CONN;
4254
4255 leave:
4256 return done;
4257
4258 read0:
4259 conn_sock_read0(conn);
4260 conn->flags &= ~CO_FL_WAIT_L4_CONN;
4261
4262 /* Now a final check for a possible asynchronous low-level error
4263 * report. This can happen when a connection receives a reset
4264 * after a shutdown, both POLL_HUP and POLL_ERR are queued, and
4265 * we might have come from there by just checking POLL_HUP instead
4266 * of recv()'s return value 0, so we have no way to tell there was
4267 * an error without checking.
4268 */
Willy Tarreauf5090652021-04-06 17:23:40 +02004269 if (unlikely(fdtab[conn->handle.fd].state & FD_POLL_ERR))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004270 conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
4271 goto leave;
4272}
4273
4274
4275/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
4276 * socket. <flags> may contain some CO_SFL_* flags to hint the system about
4277 * other pending data for example, but this flag is ignored at the moment.
4278 * Only one call to send() is performed, unless the buffer wraps, in which case
4279 * a second call may be performed. The connection's flags are updated with
4280 * whatever special event is detected (error, empty). The caller is responsible
4281 * for taking care of those events and avoiding the call if inappropriate. The
4282 * function does not call the connection's polling update function, so the caller
4283 * is responsible for this. It's up to the caller to update the buffer's contents
4284 * based on the return value.
4285 */
4286static size_t quic_conn_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
4287{
4288 ssize_t ret;
4289 size_t try, done;
4290 int send_flag;
4291
4292 if (!conn_ctrl_ready(conn))
4293 return 0;
4294
4295 if (!fd_send_ready(conn->handle.fd))
4296 return 0;
4297
4298 done = 0;
4299 /* send the largest possible block. For this we perform only one call
4300 * to send() unless the buffer wraps and we exactly fill the first hunk,
4301 * in which case we accept to do it once again.
4302 */
4303 while (count) {
4304 try = b_contig_data(buf, done);
4305 if (try > count)
4306 try = count;
4307
4308 send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
4309 if (try < count || flags & CO_SFL_MSG_MORE)
4310 send_flag |= MSG_MORE;
4311
4312 ret = sendto(conn->handle.fd, b_peek(buf, done), try, send_flag,
4313 (struct sockaddr *)conn->dst, get_addr_len(conn->dst));
4314 if (ret > 0) {
4315 count -= ret;
4316 done += ret;
4317
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05004318			/* A send succeeded, so we can consider ourselves connected */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004319 conn->flags |= CO_FL_WAIT_L4L6;
4320 /* if the system buffer is full, don't insist */
4321 if (ret < try)
4322 break;
4323 }
4324 else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
4325 /* nothing written, we need to poll for write first */
4326 fd_cant_send(conn->handle.fd);
4327 break;
4328 }
4329 else if (errno != EINTR) {
4330 conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
4331 break;
4332 }
4333 }
4334 if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
4335 conn->flags &= ~CO_FL_WAIT_L4_CONN;
4336
4337 if (done > 0) {
4338 /* we count the total bytes sent, and the send rate for 32-byte
4339 * blocks. The reason for the latter is that freq_ctr are
4340 * limited to 4GB and that it's not enough per second.
4341 */
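		/* For instance, done = 100 bytes gives (100 + 16) / 32 = 3, that is the
		 * byte count rounded to the nearest number of 32-byte blocks.
		 */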
4342 _HA_ATOMIC_ADD(&global.out_bytes, done);
4343 update_freq_ctr(&global.out_32bps, (done + 16) / 32);
4344 }
4345 return done;
4346}
4347
Frédéric Lécaille422a39c2021-03-03 17:28:34 +01004348/* Called from the upper layer, to subscribe <es> to events <event_type>. The
4349 * event subscriber <es> is not allowed to change from a previous call as long
4350 * as at least one event is still subscribed. The <event_type> must only be a
4351 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
4352 */
4353static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
4354{
4355 return conn_subscribe(conn, xprt_ctx, event_type, es);
4356}
4357
4358/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
4359 * The <es> pointer is not allowed to differ from the one passed to the
4360 * subscribe() call. It always returns zero.
4361 */
4362static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
4363{
4364 return conn_unsubscribe(conn, xprt_ctx, event_type, es);
4365}
4366
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004367/* Initialize a QUIC connection (quic_conn struct) to be attached to <conn>
4368 * connection with <xprt_ctx> as address of the xprt context.
4369 * Returns 0 if succeeded, -1 if not.
4370 */
4371static int qc_conn_init(struct connection *conn, void **xprt_ctx)
4372{
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02004373 struct ssl_sock_ctx *ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004374
4375 TRACE_ENTER(QUIC_EV_CONN_NEW, conn);
4376
4377 if (*xprt_ctx)
4378 goto out;
4379
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004380 ctx = pool_alloc(pool_head_quic_conn_ctx);
4381 if (!ctx) {
4382 conn->err_code = CO_ER_SYS_MEMLIM;
4383 goto err;
4384 }
4385
4386 ctx->wait_event.tasklet = tasklet_new();
4387 if (!ctx->wait_event.tasklet) {
4388 conn->err_code = CO_ER_SYS_MEMLIM;
4389 goto err;
4390 }
4391
4392 ctx->wait_event.tasklet->process = quic_conn_io_cb;
4393 ctx->wait_event.tasklet->context = ctx;
4394 ctx->wait_event.events = 0;
4395 ctx->conn = conn;
4396 ctx->subs = NULL;
4397 ctx->xprt_ctx = NULL;
4398
4399 ctx->xprt = xprt_get(XPRT_QUIC);
4400 if (objt_server(conn->target)) {
4401 /* Server */
4402 struct server *srv = __objt_server(conn->target);
4403 unsigned char dcid[QUIC_CID_LEN];
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004404 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004405 int ssl_err, ipv4;
4406
4407 ssl_err = SSL_ERROR_NONE;
4408 if (RAND_bytes(dcid, sizeof dcid) != 1)
4409 goto err;
4410
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004411 ipv4 = conn->dst->ss_family == AF_INET;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02004412 qc = qc_new_conn(QUIC_PROTOCOL_VERSION_DRAFT_28, ipv4,
Frédéric Lécaille6b197642021-07-06 16:25:08 +02004413 dcid, sizeof dcid, NULL, 0, 0, srv);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02004414 if (qc == NULL)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004415 goto err;
4416
Frédéric Lécaille6de72872021-06-11 15:44:24 +02004417 /* Insert our SCID, the connection ID for the QUIC client. */
4418 ebmb_insert(&srv->cids, &qc->scid_node, qc->scid.len);
4419
4420 conn->qc = qc;
4421 qc->conn = conn;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004422 if (!qc_new_isecs(qc, dcid, sizeof dcid, 0))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004423 goto err;
4424
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004425 if (ssl_bio_and_sess_init(conn, srv->ssl_ctx.ctx,
4426 &ctx->ssl, &ctx->bio, ha_quic_meth, ctx) == -1)
4427 goto err;
4428
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004429 qc->rx.params = srv->quic_params;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004430 /* Copy the initial source connection ID. */
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004431 quic_cid_cpy(&qc->rx.params.initial_source_connection_id, &qc->scid);
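		/* Encode our transport parameters so that they can be conveyed to the
		 * peer in the quic_transport_parameters TLS extension.
		 */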
4432 qc->enc_params_len =
4433 quic_transport_params_encode(qc->enc_params, qc->enc_params + sizeof qc->enc_params,
4434 &qc->rx.params, 0);
4435 if (!qc->enc_params_len)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004436 goto err;
4437
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004438 SSL_set_quic_transport_params(ctx->ssl, qc->enc_params, qc->enc_params_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004439 SSL_set_connect_state(ctx->ssl);
4440 ssl_err = SSL_do_handshake(ctx->ssl);
4441 if (ssl_err != 1) {
4442 ssl_err = SSL_get_error(ctx->ssl, ssl_err);
4443 if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
4444 TRACE_PROTO("SSL handshake",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004445 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004446 }
4447 else {
4448 TRACE_DEVEL("SSL handshake error",
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02004449 QUIC_EV_CONN_HDSHK, ctx->conn, &qc->state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004450 goto err;
4451 }
4452 }
4453 }
4454 else if (objt_listener(conn->target)) {
4455 /* Listener */
4456 struct bind_conf *bc = __objt_listener(conn->target)->bind_conf;
Frédéric Lécaille1e1aad42021-05-27 14:57:09 +02004457 struct quic_conn *qc = ctx->conn->qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004458
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004459 if (ssl_bio_and_sess_init(conn, bc->initial_ctx,
4460 &ctx->ssl, &ctx->bio, ha_quic_meth, ctx) == -1)
4461 goto err;
4462
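		/* For a listener, the transport parameters are assumed to have already
		 * been encoded when the connection was instantiated from the first
		 * received packet; they only need to be passed to the TLS stack here.
		 */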
Frédéric Lécaille1e1aad42021-05-27 14:57:09 +02004463 SSL_set_quic_transport_params(ctx->ssl, qc->enc_params, qc->enc_params_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004464 SSL_set_accept_state(ctx->ssl);
4465 }
4466
4467 *xprt_ctx = ctx;
4468
4469 /* Leave init state and start handshake */
4470 conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004471
4472 out:
4473 TRACE_LEAVE(QUIC_EV_CONN_NEW, conn);
4474
4475 return 0;
4476
4477 err:
Willy Tarreau7deb28c2021-05-10 07:40:27 +02004478 if (ctx && ctx->wait_event.tasklet)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004479 tasklet_free(ctx->wait_event.tasklet);
4480 pool_free(pool_head_quic_conn_ctx, ctx);
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +01004481 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_NEW, conn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004482 return -1;
4483}
4484
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004485/* Start the QUIC transport layer */
4486static int qc_xprt_start(struct connection *conn, void *ctx)
4487{
4488 struct quic_conn *qc;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02004489 struct ssl_sock_ctx *qctx = ctx;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004490
4491 qc = conn->qc;
4492 if (!quic_conn_init_timer(qc)) {
4493 TRACE_PROTO("Non initialized timer", QUIC_EV_CONN_LPKT, conn);
4494 return 0;
4495 }
4496
4497 tasklet_wakeup(qctx->wait_event.tasklet);
4498 return 1;
4499}
4500
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004501/* transport-layer operations for QUIC connections. */
4502static struct xprt_ops ssl_quic = {
4503 .snd_buf = quic_conn_from_buf,
4504 .rcv_buf = quic_conn_to_buf,
Frédéric Lécaille422a39c2021-03-03 17:28:34 +01004505 .subscribe = quic_conn_subscribe,
4506 .unsubscribe = quic_conn_unsubscribe,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004507 .init = qc_conn_init,
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004508 .start = qc_xprt_start,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004509 .prepare_bind_conf = ssl_sock_prepare_bind_conf,
4510 .destroy_bind_conf = ssl_sock_destroy_bind_conf,
4511 .name = "QUIC",
4512};
4513
4514__attribute__((constructor))
4515static void __quic_conn_init(void)
4516{
4517 ha_quic_meth = BIO_meth_new(0x666, "ha QUIC methods");
4518 xprt_register(XPRT_QUIC, &ssl_quic);
4519}
4520
4521__attribute__((destructor))
4522static void __quic_conn_deinit(void)
4523{
4524 BIO_meth_free(ha_quic_meth);
4525}
4526
4527/* Read all the QUIC packets found in <buf> with <len> as length (typically a UDP
4528 * datagram), <owner> being the QUIC I/O handler context (listener or connection),
4529 * calling <func> function for each packet found.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05004530 * Return the number of bytes read if succeeded, -1 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004531 */
4532static ssize_t quic_dgram_read(char *buf, size_t len, void *owner,
4533 struct sockaddr_storage *saddr, qpkt_read_func *func)
4534{
4535 unsigned char *pos;
4536 const unsigned char *end;
4537 struct quic_dgram_ctx dgram_ctx = {
4538 .dcid_node = NULL,
4539 .owner = owner,
4540 };
4541
4542 pos = (unsigned char *)buf;
4543 end = pos + len;
4544
4545 do {
4546 int ret;
4547 struct quic_rx_packet *pkt;
4548
Willy Tarreaue4498932021-03-22 21:13:05 +01004549 pkt = pool_zalloc(pool_head_quic_rx_packet);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004550 if (!pkt)
4551 goto err;
4552
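		/* Hold a reference on the packet while it is being parsed; <func> is
		 * expected to take its own references if it needs to keep the packet
		 * beyond this call.
		 */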
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004553 quic_rx_packet_refinc(pkt);
4554 ret = func(&pos, end, pkt, &dgram_ctx, saddr);
4555 if (ret == -1) {
4556 size_t pkt_len;
4557
4558 pkt_len = pkt->len;
4559 free_quic_rx_packet(pkt);
4560 /* If the packet length could not be found, we cannot continue. */
4561 if (!pkt_len)
4562 break;
4563 }
4564 } while (pos < end);
4565
4566 /* Increasing the received bytes counter by the UDP datagram length
4567 * if this datagram could be associated to a connection.
4568 */
4569 if (dgram_ctx.qc)
4570 dgram_ctx.qc->rx.bytes += len;
4571
4572 return pos - (unsigned char *)buf;
4573
4574 err:
4575 return -1;
4576}
4577
4578ssize_t quic_lstnr_dgram_read(char *buf, size_t len, void *owner,
4579 struct sockaddr_storage *saddr)
4580{
4581 return quic_dgram_read(buf, len, owner, saddr, qc_lstnr_pkt_rcv);
4582}
4583
4584ssize_t quic_srv_dgram_read(char *buf, size_t len, void *owner,
4585 struct sockaddr_storage *saddr)
4586{
4587 return quic_dgram_read(buf, len, owner, saddr, qc_srv_pkt_rcv);
4588}
4589
4590/* QUIC I/O handler for connections to local listeners or to remote servers
4591 * depending on the <func> packet parsing function, with <fd> as socket file
4592 * descriptor and <ctx> as context.
4593 */
4594static size_t quic_conn_handler(int fd, void *ctx, qpkt_read_func *func)
4595{
4596 ssize_t ret;
4597 size_t done = 0;
4598 struct buffer *buf = get_trash_chunk();
4599 /* Source address */
4600 struct sockaddr_storage saddr = {0};
4601 socklen_t saddrlen = sizeof saddr;
4602
4603 if (!fd_recv_ready(fd))
4604 return 0;
4605
4606 do {
4607 ret = recvfrom(fd, buf->area, buf->size, 0,
4608 (struct sockaddr *)&saddr, &saddrlen);
4609 if (ret < 0) {
4610 if (errno == EINTR)
4611 continue;
4612 if (errno == EAGAIN)
4613 fd_cant_recv(fd);
4614 goto out;
4615 }
4616 } while (0);
4617
4618 done = buf->data = ret;
4619 quic_dgram_read(buf->area, buf->data, ctx, &saddr, func);
4620
4621 out:
4622 return done;
4623}
4624
4625/* QUIC I/O handler for connections to local listeners with <fd> as socket
4626 * file descriptor.
4627 */
4628void quic_fd_handler(int fd)
4629{
Willy Tarreauf5090652021-04-06 17:23:40 +02004630 if (fdtab[fd].state & FD_POLL_IN)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004631 quic_conn_handler(fd, fdtab[fd].owner, &qc_lstnr_pkt_rcv);
4632}
4633
4634/* QUIC I/O handler for connections to remote servers with <fd> as socket
4635 * file descriptor.
4636 */
4637void quic_conn_fd_handler(int fd)
4638{
Willy Tarreauf5090652021-04-06 17:23:40 +02004639 if (fdtab[fd].state & FD_POLL_IN)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004640 quic_conn_handler(fd, fdtab[fd].owner, &qc_srv_pkt_rcv);
4641}
4642
4643/*
4644 * Local variables:
4645 * c-indent-level: 8
4646 * c-basic-offset: 8
4647 * End:
4648 */