Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001/*
2 * QUIC transport layer over SOCK_DGRAM sockets.
3 *
4 * Copyright 2020 HAProxy Technologies, Frédéric Lécaille <flecaille@haproxy.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#define _GNU_SOURCE
14#include <errno.h>
15#include <fcntl.h>
16#include <stdio.h>
17#include <stdlib.h>
18
19#include <sys/socket.h>
20#include <sys/stat.h>
21#include <sys/types.h>
22
23#include <netinet/tcp.h>
24
Amaury Denoyelleeb01f592021-10-07 16:44:05 +020025#include <import/ebmbtree.h>
26
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010027#include <haproxy/buf-t.h>
28#include <haproxy/compat.h>
29#include <haproxy/api.h>
30#include <haproxy/debug.h>
31#include <haproxy/tools.h>
32#include <haproxy/ticks.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010033
34#include <haproxy/connection.h>
35#include <haproxy/fd.h>
36#include <haproxy/freq_ctr.h>
37#include <haproxy/global.h>
Frédéric Lécailledfbae762021-02-18 09:59:01 +010038#include <haproxy/h3.h>
Amaury Denoyelle154bc7f2021-11-12 16:09:54 +010039#include <haproxy/hq_interop.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010040#include <haproxy/log.h>
Frédéric Lécailledfbae762021-02-18 09:59:01 +010041#include <haproxy/mux_quic.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010042#include <haproxy/pipe.h>
43#include <haproxy/proxy.h>
44#include <haproxy/quic_cc.h>
45#include <haproxy/quic_frame.h>
46#include <haproxy/quic_loss.h>
Amaury Denoyellecfa2d562022-01-19 16:01:05 +010047#include <haproxy/quic_sock.h>
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +020048#include <haproxy/cbuf.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010049#include <haproxy/quic_tls.h>
Amaury Denoyelle118b2cb2021-11-25 16:05:16 +010050#include <haproxy/sink.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010051#include <haproxy/ssl_sock.h>
52#include <haproxy/stream_interface.h>
53#include <haproxy/task.h>
54#include <haproxy/trace.h>
55#include <haproxy/xprt_quic.h>
56
Amaury Denoyellea22d8602021-11-10 15:17:56 +010057/* list of QUIC versions supported by this implementation */
58static int quic_supported_version[] = {
59 0x00000001,
Frédéric Lécaille56d3e1b2021-11-19 14:32:52 +010060 0xff00001d, /* draft-29 */
Amaury Denoyellea22d8602021-11-10 15:17:56 +010061
62 /* placeholder, do not add entry after this */
63 0x0
64};
65
Frédéric Lécaille48fc74a2021-09-03 16:42:19 +020066/* These are the values of some QUIC transport parameters when they are absent.
67 * Should be used to initialize any transport parameters (local or remote)
68 * before updating them with customized values.
69 */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010070struct quic_transport_params quic_dflt_transport_params = {
Frédéric Lécaille46be7e92021-10-22 15:04:27 +020071 .max_udp_payload_size = QUIC_PACKET_MAXLEN,
Frédéric Lécaille785c9c92021-05-17 16:42:21 +020072 .ack_delay_exponent = QUIC_DFLT_ACK_DELAY_COMPONENT,
73 .max_ack_delay = QUIC_DFLT_MAX_ACK_DELAY,
Frédéric Lécaille48fc74a2021-09-03 16:42:19 +020074 .active_connection_id_limit = QUIC_ACTIVE_CONNECTION_ID_LIMIT,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010075};
76
77/* trace source and events */
78static void quic_trace(enum trace_level level, uint64_t mask, \
79 const struct trace_source *src,
80 const struct ist where, const struct ist func,
81 const void *a1, const void *a2, const void *a3, const void *a4);
82
83static const struct trace_event quic_trace_events[] = {
84 { .mask = QUIC_EV_CONN_NEW, .name = "new_conn", .desc = "new QUIC connection" },
85 { .mask = QUIC_EV_CONN_INIT, .name = "new_conn_init", .desc = "new QUIC connection initialization" },
86 { .mask = QUIC_EV_CONN_ISEC, .name = "init_secs", .desc = "initial secrets derivation" },
87 { .mask = QUIC_EV_CONN_RSEC, .name = "read_secs", .desc = "read secrets derivation" },
88 { .mask = QUIC_EV_CONN_WSEC, .name = "write_secs", .desc = "write secrets derivation" },
89 { .mask = QUIC_EV_CONN_LPKT, .name = "lstnr_packet", .desc = "new listener received packet" },
90 { .mask = QUIC_EV_CONN_SPKT, .name = "srv_packet", .desc = "new server received packet" },
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +050091 { .mask = QUIC_EV_CONN_ENCPKT, .name = "enc_hdshk_pkt", .desc = "handshake packet encryption" },
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010092 { .mask = QUIC_EV_CONN_HPKT, .name = "hdshk_pkt", .desc = "handshake packet building" },
 93 { .mask = QUIC_EV_CONN_PAPKT, .name = "phdshk_apkt", .desc = "post handshake application packet preparation" },
 94 { .mask = QUIC_EV_CONN_PAPKTS, .name = "phdshk_apkts", .desc = "post handshake application packets preparation" },
 95 { .mask = QUIC_EV_CONN_HDSHK, .name = "hdshk", .desc = "SSL handshake processing" },
96 { .mask = QUIC_EV_CONN_RMHP, .name = "rm_hp", .desc = "Remove header protection" },
97 { .mask = QUIC_EV_CONN_PRSHPKT, .name = "parse_hpkt", .desc = "parse handshake packet" },
98 { .mask = QUIC_EV_CONN_PRSAPKT, .name = "parse_apkt", .desc = "parse application packet" },
99 { .mask = QUIC_EV_CONN_PRSFRM, .name = "parse_frm", .desc = "parse frame" },
100 { .mask = QUIC_EV_CONN_PRSAFRM, .name = "parse_ack_frm", .desc = "parse ACK frame" },
101 { .mask = QUIC_EV_CONN_BFRM, .name = "build_frm", .desc = "build frame" },
 102 { .mask = QUIC_EV_CONN_PHPKTS, .name = "phdshk_pkts", .desc = "handshake packets preparation" },
 103 { .mask = QUIC_EV_CONN_TRMHP, .name = "rm_hp_try", .desc = "header protection removal attempt" },
 104 { .mask = QUIC_EV_CONN_ELRMHP, .name = "el_rm_hp", .desc = "handshake enc. level header protection removal" },
105 { .mask = QUIC_EV_CONN_ELRXPKTS, .name = "el_treat_rx_pkts", .desc = "handshake enc. level rx packets treatment" },
106 { .mask = QUIC_EV_CONN_SSLDATA, .name = "ssl_provide_data", .desc = "CRYPTO data provision to TLS stack" },
107 { .mask = QUIC_EV_CONN_RXCDATA, .name = "el_treat_rx_cfrms",.desc = "enc. level RX CRYPTO frames processing"},
108 { .mask = QUIC_EV_CONN_ADDDATA, .name = "add_hdshk_data", .desc = "TLS stack ->add_handshake_data() call"},
109 { .mask = QUIC_EV_CONN_FFLIGHT, .name = "flush_flight", .desc = "TLS stack ->flush_flight() call"},
110 { .mask = QUIC_EV_CONN_SSLALERT, .name = "send_alert", .desc = "TLS stack ->send_alert() call"},
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100111 { .mask = QUIC_EV_CONN_RTTUPDT, .name = "rtt_updt", .desc = "RTT sampling" },
112 { .mask = QUIC_EV_CONN_SPPKTS, .name = "sppkts", .desc = "send prepared packets" },
113 { .mask = QUIC_EV_CONN_PKTLOSS, .name = "pktloss", .desc = "detect packet loss" },
114 { .mask = QUIC_EV_CONN_STIMER, .name = "stimer", .desc = "set timer" },
115 { .mask = QUIC_EV_CONN_PTIMER, .name = "ptimer", .desc = "process timer" },
116 { .mask = QUIC_EV_CONN_SPTO, .name = "spto", .desc = "set PTO" },
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +0100117 { .mask = QUIC_EV_CONN_BCFRMS, .name = "bcfrms", .desc = "build CRYPTO data frames" },
Frédéric Lécaille513b4f22021-09-20 15:23:17 +0200118 { .mask = QUIC_EV_CONN_XPRTSEND, .name = "xprt_send", .desc = "sending XPRT subscription" },
 119 { .mask = QUIC_EV_CONN_XPRTRECV, .name = "xprt_recv", .desc = "receiving XPRT subscription" },
Frédéric Lécailleba85acd2022-01-11 14:43:50 +0100120 { .mask = QUIC_EV_CONN_FREED, .name = "conn_freed", .desc = "releasing conn. memory" },
121 { .mask = QUIC_EV_CONN_CLOSE, .name = "conn_close", .desc = "closing conn." },
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100122 { /* end */ }
123};
124
125static const struct name_desc quic_trace_lockon_args[4] = {
126 /* arg1 */ { /* already used by the connection */ },
127 /* arg2 */ { .name="quic", .desc="QUIC transport" },
128 /* arg3 */ { },
129 /* arg4 */ { }
130};
131
132static const struct name_desc quic_trace_decoding[] = {
133#define QUIC_VERB_CLEAN 1
134 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
135 { /* end */ }
136};
137
138
139struct trace_source trace_quic = {
140 .name = IST("quic"),
141 .desc = "QUIC xprt",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100142 .arg_def = TRC_ARG1_QCON, /* TRACE()'s first argument is always a quic_conn */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100143 .default_cb = quic_trace,
144 .known_events = quic_trace_events,
145 .lockon_args = quic_trace_lockon_args,
146 .decoding = quic_trace_decoding,
147 .report_events = ~0, /* report everything by default */
148};
149
150#define TRACE_SOURCE &trace_quic
151INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
152
153static BIO_METHOD *ha_quic_meth;
154
Frédéric Lécailledbe25af2021-08-04 15:27:37 +0200155DECLARE_POOL(pool_head_quic_tx_ring, "quic_tx_ring_pool", QUIC_TX_RING_BUFSZ);
Frédéric Lécaillec1029f62021-10-20 11:09:58 +0200156DECLARE_POOL(pool_head_quic_rxbuf, "quic_rxbuf_pool", QUIC_RX_BUFSZ);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100157DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100158DECLARE_STATIC_POOL(pool_head_quic_conn_ctx,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +0200159 "quic_conn_ctx_pool", sizeof(struct ssl_sock_ctx));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100160DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100161DECLARE_POOL(pool_head_quic_connection_id,
162 "quic_connnection_id_pool", sizeof(struct quic_connection_id));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100163DECLARE_POOL(pool_head_quic_rx_packet, "quic_rx_packet_pool", sizeof(struct quic_rx_packet));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100164DECLARE_POOL(pool_head_quic_tx_packet, "quic_tx_packet_pool", sizeof(struct quic_tx_packet));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100165DECLARE_STATIC_POOL(pool_head_quic_rx_crypto_frm, "quic_rx_crypto_frm_pool", sizeof(struct quic_rx_crypto_frm));
Frédéric Lécailledfbae762021-02-18 09:59:01 +0100166DECLARE_POOL(pool_head_quic_rx_strm_frm, "quic_rx_strm_frm", sizeof(struct quic_rx_strm_frm));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100167DECLARE_STATIC_POOL(pool_head_quic_crypto_buf, "quic_crypto_buf_pool", sizeof(struct quic_crypto_buf));
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200168DECLARE_POOL(pool_head_quic_frame, "quic_frame_pool", sizeof(struct quic_frame));
Frédéric Lécaille8090b512020-11-30 16:19:22 +0100169DECLARE_STATIC_POOL(pool_head_quic_arng, "quic_arng_pool", sizeof(struct quic_arng_node));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100170
Frédéric Lécaille9445abc2021-08-04 10:49:51 +0200171static struct quic_tx_packet *qc_build_pkt(unsigned char **pos, const unsigned char *buf_end,
Frédéric Lécaille4436cb62021-08-16 12:06:46 +0200172 struct quic_enc_level *qel,
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +0100173 struct quic_conn *qc, size_t dglen, int pkt_type,
Frédéric Lécaillece6602d2022-01-17 11:06:10 +0100174 int padding, int ack, int probe, int cc, int *err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100175
Frédéric Lécaillef63921f2020-12-18 09:48:20 +0100176/* Only for debug purpose */
177struct enc_debug_info {
178 unsigned char *payload;
179 size_t payload_len;
180 unsigned char *aad;
181 size_t aad_len;
182 uint64_t pn;
183};
184
 185/* Initializes an enc_debug_info struct (only for debugging purposes) */
186static inline void enc_debug_info_init(struct enc_debug_info *edi,
187 unsigned char *payload, size_t payload_len,
188 unsigned char *aad, size_t aad_len, uint64_t pn)
189{
190 edi->payload = payload;
191 edi->payload_len = payload_len;
192 edi->aad = aad;
193 edi->aad_len = aad_len;
194 edi->pn = pn;
195}
196
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100197/* Trace callback for QUIC.
 198 * These traces always expect that arg1, if non-null, is of type quic_conn.
199 */
200static void quic_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
201 const struct ist where, const struct ist func,
202 const void *a1, const void *a2, const void *a3, const void *a4)
203{
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100204 const struct quic_conn *qc = a1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100205
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100206 if (qc) {
207 const struct quic_tls_secrets *secs;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100208
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100209 chunk_appendf(&trace_buf, " : qc@%p", qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100210 if ((mask & QUIC_EV_CONN_INIT) && qc) {
211 chunk_appendf(&trace_buf, "\n odcid");
212 quic_cid_dump(&trace_buf, &qc->odcid);
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100213 chunk_appendf(&trace_buf, "\n dcid");
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100214 quic_cid_dump(&trace_buf, &qc->dcid);
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100215 chunk_appendf(&trace_buf, "\n scid");
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100216 quic_cid_dump(&trace_buf, &qc->scid);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100217 }
218
219 if (mask & QUIC_EV_CONN_ADDDATA) {
220 const enum ssl_encryption_level_t *level = a2;
221 const size_t *len = a3;
222
223 if (level) {
224 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
225
226 chunk_appendf(&trace_buf, " el=%c(%d)", quic_enc_level_char(lvl), lvl);
227 }
228 if (len)
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100229 chunk_appendf(&trace_buf, " len=%llu", (unsigned long long)*len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100230 }
231 if ((mask & QUIC_EV_CONN_ISEC) && qc) {
232 /* Initial read & write secrets. */
233 enum quic_tls_enc_level level = QUIC_TLS_ENC_LEVEL_INITIAL;
234 const unsigned char *rx_sec = a2;
235 const unsigned char *tx_sec = a3;
236
237 secs = &qc->els[level].tls_ctx.rx;
238 if (secs->flags & QUIC_FL_TLS_SECRETS_SET) {
239 chunk_appendf(&trace_buf, "\n RX el=%c", quic_enc_level_char(level));
240 if (rx_sec)
241 quic_tls_secret_hexdump(&trace_buf, rx_sec, 32);
242 quic_tls_keys_hexdump(&trace_buf, secs);
243 }
244 secs = &qc->els[level].tls_ctx.tx;
245 if (secs->flags & QUIC_FL_TLS_SECRETS_SET) {
246 chunk_appendf(&trace_buf, "\n TX el=%c", quic_enc_level_char(level));
247 if (tx_sec)
248 quic_tls_secret_hexdump(&trace_buf, tx_sec, 32);
249 quic_tls_keys_hexdump(&trace_buf, secs);
250 }
251 }
252 if (mask & (QUIC_EV_CONN_RSEC|QUIC_EV_CONN_RWSEC)) {
253 const enum ssl_encryption_level_t *level = a2;
254 const unsigned char *secret = a3;
255 const size_t *secret_len = a4;
256
257 if (level) {
258 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
259
260 chunk_appendf(&trace_buf, "\n RX el=%c", quic_enc_level_char(lvl));
261 if (secret && secret_len)
262 quic_tls_secret_hexdump(&trace_buf, secret, *secret_len);
263 secs = &qc->els[lvl].tls_ctx.rx;
264 if (secs->flags & QUIC_FL_TLS_SECRETS_SET)
265 quic_tls_keys_hexdump(&trace_buf, secs);
266 }
267 }
268
269 if (mask & (QUIC_EV_CONN_WSEC|QUIC_EV_CONN_RWSEC)) {
270 const enum ssl_encryption_level_t *level = a2;
271 const unsigned char *secret = a3;
272 const size_t *secret_len = a4;
273
274 if (level) {
275 enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
276
277 chunk_appendf(&trace_buf, "\n TX el=%c", quic_enc_level_char(lvl));
278 if (secret && secret_len)
279 quic_tls_secret_hexdump(&trace_buf, secret, *secret_len);
280 secs = &qc->els[lvl].tls_ctx.tx;
281 if (secs->flags & QUIC_FL_TLS_SECRETS_SET)
282 quic_tls_keys_hexdump(&trace_buf, secs);
283 }
284
285 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100286
Frédéric Lécaille133e8a72020-12-18 09:33:27 +0100287 if (mask & (QUIC_EV_CONN_HPKT|QUIC_EV_CONN_PAPKT)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100288 const struct quic_tx_packet *pkt = a2;
289 const struct quic_enc_level *qel = a3;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100290 const ssize_t *room = a4;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100291
292 if (qel) {
Amaury Denoyelle4fd53d72021-12-21 14:28:26 +0100293 const struct quic_pktns *pktns = qc->pktns;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100294 chunk_appendf(&trace_buf, " qel=%c cwnd=%llu ppif=%lld pif=%llu "
Frédéric Lécaille466e9da2021-12-29 12:04:13 +0100295 "if=%llu pp=%u",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100296 quic_enc_level_char_from_qel(qel, qc),
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100297 (unsigned long long)qc->path->cwnd,
298 (unsigned long long)qc->path->prep_in_flight,
299 (unsigned long long)qc->path->in_flight,
300 (unsigned long long)pktns->tx.in_flight,
Frédéric Lécaille466e9da2021-12-29 12:04:13 +0100301 pktns->tx.pto_probe);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100302 }
303 if (pkt) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200304 const struct quic_frame *frm;
Frédéric Lécaille0371cd52021-12-13 12:30:54 +0100305 if (pkt->pn_node.key != (uint64_t)-1)
306 chunk_appendf(&trace_buf, " pn=%llu",(ull)pkt->pn_node.key);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100307 list_for_each_entry(frm, &pkt->frms, list)
Frédéric Lécaille1ede8232021-12-23 14:11:25 +0100308 chunk_frm_appendf(&trace_buf, frm);
Frédéric Lécaille8b6ea172022-01-17 10:51:43 +0100309 chunk_appendf(&trace_buf, " rx.bytes=%llu tx.bytes=%llu",
310 (unsigned long long)qc->rx.bytes,
311 (unsigned long long)qc->tx.bytes);
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100312 }
313
314 if (room) {
315 chunk_appendf(&trace_buf, " room=%lld", (long long)*room);
316 chunk_appendf(&trace_buf, " dcid.len=%llu scid.len=%llu",
317 (unsigned long long)qc->dcid.len, (unsigned long long)qc->scid.len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100318 }
319 }
320
321 if (mask & QUIC_EV_CONN_HDSHK) {
322 const enum quic_handshake_state *state = a2;
323 const int *err = a3;
324
325 if (state)
326 chunk_appendf(&trace_buf, " state=%s", quic_hdshk_state_str(*state));
327 if (err)
328 chunk_appendf(&trace_buf, " err=%s", ssl_error_str(*err));
329 }
330
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +0100331 if (mask & (QUIC_EV_CONN_TRMHP|QUIC_EV_CONN_ELRMHP|QUIC_EV_CONN_SPKT)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100332 const struct quic_rx_packet *pkt = a2;
333 const unsigned long *pktlen = a3;
334 const SSL *ssl = a4;
335
336 if (pkt) {
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100337 chunk_appendf(&trace_buf, " pkt@%p el=%c",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100338 pkt, quic_packet_type_enc_level_char(pkt->type));
339 if (pkt->pnl)
340 chunk_appendf(&trace_buf, " pnl=%u pn=%llu", pkt->pnl,
341 (unsigned long long)pkt->pn);
342 if (pkt->token_len)
343 chunk_appendf(&trace_buf, " toklen=%llu",
344 (unsigned long long)pkt->token_len);
345 if (pkt->aad_len)
346 chunk_appendf(&trace_buf, " aadlen=%llu",
347 (unsigned long long)pkt->aad_len);
348 chunk_appendf(&trace_buf, " flags=0x%x len=%llu",
349 pkt->flags, (unsigned long long)pkt->len);
350 }
351 if (pktlen)
352 chunk_appendf(&trace_buf, " (%ld)", *pktlen);
353 if (ssl) {
354 enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
355 chunk_appendf(&trace_buf, " el=%c",
356 quic_enc_level_char(ssl_to_quic_enc_level(level)));
357 }
358 }
359
360 if (mask & (QUIC_EV_CONN_ELRXPKTS|QUIC_EV_CONN_PRSHPKT|QUIC_EV_CONN_SSLDATA)) {
361 const struct quic_rx_packet *pkt = a2;
362 const struct quic_rx_crypto_frm *cf = a3;
363 const SSL *ssl = a4;
364
365 if (pkt)
366 chunk_appendf(&trace_buf, " pkt@%p el=%c pn=%llu", pkt,
367 quic_packet_type_enc_level_char(pkt->type),
368 (unsigned long long)pkt->pn);
369 if (cf)
370 chunk_appendf(&trace_buf, " cfoff=%llu cflen=%llu",
371 (unsigned long long)cf->offset_node.key,
372 (unsigned long long)cf->len);
373 if (ssl) {
374 enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
Frédéric Lécaille57e6e9e2021-09-23 18:10:56 +0200375 chunk_appendf(&trace_buf, " rel=%c",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100376 quic_enc_level_char(ssl_to_quic_enc_level(level)));
377 }
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100378
379 if (qc->err_code)
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100380 chunk_appendf(&trace_buf, " err_code=0x%llx", (ull)qc->err_code);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100381 }
382
383 if (mask & (QUIC_EV_CONN_PRSFRM|QUIC_EV_CONN_BFRM)) {
384 const struct quic_frame *frm = a2;
385
386 if (frm)
387 chunk_appendf(&trace_buf, " %s", quic_frame_type_string(frm->type));
388 }
389
390 if (mask & QUIC_EV_CONN_PHPKTS) {
391 const struct quic_enc_level *qel = a2;
392
393 if (qel) {
Frédéric Lécailledd51da52021-12-29 15:36:25 +0100394 const struct quic_pktns *pktns = qel->pktns;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100395 chunk_appendf(&trace_buf,
Frédéric Lécaille466e9da2021-12-29 12:04:13 +0100396 " qel=%c state=%s ack?%d cwnd=%llu ppif=%lld pif=%llu if=%llu pp=%u",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100397 quic_enc_level_char_from_qel(qel, qc),
Frédéric Lécaille546186b2021-08-03 14:25:36 +0200398 quic_hdshk_state_str(HA_ATOMIC_LOAD(&qc->state)),
Frédéric Lécaille25eeebe2021-12-16 11:21:52 +0100399 !!(HA_ATOMIC_LOAD(&qel->pktns->flags) & QUIC_FL_PKTNS_ACK_REQUIRED),
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100400 (unsigned long long)qc->path->cwnd,
401 (unsigned long long)qc->path->prep_in_flight,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100402 (unsigned long long)qc->path->in_flight,
Frédéric Lécaille466e9da2021-12-29 12:04:13 +0100403 (unsigned long long)pktns->tx.in_flight,
404 pktns->tx.pto_probe);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100405 }
406 }
407
Frédéric Lécaillef63921f2020-12-18 09:48:20 +0100408 if (mask & QUIC_EV_CONN_ENCPKT) {
409 const struct enc_debug_info *edi = a2;
410
411 if (edi)
412 chunk_appendf(&trace_buf,
413 " payload=@%p payload_len=%llu"
414 " aad=@%p aad_len=%llu pn=%llu",
415 edi->payload, (unsigned long long)edi->payload_len,
416 edi->aad, (unsigned long long)edi->aad_len,
417 (unsigned long long)edi->pn);
418 }
419
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100420 if (mask & QUIC_EV_CONN_RMHP) {
421 const struct quic_rx_packet *pkt = a2;
422
423 if (pkt) {
424 const int *ret = a3;
425
426 chunk_appendf(&trace_buf, " pkt@%p", pkt);
427 if (ret && *ret)
428 chunk_appendf(&trace_buf, " pnl=%u pn=%llu",
429 pkt->pnl, (unsigned long long)pkt->pn);
430 }
431 }
432
433 if (mask & QUIC_EV_CONN_PRSAFRM) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200434 const struct quic_frame *frm = a2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100435 const unsigned long *val1 = a3;
436 const unsigned long *val2 = a4;
437
438 if (frm)
Frédéric Lécaille1ede8232021-12-23 14:11:25 +0100439 chunk_frm_appendf(&trace_buf, frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100440 if (val1)
441 chunk_appendf(&trace_buf, " %lu", *val1);
442 if (val2)
443 chunk_appendf(&trace_buf, "..%lu", *val2);
444 }
445
446 if (mask & QUIC_EV_CONN_RTTUPDT) {
447 const unsigned int *rtt_sample = a2;
448 const unsigned int *ack_delay = a3;
449 const struct quic_loss *ql = a4;
450
451 if (rtt_sample)
452 chunk_appendf(&trace_buf, " rtt_sample=%ums", *rtt_sample);
453 if (ack_delay)
454 chunk_appendf(&trace_buf, " ack_delay=%ums", *ack_delay);
455 if (ql)
456 chunk_appendf(&trace_buf,
457 " srtt=%ums rttvar=%ums min_rtt=%ums",
458 ql->srtt >> 3, ql->rtt_var >> 2, ql->rtt_min);
459 }
460 if (mask & QUIC_EV_CONN_CC) {
461 const struct quic_cc_event *ev = a2;
462 const struct quic_cc *cc = a3;
463
464 if (a2)
465 quic_cc_event_trace(&trace_buf, ev);
466 if (a3)
467 quic_cc_state_trace(&trace_buf, cc);
468 }
469
470 if (mask & QUIC_EV_CONN_PKTLOSS) {
471 const struct quic_pktns *pktns = a2;
472 const struct list *lost_pkts = a3;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100473
474 if (pktns) {
475 chunk_appendf(&trace_buf, " pktns=%s",
476 pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
477 pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H");
478 if (pktns->tx.loss_time)
479 chunk_appendf(&trace_buf, " loss_time=%dms",
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100480 TICKS_TO_MS(tick_remain(now_ms, pktns->tx.loss_time)));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100481 }
482 if (lost_pkts && !LIST_ISEMPTY(lost_pkts)) {
483 struct quic_tx_packet *pkt;
484
485 chunk_appendf(&trace_buf, " lost_pkts:");
486 list_for_each_entry(pkt, lost_pkts, list)
487 chunk_appendf(&trace_buf, " %lu", (unsigned long)pkt->pn_node.key);
488 }
489 }
490
491 if (mask & (QUIC_EV_CONN_STIMER|QUIC_EV_CONN_PTIMER|QUIC_EV_CONN_SPTO)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100492 const struct quic_pktns *pktns = a2;
493 const int *duration = a3;
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100494 const uint64_t *ifae_pkts = a4;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100495
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +0100496 if (ifae_pkts)
497 chunk_appendf(&trace_buf, " ifae_pkts=%llu",
498 (unsigned long long)*ifae_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100499 if (pktns) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100500 chunk_appendf(&trace_buf, " pktns=%s pp=%d",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100501 pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100502 pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H",
503 pktns->tx.pto_probe);
Frédéric Lécaille22cfd832021-12-27 17:42:51 +0100504 if (mask & (QUIC_EV_CONN_STIMER|QUIC_EV_CONN_SPTO)) {
505 if (pktns->tx.in_flight)
506 chunk_appendf(&trace_buf, " if=%llu", (ull)pktns->tx.in_flight);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100507 if (pktns->tx.loss_time)
508 chunk_appendf(&trace_buf, " loss_time=%dms",
509 TICKS_TO_MS(pktns->tx.loss_time - now_ms));
510 }
511 if (mask & QUIC_EV_CONN_SPTO) {
512 if (pktns->tx.time_of_last_eliciting)
513 chunk_appendf(&trace_buf, " tole=%dms",
514 TICKS_TO_MS(pktns->tx.time_of_last_eliciting - now_ms));
515 if (duration)
Frédéric Lécaillec5e72b92020-12-02 16:11:40 +0100516 chunk_appendf(&trace_buf, " dur=%dms", TICKS_TO_MS(*duration));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100517 }
518 }
519
520 if (!(mask & QUIC_EV_CONN_SPTO) && qc->timer_task) {
521 chunk_appendf(&trace_buf,
522 " expire=%dms", TICKS_TO_MS(qc->timer - now_ms));
523 }
524 }
525
526 if (mask & QUIC_EV_CONN_SPPKTS) {
527 const struct quic_tx_packet *pkt = a2;
528
Frédéric Lécaille04ffb662020-12-08 15:58:39 +0100529 chunk_appendf(&trace_buf, " cwnd=%llu ppif=%llu pif=%llu",
530 (unsigned long long)qc->path->cwnd,
531 (unsigned long long)qc->path->prep_in_flight,
532 (unsigned long long)qc->path->in_flight);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100533 if (pkt) {
Frédéric Lécaille0371cd52021-12-13 12:30:54 +0100534 chunk_appendf(&trace_buf, " pn=%lu(%s) iflen=%llu",
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100535 (unsigned long)pkt->pn_node.key,
536 pkt->pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL] ? "I" :
537 pkt->pktns == &qc->pktns[QUIC_TLS_PKTNS_01RTT] ? "01RTT": "H",
Frédéric Lécaille0371cd52021-12-13 12:30:54 +0100538 (unsigned long long)pkt->in_flight_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100539 }
540 }
Frédéric Lécaille47c433f2020-12-10 17:03:11 +0100541
542 if (mask & QUIC_EV_CONN_SSLALERT) {
543 const uint8_t *alert = a2;
544 const enum ssl_encryption_level_t *level = a3;
545
546 if (alert)
547 chunk_appendf(&trace_buf, " alert=0x%02x", *alert);
548 if (level)
549 chunk_appendf(&trace_buf, " el=%c",
550 quic_enc_level_char(ssl_to_quic_enc_level(*level)));
551 }
Frédéric Lécailleea604992020-12-24 13:01:37 +0100552
553 if (mask & QUIC_EV_CONN_BCFRMS) {
554 const size_t *sz1 = a2;
555 const size_t *sz2 = a3;
556 const size_t *sz3 = a4;
557
558 if (sz1)
559 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz1);
560 if (sz2)
561 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz2);
562 if (sz3)
563 chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz3);
564 }
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +0100565
566 if (mask & QUIC_EV_CONN_PSTRM) {
567 const struct quic_frame *frm = a2;
Frédéric Lécaille577fe482021-01-11 15:10:06 +0100568
Frédéric Lécailled8b84432021-12-10 15:18:36 +0100569 if (frm)
Frédéric Lécaille1ede8232021-12-23 14:11:25 +0100570 chunk_frm_appendf(&trace_buf, frm);
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +0100571 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100572 }
573 if (mask & QUIC_EV_CONN_LPKT) {
574 const struct quic_rx_packet *pkt = a2;
Frédéric Lécaille865b0782021-09-23 07:33:20 +0200575 const uint64_t *len = a3;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100576
Frédéric Lécaille8678eb02021-12-16 18:03:52 +0100577 if (pkt) {
578 chunk_appendf(&trace_buf, " pkt@%p type=0x%02x %s",
579 pkt, pkt->type, qc_pkt_long(pkt) ? "long" : "short");
580 if (pkt->pn_node.key != (uint64_t)-1)
581 chunk_appendf(&trace_buf, " pn=%llu", pkt->pn_node.key);
582 }
583
Frédéric Lécaille865b0782021-09-23 07:33:20 +0200584 if (len)
585 chunk_appendf(&trace_buf, " len=%llu", (ull)*len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100586 }
587
588}
589
590/* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */
Amaury Denoyellee81fed92021-12-22 11:06:34 +0100591static inline int quic_peer_validated_addr(struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100592{
Frédéric Lécaille67f47d02021-08-19 15:19:09 +0200593 struct quic_pktns *hdshk_pktns, *app_pktns;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100594
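        /* Note: for a connection we initiated as a client (i.e. <qc> is not attached
         * to a listener), the peer is the server and its address is presumably
         * considered as validated from the start (address validation only applies
         * to servers, see RFC 9000, section 8).
         */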
Frédéric Lécaille1aa57d32022-01-12 09:46:02 +0100595 if (!qc_is_listener(qc))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100596 return 1;
597
Frédéric Lécaille67f47d02021-08-19 15:19:09 +0200598 hdshk_pktns = qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns;
599 app_pktns = qc->els[QUIC_TLS_ENC_LEVEL_APP].pktns;
600 if ((HA_ATOMIC_LOAD(&hdshk_pktns->flags) & QUIC_FL_PKTNS_ACK_RECEIVED) ||
601 (HA_ATOMIC_LOAD(&app_pktns->flags) & QUIC_FL_PKTNS_ACK_RECEIVED) ||
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +0200602 HA_ATOMIC_LOAD(&qc->state) >= QUIC_HS_ST_COMPLETE)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100603 return 1;
604
605 return 0;
606}
607
608/* Set the timer attached to the QUIC connection with <ctx> as I/O handler and used for
609 * both loss detection and PTO and schedule the task assiated to this timer if needed.
610 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +0100611static inline void qc_set_timer(struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100612{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100613 struct quic_pktns *pktns;
614 unsigned int pto;
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +0200615 int handshake_complete;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100616
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100617 TRACE_ENTER(QUIC_EV_CONN_STIMER, qc,
Amaury Denoyellee81fed92021-12-22 11:06:34 +0100618 NULL, NULL, &qc->path->ifae_pkts);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100619
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100620 pktns = quic_loss_pktns(qc);
621 if (tick_isset(pktns->tx.loss_time)) {
622 qc->timer = pktns->tx.loss_time;
623 goto out;
624 }
625
Frédéric Lécailleca98a7f2021-11-10 17:30:15 +0100626 /* anti-amplification: the timer must be
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100627 * cancelled for a server which reached the anti-amplification limit.
628 */
Frédéric Lécaille078634d2022-01-04 16:59:42 +0100629 if (!quic_peer_validated_addr(qc) &&
630 (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100631 TRACE_PROTO("anti-amplification reached", QUIC_EV_CONN_STIMER, qc);
Frédéric Lécailleca98a7f2021-11-10 17:30:15 +0100632 qc->timer = TICK_ETERNITY;
633 goto out;
634 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100635
Amaury Denoyellee81fed92021-12-22 11:06:34 +0100636 if (!qc->path->ifae_pkts && quic_peer_validated_addr(qc)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100637 TRACE_PROTO("timer cancellation", QUIC_EV_CONN_STIMER, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100638 /* Timer cancellation. */
639 qc->timer = TICK_ETERNITY;
640 goto out;
641 }
642
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +0200643 handshake_complete = HA_ATOMIC_LOAD(&qc->state) >= QUIC_HS_ST_COMPLETE;
644 pktns = quic_pto_pktns(qc, handshake_complete, &pto);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100645 if (tick_isset(pto))
646 qc->timer = pto;
647 out:
Amaury Denoyelle0a29e132021-12-23 15:06:56 +0100648 if (qc->timer_task && qc->timer != TICK_ETERNITY)
Amaury Denoyelle336f6fd2021-10-05 14:42:25 +0200649 task_schedule(qc->timer_task, qc->timer);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100650 TRACE_LEAVE(QUIC_EV_CONN_STIMER, qc, pktns);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100651}
652
Frédéric Lécaillea7973a62021-11-30 11:10:36 +0100653/* Derive the new keys and IVs required by the Key Update feature for <qc> QUIC
654 * connection.
655 * Return 1 if succeeded, 0 if not.
656 */
657static int quic_tls_key_update(struct quic_conn *qc)
658{
659 struct quic_tls_ctx *tls_ctx = &qc->els[QUIC_TLS_ENC_LEVEL_APP].tls_ctx;
660 struct quic_tls_secrets *rx, *tx;
661 struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
662 struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
663
664 tls_ctx = &qc->els[QUIC_TLS_ENC_LEVEL_APP].tls_ctx;
665 rx = &tls_ctx->rx;
666 tx = &tls_ctx->tx;
667 nxt_rx = &qc->ku.nxt_rx;
668 nxt_tx = &qc->ku.nxt_tx;
669
670 /* Prepare new RX secrets */
671 if (!quic_tls_sec_update(rx->md, nxt_rx->secret, nxt_rx->secretlen,
672 rx->secret, rx->secretlen)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100673 TRACE_DEVEL("New RX secret update failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillea7973a62021-11-30 11:10:36 +0100674 return 0;
675 }
676
677 if (!quic_tls_derive_keys(rx->aead, NULL, rx->md,
678 nxt_rx->key, nxt_rx->keylen,
679 nxt_rx->iv, nxt_rx->ivlen, NULL, 0,
680 nxt_rx->secret, nxt_rx->secretlen)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100681 TRACE_DEVEL("New RX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillea7973a62021-11-30 11:10:36 +0100682 return 0;
683 }
684
685 /* Prepare new TX secrets */
686 if (!quic_tls_sec_update(tx->md, nxt_tx->secret, nxt_tx->secretlen,
687 tx->secret, tx->secretlen)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100688 TRACE_DEVEL("New TX secret update failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillea7973a62021-11-30 11:10:36 +0100689 return 0;
690 }
691
692 if (!quic_tls_derive_keys(tx->aead, NULL, tx->md,
693 nxt_tx->key, nxt_tx->keylen,
694 nxt_tx->iv, nxt_tx->ivlen, NULL, 0,
695 nxt_tx->secret, nxt_tx->secretlen)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +0100696 TRACE_DEVEL("New TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillea7973a62021-11-30 11:10:36 +0100697 return 0;
698 }
699
700 return 1;
701}
702
703/* Rotate the Key Update information for <qc> QUIC connection.
704 * Must be used after having updated them.
705 * Always succeeds.
706 */
707static void quic_tls_rotate_keys(struct quic_conn *qc)
708{
709 struct quic_tls_ctx *tls_ctx = &qc->els[QUIC_TLS_ENC_LEVEL_APP].tls_ctx;
710 unsigned char *curr_secret, *curr_iv, *curr_key;
711
712 /* Rotate the RX secrets */
713 curr_secret = tls_ctx->rx.secret;
714 curr_iv = tls_ctx->rx.iv;
715 curr_key = tls_ctx->rx.key;
716
717 tls_ctx->rx.secret = qc->ku.nxt_rx.secret;
718 tls_ctx->rx.iv = qc->ku.nxt_rx.iv;
719 tls_ctx->rx.key = qc->ku.nxt_rx.key;
720
721 qc->ku.nxt_rx.secret = qc->ku.prv_rx.secret;
722 qc->ku.nxt_rx.iv = qc->ku.prv_rx.iv;
723 qc->ku.nxt_rx.key = qc->ku.prv_rx.key;
724
725 qc->ku.prv_rx.secret = curr_secret;
726 qc->ku.prv_rx.iv = curr_iv;
727 qc->ku.prv_rx.key = curr_key;
728 qc->ku.prv_rx.pn = tls_ctx->rx.pn;
729
730 /* Update the TX secrets */
731 curr_secret = tls_ctx->tx.secret;
732 curr_iv = tls_ctx->tx.iv;
733 curr_key = tls_ctx->tx.key;
734
735 tls_ctx->tx.secret = qc->ku.nxt_tx.secret;
736 tls_ctx->tx.iv = qc->ku.nxt_tx.iv;
737 tls_ctx->tx.key = qc->ku.nxt_tx.key;
738
739 qc->ku.nxt_tx.secret = curr_secret;
740 qc->ku.nxt_tx.iv = curr_iv;
741 qc->ku.nxt_tx.key = curr_key;
742}
743
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100744#ifndef OPENSSL_IS_BORINGSSL
745int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t level,
746 const uint8_t *read_secret,
747 const uint8_t *write_secret, size_t secret_len)
748{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100749 struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
750 struct quic_tls_ctx *tls_ctx = &qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100751 const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100752 struct quic_tls_secrets *rx, *tx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100753
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100754 TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100755 BUG_ON(secret_len > QUIC_TLS_SECRET_LEN);
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100756 if (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
757 TRACE_PROTO("CC required", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100758 goto out;
759 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100760
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100761 if (!quic_tls_ctx_keys_alloc(tls_ctx)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100762 TRACE_DEVEL("keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100763 goto err;
764 }
765
766 rx = &tls_ctx->rx;
767 tx = &tls_ctx->tx;
768
769 rx->aead = tx->aead = tls_aead(cipher);
770 rx->md = tx->md = tls_md(cipher);
771 rx->hp = tx->hp = tls_hp(cipher);
772
773 if (!quic_tls_derive_keys(rx->aead, rx->hp, rx->md, rx->key, rx->keylen,
774 rx->iv, rx->ivlen, rx->hp_key, sizeof rx->hp_key,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100775 read_secret, secret_len)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100776 TRACE_DEVEL("RX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100777 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100778 }
779
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100780 rx->flags |= QUIC_FL_TLS_SECRETS_SET;
Frédéric Lécaille4015cbb2021-12-14 19:29:34 +0100781
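        /* The TLS stack may provide only a read secret at this point (presumably the
         * case for the early-data encryption level); skip the TX key derivation when
         * no write secret is available.
         */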
782 if (!write_secret)
783 goto tp;
784
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100785 if (!quic_tls_derive_keys(tx->aead, tx->hp, tx->md, tx->key, tx->keylen,
786 tx->iv, tx->ivlen, tx->hp_key, sizeof tx->hp_key,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100787 write_secret, secret_len)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100788 TRACE_DEVEL("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100789 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100790 }
791
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100792 tx->flags |= QUIC_FL_TLS_SECRETS_SET;
Frédéric Lécaille4015cbb2021-12-14 19:29:34 +0100793 tp:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100794 if (!qc_is_listener(qc) && level == ssl_encryption_application) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100795 const unsigned char *buf;
796 size_t buflen;
797
798 SSL_get_peer_quic_transport_params(ssl, &buf, &buflen);
799 if (!buflen)
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100800 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100801
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100802 if (!quic_transport_params_store(qc, 1, buf, buf + buflen))
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100803 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100804 }
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +0100805
806 if (level == ssl_encryption_application) {
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +0100807 struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
808 struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
809 struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
810
811 if (!(rx->secret = pool_alloc(pool_head_quic_tls_secret)) ||
812 !(tx->secret = pool_alloc(pool_head_quic_tls_secret))) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100813 TRACE_DEVEL("Could not allocate secret keys", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +0100814 goto err;
815 }
816
817 memcpy(rx->secret, read_secret, secret_len);
818 rx->secretlen = secret_len;
819 memcpy(tx->secret, write_secret, secret_len);
820 tx->secretlen = secret_len;
821 /* Initialize all the secret keys lengths */
822 prv_rx->secretlen = nxt_rx->secretlen = nxt_tx->secretlen = secret_len;
823 /* Prepare the next key update */
824 if (!quic_tls_key_update(qc))
825 goto err;
826 }
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100827 out:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100828 TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc, &level);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100829 return 1;
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100830
831 err:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100832 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_RWSEC, qc);
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100833 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100834}
835#else
836/* ->set_read_secret callback to derive the RX secrets at <level> encryption
837 * level.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +0500838 * Returns 1 if succeeded, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100839 */
840int ha_set_rsec(SSL *ssl, enum ssl_encryption_level_t level,
841 const SSL_CIPHER *cipher,
842 const uint8_t *secret, size_t secret_len)
843{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100844 struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100845 struct quic_tls_ctx *tls_ctx =
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100846 &qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100847
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100848 TRACE_ENTER(QUIC_EV_CONN_RSEC, qc);
849 if (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
850 TRACE_PROTO("CC required", QUIC_EV_CONN_RSEC, qc);
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100851 goto out;
852 }
853
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100854 tls_ctx->rx.aead = tls_aead(cipher);
855 tls_ctx->rx.md = tls_md(cipher);
856 tls_ctx->rx.hp = tls_hp(cipher);
857
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100858 if (!(ctx->rx.key = pool_alloc(pool_head_quic_tls_key)))
859 goto err;
860
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100861 if (!quic_tls_derive_keys(tls_ctx->rx.aead, tls_ctx->rx.hp, tls_ctx->rx.md,
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100862 tls_ctx->rx.key, tls_ctx->rx.keylen,
863 tls_ctx->rx.iv, tls_ctx->rx.ivlen,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100864 tls_ctx->rx.hp_key, sizeof tls_ctx->rx.hp_key,
865 secret, secret_len)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100866 TRACE_DEVEL("RX key derivation failed", QUIC_EV_CONN_RSEC, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100867 goto err;
868 }
869
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100870 if (!qc_is_listener(qc) && level == ssl_encryption_application) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100871 const unsigned char *buf;
872 size_t buflen;
873
874 SSL_get_peer_quic_transport_params(ssl, &buf, &buflen);
875 if (!buflen)
876 goto err;
877
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100878 if (!quic_transport_params_store(qc, 1, buf, buf + buflen))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100879 goto err;
880 }
881
882 tls_ctx->rx.flags |= QUIC_FL_TLS_SECRETS_SET;
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100883 out:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100884 TRACE_LEAVE(QUIC_EV_CONN_RSEC, qc, &level, secret, &secret_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100885
886 return 1;
887
888 err:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100889 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_RSEC, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100890 return 0;
891}
892
893/* ->set_write_secret callback to derive the TX secrets at <level>
894 * encryption level.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +0500895 * Returns 1 if succeeded, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100896 */
897int ha_set_wsec(SSL *ssl, enum ssl_encryption_level_t level,
898 const SSL_CIPHER *cipher,
899 const uint8_t *secret, size_t secret_len)
900{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100901 struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
902 struct quic_tls_ctx *tls_ctx = &qc->els[ssl_to_quic_enc_level(level)].tls_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100903
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100904 TRACE_ENTER(QUIC_EV_CONN_WSEC, qc);
905 if (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
906 TRACE_PROTO("CC required", QUIC_EV_CONN_WSEC, qc);
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100907 goto out;
908 }
909
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100910 if (!(ctx->tx.key = pool_alloc(pool_head_quic_tls_key)))
911 goto err;
912
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100913 tls_ctx->tx.aead = tls_aead(cipher);
914 tls_ctx->tx.md = tls_md(cipher);
915 tls_ctx->tx.hp = tls_hp(cipher);
916
917 if (!quic_tls_derive_keys(tls_ctx->tx.aead, tls_ctx->tx.hp, tls_ctx->tx.md,
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +0100918 tls_ctx->tx.key, tls_ctx->tx.keylen,
919 tls_ctx->tx.iv, tls_ctx->tx.ivlen,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100920 tls_ctx->tx.hp_key, sizeof tls_ctx->tx.hp_key,
921 secret, secret_len)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100922 TRACE_DEVEL("TX key derivation failed", QUIC_EV_CONN_WSEC, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100923 goto err;
924 }
925
926 tls_ctx->tx.flags |= QUIC_FL_TLS_SECRETS_SET;
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100927 TRACE_LEAVE(QUIC_EV_CONN_WSEC, qc, &level, secret, &secret_len);
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +0100928 out:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100929 return 1;
930
931 err:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +0100932 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_WSEC, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100933 return 0;
934}
935#endif
936
937/* This function copies the CRYPTO data provided by the TLS stack found at <data>
938 * with <len> as size in CRYPTO buffers dedicated to store the information about
939 * outgoing CRYPTO frames so that to be able to replay the CRYPTO data streams.
940 * It fails only if it could not managed to allocate enough CRYPTO buffers to
941 * store all the data.
942 * Note that CRYPTO data may exist at any encryption level except at 0-RTT.
943 */
944static int quic_crypto_data_cpy(struct quic_enc_level *qel,
945 const unsigned char *data, size_t len)
946{
947 struct quic_crypto_buf **qcb;
 948 /* The remaining bytes to store in CRYPTO buffers. */
949 size_t cf_offset, cf_len, *nb_buf;
950 unsigned char *pos;
951
952 nb_buf = &qel->tx.crypto.nb_buf;
953 qcb = &qel->tx.crypto.bufs[*nb_buf - 1];
954 cf_offset = (*nb_buf - 1) * QUIC_CRYPTO_BUF_SZ + (*qcb)->sz;
955 cf_len = len;
956
957 while (len) {
958 size_t to_copy, room;
959
960 pos = (*qcb)->data + (*qcb)->sz;
961 room = QUIC_CRYPTO_BUF_SZ - (*qcb)->sz;
962 to_copy = len > room ? room : len;
963 if (to_copy) {
964 memcpy(pos, data, to_copy);
965 /* Increment the total size of this CRYPTO buffers by <to_copy>. */
966 qel->tx.crypto.sz += to_copy;
967 (*qcb)->sz += to_copy;
968 pos += to_copy;
969 len -= to_copy;
970 data += to_copy;
971 }
972 else {
973 struct quic_crypto_buf **tmp;
974
975 tmp = realloc(qel->tx.crypto.bufs,
976 (*nb_buf + 1) * sizeof *qel->tx.crypto.bufs);
977 if (tmp) {
978 qel->tx.crypto.bufs = tmp;
979 qcb = &qel->tx.crypto.bufs[*nb_buf];
980 *qcb = pool_alloc(pool_head_quic_crypto_buf);
981 if (!*qcb)
982 return 0;
983
984 (*qcb)->sz = 0;
985 ++*nb_buf;
986 }
987 else {
988 break;
989 }
990 }
991 }
992
993 /* Allocate a TX CRYPTO frame only if all the CRYPTO data
994 * have been buffered.
995 */
996 if (!len) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +0200997 struct quic_frame *frm;
Frédéric Lécaille81cd3c82022-01-10 18:31:07 +0100998 struct quic_frame *found = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100999
Frédéric Lécaille81cd3c82022-01-10 18:31:07 +01001000 /* There is at most one CRYPTO frame in this packet number
1001 * space. Let's look for it.
1002 */
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01001003 list_for_each_entry(frm, &qel->pktns->tx.frms, list) {
Frédéric Lécaille81cd3c82022-01-10 18:31:07 +01001004 if (frm->type != QUIC_FT_CRYPTO)
1005 continue;
1006
1007 /* Found */
1008 found = frm;
1009 break;
1010 }
1011
1012 if (found) {
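                /* The data just buffered is contiguous with the data already carried
                 * by this CRYPTO frame (<cf_offset> should equal
                 * found->crypto.offset + found->crypto.len), so the frame is
                 * presumably simply extended rather than a new one being allocated.
                 */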
1013 found->crypto.len += cf_len;
1014 }
1015 else {
Frédéric Lécailled4ecf942022-01-04 23:15:40 +01001016 frm = pool_alloc(pool_head_quic_frame);
1017 if (!frm)
1018 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001019
Frédéric Lécailled4ecf942022-01-04 23:15:40 +01001020 frm->type = QUIC_FT_CRYPTO;
1021 frm->crypto.offset = cf_offset;
1022 frm->crypto.len = cf_len;
1023 frm->crypto.qel = qel;
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01001024 LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
Frédéric Lécailled4ecf942022-01-04 23:15:40 +01001025 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001026 }
1027
1028 return len == 0;
1029}
1030
1031
Frédéric Lécaille067a82b2021-11-19 17:02:20 +01001032/* Set <alert> TLS alert as QUIC CRYPTO_ERROR error */
1033void quic_set_tls_alert(struct quic_conn *qc, int alert)
1034{
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01001035 HA_ATOMIC_STORE(&qc->err_code, QC_ERR_CRYPTO_ERROR | alert);
Frédéric Lécaille067a82b2021-11-19 17:02:20 +01001036 HA_ATOMIC_OR(&qc->flags, QUIC_FL_CONN_IMMEDIATE_CLOSE);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001037 TRACE_PROTO("Alert set", QUIC_EV_CONN_SSLDATA, qc);
Frédéric Lécaille067a82b2021-11-19 17:02:20 +01001038}
1039
Frédéric Lécailleb0bd62d2021-12-14 19:34:08 +01001040/* Set the application for <qc> QUIC connection.
1041 * Return 1 if succeeded, 0 if not.
1042 */
1043int quic_set_app_ops(struct quic_conn *qc, const unsigned char *alpn, size_t alpn_len)
1044{
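        /* Only the leading bytes of the ALPN token are compared below, so "h3"
         * presumably also matches draft tokens such as "h3-29".
         */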
Amaury Denoyelle4b40f192022-01-19 11:29:25 +01001045 if (alpn_len >= 2 && memcmp(alpn, "h3", 2) == 0)
1046 qc->app_ops = &h3_ops;
1047 else if (alpn_len >= 10 && memcmp(alpn, "hq-interop", 10) == 0)
1048 qc->app_ops = &hq_interop_ops;
Frédéric Lécailleb0bd62d2021-12-14 19:34:08 +01001049 else
1050 return 0;
1051
Frédéric Lécailleb0bd62d2021-12-14 19:34:08 +01001052 return 1;
1053}
1054
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001055/* ->add_handshake_data QUIC TLS callback used by the QUIC TLS stack when it
1056 * wants to provide the QUIC layer with CRYPTO data.
1057 * Returns 1 if succeeded, 0 if not.
1058 */
1059int ha_quic_add_handshake_data(SSL *ssl, enum ssl_encryption_level_t level,
1060 const uint8_t *data, size_t len)
1061{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001062 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001063 enum quic_tls_enc_level tel;
1064 struct quic_enc_level *qel;
1065
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001066 qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
1067 TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
1068 if (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
1069 TRACE_PROTO("CC required", QUIC_EV_CONN_ADDDATA, qc);
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01001070 goto out;
1071 }
1072
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001073 tel = ssl_to_quic_enc_level(level);
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001074 qel = &qc->els[tel];
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001075
1076 if (tel == -1) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001077 TRACE_PROTO("Wrong encryption level", QUIC_EV_CONN_ADDDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001078 goto err;
1079 }
1080
1081 if (!quic_crypto_data_cpy(qel, data, len)) {
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001082 TRACE_PROTO("Could not bufferize", QUIC_EV_CONN_ADDDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001083 goto err;
1084 }
1085
1086 TRACE_PROTO("CRYPTO data buffered", QUIC_EV_CONN_ADDDATA,
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001087 qc, &level, &len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001088
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01001089 out:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001090 TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001091 return 1;
1092
1093 err:
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001094 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ADDDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001095 return 0;
1096}
1097
1098int ha_quic_flush_flight(SSL *ssl)
1099{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001100 struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001101
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001102 TRACE_ENTER(QUIC_EV_CONN_FFLIGHT, qc);
1103 TRACE_LEAVE(QUIC_EV_CONN_FFLIGHT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001104
1105 return 1;
1106}
1107
1108int ha_quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert)
1109{
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001110 struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001111
Amaury Denoyelle9320dd52022-01-19 10:03:30 +01001112 TRACE_DEVEL("SSL alert", QUIC_EV_CONN_SSLALERT, qc, &alert, &level);
1113 quic_set_tls_alert(qc, alert);
1114 HA_ATOMIC_STORE(&qc->flags, QUIC_FL_CONN_IMMEDIATE_CLOSE);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001115 return 1;
1116}
1117
1118/* QUIC TLS methods */
1119static SSL_QUIC_METHOD ha_quic_method = {
1120#ifdef OPENSSL_IS_BORINGSSL
1121 .set_read_secret = ha_set_rsec,
1122 .set_write_secret = ha_set_wsec,
1123#else
1124 .set_encryption_secrets = ha_quic_set_encryption_secrets,
1125#endif
1126 .add_handshake_data = ha_quic_add_handshake_data,
1127 .flush_flight = ha_quic_flush_flight,
1128 .send_alert = ha_quic_send_alert,
1129};
1130
1131/* Initialize the TLS context of a listener with <bind_conf> as configuration.
1132 * Returns an error count.
1133 */
1134int ssl_quic_initial_ctx(struct bind_conf *bind_conf)
1135{
1136 struct proxy *curproxy = bind_conf->frontend;
1137 struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
1138 int cfgerr = 0;
1139
1140#if 0
1141 /* XXX Did not manage to use this. */
1142 const char *ciphers =
1143 "TLS_AES_128_GCM_SHA256:"
1144 "TLS_AES_256_GCM_SHA384:"
1145 "TLS_CHACHA20_POLY1305_SHA256:"
1146 "TLS_AES_128_CCM_SHA256";
1147#endif
Frédéric Lécaille4b1fddc2021-07-01 17:09:05 +02001148 const char *groups = "X25519:P-256:P-384:P-521";
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001149 long options =
1150 (SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) |
1151 SSL_OP_SINGLE_ECDH_USE |
1152 SSL_OP_CIPHER_SERVER_PREFERENCE;
1153 SSL_CTX *ctx;
1154
1155 ctx = SSL_CTX_new(TLS_server_method());
1156 bind_conf->initial_ctx = ctx;
1157
1158 SSL_CTX_set_options(ctx, options);
1159#if 0
1160 if (SSL_CTX_set_cipher_list(ctx, ciphers) != 1) {
1161 ha_alert("Proxy '%s': unable to set TLS 1.3 cipher list to '%s' "
1162 "for bind '%s' at [%s:%d].\n",
1163 curproxy->id, ciphers,
1164 bind_conf->arg, bind_conf->file, bind_conf->line);
1165 cfgerr++;
1166 }
1167#endif
1168
1169 if (SSL_CTX_set1_curves_list(ctx, groups) != 1) {
1170 ha_alert("Proxy '%s': unable to set TLS 1.3 curves list to '%s' "
1171 "for bind '%s' at [%s:%d].\n",
1172 curproxy->id, groups,
1173 bind_conf->arg, bind_conf->file, bind_conf->line);
1174 cfgerr++;
1175 }
1176
1177 SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
1178 SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
1179 SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
1180 SSL_CTX_set_default_verify_paths(ctx);
1181
1182#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
1183#ifdef OPENSSL_IS_BORINGSSL
1184 SSL_CTX_set_select_certificate_cb(ctx, ssl_sock_switchctx_cbk);
1185 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
1186#elif (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
1187 if (bind_conf->ssl_conf.early_data) {
1188 SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
Frédéric Lécaillead3c07a2021-12-14 19:23:43 +01001189 SSL_CTX_set_max_early_data(ctx, 0xffffffff);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001190 }
1191 SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
1192 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
1193#else
1194 SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
1195#endif
1196 SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
1197#endif
1198 SSL_CTX_set_quic_method(ctx, &ha_quic_method);
1199
1200 return cfgerr;
1201}
1202
1203/* Decode an expected packet number from <truncated_pn>, its truncated value,
 1204 * depending on <largest_pn>, the largest received packet number, and <pn_nbits>,
 1205 * the number of bits used to encode this packet number (its length in bytes * 8).
1206 * See https://quicwg.org/base-drafts/draft-ietf-quic-transport.html#packet-encoding
1207 */
1208static uint64_t decode_packet_number(uint64_t largest_pn,
1209 uint32_t truncated_pn, unsigned int pn_nbits)
1210{
1211 uint64_t expected_pn = largest_pn + 1;
1212 uint64_t pn_win = (uint64_t)1 << pn_nbits;
1213 uint64_t pn_hwin = pn_win / 2;
1214 uint64_t pn_mask = pn_win - 1;
1215 uint64_t candidate_pn;
1216
1217
1218 candidate_pn = (expected_pn & ~pn_mask) | truncated_pn;
1219 /* Note that <pn_win> > <pn_hwin>. */
1220 if (candidate_pn < QUIC_MAX_PACKET_NUM - pn_win &&
1221 candidate_pn + pn_hwin <= expected_pn)
1222 return candidate_pn + pn_win;
1223
1224 if (candidate_pn > expected_pn + pn_hwin && candidate_pn >= pn_win)
1225 return candidate_pn - pn_win;
1226
1227 return candidate_pn;
1228}
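
#if 0
/* Illustrative sketch, not part of the original source: decode_packet_number()
 * applied to the worked example of RFC 9000 Appendix A.3. With 0xa82f30ea as
 * the largest received packet number, a 16-bit truncated value of 0x9b32 is
 * expected to decode to 0xa82f9b32.
 */
static int decode_packet_number_example(void)
{
	return decode_packet_number(0xa82f30ea, 0x9b32, 16) == 0xa82f9b32;
}
#endif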
1229
1230/* Remove the header protection of <pkt> QUIC packet using <tls_ctx> as QUIC TLS
1231 * cryptographic context.
1232 * <largest_pn> is the largest received packet number and <pn> the address of
1233 * the packet number field for this packet with <byte0> address of its first byte.
1234 * <end> points to one byte past the end of this packet.
1235 * Returns 1 if succeeded, 0 if not.
1236 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001237static int qc_do_rm_hp(struct quic_conn *qc,
1238 struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001239 int64_t largest_pn, unsigned char *pn,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001240 unsigned char *byte0, const unsigned char *end)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001241{
1242 int ret, outlen, i, pnlen;
1243 uint64_t packet_number;
1244 uint32_t truncated_pn = 0;
1245 unsigned char mask[5] = {0};
1246 unsigned char *sample;
1247 EVP_CIPHER_CTX *cctx;
1248 unsigned char *hp_key;
1249
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001250 /* Check there is enough data in this packet. */
1251 if (end - pn < QUIC_PACKET_PN_MAXLEN + sizeof mask) {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001252 TRACE_DEVEL("too short packet", QUIC_EV_CONN_RMHP, qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001253 return 0;
1254 }
1255
1256 cctx = EVP_CIPHER_CTX_new();
1257 if (!cctx) {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001258 TRACE_DEVEL("memory allocation failed", QUIC_EV_CONN_RMHP, qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001259 return 0;
1260 }
1261
1262 ret = 0;
1263 sample = pn + QUIC_PACKET_PN_MAXLEN;
1264
1265 hp_key = tls_ctx->rx.hp_key;
1266 if (!EVP_DecryptInit_ex(cctx, tls_ctx->rx.hp, NULL, hp_key, sample) ||
1267 !EVP_DecryptUpdate(cctx, mask, &outlen, mask, sizeof mask) ||
1268 !EVP_DecryptFinal_ex(cctx, mask, &outlen)) {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001269 TRACE_DEVEL("decryption failed", QUIC_EV_CONN_RMHP, qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001270 goto out;
1271 }
1272
1273 *byte0 ^= mask[0] & (*byte0 & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
1274 pnlen = (*byte0 & QUIC_PACKET_PNL_BITMASK) + 1;
1275 for (i = 0; i < pnlen; i++) {
1276 pn[i] ^= mask[i + 1];
1277 truncated_pn = (truncated_pn << 8) | pn[i];
1278 }
1279
1280 packet_number = decode_packet_number(largest_pn, truncated_pn, pnlen * 8);
1281 /* Store remaining information for this unprotected header */
1282 pkt->pn = packet_number;
1283 pkt->pnl = pnlen;
1284
1285 ret = 1;
1286
1287 out:
1288 EVP_CIPHER_CTX_free(cctx);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001289
1290 return ret;
1291}
1292
1293/* Encrypt the payload of a QUIC packet with <pn> as packet number, found at <payload>
 1294 * address with <payload_len> as payload length, <aad> as the address of
 1295 * the AAD and <aad_len> as the AAD length, depending on the <tls_ctx> QUIC TLS
1296 * context.
1297 * Returns 1 if succeeded, 0 if not.
1298 */
1299static int quic_packet_encrypt(unsigned char *payload, size_t payload_len,
1300 unsigned char *aad, size_t aad_len, uint64_t pn,
Frédéric Lécaille5f7f1182022-01-10 11:00:16 +01001301 struct quic_tls_ctx *tls_ctx, struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001302{
1303 unsigned char iv[12];
1304 unsigned char *tx_iv = tls_ctx->tx.iv;
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +01001305 size_t tx_iv_sz = tls_ctx->tx.ivlen;
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001306 struct enc_debug_info edi;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001307
1308 if (!quic_aead_iv_build(iv, sizeof iv, tx_iv, tx_iv_sz, pn)) {
Frédéric Lécaille5f7f1182022-01-10 11:00:16 +01001309 TRACE_DEVEL("AEAD IV building for encryption failed", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001310 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001311 }
1312
1313 if (!quic_tls_encrypt(payload, payload_len, aad, aad_len,
1314 tls_ctx->tx.aead, tls_ctx->tx.key, iv)) {
Frédéric Lécaille5f7f1182022-01-10 11:00:16 +01001315 TRACE_DEVEL("QUIC packet encryption failed", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001316 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001317 }
1318
1319 return 1;
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001320
1321 err:
1322 enc_debug_info_init(&edi, payload, payload_len, aad, aad_len, pn);
Frédéric Lécaille5f7f1182022-01-10 11:00:16 +01001323 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ENCPKT, qc, &edi);
Frédéric Lécaillef63921f2020-12-18 09:48:20 +01001324 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001325}
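
#if 0
/* Illustrative sketch, not part of the original source: how a per-packet AEAD
 * nonce is derived, as specified by RFC 9001 §5.3. quic_aead_iv_build() above
 * is assumed to implement the same derivation: the packet number is encoded
 * in network byte order, left-padded with zeroes to the IV length, then XORed
 * with the static IV.
 */
static void aead_nonce_example(unsigned char *nonce, const unsigned char *iv,
                               size_t ivlen, uint64_t pn)
{
	size_t i;

	for (i = 0; i < ivlen; i++)
		nonce[i] = iv[i];
	/* XOR the packet number into the last 8 bytes (big endian). */
	for (i = 0; i < 8 && i < ivlen; i++)
		nonce[ivlen - 1 - i] ^= (unsigned char)(pn >> (8 * i));
}
#endif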
1326
1327/* Decrypt <pkt> QUIC packet with <tls_ctx> as QUIC TLS cryptographic context.
1328 * Returns 1 if succeeded, 0 if not.
1329 */
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001330static int qc_pkt_decrypt(struct quic_rx_packet *pkt, struct quic_enc_level *qel)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001331{
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001332 int ret, kp_changed;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001333 unsigned char iv[12];
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001334 struct quic_tls_ctx *tls_ctx = &qel->tls_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001335 unsigned char *rx_iv = tls_ctx->rx.iv;
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001336 size_t rx_iv_sz = tls_ctx->rx.ivlen;
1337 unsigned char *rx_key = tls_ctx->rx.key;
1338
1339 kp_changed = 0;
1340 if (pkt->type == QUIC_PACKET_TYPE_SHORT) {
1341 /* The two tested bits are not at the same position,
 1342		 * which is why they are both inverted first.
1343 */
1344 if (!(*pkt->data & QUIC_PACKET_KEY_PHASE_BIT) ^ !(tls_ctx->flags & QUIC_FL_TLS_KP_BIT_SET)) {
1345 if (pkt->pn < tls_ctx->rx.pn) {
1346 /* The lowest packet number of a previous key phase
1347 * cannot be null if it really stores previous key phase
1348 * secrets.
1349 */
1350 if (!pkt->qc->ku.prv_rx.pn)
1351 return 0;
1352
1353 rx_iv = pkt->qc->ku.prv_rx.iv;
1354 rx_key = pkt->qc->ku.prv_rx.key;
1355 }
1356 else if (pkt->pn > qel->pktns->rx.largest_pn) {
1357 /* Next key phase */
1358 kp_changed = 1;
1359 rx_iv = pkt->qc->ku.nxt_rx.iv;
1360 rx_key = pkt->qc->ku.nxt_rx.key;
1361 }
1362 }
1363 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001364
1365 if (!quic_aead_iv_build(iv, sizeof iv, rx_iv, rx_iv_sz, pkt->pn))
1366 return 0;
1367
1368 ret = quic_tls_decrypt(pkt->data + pkt->aad_len, pkt->len - pkt->aad_len,
1369 pkt->data, pkt->aad_len,
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001370 tls_ctx->rx.aead, rx_key, iv);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001371 if (!ret)
1372 return 0;
1373
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01001374 /* Update the keys only if the packet decryption succeeded. */
1375 if (kp_changed) {
1376 quic_tls_rotate_keys(pkt->qc);
1377 /* Toggle the Key Phase bit */
1378 tls_ctx->flags ^= QUIC_FL_TLS_KP_BIT_SET;
1379 /* Store the lowest packet number received for the current key phase */
1380 tls_ctx->rx.pn = pkt->pn;
1381 /* Prepare the next key update */
1382 if (!quic_tls_key_update(pkt->qc))
1383 return 0;
1384 }
1385
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001386 /* Update the packet length (required to parse the frames). */
1387 pkt->len = pkt->aad_len + ret;
1388
1389 return 1;
1390}
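
#if 0
/* Illustrative sketch, not part of the original source: the key phase check
 * used by qc_pkt_decrypt() above. QUIC_PACKET_KEY_PHASE_BIT and
 * QUIC_FL_TLS_KP_BIT_SET are not at the same bit position, so both are first
 * normalized to booleans with '!'; a key update is suspected when the two
 * booleans differ.
 */
static inline int key_phase_changed_example(unsigned char byte0, unsigned int tls_flags)
{
	return !(byte0 & QUIC_PACKET_KEY_PHASE_BIT) != !(tls_flags & QUIC_FL_TLS_KP_BIT_SET);
}
#endif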
1391
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001392/* Remove the acknowledged frames from <qcs> stream.
 1393 * Returns 1 if at least one frame could be consumed, 0 if not.
1394 */
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001395static int qcs_try_to_consume(struct qcs *qcs)
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001396{
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001397 int ret;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001398 struct eb64_node *frm_node;
1399
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001400 ret = 0;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001401 frm_node = eb64_first(&qcs->tx.acked_frms);
1402 while (frm_node) {
1403 struct quic_stream *strm;
1404
1405 strm = eb64_entry(&frm_node->node, struct quic_stream, offset);
1406 if (strm->offset.key != qcs->tx.ack_offset)
1407 break;
1408
1409 b_del(strm->buf, strm->len);
1410 qcs->tx.ack_offset += strm->len;
1411 frm_node = eb64_next(frm_node);
1412 eb64_delete(&strm->offset);
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001413 ret = 1;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001414 }
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001415
1416 return ret;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001417}
1418
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001419/* Treat <frm> frame whose packet has just been acknowledged. */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001420static inline void qc_treat_acked_tx_frm(struct quic_conn *qc,
1421 struct quic_frame *frm)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001422{
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001423 int stream_acked;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001424
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001425 TRACE_PROTO("Removing frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001426 stream_acked = 0;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001427 switch (frm->type) {
1428 case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
1429 {
1430 struct qcs *qcs = frm->stream.qcs;
1431 struct quic_stream *strm = &frm->stream;
1432
1433 if (qcs->tx.ack_offset == strm->offset.key) {
1434 b_del(strm->buf, strm->len);
1435 qcs->tx.ack_offset += strm->len;
1436 LIST_DELETE(&frm->list);
1437 pool_free(pool_head_quic_frame, frm);
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001438 stream_acked = 1;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001439 }
1440 else {
1441 eb64_insert(&qcs->tx.acked_frms, &strm->offset);
1442 }
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001443 stream_acked |= qcs_try_to_consume(qcs);
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001444 }
1445 break;
1446 default:
1447 LIST_DELETE(&frm->list);
1448 pool_free(pool_head_quic_frame, frm);
1449 }
Frédéric Lécaille1c482c62021-09-20 16:59:51 +02001450
1451 if (stream_acked) {
1452 struct qcc *qcc = qc->qcc;
1453
1454 if (qcc->subs && qcc->subs->events & SUB_RETRY_SEND) {
1455 tasklet_wakeup(qcc->subs->tasklet);
1456 qcc->subs->events &= ~SUB_RETRY_SEND;
1457 if (!qcc->subs->events)
1458 qcc->subs = NULL;
1459 }
1460 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001461}
1462
1463/* Remove <largest> down to <smallest> node entries from <pkts> tree of TX packets,
 1464 * deallocating them along with their TX frames.
1465 * Returns the last node reached to be used for the next range.
1466 * May be NULL if <largest> node could not be found.
1467 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001468static inline struct eb64_node *qc_ackrng_pkts(struct quic_conn *qc,
1469 struct eb_root *pkts,
1470 unsigned int *pkt_flags,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001471 struct list *newly_acked_pkts,
1472 struct eb64_node *largest_node,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001473 uint64_t largest, uint64_t smallest)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001474{
1475 struct eb64_node *node;
1476 struct quic_tx_packet *pkt;
1477
1478 if (largest_node)
1479 node = largest_node;
1480 else {
1481 node = eb64_lookup(pkts, largest);
1482 while (!node && largest > smallest) {
1483 node = eb64_lookup(pkts, --largest);
1484 }
1485 }
1486
1487 while (node && node->key >= smallest) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02001488 struct quic_frame *frm, *frmbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001489
1490 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
1491 *pkt_flags |= pkt->flags;
Willy Tarreau2b718102021-04-21 07:32:39 +02001492 LIST_INSERT(newly_acked_pkts, &pkt->list);
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001493 TRACE_PROTO("Removing packet #", QUIC_EV_CONN_PRSAFRM, qc, NULL, &pkt->pn_node.key);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001494 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001495 qc_treat_acked_tx_frm(qc, frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001496 node = eb64_prev(node);
1497 eb64_delete(&pkt->pn_node);
1498 }
1499
1500 return node;
1501}
1502
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001503/* Remove all frames from <pkt_frm_list> and reinsert them into
 1504 * <pktns_frm_list> in the same order they were sent.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001505 */
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001506static inline void qc_requeue_nacked_pkt_tx_frms(struct quic_conn *qc,
1507 struct list *pkt_frm_list,
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01001508 struct list *pktns_frm_list)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001509{
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001510 struct quic_frame *frm, *frmbak;
1511 struct list tmp = LIST_HEAD_INIT(tmp);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001512
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001513 list_for_each_entry_safe(frm, frmbak, pkt_frm_list, list) {
1514 LIST_DELETE(&frm->list);
1515 TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01001516 LIST_APPEND(&tmp, &frm->list);
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001517 }
1518
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01001519 LIST_SPLICE(pktns_frm_list, &tmp);
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001520}
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001521
1522/* Free the TX packets of <pkts> list */
1523static inline void free_quic_tx_pkts(struct list *pkts)
1524{
1525 struct quic_tx_packet *pkt, *tmp;
1526
1527 list_for_each_entry_safe(pkt, tmp, pkts, list) {
Willy Tarreau2b718102021-04-21 07:32:39 +02001528 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001529 eb64_delete(&pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001530 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001531 }
1532}
1533
1534/* Send a packet loss event notification to the congestion controller
 1535 * attached to <qc> connection, with <lost_bytes> the number of lost bytes,
 1536 * <newest_time_sent> the send time of the newest lost packet and <period>
 1537 * the time elapsed between the oldest and newest lost packets, at <now_us> current time.
1538 * Always succeeds.
1539 */
1540static inline void qc_cc_loss_event(struct quic_conn *qc,
1541 unsigned int lost_bytes,
1542 unsigned int newest_time_sent,
1543 unsigned int period,
1544 unsigned int now_us)
1545{
1546 struct quic_cc_event ev = {
1547 .type = QUIC_CC_EVT_LOSS,
1548 .loss.now_ms = now_ms,
1549 .loss.max_ack_delay = qc->max_ack_delay,
1550 .loss.lost_bytes = lost_bytes,
1551 .loss.newest_time_sent = newest_time_sent,
1552 .loss.period = period,
1553 };
1554
1555 quic_cc_event(&qc->path->cc, &ev);
1556}
1557
1558/* Send a packet ack event notification for each newly acked packet of
1559 * <newly_acked_pkts> list and free them.
1560 * Always succeeds.
1561 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001562static inline void qc_treat_newly_acked_pkts(struct quic_conn *qc,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001563 struct list *newly_acked_pkts)
1564{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001565 struct quic_tx_packet *pkt, *tmp;
1566 struct quic_cc_event ev = { .type = QUIC_CC_EVT_ACK, };
1567
1568 list_for_each_entry_safe(pkt, tmp, newly_acked_pkts, list) {
1569 pkt->pktns->tx.in_flight -= pkt->in_flight_len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01001570 qc->path->prep_in_flight -= pkt->in_flight_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001571 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01001572 qc->path->ifae_pkts--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001573 ev.ack.acked = pkt->in_flight_len;
1574 ev.ack.time_sent = pkt->time_sent;
1575 quic_cc_event(&qc->path->cc, &ev);
Willy Tarreau2b718102021-04-21 07:32:39 +02001576 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001577 eb64_delete(&pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001578 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001579 }
1580
1581}
1582
Frédéric Lécaillee87524d2022-01-19 17:48:40 +01001583/* Release all the frames attached to <pktns> packet number space */
1584static inline void qc_release_pktns_frms(struct quic_pktns *pktns)
1585{
1586 struct quic_frame *frm, *frmbak;
1587
1588 list_for_each_entry_safe(frm, frmbak, &pktns->tx.frms, list) {
1589 LIST_DELETE(&frm->list);
1590 pool_free(pool_head_quic_frame, frm);
1591 }
1592}
1593
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001594/* Handle <pkts> list of lost packets detected at <now_us>, handling
 1595 * their TX frames.
 1596 * Send a packet loss event to the congestion controller if
 1597 * in flight packets have been lost.
1598 * Also frees the packet in <pkts> list.
1599 * Never fails.
1600 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001601static inline void qc_release_lost_pkts(struct quic_conn *qc,
1602 struct quic_pktns *pktns,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001603 struct list *pkts,
1604 uint64_t now_us)
1605{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001606 struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001607 uint64_t lost_bytes;
1608
1609 lost_bytes = 0;
1610 oldest_lost = newest_lost = NULL;
1611 list_for_each_entry_safe(pkt, tmp, pkts, list) {
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001612 struct list tmp = LIST_HEAD_INIT(tmp);
1613
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001614 lost_bytes += pkt->in_flight_len;
1615 pkt->pktns->tx.in_flight -= pkt->in_flight_len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01001616 qc->path->prep_in_flight -= pkt->in_flight_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001617 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01001618 qc->path->ifae_pkts--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001619 /* Treat the frames of this lost packet. */
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01001620 qc_requeue_nacked_pkt_tx_frms(qc, &pkt->frms, &pktns->tx.frms);
Willy Tarreau2b718102021-04-21 07:32:39 +02001621 LIST_DELETE(&pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001622 if (!oldest_lost) {
1623 oldest_lost = newest_lost = pkt;
1624 }
1625 else {
1626 if (newest_lost != oldest_lost)
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001627 quic_tx_packet_refdec(newest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001628 newest_lost = pkt;
1629 }
1630 }
1631
1632 if (lost_bytes) {
 1633		/* Send a packet loss event to the congestion controller. */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001634 qc_cc_loss_event(qc, lost_bytes, newest_lost->time_sent,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001635 newest_lost->time_sent - oldest_lost->time_sent, now_us);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001636 quic_tx_packet_refdec(oldest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001637 if (newest_lost != oldest_lost)
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02001638 quic_tx_packet_refdec(newest_lost);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001639 }
1640}
1641
1642/* Look for packet loss among the sent packets of <pktns> packet number space
 1643 * attached to <qc> connection. Packets deemed lost are removed from their tree
 1644 * and appended to <lost_pkts> list; otherwise the <loss_time> value of the
 1645 * packet number space is set to the time at which they may be declared lost.
 1646 * Should be called after having received an ACK frame with newly acknowledged
 1647 * packets or when the loss detection timer has expired.
1648 * Always succeeds.
1649 */
1650static void qc_packet_loss_lookup(struct quic_pktns *pktns,
1651 struct quic_conn *qc,
1652 struct list *lost_pkts)
1653{
1654 struct eb_root *pkts;
1655 struct eb64_node *node;
1656 struct quic_loss *ql;
1657 unsigned int loss_delay;
1658
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001659 TRACE_ENTER(QUIC_EV_CONN_PKTLOSS, qc, pktns);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001660 pkts = &pktns->tx.pkts;
1661 pktns->tx.loss_time = TICK_ETERNITY;
1662 if (eb_is_empty(pkts))
1663 goto out;
1664
1665 ql = &qc->path->loss;
1666 loss_delay = QUIC_MAX(ql->latest_rtt, ql->srtt >> 3);
1667 loss_delay += loss_delay >> 3;
1668 loss_delay = QUIC_MAX(loss_delay, MS_TO_TICKS(QUIC_TIMER_GRANULARITY));
1669
1670 node = eb64_first(pkts);
1671 while (node) {
1672 struct quic_tx_packet *pkt;
1673 int64_t largest_acked_pn;
1674 unsigned int loss_time_limit, time_sent;
1675
1676 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
Frédéric Lécaille59b07c72021-08-03 16:06:01 +02001677 largest_acked_pn = HA_ATOMIC_LOAD(&pktns->tx.largest_acked_pn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001678 node = eb64_next(node);
1679 if ((int64_t)pkt->pn_node.key > largest_acked_pn)
1680 break;
1681
1682 time_sent = pkt->time_sent;
1683 loss_time_limit = tick_add(time_sent, loss_delay);
1684 if (tick_is_le(time_sent, now_ms) ||
1685 (int64_t)largest_acked_pn >= pkt->pn_node.key + QUIC_LOSS_PACKET_THRESHOLD) {
1686 eb64_delete(&pkt->pn_node);
Willy Tarreau2b718102021-04-21 07:32:39 +02001687 LIST_APPEND(lost_pkts, &pkt->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001688 }
1689 else {
Frédéric Lécailledc90c072021-12-27 18:15:27 +01001690 if (tick_isset(pktns->tx.loss_time))
1691 pktns->tx.loss_time = tick_first(pktns->tx.loss_time, loss_time_limit);
1692 else
1693 pktns->tx.loss_time = loss_time_limit;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001694 }
1695 }
1696
1697 out:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001698 TRACE_LEAVE(QUIC_EV_CONN_PKTLOSS, qc, pktns, lost_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001699}
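
#if 0
/* Illustrative sketch, not part of the original source: the loss time
 * threshold computed by qc_packet_loss_lookup() above follows RFC 9002 §6.1.2,
 * i.e. 9/8 of the greater of the latest RTT and the smoothed RTT (<ql->srtt>
 * is assumed to be stored pre-multiplied by 8, hence the '>> 3'), floored to
 * the timer granularity. For instance with a latest RTT of 40ms and a smoothed
 * RTT of 32ms, the loss delay is 40 + 40/8 = 45ms.
 */
static unsigned int loss_delay_example(unsigned int latest_rtt, unsigned int srtt)
{
	unsigned int loss_delay = QUIC_MAX(latest_rtt, srtt);

	loss_delay += loss_delay >> 3;
	return QUIC_MAX(loss_delay, MS_TO_TICKS(QUIC_TIMER_GRANULARITY));
}
#endif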
1700
1701/* Parse ACK frame into <frm> from a buffer at <pos> address with <end> being at
1702 * one byte past the end of this buffer. Also update <rtt_sample> if needed, i.e.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05001703 * if the largest acked packet was newly acked and if there was at least one newly
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001704 * acked ack-eliciting packet.
 1705 * Return 1 if succeeded, 0 if not.
1706 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001707static inline int qc_parse_ack_frm(struct quic_conn *qc,
1708 struct quic_frame *frm,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001709 struct quic_enc_level *qel,
1710 unsigned int *rtt_sample,
1711 const unsigned char **pos, const unsigned char *end)
1712{
1713 struct quic_ack *ack = &frm->ack;
1714 uint64_t smallest, largest;
1715 struct eb_root *pkts;
1716 struct eb64_node *largest_node;
1717 unsigned int time_sent, pkt_flags;
1718 struct list newly_acked_pkts = LIST_HEAD_INIT(newly_acked_pkts);
1719 struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
1720
1721 if (ack->largest_ack > qel->pktns->tx.next_pn) {
1722 TRACE_DEVEL("ACK for not sent packet", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001723 qc, NULL, &ack->largest_ack);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001724 goto err;
1725 }
1726
1727 if (ack->first_ack_range > ack->largest_ack) {
1728 TRACE_DEVEL("too big first ACK range", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001729 qc, NULL, &ack->first_ack_range);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001730 goto err;
1731 }
1732
1733 largest = ack->largest_ack;
1734 smallest = largest - ack->first_ack_range;
1735 pkts = &qel->pktns->tx.pkts;
1736 pkt_flags = 0;
1737 largest_node = NULL;
1738 time_sent = 0;
1739
Frédéric Lécaille59b07c72021-08-03 16:06:01 +02001740 if ((int64_t)ack->largest_ack > HA_ATOMIC_LOAD(&qel->pktns->tx.largest_acked_pn)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001741 largest_node = eb64_lookup(pkts, largest);
1742 if (!largest_node) {
1743 TRACE_DEVEL("Largest acked packet not found",
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001744 QUIC_EV_CONN_PRSAFRM, qc);
Frédéric Lécaille83b7a5b2021-11-17 16:16:04 +01001745 }
1746 else {
1747 time_sent = eb64_entry(&largest_node->node,
1748 struct quic_tx_packet, pn_node)->time_sent;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001749 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001750 }
1751
1752 TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001753 qc, NULL, &largest, &smallest);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001754 do {
1755 uint64_t gap, ack_range;
1756
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001757 qc_ackrng_pkts(qc, pkts, &pkt_flags, &newly_acked_pkts,
1758 largest_node, largest, smallest);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001759 if (!ack->ack_range_num--)
1760 break;
1761
1762 if (!quic_dec_int(&gap, pos, end))
1763 goto err;
1764
1765 if (smallest < gap + 2) {
1766 TRACE_DEVEL("wrong gap value", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001767 qc, NULL, &gap, &smallest);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001768 goto err;
1769 }
1770
1771 largest = smallest - gap - 2;
1772 if (!quic_dec_int(&ack_range, pos, end))
1773 goto err;
1774
1775 if (largest < ack_range) {
1776 TRACE_DEVEL("wrong ack range value", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001777 qc, NULL, &largest, &ack_range);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001778 goto err;
1779 }
1780
1781 /* Do not use this node anymore. */
1782 largest_node = NULL;
1783 /* Next range */
1784 smallest = largest - ack_range;
1785
1786 TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001787 qc, NULL, &largest, &smallest);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001788 } while (1);
1789
1790 /* Flag this packet number space as having received an ACK. */
Frédéric Lécaille67f47d02021-08-19 15:19:09 +02001791 HA_ATOMIC_OR(&qel->pktns->flags, QUIC_FL_PKTNS_ACK_RECEIVED);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001792
1793 if (time_sent && (pkt_flags & QUIC_FL_TX_PACKET_ACK_ELICITING)) {
1794 *rtt_sample = tick_remain(time_sent, now_ms);
Frédéric Lécaille59b07c72021-08-03 16:06:01 +02001795 HA_ATOMIC_STORE(&qel->pktns->tx.largest_acked_pn, ack->largest_ack);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001796 }
1797
1798 if (!LIST_ISEMPTY(&newly_acked_pkts)) {
1799 if (!eb_is_empty(&qel->pktns->tx.pkts)) {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001800 qc_packet_loss_lookup(qel->pktns, qc, &lost_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001801 if (!LIST_ISEMPTY(&lost_pkts))
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001802 qc_release_lost_pkts(qc, qel->pktns, &lost_pkts, now_ms);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001803 }
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001804 qc_treat_newly_acked_pkts(qc, &newly_acked_pkts);
1805 if (quic_peer_validated_addr(qc))
1806 qc->path->loss.pto_count = 0;
1807 qc_set_timer(qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001808 }
1809
1810
1811 return 1;
1812
1813 err:
1814 free_quic_tx_pkts(&newly_acked_pkts);
Amaury Denoyellee81fed92021-12-22 11:06:34 +01001815 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSAFRM, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001816 return 0;
1817}
1818
Frédéric Lécaille6f0fadb2021-09-28 09:04:12 +02001819/* This function gives the details of SSL errors. It is used only
 1820 * if the debug mode and the verbose mode are activated. It dumps all
 1821 * the SSL errors until the stack is empty.
1822 */
1823static forceinline void qc_ssl_dump_errors(struct connection *conn)
1824{
1825 if (unlikely(global.mode & MODE_DEBUG)) {
1826 while (1) {
1827 unsigned long ret;
1828
1829 ret = ERR_get_error();
1830 if (!ret)
1831 return;
1832
1833 fprintf(stderr, "conn. @%p OpenSSL error[0x%lx] %s: %s\n", conn, ret,
1834 ERR_func_error_string(ret), ERR_reason_error_string(ret));
1835 }
1836 }
1837}
1838
Amaury Denoyelle71e588c2021-11-12 11:23:29 +01001839int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx,
1840 const char **str, int *len);
1841
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001842/* Provide the CRYPTO data found at <data>, with <len> as its length, to the TLS
 1843 * stack from <qel> encryption level with <ctx> as QUIC connection context.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05001844 * The remaining parameters are there for debugging purposes.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001845 * Return 1 if succeeded, 0 if not.
1846 */
1847static inline int qc_provide_cdata(struct quic_enc_level *el,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02001848 struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001849 const unsigned char *data, size_t len,
1850 struct quic_rx_packet *pkt,
1851 struct quic_rx_crypto_frm *cf)
1852{
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02001853 int ssl_err, state;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02001854 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001855
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001856 ssl_err = SSL_ERROR_NONE;
Amaury Denoyellec15dd922021-12-21 11:41:52 +01001857 qc = ctx->qc;
1858
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001859 TRACE_ENTER(QUIC_EV_CONN_SSLDATA, qc);
1860
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001861 if (SSL_provide_quic_data(ctx->ssl, el->level, data, len) != 1) {
1862 TRACE_PROTO("SSL_provide_quic_data() error",
Frédéric Lécaillefde2a982021-12-27 15:12:09 +01001863 QUIC_EV_CONN_SSLDATA, qc, pkt, cf, ctx->ssl);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001864 goto err;
1865 }
1866
1867 el->rx.crypto.offset += len;
1868 TRACE_PROTO("in order CRYPTO data",
Frédéric Lécaillee7ff2b22021-12-22 17:40:38 +01001869 QUIC_EV_CONN_SSLDATA, qc, NULL, cf, ctx->ssl);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001870
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02001871 state = HA_ATOMIC_LOAD(&qc->state);
1872 if (state < QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001873 ssl_err = SSL_do_handshake(ctx->ssl);
1874 if (ssl_err != 1) {
1875 ssl_err = SSL_get_error(ctx->ssl, ssl_err);
1876 if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
1877 TRACE_PROTO("SSL handshake",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001878 QUIC_EV_CONN_HDSHK, qc, &state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001879 goto out;
1880 }
1881
1882 TRACE_DEVEL("SSL handshake error",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001883 QUIC_EV_CONN_HDSHK, qc, &state, &ssl_err);
Frédéric Lécaille7c881bd2021-09-28 09:05:59 +02001884 qc_ssl_dump_errors(ctx->conn);
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01001885 ERR_clear_error();
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001886 goto err;
1887 }
1888
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001889 TRACE_PROTO("SSL handshake OK", QUIC_EV_CONN_HDSHK, qc, &state);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01001890 if (qc_is_listener(ctx->qc)) {
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02001891 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_CONFIRMED);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01001892 /* The connection is ready to be accepted. */
1893 quic_accept_push_qc(qc);
1894 }
1895 else {
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02001896 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_COMPLETE);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01001897 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001898 } else {
1899 ssl_err = SSL_process_quic_post_handshake(ctx->ssl);
1900 if (ssl_err != 1) {
1901 ssl_err = SSL_get_error(ctx->ssl, ssl_err);
1902 if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
1903 TRACE_DEVEL("SSL post handshake",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001904 QUIC_EV_CONN_HDSHK, qc, &state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001905 goto out;
1906 }
1907
1908 TRACE_DEVEL("SSL post handshake error",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001909 QUIC_EV_CONN_HDSHK, qc, &state, &ssl_err);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001910 goto err;
1911 }
1912
1913 TRACE_PROTO("SSL post handshake succeeded",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001914 QUIC_EV_CONN_HDSHK, qc, &state);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001915 }
Amaury Denoyellee2288c32021-12-03 14:44:21 +01001916
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001917 out:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001918 TRACE_LEAVE(QUIC_EV_CONN_SSLDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001919 return 1;
1920
1921 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01001922 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_SSLDATA, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001923 return 0;
1924}
1925
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001926/* Allocate a new STREAM RX frame from <stream_frm> STREAM frame attached to
1927 * <pkt> RX packet.
1928 * Return it if succeeded, NULL if not.
1929 */
1930static inline
1931struct quic_rx_strm_frm *new_quic_rx_strm_frm(struct quic_stream *stream_frm,
1932 struct quic_rx_packet *pkt)
1933{
1934 struct quic_rx_strm_frm *frm;
1935
1936 frm = pool_alloc(pool_head_quic_rx_strm_frm);
1937 if (frm) {
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001938 frm->offset_node.key = stream_frm->offset.key;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001939 frm->len = stream_frm->len;
1940 frm->data = stream_frm->data;
1941 frm->pkt = pkt;
1942 }
1943
1944 return frm;
1945}
1946
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001947/* Copy as much STREAM data as possible from <strm_frm> into <buf> buffer.
Frédéric Lécaille3fe7df82021-12-15 15:32:55 +01001948 * Also update <strm_frm> frame to reflect the data that has been consumed.
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001949 */
1950static size_t qc_strm_cpy(struct buffer *buf, struct quic_stream *strm_frm)
1951{
1952 size_t ret;
1953
1954 ret = 0;
1955 while (strm_frm->len) {
1956 size_t try;
1957
1958 try = b_contig_space(buf);
1959 if (!try)
1960 break;
1961
1962 if (try > strm_frm->len)
1963 try = strm_frm->len;
1964 memcpy(b_tail(buf), strm_frm->data, try);
1965 strm_frm->len -= try;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02001966 strm_frm->offset.key += try;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01001967 b_add(buf, try);
1968 ret += try;
1969 }
1970
1971 return ret;
1972}
1973
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01001974/* Copy as much STREAM data as possible from <strm_frm> into <buf> buffer.
 1975 * Also update <strm_frm> frame to reflect the data that has been consumed.
1976 */
1977static size_t qc_rx_strm_frm_cpy(struct buffer *buf,
1978 struct quic_rx_strm_frm *strm_frm)
1979{
1980 size_t ret;
1981
1982 ret = 0;
1983 while (strm_frm->len) {
1984 size_t try;
1985
1986 try = b_contig_space(buf);
1987 if (!try)
1988 break;
1989
1990 if (try > strm_frm->len)
1991 try = strm_frm->len;
1992 memcpy(b_tail(buf), strm_frm->data, try);
1993 strm_frm->len -= try;
1994 strm_frm->offset_node.key += try;
1995 b_add(buf, try);
1996 ret += try;
1997 }
1998
1999 return ret;
2000}
2001
2002/* Process as many of the RX STREAM frames received for <qcs> as possible */
2003static size_t qc_treat_rx_strm_frms(struct qcs *qcs)
2004{
2005 int total;
2006 struct eb64_node *frm_node;
2007
2008 total = 0;
2009 frm_node = eb64_first(&qcs->rx.frms);
2010 while (frm_node) {
2011 int ret;
2012 struct quic_rx_strm_frm *frm;
2013
2014 frm = eb64_entry(&frm_node->node, struct quic_rx_strm_frm, offset_node);
2015 if (frm->offset_node.key != qcs->rx.offset)
2016 break;
2017
2018 ret = qc_rx_strm_frm_cpy(&qcs->rx.buf, frm);
2019 qcs->rx.offset += ret;
2020 total += ret;
2021 if (frm->len) {
 2022			/* If there is remaining data in this frame,
 2023			 * it is because the destination buffer is full.
2024 */
2025 break;
2026 }
2027
2028 frm_node = eb64_next(frm_node);
2029 quic_rx_packet_refdec(frm->pkt);
2030 eb64_delete(&frm->offset_node);
2031 pool_free(pool_head_quic_rx_strm_frm, frm);
2032 }
2033
2034 return total;
2035}
2036
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002037/* Handle <strm_frm> bidirectional STREAM frame. Depending on its ID, several
2038 * streams may be open. The data are copied to the stream RX buffer if possible.
2039 * If not, the STREAM frame is stored to be treated again later.
 2040 * We rely on the flow control so as not to store too many STREAM frames.
2041 * Return 1 if succeeded, 0 if not.
2042 */
2043static int qc_handle_bidi_strm_frm(struct quic_rx_packet *pkt,
2044 struct quic_stream *strm_frm,
2045 struct quic_conn *qc)
2046{
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01002047 int total;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002048 struct qcs *strm;
Frédéric Lécaille10250b22021-12-22 16:13:43 +01002049 struct eb64_node *strm_node;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002050 struct quic_rx_strm_frm *frm;
2051
2052 strm_node = qcc_get_qcs(qc->qcc, strm_frm->id);
2053 if (!strm_node) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002054 TRACE_PROTO("Stream not found", QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002055 return 0;
2056 }
2057
2058 strm = eb64_entry(&strm_node->node, struct qcs, by_id);
Frédéric Lécaille10250b22021-12-22 16:13:43 +01002059 if (strm_frm->offset.key < strm->rx.offset) {
2060 size_t diff;
2061
2062 if (strm_frm->offset.key + strm_frm->len <= strm->rx.offset) {
2063 TRACE_PROTO("Already received STREAM data",
2064 QUIC_EV_CONN_PSTRM, qc);
2065 goto out;
2066 }
2067
2068 TRACE_PROTO("Partially already received STREAM data", QUIC_EV_CONN_PSTRM, qc);
2069 diff = strm->rx.offset - strm_frm->offset.key;
2070 strm_frm->offset.key = strm->rx.offset;
2071 strm_frm->len -= diff;
2072 strm_frm->data += diff;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002073 }
2074
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01002075 total = 0;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02002076 if (strm_frm->offset.key == strm->rx.offset) {
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002077 int ret;
2078
Frédéric Lécailled8b84432021-12-10 15:18:36 +01002079 if (!qc_get_buf(strm, &strm->rx.buf)) {
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002080 goto store_frm;
Frédéric Lécailled8b84432021-12-10 15:18:36 +01002081 }
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002082
2083 ret = qc_strm_cpy(&strm->rx.buf, strm_frm);
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01002084 total += ret;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002085 strm->rx.offset += ret;
2086 }
2087
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01002088 total += qc_treat_rx_strm_frms(strm);
2089 if (total && qc->qcc->app_ops->decode_qcs(strm, strm_frm->fin, qc->qcc->ctx) < 0) {
Frédéric Lécaillefde2a982021-12-27 15:12:09 +01002090 TRACE_PROTO("Decoding error", QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécaillef1d38cb2021-12-20 12:02:13 +01002091 return 0;
2092 }
2093
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002094 if (!strm_frm->len)
2095 goto out;
2096
2097 store_frm:
2098 frm = new_quic_rx_strm_frm(strm_frm, pkt);
2099 if (!frm) {
2100 TRACE_PROTO("Could not alloc RX STREAM frame",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002101 QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002102 return 0;
2103 }
2104
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02002105 eb64_insert(&strm->rx.frms, &frm->offset_node);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002106 quic_rx_packet_refinc(pkt);
2107
2108 out:
2109 return 1;
2110}
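
#if 0
/* Illustrative sketch, not part of the original source: the trimming of a
 * partially already received STREAM frame as done above. With a stream RX
 * offset of 1000 and a frame covering offsets 900..1199 (len == 300), the
 * first 100 bytes are already received, so the frame is advanced to offset
 * 1000 with 200 bytes left.
 */
static void strm_frm_trim_example(struct quic_stream *strm_frm, uint64_t rx_offset)
{
	size_t diff = rx_offset - strm_frm->offset.key;

	strm_frm->offset.key = rx_offset;
	strm_frm->len -= diff;
	strm_frm->data += diff;
}
#endif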
2111
2112/* Handle <strm_frm> unidirectional STREAM frame. Depending on its ID, several
2113 * streams may be open. The data are copied to the stream RX buffer if possible.
2114 * If not, the STREAM frame is stored to be treated again later.
 2115 * We rely on the flow control so as not to store too many STREAM frames.
2116 * Return 1 if succeeded, 0 if not.
2117 */
2118static int qc_handle_uni_strm_frm(struct quic_rx_packet *pkt,
2119 struct quic_stream *strm_frm,
2120 struct quic_conn *qc)
2121{
2122 struct qcs *strm;
Frédéric Lécaille10250b22021-12-22 16:13:43 +01002123 struct eb64_node *strm_node;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002124 struct quic_rx_strm_frm *frm;
2125 size_t strm_frm_len;
2126
2127 strm_node = qcc_get_qcs(qc->qcc, strm_frm->id);
2128 if (!strm_node) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002129 TRACE_PROTO("Stream not found", QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002130 return 0;
2131 }
2132
2133 strm = eb64_entry(&strm_node->node, struct qcs, by_id);
Frédéric Lécaille10250b22021-12-22 16:13:43 +01002134 if (strm_frm->offset.key < strm->rx.offset) {
2135 size_t diff;
2136
2137 if (strm_frm->offset.key + strm_frm->len <= strm->rx.offset) {
2138 TRACE_PROTO("Already received STREAM data",
2139 QUIC_EV_CONN_PSTRM, qc);
2140 goto out;
2141 }
2142
2143 TRACE_PROTO("Partially already received STREAM data", QUIC_EV_CONN_PSTRM, qc);
2144 diff = strm->rx.offset - strm_frm->offset.key;
2145 strm_frm->offset.key = strm->rx.offset;
2146 strm_frm->len -= diff;
2147 strm_frm->data += diff;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002148 }
2149
2150 strm_frm_len = strm_frm->len;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02002151 if (strm_frm->offset.key == strm->rx.offset) {
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002152 int ret;
2153
Amaury Denoyelle1e308ff2021-10-12 18:14:12 +02002154 if (!qc_get_buf(strm, &strm->rx.buf))
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002155 goto store_frm;
2156
2157 /* qc_strm_cpy() will modify the offset, depending on the number
2158 * of bytes copied.
2159 */
2160 ret = qc_strm_cpy(&strm->rx.buf, strm_frm);
2161 /* Inform the application of the arrival of this new stream */
2162 if (!strm->rx.offset && !qc->qcc->app_ops->attach_ruqs(strm, qc->qcc->ctx)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002163 TRACE_PROTO("Could not set an uni-stream", QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002164 return 0;
2165 }
2166
Amaury Denoyellea3f222d2021-12-06 11:24:00 +01002167 if (ret)
2168 qcs_notify_recv(strm);
2169
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02002170 strm_frm->offset.key += ret;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002171 }
 2172	/* Take this frame into account for the stream flow control */
2173 strm->rx.offset += strm_frm_len;
 2174	/* If all the data was provided to the application, there is no need to
Ilya Shipitsinbd6b4be2021-10-15 16:18:21 +05002175	 * store any more information about it.
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002176 */
2177 if (!strm_frm->len)
2178 goto out;
2179
2180 store_frm:
2181 frm = new_quic_rx_strm_frm(strm_frm, pkt);
2182 if (!frm) {
2183 TRACE_PROTO("Could not alloc RX STREAM frame",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002184 QUIC_EV_CONN_PSTRM, qc);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002185 return 0;
2186 }
2187
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02002188 eb64_insert(&strm->rx.frms, &frm->offset_node);
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002189 quic_rx_packet_refinc(pkt);
2190
2191 out:
2192 return 1;
2193}
2194
2195static inline int qc_handle_strm_frm(struct quic_rx_packet *pkt,
2196 struct quic_stream *strm_frm,
2197 struct quic_conn *qc)
2198{
2199 if (strm_frm->id & QCS_ID_DIR_BIT)
2200 return qc_handle_uni_strm_frm(pkt, strm_frm, qc);
2201 else
2202 return qc_handle_bidi_strm_frm(pkt, strm_frm, qc);
2203}
2204
Frédéric Lécaille04e63aa2022-01-17 18:16:27 +01002205/* Prepare a fast retransmission from <qel> encryption level */
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002206static void qc_prep_fast_retrans(struct quic_enc_level *qel,
2207 struct quic_conn *qc)
2208{
2209 struct eb_root *pkts = &qel->pktns->tx.pkts;
Frédéric Lécaille04e63aa2022-01-17 18:16:27 +01002210 struct eb64_node *node;
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002211 struct quic_tx_packet *pkt;
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002212
Frédéric Lécaillef010f0a2022-01-06 17:28:05 +01002213 pkt = NULL;
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002214 pkts = &qel->pktns->tx.pkts;
2215 node = eb64_first(pkts);
Frédéric Lécaillef010f0a2022-01-06 17:28:05 +01002216	/* Skip the empty packets (they have already been retransmitted) */
2217 while (node) {
2218 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
2219 if (!LIST_ISEMPTY(&pkt->frms))
2220 break;
2221 node = eb64_next(node);
2222 }
2223
2224 if (!pkt)
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002225 return;
2226
Frédéric Lécaille7065dd02022-01-14 15:51:52 +01002227 qc_requeue_nacked_pkt_tx_frms(qc, &pkt->frms, &qel->pktns->tx.frms);
Frédéric Lécaille04e63aa2022-01-17 18:16:27 +01002228}
2229
2230/* Prepare a fast retransmission during the handshake after a client
 2231 * has resent Initial packets. According to the RFC, a server may retransmit
 2232 * up to two datagrams of Initial packets if it did not receive all Initial packets,
 2233 * resending them coalesced with other packets (Handshake packets here).
2234 * (Listener only).
2235 */
2236static void qc_prep_hdshk_fast_retrans(struct quic_conn *qc)
2237{
2238 struct list itmp = LIST_HEAD_INIT(itmp);
2239 struct list htmp = LIST_HEAD_INIT(htmp);
2240 struct quic_frame *frm, *frmbak;
2241
2242 struct quic_enc_level *iqel = &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL];
2243 struct quic_enc_level *hqel = &qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE];
2244 struct quic_enc_level *qel = iqel;
2245 struct eb_root *pkts;
2246 struct eb64_node *node;
2247 struct quic_tx_packet *pkt;
2248 struct list *tmp = &itmp;
2249
2250 start:
2251 pkt = NULL;
2252 pkts = &qel->pktns->tx.pkts;
2253 node = eb64_first(pkts);
 2254	/* Skip the empty packets (they have already been retransmitted) */
2255 while (node) {
2256 pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
2257 if (!LIST_ISEMPTY(&pkt->frms))
2258 break;
2259 node = eb64_next(node);
2260 }
2261
2262 if (!pkt)
2263 goto end;
2264
2265 qel->pktns->tx.pto_probe += 1;
2266 requeue:
2267 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
2268 TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
2269 LIST_DELETE(&frm->list);
2270 LIST_APPEND(tmp, &frm->list);
2271 }
2272
2273 if (qel == iqel) {
2274 if (pkt->next && pkt->next->type == QUIC_PACKET_TYPE_HANDSHAKE) {
2275 pkt = pkt->next;
2276 tmp = &htmp;
2277 hqel->pktns->tx.pto_probe += 1;
2278 goto requeue;
2279 }
2280
2281 qel = hqel;
2282 tmp = &htmp;
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002283 goto start;
2284 }
Frédéric Lécaille04e63aa2022-01-17 18:16:27 +01002285
2286 end:
2287 LIST_SPLICE(&iqel->pktns->tx.frms, &itmp);
2288 LIST_SPLICE(&hqel->pktns->tx.frms, &htmp);
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002289}
2290
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002291/* Parse all the frames of <pkt> QUIC packet for QUIC connection with <ctx>
2292 * as I/O handler context and <qel> as encryption level.
2293 * Returns 1 if succeeded, 0 if failed.
2294 */
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02002295static int qc_parse_pkt_frms(struct quic_rx_packet *pkt, struct ssl_sock_ctx *ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002296 struct quic_enc_level *qel)
2297{
2298 struct quic_frame frm;
2299 const unsigned char *pos, *end;
Amaury Denoyellec15dd922021-12-21 11:41:52 +01002300 struct quic_conn *qc = ctx->qc;
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002301 int fast_retrans = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002302
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002303 TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002304 /* Skip the AAD */
2305 pos = pkt->data + pkt->aad_len;
2306 end = pkt->data + pkt->len;
2307
2308 while (pos < end) {
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002309 if (!qc_parse_frm(&frm, pkt, &pos, end, qc))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002310 goto err;
2311
Frédéric Lécaille1ede8232021-12-23 14:11:25 +01002312 TRACE_PROTO("RX frame", QUIC_EV_CONN_PSTRM, qc, &frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002313 switch (frm.type) {
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002314 case QUIC_FT_PADDING:
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002315 break;
2316 case QUIC_FT_PING:
2317 break;
2318 case QUIC_FT_ACK:
2319 {
2320 unsigned int rtt_sample;
2321
2322 rtt_sample = 0;
Amaury Denoyellee81fed92021-12-22 11:06:34 +01002323 if (!qc_parse_ack_frm(qc, &frm, qel, &rtt_sample, &pos, end))
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002324 goto err;
2325
2326 if (rtt_sample) {
2327 unsigned int ack_delay;
2328
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002329 ack_delay = !quic_application_pktns(qel->pktns, qc) ? 0 :
Frédéric Lécaille22576a22021-12-28 14:27:43 +01002330 HA_ATOMIC_LOAD(&qc->state) >= QUIC_HS_ST_CONFIRMED ?
2331 MS_TO_TICKS(QUIC_MIN(quic_ack_delay_ms(&frm.ack, qc), qc->max_ack_delay)) :
2332 MS_TO_TICKS(quic_ack_delay_ms(&frm.ack, qc));
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002333 quic_loss_srtt_update(&qc->path->loss, rtt_sample, ack_delay, qc);
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002334 }
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002335 break;
2336 }
Frédéric Lécailled8b84432021-12-10 15:18:36 +01002337 case QUIC_FT_STOP_SENDING:
Frédéric Lécailled8b84432021-12-10 15:18:36 +01002338 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002339 case QUIC_FT_CRYPTO:
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002340 {
2341 struct quic_rx_crypto_frm *cf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002342
Frédéric Lécaille917a7db2022-01-03 17:00:35 +01002343 if (unlikely(qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD)) {
2344 /* XXX TO DO: <cfdebug> is used only for the traces. */
2345 struct quic_rx_crypto_frm cfdebug = { };
2346
2347 cfdebug.offset_node.key = frm.crypto.offset;
2348 cfdebug.len = frm.crypto.len;
2349 TRACE_PROTO("CRYPTO data discarded",
2350 QUIC_EV_CONN_ELRXPKTS, qc, pkt, &cfdebug);
2351 break;
2352 }
2353
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002354 if (unlikely(frm.crypto.offset < qel->rx.crypto.offset)) {
2355 if (frm.crypto.offset + frm.crypto.len <= qel->rx.crypto.offset) {
2356 /* XXX TO DO: <cfdebug> is used only for the traces. */
2357 struct quic_rx_crypto_frm cfdebug = { };
2358
2359 cfdebug.offset_node.key = frm.crypto.offset;
2360 cfdebug.len = frm.crypto.len;
2361 /* Nothing to do */
2362 TRACE_PROTO("Already received CRYPTO data",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002363 QUIC_EV_CONN_ELRXPKTS, qc, pkt, &cfdebug);
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01002364 if (qc_is_listener(ctx->qc) &&
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002365 qel == &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL])
2366 fast_retrans = 1;
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002367 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002368 }
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002369 else {
2370 size_t diff = qel->rx.crypto.offset - frm.crypto.offset;
2371 /* XXX TO DO: <cfdebug> is used only for the traces. */
2372 struct quic_rx_crypto_frm cfdebug = { };
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002373
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002374 cfdebug.offset_node.key = frm.crypto.offset;
2375 cfdebug.len = frm.crypto.len;
2376 TRACE_PROTO("Partially already received CRYPTO data",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002377 QUIC_EV_CONN_ELRXPKTS, qc, pkt, &cfdebug);
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002378 frm.crypto.len -= diff;
2379 frm.crypto.data += diff;
2380 frm.crypto.offset = qel->rx.crypto.offset;
2381 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002382 }
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002383
2384 if (frm.crypto.offset == qel->rx.crypto.offset) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002385 /* XXX TO DO: <cf> is used only for the traces. */
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002386 struct quic_rx_crypto_frm cfdebug = { };
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002387
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002388 cfdebug.offset_node.key = frm.crypto.offset;
2389 cfdebug.len = frm.crypto.len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002390 if (!qc_provide_cdata(qel, ctx,
2391 frm.crypto.data, frm.crypto.len,
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002392 pkt, &cfdebug))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002393 goto err;
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002394
2395 break;
2396 }
2397
2398 /* frm.crypto.offset > qel->rx.crypto.offset */
2399 cf = pool_alloc(pool_head_quic_rx_crypto_frm);
2400 if (!cf) {
2401 TRACE_DEVEL("CRYPTO frame allocation failed",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002402 QUIC_EV_CONN_PRSHPKT, qc);
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002403 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002404 }
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002405
2406 cf->offset_node.key = frm.crypto.offset;
2407 cf->len = frm.crypto.len;
2408 cf->data = frm.crypto.data;
2409 cf->pkt = pkt;
2410 eb64_insert(&qel->rx.crypto.frms, &cf->offset_node);
2411 quic_rx_packet_refinc(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002412 break;
Frédéric Lécaillef9cb3a92021-12-02 11:25:58 +01002413 }
Frédéric Lécailled8b84432021-12-10 15:18:36 +01002414 case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +01002415 {
2416 struct quic_stream *stream = &frm.stream;
2417
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01002418 if (qc_is_listener(ctx->qc)) {
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +01002419 if (stream->id & QUIC_STREAM_FRAME_ID_INITIATOR_BIT)
2420 goto err;
2421 } else if (!(stream->id & QUIC_STREAM_FRAME_ID_INITIATOR_BIT))
2422 goto err;
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002423
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002424 if (!qc_handle_strm_frm(pkt, stream, qc))
Frédéric Lécailledfbae762021-02-18 09:59:01 +01002425 goto err;
2426
Frédéric Lécaille242fb1b2020-12-31 12:45:38 +01002427 break;
2428 }
Frédéric Lécaillef366cb72021-11-18 10:57:18 +01002429 case QUIC_FT_MAX_DATA:
2430 case QUIC_FT_MAX_STREAM_DATA:
2431 case QUIC_FT_MAX_STREAMS_BIDI:
2432 case QUIC_FT_MAX_STREAMS_UNI:
2433 case QUIC_FT_DATA_BLOCKED:
2434 case QUIC_FT_STREAM_DATA_BLOCKED:
2435 case QUIC_FT_STREAMS_BLOCKED_BIDI:
2436 case QUIC_FT_STREAMS_BLOCKED_UNI:
2437 break;
Frédéric Lécaille0c140202020-12-09 15:56:48 +01002438 case QUIC_FT_NEW_CONNECTION_ID:
Frédéric Lécaille2cca2412022-01-21 13:55:03 +01002439 case QUIC_FT_RETIRE_CONNECTION_ID:
2440 /* XXX TO DO XXX */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002441 break;
2442 case QUIC_FT_CONNECTION_CLOSE:
2443 case QUIC_FT_CONNECTION_CLOSE_APP:
Amaury Denoyelle5154e7a2021-12-08 14:51:04 +01002444 /* warn the mux to close the connection */
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002445 qc->qcc->flags |= QC_CF_CC_RECV;
2446 tasklet_wakeup(qc->qcc->wait_event.tasklet);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002447 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002448 case QUIC_FT_HANDSHAKE_DONE:
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01002449 if (qc_is_listener(ctx->qc))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002450 goto err;
2451
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002452 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_CONFIRMED);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002453 break;
2454 default:
2455 goto err;
2456 }
2457 }
2458
Frédéric Lécaille04e63aa2022-01-17 18:16:27 +01002459 if (fast_retrans)
2460 qc_prep_hdshk_fast_retrans(qc);
Frédéric Lécaille3bb457c2021-12-30 16:14:20 +01002461
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002462 /* The server must switch from INITIAL to HANDSHAKE handshake state when it
2463	 * has successfully parsed a Handshake packet. The Initial encryption keys must also
2464 * be discarded.
2465 */
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01002466 if (pkt->type == QUIC_PACKET_TYPE_HANDSHAKE && qc_is_listener(ctx->qc)) {
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002467 int state = HA_ATOMIC_LOAD(&qc->state);
Frédéric Lécaille8c27de72021-09-20 11:00:46 +02002468
2469 if (state >= QUIC_HS_ST_SERVER_INITIAL) {
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002470 quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
Frédéric Lécaillefde2a982021-12-27 15:12:09 +01002471 TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PRSHPKT, qc);
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002472 quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, qc);
Amaury Denoyellee81fed92021-12-22 11:06:34 +01002473 qc_set_timer(ctx->qc);
Frédéric Lécaillea6255f52022-01-19 17:29:48 +01002474 qc_el_rx_pkts_del(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
Frédéric Lécaillee87524d2022-01-19 17:48:40 +01002475 qc_release_pktns_frms(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns);
Frédéric Lécaille8c27de72021-09-20 11:00:46 +02002476 if (state < QUIC_HS_ST_SERVER_HANDSHAKE)
Amaury Denoyelle17a74162021-12-21 14:45:39 +01002477 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_SERVER_HANDSHAKE);
Frédéric Lécaille8c27de72021-09-20 11:00:46 +02002478 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002479 }
2480
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002481 TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002482 return 1;
2483
2484 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002485 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSHPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002486 return 0;
2487}
2488
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002489/* Write <dglen> datagram length and <pkt> first packet address into <cbuf> ring
Ilya Shipitsinbd6b4be2021-10-15 16:18:21 +05002490 * buffer. It is the responsibility of the caller to check that there is enough
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002491 * room in <cbuf>. The <cbuf> write index is also increased accordingly.
2492 * This function must be called only after having built a correct datagram.
2493 * Always succeeds.
2494 */
2495static inline void qc_set_dg(struct cbuf *cbuf,
2496 uint16_t dglen, struct quic_tx_packet *pkt)
2497{
2498 write_u16(cb_wr(cbuf), dglen);
2499 write_ptr(cb_wr(cbuf) + sizeof dglen, pkt);
2500 cb_add(cbuf, dglen + sizeof dglen + sizeof pkt);
2501}
2502
Frédéric Lécaillee2660e62021-11-23 11:36:51 +01002503/* Prepare as many packets as possible into <qr> ring buffer for
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002504 * <qc> QUIC connection, possibly concatenating
2505 * several packets in the same datagram. A header made of two fields is added
2506 * to each datagram: the datagram length followed by the address of the first
2507 * packet in this datagram.
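 *
 * As an illustration only, the contiguous area of the ring is expected to end
 * up looking like this (two datagrams here; the trailing null length, when
 * present, marks the end of the prepared data for the reader, see
 * qc_send_ppkts()):
 *
 *     [dglen1|pkt1|datagram #1 bytes][dglen2|pkt2|datagram #2 bytes][0x0000]
 *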
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002508 * Returns the number of prepared bytes if succeeded (possibly 0), or -1 if something wrong happened.
2509 */
Frédéric Lécailleee2b8b32022-01-03 11:14:30 +01002510static int qc_prep_pkts(struct quic_conn *qc, struct qring *qr,
2511 enum quic_tls_enc_level tel,
2512 enum quic_tls_enc_level next_tel)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002513{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002514 struct quic_enc_level *qel;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002515 struct cbuf *cbuf;
2516 unsigned char *end_buf, *end, *pos, *spos;
2517 struct quic_tx_packet *first_pkt, *cur_pkt, *prv_pkt;
2518 /* length of datagrams */
2519 uint16_t dglen;
2520 size_t total;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01002521 int padding;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002522 /* Each datagram is prepended with its length followed by the
2523 * address of the first packet in the datagram.
2524 */
2525 size_t dg_headlen = sizeof dglen + sizeof first_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002526
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002527 TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
2528
Frédéric Lécaille99942d62022-01-07 14:32:31 +01002529 total = 0;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002530 start:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002531 dglen = 0;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01002532 padding = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002533 qel = &qc->els[tel];
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002534 cbuf = qr->cbuf;
2535 spos = pos = cb_wr(cbuf);
2536	/* Leave at least sizeof <dglen> bytes at the end of this buffer
2537 * to ensure there is enough room to mark the end of prepared
2538 * contiguous data with a zero length.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002539 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002540 end_buf = pos + cb_contig_space(cbuf) - sizeof dglen;
2541 first_pkt = prv_pkt = NULL;
2542 while (end_buf - pos >= (int)qc->path->mtu + dg_headlen || prv_pkt) {
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01002543 int err, probe, ack, cc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002544 enum quic_pkt_type pkt_type;
2545
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002546 TRACE_POINT(QUIC_EV_CONN_PHPKTS, qc, qel);
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01002547 probe = ack = 0;
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01002548 cc = HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IMMEDIATE_CLOSE;
2549 if (!cc) {
Frédéric Lécaille94fca872022-01-19 18:54:18 +01002550 probe = qel->pktns->tx.pto_probe;
Frédéric Lécaille25eeebe2021-12-16 11:21:52 +01002551 ack = HA_ATOMIC_BTR(&qel->pktns->flags, QUIC_FL_PKTNS_ACK_REQUIRED_BIT);
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02002552 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002553		/* Do not build any more packets if the TX secrets are not available, or
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01002554		 * if there is nothing to send, i.e. if no CONNECTION_CLOSE or ACK is required,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002555		 * there are no more packets to send upon PTO expiration,
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002556		 * and there are no more frames to send or the congestion control
2557		 * limit has been reached for the prepared data.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002558 */
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01002559 if (!(qel->tls_ctx.tx.flags & QUIC_FL_TLS_SECRETS_SET) ||
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01002560 (!cc && !ack && !probe &&
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01002561 (LIST_ISEMPTY(&qel->pktns->tx.frms) ||
Frédéric Lécaille67f47d02021-08-19 15:19:09 +02002562 qc->path->prep_in_flight >= qc->path->cwnd))) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002563 TRACE_DEVEL("nothing more to do", QUIC_EV_CONN_PHPKTS, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002564 /* Set the current datagram as prepared into <cbuf> if
2565			 * there was already a correct packet previously written.
2566 */
2567 if (prv_pkt)
2568 qc_set_dg(cbuf, dglen, first_pkt);
Frédéric Lécaille39ba1c32022-01-21 16:52:56 +01002569 /* Let's select the next encryption level */
2570 if (tel != next_tel && next_tel != QUIC_TLS_ENC_LEVEL_NONE) {
2571 tel = next_tel;
2572 qel = &qc->els[tel];
2573 /* Build a new datagram */
2574 prv_pkt = NULL;
2575 continue;
2576 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002577 break;
2578 }
2579
2580 pkt_type = quic_tls_level_pkt_type(tel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002581 if (!prv_pkt) {
2582 /* Leave room for the datagram header */
2583 pos += dg_headlen;
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01002584 if (!quic_peer_validated_addr(qc) && qc_is_listener(qc)) {
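				/* Anti-amplification limit: before the peer address is
				 * validated, a server must not send more than three times
				 * the amount of data received from this address
				 * (RFC 9000, 8.1), hence the 3 * rx.bytes budget minus
				 * the bytes already prepared for sending.
				 */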
Frédéric Lécailleca98a7f2021-11-10 17:30:15 +01002585 end = pos + QUIC_MIN(qc->path->mtu, 3 * qc->rx.bytes - qc->tx.prep_bytes);
2586 }
2587 else {
2588 end = pos + qc->path->mtu;
2589 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002590 }
2591
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01002592 cur_pkt = qc_build_pkt(&pos, end, qel, qc, dglen, padding,
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01002593 pkt_type, ack, probe, cc, &err);
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02002594		/* Restore the ACK required flag if no packet could be built */
Frédéric Lécaille67f47d02021-08-19 15:19:09 +02002595 if (err < 0) {
Frédéric Lécaille2766e782021-08-30 17:16:07 +02002596 if (ack)
Frédéric Lécaille25eeebe2021-12-16 11:21:52 +01002597 HA_ATOMIC_BTS(&qel->pktns->flags, QUIC_FL_PKTNS_ACK_REQUIRED_BIT);
Frédéric Lécaille67f47d02021-08-19 15:19:09 +02002598 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002599 switch (err) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002600 case -2:
2601 goto err;
2602 case -1:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002603 /* If there was already a correct packet present, set the
2604 * current datagram as prepared into <cbuf>.
2605 */
2606 if (prv_pkt) {
2607 qc_set_dg(cbuf, dglen, first_pkt);
2608 goto stop_build;
2609 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002610 goto out;
2611 default:
Frédéric Lécaille63556772021-12-29 17:18:21 +01002612 break;
2613 }
Frédéric Lécaille67f47d02021-08-19 15:19:09 +02002614
Frédéric Lécaille63556772021-12-29 17:18:21 +01002615		/* This is to please GCC. We cannot have (err >= 0 && !cur_pkt). */
2616 if (!cur_pkt)
2617 goto err;
2618
2619 total += cur_pkt->len;
2620		/* keep track of the first packet in the datagram */
2621 if (!first_pkt)
2622 first_pkt = cur_pkt;
2623 /* Attach the current one to the previous one */
2624 if (prv_pkt)
2625 prv_pkt->next = cur_pkt;
2626 /* Let's say we have to build a new dgram */
2627 prv_pkt = NULL;
2628 dglen += cur_pkt->len;
2629 /* Client: discard the Initial encryption keys as soon as
2630 * a handshake packet could be built.
2631 */
2632 if (HA_ATOMIC_LOAD(&qc->state) == QUIC_HS_ST_CLIENT_INITIAL &&
2633 pkt_type == QUIC_PACKET_TYPE_HANDSHAKE) {
2634 quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
2635 TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PHPKTS, qc);
2636 quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, qc);
2637 qc_set_timer(qc);
Frédéric Lécaillea6255f52022-01-19 17:29:48 +01002638 qc_el_rx_pkts_del(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
Frédéric Lécaillee87524d2022-01-19 17:48:40 +01002639 qc_release_pktns_frms(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns);
Frédéric Lécaille63556772021-12-29 17:18:21 +01002640 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_CLIENT_HANDSHAKE);
2641 }
2642 /* If the data for the current encryption level have all been sent,
2643 * select the next level.
2644 */
2645 if ((tel == QUIC_TLS_ENC_LEVEL_INITIAL || tel == QUIC_TLS_ENC_LEVEL_HANDSHAKE) &&
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01002646 (LIST_ISEMPTY(&qel->pktns->tx.frms))) {
Frédéric Lécaille63556772021-12-29 17:18:21 +01002647 /* If QUIC_TLS_ENC_LEVEL_HANDSHAKE was already reached let's try QUIC_TLS_ENC_LEVEL_APP */
2648 if (tel == QUIC_TLS_ENC_LEVEL_HANDSHAKE && next_tel == tel)
2649 next_tel = QUIC_TLS_ENC_LEVEL_APP;
2650 tel = next_tel;
2651 qel = &qc->els[tel];
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01002652 if (!LIST_ISEMPTY(&qel->pktns->tx.frms)) {
Frédéric Lécaille63556772021-12-29 17:18:21 +01002653 /* If there is data for the next level, do not
2654 * consume a datagram.
2655 */
2656 prv_pkt = cur_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002657 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002658 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002659 /* If we have to build a new datagram, set the current datagram as
2660 * prepared into <cbuf>.
2661 */
2662 if (!prv_pkt) {
2663 qc_set_dg(cbuf, dglen, first_pkt);
2664 first_pkt = NULL;
2665 dglen = 0;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01002666 padding = 0;
2667 }
2668		else if (prv_pkt->type == QUIC_PACKET_TYPE_INITIAL &&
Frédéric Lécaille1aa57d32022-01-12 09:46:02 +01002669 (!qc_is_listener(qc) ||
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01002670 prv_pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)) {
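			/* Request padding for the next coalesced packet: UDP datagrams
			 * carrying Initial packets are expected to be expanded to at
			 * least 1200 bytes, always by a client, and for ack-eliciting
			 * Initial packets by a server (RFC 9000, 14.1).
			 */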
2671 padding = 1;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002672 }
2673 }
2674
2675 stop_build:
2676 /* Reset <wr> writer index if in front of <rd> index */
2677 if (end_buf - pos < (int)qc->path->mtu + dg_headlen) {
2678 int rd = HA_ATOMIC_LOAD(&cbuf->rd);
2679
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002680 TRACE_DEVEL("buffer full", QUIC_EV_CONN_PHPKTS, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002681 if (cb_contig_space(cbuf) >= sizeof(uint16_t)) {
2682 if ((pos != spos && cbuf->wr > rd) || (pos == spos && rd <= cbuf->wr)) {
2683 /* Mark the end of contiguous data for the reader */
2684 write_u16(cb_wr(cbuf), 0);
2685 cb_add(cbuf, sizeof(uint16_t));
2686 }
2687 }
2688
2689 if (rd && rd <= cbuf->wr) {
2690 cb_wr_reset(cbuf);
Frédéric Lécaille99942d62022-01-07 14:32:31 +01002691 /* Let's try to reuse this buffer */
2692 goto start;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002693 }
2694 }
2695
2696 out:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002697 TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002698 return total;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002699
2700 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002701 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PHPKTS, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002702 return -1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002703}
2704
2705/* Send the QUIC packets which have been prepared for the QUIC connection
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002706 * from <qr> ring buffer with <ctx> as I/O handler context.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002707 */
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002708int qc_send_ppkts(struct qring *qr, struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002709{
2710 struct quic_conn *qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002711 struct cbuf *cbuf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002712
Amaury Denoyellec15dd922021-12-21 11:41:52 +01002713 qc = ctx->qc;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002714 cbuf = qr->cbuf;
2715 while (cb_contig_data(cbuf)) {
2716 unsigned char *pos;
2717 struct buffer tmpbuf = { };
2718 struct quic_tx_packet *first_pkt, *pkt, *next_pkt;
2719 uint16_t dglen;
2720 size_t headlen = sizeof dglen + sizeof first_pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002721 unsigned int time_sent;
2722
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002723 pos = cb_rd(cbuf);
2724 dglen = read_u16(pos);
2725 /* End of prepared datagrams.
2726 * Reset the reader index only if in front of the writer index.
2727 */
2728 if (!dglen) {
2729 int wr = HA_ATOMIC_LOAD(&cbuf->wr);
2730
2731 if (wr && wr < cbuf->rd) {
2732 cb_rd_reset(cbuf);
2733 continue;
2734 }
2735 break;
2736 }
2737
2738 pos += sizeof dglen;
2739 first_pkt = read_ptr(pos);
2740 pos += sizeof first_pkt;
2741 tmpbuf.area = (char *)pos;
2742 tmpbuf.size = tmpbuf.data = dglen;
2743
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002744 TRACE_PROTO("to send", QUIC_EV_CONN_SPPKTS, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002745 for (pkt = first_pkt; pkt; pkt = pkt->next)
2746 quic_tx_packet_refinc(pkt);
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01002747 if (ctx->xprt->snd_buf(NULL, qc->xprt_ctx,
Amaury Denoyelle74f22922022-01-18 16:48:17 +01002748 &tmpbuf, tmpbuf.data, 0) <= 0) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002749 for (pkt = first_pkt; pkt; pkt = pkt->next)
2750 quic_tx_packet_refdec(pkt);
Amaury Denoyelle74f22922022-01-18 16:48:17 +01002751 break;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002752 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002753
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002754 cb_del(cbuf, dglen + headlen);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002755 qc->tx.bytes += tmpbuf.data;
2756 time_sent = now_ms;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002757
2758 for (pkt = first_pkt; pkt; pkt = next_pkt) {
2759 pkt->time_sent = time_sent;
2760 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING) {
2761 pkt->pktns->tx.time_of_last_eliciting = time_sent;
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01002762 qc->path->ifae_pkts++;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002763 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002764 qc->path->in_flight += pkt->in_flight_len;
2765 pkt->pktns->tx.in_flight += pkt->in_flight_len;
2766 if (pkt->in_flight_len)
Amaury Denoyellee81fed92021-12-22 11:06:34 +01002767 qc_set_timer(qc);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01002768 TRACE_PROTO("sent pkt", QUIC_EV_CONN_SPPKTS, qc, pkt);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002769 next_pkt = pkt->next;
Frédéric Lécaille0eb60c52021-07-19 14:48:36 +02002770 eb64_insert(&pkt->pktns->tx.pkts, &pkt->pn_node);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02002771 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002772 }
2773 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002774
2775 return 1;
2776}
2777
2778/* Build all the frames which must be sent just after the handshake has succeeded.
2779 * These are essentially NEW_CONNECTION_ID frames. A QUIC server must also send
2780 * a HANDSHAKE_DONE frame.
2781 * Return 1 if succeeded, 0 if not.
2782 */
Frédéric Lécaille522c65c2021-08-03 14:29:03 +02002783static int quic_build_post_handshake_frames(struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002784{
2785 int i;
Frédéric Lécaille522c65c2021-08-03 14:29:03 +02002786 struct quic_enc_level *qel;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002787 struct quic_frame *frm;
2788
Frédéric Lécaille522c65c2021-08-03 14:29:03 +02002789 qel = &qc->els[QUIC_TLS_ENC_LEVEL_APP];
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002790 /* Only servers must send a HANDSHAKE_DONE frame. */
Frédéric Lécaille1aa57d32022-01-12 09:46:02 +01002791 if (qc_is_listener(qc)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002792 frm = pool_alloc(pool_head_quic_frame);
Frédéric Lécaille153d4a82021-01-06 12:12:39 +01002793 if (!frm)
2794 return 0;
2795
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002796 frm->type = QUIC_FT_HANDSHAKE_DONE;
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01002797 LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002798 }
2799
Frédéric Lécaille522c65c2021-08-03 14:29:03 +02002800 for (i = 1; i < qc->tx.params.active_connection_id_limit; i++) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002801 struct quic_connection_id *cid;
Amaury Denoyelle29632b82022-01-18 16:50:58 +01002802 struct listener *l = qc->li;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002803
2804 frm = pool_alloc(pool_head_quic_frame);
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01002805 if (!frm)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002806 goto err;
2807
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01002808 cid = new_quic_cid(&qc->cids, qc, i);
2809 if (!cid)
2810 goto err;
2811
2812 /* insert the allocated CID in the receiver tree */
2813 HA_RWLOCK_WRLOCK(QUIC_LOCK, &l->rx.cids_lock);
2814 ebmb_insert(&l->rx.cids, &cid->node, cid->cid.len);
2815 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
2816
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002817 quic_connection_id_to_frm_cpy(frm, cid);
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01002818 LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002819 }
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01002820 HA_ATOMIC_OR(&qc->flags, QUIC_FL_POST_HANDSHAKE_FRAMES_BUILT);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002821
2822 return 1;
2823
2824 err:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002825 return 0;
2826}
2827
2828/* Deallocate the ACK ranges of <arngs> tree. */
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002829void free_quic_arngs(struct quic_arngs *arngs)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002830{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002831 struct eb64_node *n;
2832 struct quic_arng_node *ar;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002833
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002834 n = eb64_first(&arngs->root);
2835 while (n) {
2836 struct eb64_node *next;
2837
2838 ar = eb64_entry(&n->node, struct quic_arng_node, first);
2839 next = eb64_next(n);
2840 eb64_delete(n);
2841 free(ar);
2842 n = next;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002843 }
2844}
2845
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002846/* Return the gap value between <p> and <q> ACK ranges where <q> follows <p> in
2847 * descending order.
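 *
 * For example (illustrative values only): with <p> = [10 .. 12] and
 * <q> = [3 .. 5], the gap is 10 - 5 - 2 = 3, so that <q>'s upper bound can
 * be rebuilt from the encoding as p->first - gap - 2 = 5.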
2848 */
2849static inline size_t sack_gap(struct quic_arng_node *p,
2850 struct quic_arng_node *q)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002851{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002852 return p->first.key - q->last - 2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002853}
2854
2855
2856/* Remove the last elements of <ack_ranges> list of ack range updating its
2857 * encoded size until it goes below <limit>.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05002858 * Returns 1 if succeeded, 0 if not (no more element to remove).
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002859 */
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002860static int quic_rm_last_ack_ranges(struct quic_arngs *arngs, size_t limit)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002861{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002862 struct eb64_node *last, *prev;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002863
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002864 last = eb64_last(&arngs->root);
2865 while (last && arngs->enc_sz > limit) {
2866 struct quic_arng_node *last_node, *prev_node;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002867
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002868 prev = eb64_prev(last);
2869 if (!prev)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002870 return 0;
2871
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002872 last_node = eb64_entry(&last->node, struct quic_arng_node, first);
2873 prev_node = eb64_entry(&prev->node, struct quic_arng_node, first);
2874 arngs->enc_sz -= quic_int_getsize(last_node->last - last_node->first.key);
2875 arngs->enc_sz -= quic_int_getsize(sack_gap(prev_node, last_node));
2876 arngs->enc_sz -= quic_decint_size_diff(arngs->sz);
2877 --arngs->sz;
2878 eb64_delete(last);
2879 pool_free(pool_head_quic_arng, last);
2880 last = prev;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002881 }
2882
2883 return 1;
2884}
2885
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002886/* Set the encoded size of <arngs> QUIC ack ranges. */
2887static void quic_arngs_set_enc_sz(struct quic_arngs *arngs)
2888{
2889 struct eb64_node *node, *next;
2890 struct quic_arng_node *ar, *ar_next;
2891
2892 node = eb64_last(&arngs->root);
2893 if (!node)
2894 return;
2895
2896 ar = eb64_entry(&node->node, struct quic_arng_node, first);
2897 arngs->enc_sz = quic_int_getsize(ar->last) +
2898 quic_int_getsize(ar->last - ar->first.key) + quic_int_getsize(arngs->sz - 1);
2899
2900 while ((next = eb64_prev(node))) {
2901 ar_next = eb64_entry(&next->node, struct quic_arng_node, first);
2902 arngs->enc_sz += quic_int_getsize(sack_gap(ar, ar_next)) +
2903 quic_int_getsize(ar_next->last - ar_next->first.key);
2904 node = next;
2905 ar = eb64_entry(&node->node, struct quic_arng_node, first);
2906 }
2907}
2908
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002909/* Insert <ar> ack range into <arngs> tree of ack ranges.
2910 * Returns the ack range node which has been inserted if succeeded, NULL if not.
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002911 */
2912static inline
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002913struct quic_arng_node *quic_insert_new_range(struct quic_arngs *arngs,
2914 struct quic_arng *ar)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002915{
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002916 struct quic_arng_node *new_ar;
2917
2918 new_ar = pool_alloc(pool_head_quic_arng);
2919 if (new_ar) {
2920 new_ar->first.key = ar->first;
2921 new_ar->last = ar->last;
2922 eb64_insert(&arngs->root, &new_ar->first);
2923 arngs->sz++;
2924 }
2925
2926 return new_ar;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002927}
2928
2929/* Update <arngs> tree of ACK ranges with <ar> as new ACK range value.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002930 * Note that this function computes the number of bytes required to encode
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002931 * this tree of ACK ranges in descending order.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002932 *
2933 * Descending order
2934 * ------------->
2935 * range1 range2
2936 * ..........|--------|..............|--------|
2937 * ^ ^ ^ ^
2938 * | | | |
2939 * last1 first1 last2 first2
2940 * ..........+--------+--------------+--------+......
2941 * diff1 gap12 diff2
2942 *
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002943 * To encode the previous list of ranges we must encode integers as follows in
2944 * descending order:
2945 *          enc(last1),enc(diff1),enc(gap12),enc(diff2)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002946 * with diff1 = last1 - first1
2947 * diff2 = last2 - first2
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002948 * gap12 = first1 - last2 - 2 (>= 0)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002949 *
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002950 */
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002951int quic_update_ack_ranges_list(struct quic_arngs *arngs,
2952 struct quic_arng *ar)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002953{
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002954 struct eb64_node *le;
2955 struct quic_arng_node *new_node;
2956 struct eb64_node *new;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002957
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002958 new = NULL;
2959 if (eb_is_empty(&arngs->root)) {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002960 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002961 if (!new_node)
2962 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002963
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002964 goto out;
2965 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002966
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002967 le = eb64_lookup_le(&arngs->root, ar->first);
2968 if (!le) {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002969 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002970 if (!new_node)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002971 return 0;
Frédéric Lécaille0e257832021-11-16 10:54:19 +01002972
2973 new = &new_node->first;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002974 }
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002975 else {
2976 struct quic_arng_node *le_ar =
2977 eb64_entry(&le->node, struct quic_arng_node, first);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002978
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002979 /* Already existing range */
Frédéric Lécailled3f4dd82021-06-02 15:36:12 +02002980 if (le_ar->last >= ar->last)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002981 return 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002982
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002983 if (le_ar->last + 1 >= ar->first) {
2984 le_ar->last = ar->last;
2985 new = le;
2986 new_node = le_ar;
2987 }
2988 else {
Frédéric Lécaille9ef64cd2021-06-02 15:27:34 +02002989 new_node = quic_insert_new_range(arngs, ar);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002990 if (!new_node)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002991 return 0;
Frédéric Lécaille8ba42762021-06-02 17:40:09 +02002992
2993 new = &new_node->first;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002994 }
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002995 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01002996
Frédéric Lécaille8090b512020-11-30 16:19:22 +01002997	/* Verify that the newly inserted node does not overlap the nodes
2998 * which follow it.
2999 */
3000 if (new) {
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003001 struct eb64_node *next;
3002 struct quic_arng_node *next_node;
3003
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003004 while ((next = eb64_next(new))) {
3005 next_node =
3006 eb64_entry(&next->node, struct quic_arng_node, first);
Frédéric Lécaillec825eba2021-06-02 17:38:13 +02003007 if (new_node->last + 1 < next_node->first.key)
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003008 break;
3009
3010 if (next_node->last > new_node->last)
3011 new_node->last = next_node->last;
3012 eb64_delete(next);
Frédéric Lécaillebaea2842021-06-02 15:04:03 +02003013 pool_free(pool_head_quic_arng, next_node);
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003014 /* Decrement the size of these ranges. */
3015 arngs->sz--;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003016 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003017 }
3018
Frédéric Lécaille82b86522021-08-10 09:54:03 +02003019 out:
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003020 quic_arngs_set_enc_sz(arngs);
3021
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003022 return 1;
3023}
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003024/* Remove the header protection of packets at <el> encryption level.
3025 * Always succeeds.
3026 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003027static inline void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003028{
3029 struct quic_tls_ctx *tls_ctx;
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02003030 struct quic_rx_packet *pqpkt;
3031 struct mt_list *pkttmp1, pkttmp2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003032 struct quic_enc_level *app_qel;
3033
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003034 TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc);
3035 app_qel = &qc->els[QUIC_TLS_ENC_LEVEL_APP];
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003036 /* A server must not process incoming 1-RTT packets before the handshake is complete. */
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01003037 if (el == app_qel && qc_is_listener(qc) &&
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003038 HA_ATOMIC_LOAD(&qc->state) < QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003039 TRACE_PROTO("hp not removed (handshake not completed)",
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003040 QUIC_EV_CONN_ELRMHP, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003041 goto out;
3042 }
3043 tls_ctx = &el->tls_ctx;
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02003044 mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003045 if (!qc_do_rm_hp(qc, pqpkt, tls_ctx, el->pktns->rx.largest_pn,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003046 pqpkt->data + pqpkt->pn_offset,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003047 pqpkt->data, pqpkt->data + pqpkt->len)) {
3048 TRACE_PROTO("hp removing error", QUIC_EV_CONN_ELRMHP, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003049 /* XXX TO DO XXX */
3050 }
3051 else {
3052 /* The AAD includes the packet number field */
3053 pqpkt->aad_len = pqpkt->pn_offset + pqpkt->pnl;
3054 /* Store the packet into the tree of packets to decrypt. */
3055 pqpkt->pn_node.key = pqpkt->pn;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02003056 HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Frédéric Lécailleebc3fc12021-09-22 08:34:21 +02003057 eb64_insert(&el->rx.pkts, &pqpkt->pn_node);
3058 quic_rx_packet_refinc(pqpkt);
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02003059 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003060 TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, qc, pqpkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003061 }
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02003062 MT_LIST_DELETE_SAFE(pkttmp1);
3063 quic_rx_packet_refdec(pqpkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003064 }
3065
3066 out:
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003067 TRACE_LEAVE(QUIC_EV_CONN_ELRMHP, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003068}
3069
3070/* Process all the CRYPTO frames at <el> encryption level.
3071 * Return 1 if succeeded, 0 if not.
3072 */
3073static inline int qc_treat_rx_crypto_frms(struct quic_enc_level *el,
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003074 struct ssl_sock_ctx *ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003075{
3076 struct eb64_node *node;
3077
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003078 node = eb64_first(&el->rx.crypto.frms);
3079 while (node) {
3080 struct quic_rx_crypto_frm *cf;
3081
3082 cf = eb64_entry(&node->node, struct quic_rx_crypto_frm, offset_node);
3083 if (cf->offset_node.key != el->rx.crypto.offset)
3084 break;
3085
3086 if (!qc_provide_cdata(el, ctx, cf->data, cf->len, cf->pkt, cf))
3087 goto err;
3088
3089 node = eb64_next(node);
3090 quic_rx_packet_refdec(cf->pkt);
3091 eb64_delete(&cf->offset_node);
3092 pool_free(pool_head_quic_rx_crypto_frm, cf);
3093 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003094 return 1;
3095
3096 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003097 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_RXCDATA, ctx->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003098 return 0;
3099}
3100
Frédéric Lécaille3230bcf2021-09-22 15:15:46 +02003101/* Process all the packets at <cur_el> and <next_el> encryption levels.
Ilya Shipitsinbd6b4be2021-10-15 16:18:21 +05003102 * It is the caller's responsibility to check that <cur_el> is different from <next_el>
Frédéric Lécaille3230bcf2021-09-22 15:15:46 +02003103 * as a pointer value.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003104 * Return 1 if succeeded, 0 if not.
3105 */
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003106int qc_treat_rx_pkts(struct quic_enc_level *cur_el, struct quic_enc_level *next_el,
3107 struct ssl_sock_ctx *ctx, int force_ack)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003108{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003109 struct eb64_node *node;
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02003110 int64_t largest_pn = -1;
Amaury Denoyelle29632b82022-01-18 16:50:58 +01003111 struct quic_conn *qc = ctx->qc;
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003112 struct quic_enc_level *qel = cur_el;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003113
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003114 TRACE_ENTER(QUIC_EV_CONN_ELRXPKTS, ctx->qc);
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003115 qel = cur_el;
3116 next_tel:
3117 if (!qel)
3118 goto out;
3119
3120 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
3121 node = eb64_first(&qel->rx.pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003122 while (node) {
3123 struct quic_rx_packet *pkt;
3124
3125 pkt = eb64_entry(&node->node, struct quic_rx_packet, pn_node);
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003126 TRACE_PROTO("new packet", QUIC_EV_CONN_ELRXPKTS,
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003127 ctx->qc, pkt, NULL, ctx->ssl);
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01003128 if (!qc_pkt_decrypt(pkt, qel)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003129 /* Drop the packet */
3130 TRACE_PROTO("packet decryption failed -> dropped",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003131 QUIC_EV_CONN_ELRXPKTS, ctx->qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003132 }
3133 else {
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003134 if (!qc_parse_pkt_frms(pkt, ctx, qel)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003135 /* Drop the packet */
3136 TRACE_PROTO("packet parsing failed -> dropped",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003137 QUIC_EV_CONN_ELRXPKTS, ctx->qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003138 }
3139 else {
Frédéric Lécaille8090b512020-11-30 16:19:22 +01003140 struct quic_arng ar = { .first = pkt->pn, .last = pkt->pn };
3141
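				/* Request that an ACK be sent for (roughly) every other
				 * ack-eliciting packet received, or systematically when
				 * <force_ack> is set (Initial and Handshake levels).
				 */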
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02003142 if (pkt->flags & QUIC_FL_RX_PACKET_ACK_ELICITING &&
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003143 (!(HA_ATOMIC_ADD_FETCH(&qc->rx.nb_ack_eliciting, 1) & 1) || force_ack))
Frédéric Lécaille25eeebe2021-12-16 11:21:52 +01003144 HA_ATOMIC_BTS(&qel->pktns->flags, QUIC_FL_PKTNS_ACK_REQUIRED_BIT);
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02003145 if (pkt->pn > largest_pn)
3146 largest_pn = pkt->pn;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003147 /* Update the list of ranges to acknowledge. */
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003148 if (!quic_update_ack_ranges_list(&qel->pktns->rx.arngs, &ar))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003149 TRACE_DEVEL("Could not update ack range list",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003150 QUIC_EV_CONN_ELRXPKTS, ctx->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003151 }
3152 }
3153 node = eb64_next(node);
Frédéric Lécailleebc3fc12021-09-22 08:34:21 +02003154 eb64_delete(&pkt->pn_node);
3155 quic_rx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003156 }
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003157 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003158
Frédéric Lécaille120ea6f2021-07-26 16:42:56 +02003159 /* Update the largest packet number. */
3160 if (largest_pn != -1)
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003161 HA_ATOMIC_UPDATE_MAX(&qel->pktns->rx.largest_pn, largest_pn);
3162 if (!qc_treat_rx_crypto_frms(qel, ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003163 goto err;
3164
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003165 if (qel == cur_el) {
Frédéric Lécaille3230bcf2021-09-22 15:15:46 +02003166 BUG_ON(qel == next_el);
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003167 qel = next_el;
3168 goto next_tel;
3169 }
3170
3171 out:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003172 TRACE_LEAVE(QUIC_EV_CONN_ELRXPKTS, ctx->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003173 return 1;
3174
3175 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003176 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ELRXPKTS, ctx->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003177 return 0;
3178}
3179
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003180/* Check if it's possible to remove header protection for packets related to
3181 * encryption level <qel>. If <qel> is NULL, assume it is not possible.
3182 *
3183 * Return true if the operation is possible else false.
3184 */
3185static int qc_qel_may_rm_hp(struct quic_conn *qc, struct quic_enc_level *qel)
3186{
3187 enum quic_tls_enc_level tel;
3188
3189 if (!qel)
3190 return 0;
3191
3192 tel = ssl_to_quic_enc_level(qel->level);
3193
3194 /* check if tls secrets are available */
3195 if (qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD)
3196 TRACE_DEVEL("Discarded keys", QUIC_EV_CONN_TRMHP, qc);
3197
3198 if (!(qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET))
3199 return 0;
3200
Amaury Denoyelle0b1f9312022-01-26 09:51:28 +01003201 /* check if the connection layer is ready before using app level */
3202 if (tel == QUIC_TLS_ENC_LEVEL_APP && qc->mux_state != QC_MUX_READY)
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003203 return 0;
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003204
3205 return 1;
3206}
3207
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003208/* QUIC connection packet handler task. */
3209struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003210{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003211 int ret, ssl_err;
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003212 struct ssl_sock_ctx *ctx;
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02003213 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003214 enum quic_tls_enc_level tel, next_tel;
3215 struct quic_enc_level *qel, *next_qel;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003216 struct qring *qr; // Tx ring
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01003217 int st, force_ack, zero_rtt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003218
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003219 ctx = context;
Amaury Denoyellec15dd922021-12-21 11:41:52 +01003220 qc = ctx->qc;
Frédéric Lécailleba85acd2022-01-11 14:43:50 +01003221 TRACE_ENTER(QUIC_EV_CONN_HDSHK, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003222 qr = NULL;
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02003223 st = HA_ATOMIC_LOAD(&qc->state);
Frédéric Lécailleba85acd2022-01-11 14:43:50 +01003224 TRACE_PROTO("state", QUIC_EV_CONN_HDSHK, qc, &st);
Frédéric Lécaille6b663152022-01-04 17:03:11 +01003225 if (HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_IO_CB_WAKEUP) {
3226 HA_ATOMIC_BTR(&qc->flags, QUIC_FL_CONN_IO_CB_WAKEUP_BIT);
3227 /* The I/O handler has been woken up by the dgram listener
3228		 * after the anti-amplification limit was reached.
3229 */
3230 qc_set_timer(qc);
3231 if (tick_isset(qc->timer) && tick_is_lt(qc->timer, now_ms))
3232 task_wakeup(qc->timer_task, TASK_WOKEN_MSG);
3233 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003234 ssl_err = SSL_ERROR_NONE;
Frédéric Lécaille4137b2d2021-12-17 18:24:16 +01003235 zero_rtt = st < QUIC_HS_ST_COMPLETE &&
3236 (!MT_LIST_ISEMPTY(&qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA].rx.pqpkts) ||
3237 qc_el_rx_pkts(&qc->els[QUIC_TLS_ENC_LEVEL_EARLY_DATA]));
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003238 start:
Frédéric Lécaille917a7db2022-01-03 17:00:35 +01003239 if (st >= QUIC_HS_ST_COMPLETE &&
3240 qc_el_rx_pkts(&qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE])) {
3241 TRACE_PROTO("remaining Handshake packets", QUIC_EV_CONN_PHPKTS, qc);
3242 /* There may be remaining Handshake packets to treat and acknowledge. */
3243 tel = QUIC_TLS_ENC_LEVEL_HANDSHAKE;
3244 next_tel = QUIC_TLS_ENC_LEVEL_APP;
3245 }
3246 else if (!quic_get_tls_enc_levels(&tel, &next_tel, st, zero_rtt))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003247 goto err;
3248
Frédéric Lécaillea5fe49f2021-06-04 11:52:35 +02003249 qel = &qc->els[tel];
Frédéric Lécaillef7980962021-08-19 17:35:21 +02003250 next_qel = next_tel == QUIC_TLS_ENC_LEVEL_NONE ? NULL : &qc->els[next_tel];
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003251
3252 next_level:
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003253	/* Process the packets still waiting for header protection removal */
3254 if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003255 qc_rm_hp_pkts(qc, qel);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003256
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003257 force_ack = qel == &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL] ||
3258 qel == &qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE];
3259 if (!qc_treat_rx_pkts(qel, next_qel, ctx, force_ack))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003260 goto err;
3261
Frédéric Lécaillea5da31d2021-12-14 19:44:14 +01003262 if (zero_rtt && next_qel && !MT_LIST_ISEMPTY(&next_qel->rx.pqpkts) &&
3263 (next_qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET)) {
3264 qel = next_qel;
3265 next_qel = NULL;
3266 goto next_level;
3267 }
3268
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003269 st = HA_ATOMIC_LOAD(&qc->state);
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01003270 if (st >= QUIC_HS_ST_COMPLETE) {
3271 if (!(HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_POST_HANDSHAKE_FRAMES_BUILT) &&
3272 !quic_build_post_handshake_frames(qc))
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003273 goto err;
Frédéric Lécaillefee7ba62021-12-06 12:09:08 +01003274
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01003275 if (!(qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].tls_ctx.rx.flags &
3276 QUIC_FL_TLS_SECRETS_DCD)) {
3277 /* Discard the Handshake keys. */
3278 quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE]);
3279 TRACE_PROTO("discarding Handshake pktns", QUIC_EV_CONN_PHPKTS, qc);
3280 quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns, qc);
3281 qc_set_timer(qc);
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01003282 qc_el_rx_pkts_del(&qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE]);
Frédéric Lécaillee87524d2022-01-19 17:48:40 +01003283 qc_release_pktns_frms(qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns);
Frédéric Lécaillede6f7c52022-01-03 17:25:53 +01003284 }
3285
3286 if (qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) {
3287			/* There may be remaining Handshake packets to build (acknowledgements) */
3288 st = QUIC_HS_ST_SERVER_HANDSHAKE;
3289 }
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003290 }
3291
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003292 if (!qr)
3293 qr = MT_LIST_POP(qc->tx.qring_list, typeof(qr), mt_list);
Frédéric Lécaillebec186d2022-01-12 15:32:55 +01003294	/* A listener does not send any 0-RTT packet. The 0-RTT packet number space must not
3295 * be considered.
3296 */
3297 if (!quic_get_tls_enc_levels(&tel, &next_tel, st, 0))
Frédéric Lécailleee2b8b32022-01-03 11:14:30 +01003298 goto err;
3299 ret = qc_prep_pkts(qc, qr, tel, next_tel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003300 if (ret == -1)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003301 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003302 else if (ret == 0)
3303 goto skip_send;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003304
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003305 if (!qc_send_ppkts(qr, ctx))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003306 goto err;
3307
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003308 skip_send:
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003309 /* Check if there is something to do for the next level.
3310 */
Frédéric Lécaille3230bcf2021-09-22 15:15:46 +02003311 if (next_qel && next_qel != qel &&
3312 (next_qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET) &&
Frédéric Lécaille7d807c92021-12-06 08:56:38 +01003313 (!MT_LIST_ISEMPTY(&next_qel->rx.pqpkts) || qc_el_rx_pkts(next_qel))) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003314 qel = next_qel;
Frédéric Lécaille3230bcf2021-09-22 15:15:46 +02003315 next_qel = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003316 goto next_level;
3317 }
3318
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003319 MT_LIST_APPEND(qc->tx.qring_list, &qr->mt_list);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003320 TRACE_LEAVE(QUIC_EV_CONN_HDSHK, qc, &st);
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003321 return t;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003322
3323 err:
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02003324 if (qr)
3325 MT_LIST_APPEND(qc->tx.qring_list, &qr->mt_list);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003326 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_HDSHK, qc, &st, &ssl_err);
Frédéric Lécaille91ae7aa2021-08-03 16:45:39 +02003327 return t;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003328}
3329
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003330/* Uninitialize <qel> QUIC encryption level. Never fails. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003331static void quic_conn_enc_level_uninit(struct quic_enc_level *qel)
3332{
3333 int i;
3334
3335 for (i = 0; i < qel->tx.crypto.nb_buf; i++) {
3336 if (qel->tx.crypto.bufs[i]) {
3337 pool_free(pool_head_quic_crypto_buf, qel->tx.crypto.bufs[i]);
3338 qel->tx.crypto.bufs[i] = NULL;
3339 }
3340 }
Willy Tarreau61cfdf42021-02-20 10:46:51 +01003341 ha_free(&qel->tx.crypto.bufs);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003342}
3343
3344/* Initialize QUIC TLS encryption level with <level> as level for <qc> QUIC
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05003345 * connection, allocating everything needed.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003346 * Returns 1 if succeeded, 0 if not.
3347 */
3348static int quic_conn_enc_level_init(struct quic_conn *qc,
3349 enum quic_tls_enc_level level)
3350{
3351 struct quic_enc_level *qel;
3352
3353 qel = &qc->els[level];
3354 qel->level = quic_to_ssl_enc_level(level);
3355 qel->tls_ctx.rx.aead = qel->tls_ctx.tx.aead = NULL;
3356 qel->tls_ctx.rx.md = qel->tls_ctx.tx.md = NULL;
3357 qel->tls_ctx.rx.hp = qel->tls_ctx.tx.hp = NULL;
3358 qel->tls_ctx.rx.flags = 0;
3359 qel->tls_ctx.tx.flags = 0;
Frédéric Lécaillefc768ec2021-11-23 21:02:04 +01003360 qel->tls_ctx.flags = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003361
3362 qel->rx.pkts = EB_ROOT;
Frédéric Lécaille98cdeb22021-07-26 16:38:14 +02003363 HA_RWLOCK_INIT(&qel->rx.pkts_rwlock);
Frédéric Lécaillea11d0e22021-06-07 14:38:18 +02003364 MT_LIST_INIT(&qel->rx.pqpkts);
Frédéric Lécaille9054d1b2021-07-26 16:23:53 +02003365 qel->rx.crypto.offset = 0;
3366 qel->rx.crypto.frms = EB_ROOT_UNIQUE;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003367
3368 /* Allocate only one buffer. */
3369 qel->tx.crypto.bufs = malloc(sizeof *qel->tx.crypto.bufs);
3370 if (!qel->tx.crypto.bufs)
3371 goto err;
3372
3373 qel->tx.crypto.bufs[0] = pool_alloc(pool_head_quic_crypto_buf);
3374 if (!qel->tx.crypto.bufs[0])
3375 goto err;
3376
3377 qel->tx.crypto.bufs[0]->sz = 0;
3378 qel->tx.crypto.nb_buf = 1;
3379
3380 qel->tx.crypto.sz = 0;
3381 qel->tx.crypto.offset = 0;
3382
3383 return 1;
3384
3385 err:
Willy Tarreau61cfdf42021-02-20 10:46:51 +01003386 ha_free(&qel->tx.crypto.bufs);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003387 return 0;
3388}
3389
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003390/* Increment the <qc> refcount.
3391 *
3392 * This operation must be conducted when manipulating the quic_conn outside of
3393 * the connection pinned thread. These threads can only retrieve the connection
3394 * in the CID tree, so this function must be conducted under the CID lock.
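 *
 * A minimal usage sketch (illustrative only; qc_of_cid_node() below is a
 * hypothetical helper standing for "retrieve the quic_conn attached to the
 * quic_connection_id node"):
 *
 *     HA_RWLOCK_RDLOCK(QUIC_LOCK, &l->rx.cids_lock);
 *     node = ebmb_lookup(&l->rx.cids, cid, cid_len);
 *     if (node) {
 *         qc = qc_of_cid_node(node); // hypothetical helper
 *         quic_conn_take(qc);
 *     }
 *     HA_RWLOCK_RDUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
 *     ... use <qc> ...
 *     quic_conn_drop(qc); // release the reference taken above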
3395 */
3396static inline void quic_conn_take(struct quic_conn *qc)
3397{
3398 HA_ATOMIC_INC(&qc->refcount);
3399}
3400
3401/* Decrement the <qc> refcount. If the refcount is zero *BEFORE* the
Ilya Shipitsin37d3e382022-01-07 14:46:15 +05003402 * subtraction, the quic_conn is freed.
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003403 */
3404static void quic_conn_drop(struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003405{
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003406 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003407 int i;
3408
Amaury Denoyelle67e6cd52021-12-13 17:07:03 +01003409 if (!qc)
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003410 return;
3411
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003412 if (HA_ATOMIC_FETCH_SUB(&qc->refcount, 1))
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003413 return;
Amaury Denoyelle2af19852021-09-30 11:03:28 +02003414
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003415 conn_ctx = HA_ATOMIC_LOAD(&qc->xprt_ctx);
Amaury Denoyelle2d9794b2022-01-20 17:43:20 +01003416 if (conn_ctx) {
3417 SSL_free(conn_ctx->ssl);
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003418 pool_free(pool_head_quic_conn_ctx, conn_ctx);
Amaury Denoyelle2d9794b2022-01-20 17:43:20 +01003419 }
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003420
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003421 for (i = 0; i < QUIC_TLS_ENC_LEVEL_MAX; i++)
Amaury Denoyelle67e6cd52021-12-13 17:07:03 +01003422 quic_conn_enc_level_uninit(&qc->els[i]);
Amaury Denoyelle0a29e132021-12-23 15:06:56 +01003423
Amaury Denoyelle67e6cd52021-12-13 17:07:03 +01003424 pool_free(pool_head_quic_conn_rxbuf, qc->rx.buf.area);
3425 pool_free(pool_head_quic_conn, qc);
Frédéric Lécailleba85acd2022-01-11 14:43:50 +01003426 TRACE_PROTO("QUIC conn. freed", QUIC_EV_CONN_FREED, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003427}
3428
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003429/* Release the quic_conn <qc>. It will decrement its refcount so that the
3430 * connection will be freed once all threads have finished working with it. The
3431 * connection is removed from the CIDs tree and thus cannot be found by other
Amaury Denoyelle760da3b2022-01-20 17:43:02 +01003432 * threads afterwards. The connection tasklet is killed.
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003433 *
3434 * Do not use <qc> after this call as it may be freed. This function must only be
3435 * called by the thread responsible of the quic_conn tasklet.
3436 */
3437static void quic_conn_release(struct quic_conn *qc)
3438{
Amaury Denoyelle760da3b2022-01-20 17:43:02 +01003439 struct ssl_sock_ctx *conn_ctx;
3440
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003441 /* remove the connection from receiver cids trees */
3442 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->li->rx.cids_lock);
3443 ebmb_delete(&qc->odcid_node);
3444 ebmb_delete(&qc->scid_node);
3445 free_quic_conn_cids(qc);
3446 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->li->rx.cids_lock);
3447
Amaury Denoyelle760da3b2022-01-20 17:43:02 +01003448	/* Kill the tasklet. Do not use tasklet_free as it is not thread safe here:
3449	 * other threads may still call tasklet_wakeup after this.
3450 */
3451 conn_ctx = HA_ATOMIC_LOAD(&qc->xprt_ctx);
3452 if (conn_ctx)
3453 tasklet_kill(conn_ctx->wait_event.tasklet);
3454
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003455 quic_conn_drop(qc);
3456}
3457
Amaury Denoyelle414cac52021-09-22 11:14:37 +02003458void quic_close(struct connection *conn, void *xprt_ctx)
3459{
3460 struct ssl_sock_ctx *conn_ctx = xprt_ctx;
Amaury Denoyelle29632b82022-01-18 16:50:58 +01003461 struct quic_conn *qc = conn_ctx->qc;
Amaury Denoyelle0a29e132021-12-23 15:06:56 +01003462
Frédéric Lécailleba85acd2022-01-11 14:43:50 +01003463 TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
Amaury Denoyelle0a29e132021-12-23 15:06:56 +01003464 /* This task must be deleted by the connection-pinned thread. */
3465 if (qc->timer_task) {
3466 task_destroy(qc->timer_task);
3467 qc->timer_task = NULL;
3468 }
3469
Amaury Denoyelle0b1f9312022-01-26 09:51:28 +01003470 /* Next application data can be dropped. */
3471 qc->mux_state = QC_MUX_RELEASED;
3472
Frédéric Lécailleba85acd2022-01-11 14:43:50 +01003473 TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
Amaury Denoyelle9c4da932022-01-21 14:54:58 +01003474
Amaury Denoyelle2eb7b302022-01-20 16:40:36 +01003475 /* TODO for now release the quic_conn on notification by the upper
3476 * layer. It could be useful to delay it if there is remaining data to
3477 * send or data to be acked.
3478 */
3479 quic_conn_release(qc);
Amaury Denoyelle414cac52021-09-22 11:14:37 +02003480}
3481
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003482/* Callback called upon loss detection and PTO timer expirations. */
Willy Tarreau144f84a2021-03-02 16:09:26 +01003483static struct task *process_timer(struct task *task, void *ctx, unsigned int state)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003484{
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003485 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003486 struct quic_conn *qc;
3487 struct quic_pktns *pktns;
Frédéric Lécaillea56054e2021-12-31 16:35:28 +01003488 int i, st;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003489
3490 conn_ctx = task->context;
Amaury Denoyellec15dd922021-12-21 11:41:52 +01003491 qc = conn_ctx->qc;
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003492 TRACE_ENTER(QUIC_EV_CONN_PTIMER, qc,
Frédéric Lécaillef7e0b8d2020-12-16 17:33:11 +01003493 NULL, NULL, &qc->path->ifae_pkts);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003494 task->expire = TICK_ETERNITY;
3495 pktns = quic_loss_pktns(qc);
3496 if (tick_isset(pktns->tx.loss_time)) {
3497 struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
3498
3499 qc_packet_loss_lookup(pktns, qc, &lost_pkts);
3500 if (!LIST_ISEMPTY(&lost_pkts))
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003501 qc_release_lost_pkts(qc, pktns, &lost_pkts, now_ms);
3502 qc_set_timer(qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003503 goto out;
3504 }
3505
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02003506 st = HA_ATOMIC_LOAD(&qc->state);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003507 if (qc->path->in_flight) {
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02003508 pktns = quic_pto_pktns(qc, st >= QUIC_HS_ST_COMPLETE, NULL);
Frédéric Lécaille0fa553d2022-01-17 14:26:12 +01003509 if (pktns == &qc->pktns[QUIC_TLS_PKTNS_INITIAL]) {
3510 pktns->tx.pto_probe = 1;
3511 if (qc->pktns[QUIC_TLS_PKTNS_HANDSHAKE].tx.in_flight)
3512 qc->pktns[QUIC_TLS_PKTNS_HANDSHAKE].tx.pto_probe = 1;
3513 }
3514 else {
3515 pktns->tx.pto_probe = 2;
3516 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003517 }
Frédéric Lécaille1aa57d32022-01-12 09:46:02 +01003518 else if (!qc_is_listener(qc) && st <= QUIC_HS_ST_COMPLETE) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003519 struct quic_enc_level *iel = &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL];
3520 struct quic_enc_level *hel = &qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE];
3521
3522 if (hel->tls_ctx.rx.flags == QUIC_FL_TLS_SECRETS_SET)
3523 hel->pktns->tx.pto_probe = 1;
3524 if (iel->tls_ctx.rx.flags == QUIC_FL_TLS_SECRETS_SET)
3525 iel->pktns->tx.pto_probe = 1;
3526 }
Frédéric Lécaillea56054e2021-12-31 16:35:28 +01003527
3528 for (i = QUIC_TLS_ENC_LEVEL_INITIAL; i < QUIC_TLS_ENC_LEVEL_MAX; i++) {
3529 int j;
3530
3531 if (i == QUIC_TLS_ENC_LEVEL_APP && !quic_peer_validated_addr(qc))
3532 continue;
3533
3534 for (j = 0; j < qc->els[i].pktns->tx.pto_probe; j++)
3535 qc_prep_fast_retrans(&qc->els[i], qc);
3536 }
3537
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003538 tasklet_wakeup(conn_ctx->wait_event.tasklet);
3539 qc->path->loss.pto_count++;
3540
3541 out:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003542 TRACE_LEAVE(QUIC_EV_CONN_PTIMER, qc, pktns);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003543
3544 return task;
3545}
3546
3547/* Allocate and initialize a new QUIC connection for <version> QUIC version and
3548 * <ipv4> address family. <dcid> is the destination connection ID with <dcid_len>
3549 * as length and <dcid_addr_len> as the length of the peer address concatenated
3550 * to it (listener side only). <scid> is the source connection ID with <scid_len>
3551 * as length. <server> must be set for connections attached to listeners, with
3552 * <owner> pointing to their listener. Returns the new connection, or NULL on failure.
3553 */
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003554static struct quic_conn *qc_new_conn(unsigned int version, int ipv4,
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01003555 unsigned char *dcid, size_t dcid_len, size_t dcid_addr_len,
Frédéric Lécaille6b197642021-07-06 16:25:08 +02003556 unsigned char *scid, size_t scid_len, int server, void *owner)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003557{
3558 int i;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003559 struct quic_conn *qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003560 /* Initial CID. */
3561 struct quic_connection_id *icid;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003562 char *buf_area;
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01003563 struct listener *l = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003564
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003565 TRACE_ENTER(QUIC_EV_CONN_INIT);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003566 qc = pool_zalloc(pool_head_quic_conn);
3567 if (!qc) {
3568 TRACE_PROTO("Could not allocate a new connection", QUIC_EV_CONN_INIT);
3569 goto err;
3570 }
3571
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003572 HA_ATOMIC_STORE(&qc->refcount, 0);
3573
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003574 buf_area = pool_alloc(pool_head_quic_conn_rxbuf);
3575 if (!buf_area) {
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003576 TRACE_PROTO("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT, qc);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003577 goto err;
3578 }
3579
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003580 qc->cids = EB_ROOT;
3581 /* QUIC Server (or listener). */
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02003582 if (server) {
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01003583 l = owner;
Frédéric Lécaille6b197642021-07-06 16:25:08 +02003584
Frédéric Lécaille2fe8b3b2022-01-10 12:10:10 +01003585 qc->flags |= QUIC_FL_CONN_LISTENER;
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02003586 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_SERVER_INITIAL);
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01003587 /* Copy the initial DCID with the address. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003588 qc->odcid.len = dcid_len;
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01003589 qc->odcid.addrlen = dcid_addr_len;
3590 memcpy(qc->odcid.data, dcid, dcid_len + dcid_addr_len);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003591
Amaury Denoyelle42b9f1c2021-11-24 15:29:53 +01003592 /* copy the packet SCID to reuse it as DCID for sending */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003593 if (scid_len)
3594 memcpy(qc->dcid.data, scid, scid_len);
3595 qc->dcid.len = scid_len;
Frédéric Lécaillec1029f62021-10-20 11:09:58 +02003596 qc->tx.qring_list = &l->rx.tx_qring_list;
Amaury Denoyelle2af19852021-09-30 11:03:28 +02003597 qc->li = l;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003598 }
3599 /* QUIC Client (outgoing connection to servers) */
3600 else {
Frédéric Lécailleeed7a7d2021-08-18 09:16:01 +02003601 HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_CLIENT_INITIAL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003602 if (dcid_len)
3603 memcpy(qc->dcid.data, dcid, dcid_len);
3604 qc->dcid.len = dcid_len;
3605 }
Amaury Denoyelle0b1f9312022-01-26 09:51:28 +01003606 qc->mux_state = QC_MUX_NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003607
3608 /* Initialize the output buffer */
3609 qc->obuf.pos = qc->obuf.data;
3610
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01003611 icid = new_quic_cid(&qc->cids, qc, 0);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003612 if (!icid) {
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003613 TRACE_PROTO("Could not allocate a new connection ID", QUIC_EV_CONN_INIT, qc);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003614 goto err;
3615 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003616
Amaury Denoyelled6a352a2021-11-24 15:32:46 +01003617 /* insert the allocated CID in the receiver tree */
3618 if (server) {
3619 HA_RWLOCK_WRLOCK(QUIC_LOCK, &l->rx.cids_lock);
3620 ebmb_insert(&l->rx.cids, &icid->node, icid->cid.len);
3621 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
3622 }
3623
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003624 /* Select our SCID which is the first CID with 0 as sequence number. */
3625 qc->scid = icid->cid;
3626
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003627 /* Packet number spaces initialization. */
3628 for (i = 0; i < QUIC_TLS_PKTNS_MAX; i++)
3629 quic_pktns_init(&qc->pktns[i]);
3630 /* QUIC encryption level context initialization. */
3631 for (i = 0; i < QUIC_TLS_ENC_LEVEL_MAX; i++) {
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003632 if (!quic_conn_enc_level_init(qc, i)) {
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003633 TRACE_PROTO("Could not initialize an encryption level", QUIC_EV_CONN_INIT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003634 goto err;
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003635 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003636 /* Initialize the packet number space. */
3637 qc->els[i].pktns = &qc->pktns[quic_tls_pktns(i)];
3638 }
3639
Frédéric Lécaillec8d3f872021-07-06 17:19:44 +02003640 qc->version = version;
Frédéric Lécaillea956d152021-11-10 09:24:22 +01003641 qc->tps_tls_ext = qc->version & 0xff000000 ?
3642 TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS_DRAFT:
3643 TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003644 /* TX part. */
3645 LIST_INIT(&qc->tx.frms_to_send);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003646 qc->tx.nb_buf = QUIC_CONN_TX_BUFS_NB;
3647 qc->tx.wbuf = qc->tx.rbuf = 0;
3648 qc->tx.bytes = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003649 /* RX part. */
3650 qc->rx.bytes = 0;
Frédéric Lécaille2766e782021-08-30 17:16:07 +02003651 qc->rx.nb_ack_eliciting = 0;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003652 qc->rx.buf = b_make(buf_area, QUIC_CONN_RX_BUFSZ, 0, 0);
3653 HA_RWLOCK_INIT(&qc->rx.buf_rwlock);
3654 LIST_INIT(&qc->rx.pkt_list);
Frédéric Lécaille40df78f2021-11-30 10:59:37 +01003655 if (!quic_tls_ku_init(qc)) {
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003656 TRACE_PROTO("Key update initialization failed", QUIC_EV_CONN_INIT, qc);
Frédéric Lécaille40df78f2021-11-30 10:59:37 +01003657 goto err;
3658 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003659
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003660 /* XXX TO DO: Only one path at this time. */
3661 qc->path = &qc->paths[0];
3662 quic_path_init(qc->path, ipv4, default_quic_cc_algo, qc);
3663
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01003664 /* required to use MTLIST_IN_LIST */
3665 MT_LIST_INIT(&qc->accept_list);
3666
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003667 TRACE_LEAVE(QUIC_EV_CONN_INIT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003668
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003669 return qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003670
3671 err:
Amaury Denoyellee770ce32021-12-21 14:51:56 +01003672 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_INIT, qc ? qc : NULL);
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01003673 quic_conn_drop(qc);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02003674 return NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003675}
3676
3677/* Initialize the timer task of <qc> QUIC connection.
3678 * Returns 1 if succeeded, 0 if not.
3679 */
3680static int quic_conn_init_timer(struct quic_conn *qc)
3681{
Frédéric Lécaillef57c3332021-12-09 10:06:21 +01003682 /* Attach this task to the same thread ID used for the connection */
3683 qc->timer_task = task_new(1UL << qc->tid);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003684 if (!qc->timer_task)
3685 return 0;
3686
3687 qc->timer = TICK_ETERNITY;
3688 qc->timer_task->process = process_timer;
3689 qc->timer_task->context = qc->conn->xprt_ctx;
3690
3691 return 1;
3692}
3693
3694/* Parse into <pkt> a long header located at <*buf> buffer, <end> being a pointer
3695 * to one byte past the end of this buffer.
3696 */
3697static inline int quic_packet_read_long_header(unsigned char **buf, const unsigned char *end,
3698 struct quic_rx_packet *pkt)
3699{
3700 unsigned char dcid_len, scid_len;
3701
3702 /* Version */
3703 if (!quic_read_uint32(&pkt->version, (const unsigned char **)buf, end))
3704 return 0;
3705
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003706 /* Destination Connection ID Length */
3707 dcid_len = *(*buf)++;
3708 /* We want to be sure we can read <dcid_len> bytes and one more for <scid_len> value */
3709 if (dcid_len > QUIC_CID_MAXLEN || end - *buf < dcid_len + 1)
3710 /* XXX MUST BE DROPPED */
3711 return 0;
3712
3713 if (dcid_len) {
3714 /* Check that the length of this received DCID matches the CID length
3715 * used by our implementation, for non-Initial packets only.
3716 */
Frédéric Lécaillea5da31d2021-12-14 19:44:14 +01003717 if (pkt->type != QUIC_PACKET_TYPE_INITIAL &&
3718 pkt->type != QUIC_PACKET_TYPE_0RTT &&
Amaury Denoyelled4962512021-12-14 17:17:28 +01003719 dcid_len != QUIC_HAP_CID_LEN)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003720 return 0;
3721
3722 memcpy(pkt->dcid.data, *buf, dcid_len);
3723 }
3724
3725 pkt->dcid.len = dcid_len;
3726 *buf += dcid_len;
3727
3728 /* Source Connection ID Length */
3729 scid_len = *(*buf)++;
3730 if (scid_len > QUIC_CID_MAXLEN || end - *buf < scid_len)
3731 /* XXX MUST BE DROPPED */
3732 return 0;
3733
3734 if (scid_len)
3735 memcpy(pkt->scid.data, *buf, scid_len);
3736 pkt->scid.len = scid_len;
3737 *buf += scid_len;
3738
3739 return 1;
3740}
3741
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003742/* Insert <pkt> RX packet in its <qel> RX packets tree */
3743static void qc_pkt_insert(struct quic_rx_packet *pkt, struct quic_enc_level *qel)
3744{
3745 pkt->pn_node.key = pkt->pn;
Frédéric Lécaille2ce5acf2021-12-20 14:41:19 +01003746 quic_rx_packet_refinc(pkt);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003747 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
3748 eb64_insert(&qel->rx.pkts, &pkt->pn_node);
3749 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003750}
3751
3752/* Try to remove the header protection of <pkt> QUIC packet attached to <qc>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003753 * QUIC connection with <buf> as packet number field address, <end> a pointer to one
3754 * byte past the end of the buffer containing this packet and <beg> the address of
3755 * the packet first byte.
3757 * Returns 1 if succeeded, 0 if not.
3758 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003759static inline int qc_try_rm_hp(struct quic_conn *qc,
3760 struct quic_rx_packet *pkt,
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01003761 unsigned char *buf, unsigned char *beg,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003762 const unsigned char *end,
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003763 struct quic_enc_level **el)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003764{
3765 unsigned char *pn = NULL; /* Packet number field */
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003766 enum quic_tls_enc_level tel;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003767 struct quic_enc_level *qel;
3768 /* Only for traces. */
3769 struct quic_rx_packet *qpkt_trace;
3770
3771 qpkt_trace = NULL;
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003772 TRACE_ENTER(QUIC_EV_CONN_TRMHP, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003773 /* The packet number is here. This is also the start minus
3774 * QUIC_PACKET_PN_MAXLEN of the sample used to add/remove the header
3775 * protection.
3776 */
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01003777 pn = buf;
Amaury Denoyelle8ae28072022-01-24 18:34:52 +01003778
3779 tel = quic_packet_type_enc_level(pkt->type);
3780 qel = &qc->els[tel];
3781
3782 if (qc_qel_may_rm_hp(qc, qel)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003783 /* Note that the following function enables us to unprotect the packet
3784 * number and its length subsequently used to decrypt the entire
3785 * packet.
3786 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003787 if (!qc_do_rm_hp(qc, pkt, &qel->tls_ctx,
3788 qel->pktns->rx.largest_pn, pn, beg, end)) {
3789 TRACE_PROTO("hp error", QUIC_EV_CONN_TRMHP, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003790 goto err;
3791 }
3792
3793 /* The AAD includes the packet number field found at <pn>. */
3794 pkt->aad_len = pn - beg + pkt->pnl;
3795 qpkt_trace = pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003796 }
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02003797 else if (qel) {
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003798 if (qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD) {
3799 /* If the packet number space has been discarded, this packet
3800 * will not be parsed.
3801 */
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003802 TRACE_PROTO("Discarded pktns", QUIC_EV_CONN_TRMHP, qc, pkt);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003803 goto out;
3804 }
3805
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003806 TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003807 pkt->pn_offset = pn - beg;
Frédéric Lécailleebc3fc12021-09-22 08:34:21 +02003808 MT_LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
3809 quic_rx_packet_refinc(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003810 }
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003811 else {
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003812 TRACE_PROTO("Unknown packet type", QUIC_EV_CONN_TRMHP, qc);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003813 goto err;
3814 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003815
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003816 *el = qel;
3817 /* No reference counter incrementation here!!! */
3818 LIST_APPEND(&qc->rx.pkt_list, &pkt->qc_rx_pkt_list);
3819 memcpy(b_tail(&qc->rx.buf), beg, pkt->len);
3820 pkt->data = (unsigned char *)b_tail(&qc->rx.buf);
3821 b_add(&qc->rx.buf, pkt->len);
3822 out:
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003823 TRACE_LEAVE(QUIC_EV_CONN_TRMHP, qc, qpkt_trace);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003824 return 1;
3825
3826 err:
Amaury Denoyellee81fed92021-12-22 11:06:34 +01003827 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TRMHP, qc, qpkt_trace);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003828 return 0;
3829}
3830
3831/* Parse the header form from <byte0> first byte of <pkt> packet to set its type.
3832 * Also set <*long_header> to 1 if this form is long, 0 if not.
3833 */
3834static inline void qc_parse_hd_form(struct quic_rx_packet *pkt,
3835 unsigned char byte0, int *long_header)
3836{
3837 if (byte0 & QUIC_PACKET_LONG_HEADER_BIT) {
3838 pkt->type =
3839 (byte0 >> QUIC_PACKET_TYPE_SHIFT) & QUIC_PACKET_TYPE_BITMASK;
3840 *long_header = 1;
3841 }
3842 else {
3843 pkt->type = QUIC_PACKET_TYPE_SHORT;
3844 *long_header = 0;
3845 }
3846}
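
/* Hypothetical example, not part of the original source: what qc_parse_hd_form()
 * above computes for a typical Initial first byte, assuming the usual RFC 9000
 * layout where the long header bit is 0x80 and the packet type sits in bits #4-#5.
 * 0xc3 has the long header bit set, so the type comes from those two bits and is
 * QUIC_PACKET_TYPE_INITIAL, while *long_header is set to 1. A first byte without
 * the 0x80 bit would force the type to QUIC_PACKET_TYPE_SHORT instead.
 */
__attribute__((unused))
static void qc_parse_hd_form_example(void)
{
	struct quic_rx_packet pkt;
	int long_header;

	qc_parse_hd_form(&pkt, 0xc3, &long_header);
	/* At this point: long_header == 1 and pkt.type == QUIC_PACKET_TYPE_INITIAL. */
}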
3847
Amaury Denoyellea22d8602021-11-10 15:17:56 +01003848/*
3849 * Check if the QUIC version in packet <pkt> is supported. Returns a boolean.
3850 */
3851static inline int qc_pkt_is_supported_version(struct quic_rx_packet *pkt)
3852{
3853 int j = 0, version;
3854
3855 do {
3856 version = quic_supported_version[j];
3857 if (version == pkt->version)
3858 return 1;
3859
3860 version = quic_supported_version[++j];
3861 } while(version);
3862
3863 return 0;
3864}
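
/* Minimal sketch, not part of the original source: an equivalent traversal of the
 * sentinel-terminated quic_supported_version[] array, kept only to illustrate the
 * contract relied upon by qc_pkt_is_supported_version() above (the list ends with
 * a null entry). The helper itself is hypothetical and unused.
 */
__attribute__((unused))
static int quic_version_is_supported(unsigned int version)
{
	int i;

	for (i = 0; quic_supported_version[i]; i++) {
		if ((unsigned int)quic_supported_version[i] == version)
			return 1;
	}

	return 0;
}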
3865
Frédéric Lécaillec5c69a02021-10-20 17:24:42 +02003866__attribute__((unused))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003867static ssize_t qc_srv_pkt_rcv(unsigned char **buf, const unsigned char *end,
3868 struct quic_rx_packet *pkt,
3869 struct quic_dgram_ctx *dgram_ctx,
3870 struct sockaddr_storage *saddr)
3871{
3872 unsigned char *beg;
3873 uint64_t len;
3874 struct quic_conn *qc;
3875 struct eb_root *cids;
3876 struct ebmb_node *node;
3877 struct connection *srv_conn;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02003878 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003879 int long_header;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003880 size_t b_cspace;
3881 struct quic_enc_level *qel;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003882
3883 qc = NULL;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +01003884 qel = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003885 TRACE_ENTER(QUIC_EV_CONN_SPKT);
3886 if (end <= *buf)
3887 goto err;
3888
3889 /* Fixed bit */
3890 if (!(**buf & QUIC_PACKET_FIXED_BIT))
3891 /* XXX TO BE DISCARDED */
3892 goto err;
3893
3894 srv_conn = dgram_ctx->owner;
3895 beg = *buf;
3896 /* Header form */
3897 qc_parse_hd_form(pkt, *(*buf)++, &long_header);
3898 if (long_header) {
3899 size_t cid_lookup_len;
3900
3901 if (!quic_packet_read_long_header(buf, end, pkt))
3902 goto err;
Amaury Denoyellea22d8602021-11-10 15:17:56 +01003903
3904 /* unsupported QUIC version */
3905 if (!qc_pkt_is_supported_version(pkt)) {
3906 TRACE_PROTO("Unsupported QUIC version, packet dropped", QUIC_EV_CONN_SPKT);
3907 goto err;
3908 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003909
3910 /* For Initial packets, and for servers (QUIC client connections),
3911 * there is no Initial connection IDs storage.
3912 */
3913 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3914 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
3915 cid_lookup_len = pkt->dcid.len;
3916 }
3917 else {
3918 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
Amaury Denoyelled4962512021-12-14 17:17:28 +01003919 cid_lookup_len = QUIC_HAP_CID_LEN;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003920 }
3921
3922 node = ebmb_lookup(cids, pkt->dcid.data, cid_lookup_len);
3923 if (!node)
3924 goto err;
3925
3926 qc = ebmb_entry(node, struct quic_conn, scid_node);
3927
3928 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3929 qc->dcid.len = pkt->scid.len;
3930 if (pkt->scid.len)
3931 memcpy(qc->dcid.data, pkt->scid.data, pkt->scid.len);
3932 }
3933
3934 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
3935 uint64_t token_len;
3936
3937 if (!quic_dec_int(&token_len, (const unsigned char **)buf, end) || end - *buf < token_len)
3938 goto err;
3939
3940 /* XXX TO DO XXX 0 value means "the token is not present".
3941 * A server which sends an Initial packet must not set the token.
3942 * So, a client which receives an Initial packet with a token
3943 * MUST discard the packet or generate a connection error with
3944 * PROTOCOL_VIOLATION as type.
3945 * The token must be provided in a Retry packet or NEW_TOKEN frame.
3946 */
3947 pkt->token_len = token_len;
3948 }
3949 }
3950 else {
3951 /* XXX TO DO: Short header XXX */
Amaury Denoyelled4962512021-12-14 17:17:28 +01003952 if (end - *buf < QUIC_HAP_CID_LEN)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003953 goto err;
3954
3955 cids = &((struct server *)__objt_server(srv_conn->target))->cids;
Amaury Denoyelled4962512021-12-14 17:17:28 +01003956 node = ebmb_lookup(cids, *buf, QUIC_HAP_CID_LEN);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003957 if (!node)
3958 goto err;
3959
3960 qc = ebmb_entry(node, struct quic_conn, scid_node);
Amaury Denoyelled4962512021-12-14 17:17:28 +01003961 *buf += QUIC_HAP_CID_LEN;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003962 }
Amaury Denoyelleadb22762021-12-14 15:04:14 +01003963
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003964 /* Only packets with long headers and not RETRY or VERSION as type
3965 * have a length field.
3966 */
3967 if (long_header && pkt->type != QUIC_PACKET_TYPE_RETRY && pkt->version) {
3968 if (!quic_dec_int(&len, (const unsigned char **)buf, end) || end - *buf < len)
3969 goto err;
3970
3971 pkt->len = len;
3972 }
3973 else if (!long_header) {
3974 /* A short packet is the last one of a UDP datagram. */
3975 pkt->len = end - *buf;
3976 }
3977
3978 conn_ctx = qc->conn->xprt_ctx;
3979
3980 /* Increase the total length of this packet by the header length. */
3981 pkt->len += *buf - beg;
Amaury Denoyelleadb22762021-12-14 15:04:14 +01003982
3983 /* When multiple QUIC packets are coalesced on the same UDP datagram,
3984 * they must have the same DCID.
3985 *
3986 * This check must be done after the final update to pkt.len to
3987 * properly drop the packet on failure.
3988 */
3989 if (!dgram_ctx->dcid.len) {
3990 memcpy(dgram_ctx->dcid.data, pkt->dcid.data, pkt->dcid.len);
3991 }
3992 else if (memcmp(dgram_ctx->dcid.data, pkt->dcid.data, pkt->dcid.len)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01003993 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_SPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003994 goto err;
3995 }
Amaury Denoyelleadb22762021-12-14 15:04:14 +01003996 dgram_ctx->qc = qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01003997
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01003998 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
3999 b_cspace = b_contig_space(&qc->rx.buf);
4000 if (b_cspace < pkt->len) {
4001 /* Let us consume the remaining contiguous space. */
4002 b_add(&qc->rx.buf, b_cspace);
4003 if (b_contig_space(&qc->rx.buf) < pkt->len) {
4004 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004005 TRACE_PROTO("Too big packet", QUIC_EV_CONN_SPKT, qc, pkt, &pkt->len);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004006 goto err;
4007 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004008 }
4009
Amaury Denoyellee81fed92021-12-22 11:06:34 +01004010 if (!qc_try_rm_hp(qc, pkt, *buf, beg, end, &qel)) {
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004011 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004012 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_SPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004013 goto err;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004014 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004015
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004016 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
4017 if (pkt->aad_len)
4018 qc_pkt_insert(pkt, qel);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004019 /* Wake the tasklet of the QUIC connection packet handler. */
4020 if (conn_ctx)
4021 tasklet_wakeup(conn_ctx->wait_event.tasklet);
4022
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004023 TRACE_LEAVE(QUIC_EV_CONN_SPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004024
4025 return pkt->len;
4026
4027 err:
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004028 TRACE_DEVEL("Leaving in error", QUIC_EV_CONN_SPKT, qc ? qc : NULL);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004029 return -1;
4030}
4031
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004032/*
4033 * Send a Version Negotiation packet on response to <pkt> on socket <fd> to
4034 * address <addr>.
4035 * Implementation of RFC9000 6. Version Negotiation
4036 *
4037 * TODO implement a rate-limiting sending of Version Negotiation packets
4038 *
4039 * Returns 0 on success else non-zero
4040 */
Amaury Denoyelled6b16672021-12-23 10:37:19 +01004041static int send_version_negotiation(int fd, struct sockaddr_storage *addr,
4042 struct quic_rx_packet *pkt)
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004043{
4044 char buf[256];
4045 int i = 0, j, version;
4046 const socklen_t addrlen = get_addr_len(addr);
4047
4048 /*
4049 * header form
4050 * long header bit set, remaining first-byte bits left random for a Version Negotiation packet
4051 */
Frédéric Lécailleea78ee12021-11-18 13:54:43 +01004052 if (RAND_bytes((unsigned char *)buf, 1) != 1)
4053 return 1;
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004054
Frédéric Lécailleea78ee12021-11-18 13:54:43 +01004055 buf[i++] |= '\x80';
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004056 /* null version for Version Negotiation */
4057 buf[i++] = '\x00';
4058 buf[i++] = '\x00';
4059 buf[i++] = '\x00';
4060 buf[i++] = '\x00';
4061
4062 /* source connection id */
4063 buf[i++] = pkt->scid.len;
Amaury Denoyelle10eed8e2021-11-18 13:48:57 +01004064 memcpy(&buf[i], pkt->scid.data, pkt->scid.len);
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004065 i += pkt->scid.len;
4066
4067 /* destination connection id */
4068 buf[i++] = pkt->dcid.len;
Amaury Denoyelle10eed8e2021-11-18 13:48:57 +01004069 memcpy(&buf[i], pkt->dcid.data, pkt->dcid.len);
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004070 i += pkt->dcid.len;
4071
4072 /* supported version */
4073 j = 0;
4074 do {
4075 version = htonl(quic_supported_version[j]);
4076 memcpy(&buf[i], &version, sizeof(version));
4077 i += sizeof(version);
4078
4079 version = quic_supported_version[++j];
4080 } while (version);
4081
4082 if (sendto(fd, buf, i, 0, (struct sockaddr *)addr, addrlen) < 0)
4083 return 1;
4084
4085 return 0;
4086}
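
/* Hypothetical helper, not part of the original source: worst-case size of the
 * Version Negotiation packet built by send_version_negotiation() above, assuming
 * both echoed CIDs are at QUIC_CID_MAXLEN (a limit enforced when parsing the long
 * header). Each advertised version is written with sizeof(int) bytes, exactly as
 * in the loop above, so this stays well below the 256-byte stack buffer.
 */
__attribute__((unused))
static size_t quic_vn_pkt_max_size(void)
{
	/* first byte + null version + 2 * (CID length byte + CID) +
	 * all supported versions but the null terminator.
	 */
	return 1 + 4 + 2 * (1 + QUIC_CID_MAXLEN) +
	       sizeof quic_supported_version - sizeof quic_supported_version[0];
}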
4087
Amaury Denoyelleb76ae692022-01-11 14:16:37 +01004088/* Generate the token to be used in Retry packets. The token is written to
4089 * <buf> which is expected to be <len> bytes.
4090 *
4091 * Various parameters are expected to be encoded in the token. For now, only
4092 * the DCID from <pkt> is stored. This is useful to implement a stateless Retry
4093 * as this CID must be repeated by the server in the transport parameters.
4094 *
4095 * TODO add the client address to validate the token origin.
4096 *
4097 * Returns the length of the encoded token or 0 on error.
4098 */
4099static int generate_retry_token(unsigned char *buf, unsigned char len,
4100 struct quic_rx_packet *pkt)
4101{
4102 const size_t token_len = 1 + pkt->dcid.len;
4103 unsigned char i = 0;
4104
4105 if (token_len > len)
4106 return 0;
4107
4108 buf[i++] = pkt->dcid.len;
4109 memcpy(&buf[i], pkt->dcid.data, pkt->dcid.len);
4110 i += pkt->dcid.len;
4111
4112 return i;
4113}
4114
4115/* Generate a Retry packet and send it on <fd> socket to <addr> in response to
4116 * the Initial <pkt> packet.
4117 *
4118 * Returns 0 on success else non-zero.
4119 */
4120static int send_retry(int fd, struct sockaddr_storage *addr,
4121 struct quic_rx_packet *pkt)
4122{
4123 unsigned char buf[128];
4124 int i = 0, token_len;
4125 const socklen_t addrlen = get_addr_len(addr);
4126 struct quic_cid scid;
4127
4128 /* long header + fixed bit + packet type 0x3 */
4129 buf[i++] = 0xf0;
4130 /* version */
4131 buf[i++] = 0x00;
4132 buf[i++] = 0x00;
4133 buf[i++] = 0x00;
4134 buf[i++] = 0x01;
4135
4136 /* Use the SCID from <pkt> for Retry DCID. */
4137 buf[i++] = pkt->scid.len;
4138 memcpy(&buf[i], pkt->scid.data, pkt->scid.len);
4139 i += pkt->scid.len;
4140
4141 /* Generate a new CID to be used as SCID for the Retry packet. */
4142 scid.len = QUIC_HAP_CID_LEN;
4143 if (RAND_bytes(scid.data, scid.len) != 1)
4144 return 1;
4145
4146 buf[i++] = scid.len;
4147 memcpy(&buf[i], scid.data, scid.len);
4148 i += scid.len;
4149
4150 /* token */
4151 if (!(token_len = generate_retry_token(&buf[i], sizeof buf - i, pkt)))
4152 return 1;
4153 i += token_len;
4154
4155 /* token integrity tag */
4156 if ((sizeof buf - i < QUIC_TLS_TAG_LEN) ||
4157 !quic_tls_generate_retry_integrity_tag(pkt->dcid.data,
4158 pkt->dcid.len, buf, i)) {
4159 return 1;
4160 }
4161
4162 i += QUIC_TLS_TAG_LEN;
4163
4164 if (sendto(fd, buf, i, 0, (struct sockaddr *)addr, addrlen) < 0)
4165 return 1;
4166
4167 return 0;
4168}
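
/* Hypothetical helper, not part of the original source: upper bound on the size
 * of the Retry packet built by send_retry() above, with the echoed client SCID at
 * QUIC_CID_MAXLEN, our fresh QUIC_HAP_CID_LEN source CID, the 1 + QUIC_CID_MAXLEN
 * byte token produced by generate_retry_token() and the integrity tag. This is
 * what keeps the packet well within the 128-byte stack buffer used above.
 */
__attribute__((unused))
static size_t quic_retry_pkt_max_size(void)
{
	return 1 + 4 + 1 + QUIC_CID_MAXLEN + 1 + QUIC_HAP_CID_LEN +
	       1 + QUIC_CID_MAXLEN + QUIC_TLS_TAG_LEN;
}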
4169
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004170/* Retrieve a quic_conn instance from the <pkt> DCID field. If the packet is of
4171 * type INITIAL, the ODCID tree is first used. In this case, <saddr> is
4172 * concatenated to the <pkt> DCID field.
4173 *
4174 * Returns the instance or NULL if not found.
4175 */
Amaury Denoyelled6b16672021-12-23 10:37:19 +01004176static struct quic_conn *retrieve_qc_conn_from_cid(struct quic_rx_packet *pkt,
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004177 struct listener *l,
4178 struct sockaddr_storage *saddr)
4179{
4180 struct quic_conn *qc = NULL;
4181 struct ebmb_node *node;
4182 struct quic_connection_id *id;
Amaury Denoyelle250ac422021-12-22 11:29:05 +01004183 /* set if the quic_conn is found in the second DCID tree */
4184 int found_in_dcid = 0;
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004185
4186 HA_RWLOCK_RDLOCK(QUIC_LOCK, &l->rx.cids_lock);
4187
4188 /* Look first into ODCIDs tree for INITIAL/0-RTT packets. */
4189 if (pkt->type == QUIC_PACKET_TYPE_INITIAL ||
4190 pkt->type == QUIC_PACKET_TYPE_0RTT) {
4191 /* DCIDs of first packets coming from multiple clients may have
4192 * the same values. Let's distinguish them by concatenating the
4193 * socket addresses.
4194 */
4195 quic_cid_saddr_cat(&pkt->dcid, saddr);
4196 node = ebmb_lookup(&l->rx.odcids, pkt->dcid.data,
4197 pkt->dcid.len + pkt->dcid.addrlen);
4198 if (node) {
4199 qc = ebmb_entry(node, struct quic_conn, odcid_node);
4200 goto end;
4201 }
4202 }
4203
4204 /* Look into DCIDs tree for non-INITIAL/0-RTT packets. This may be used
4205 * also for INITIAL/0-RTT non-first packets with the final DCID in
4206 * used.
4207 */
4208 node = ebmb_lookup(&l->rx.cids, pkt->dcid.data, pkt->dcid.len);
4209 if (!node)
4210 goto end;
4211
4212 id = ebmb_entry(node, struct quic_connection_id, node);
4213 qc = id->qc;
Amaury Denoyelle250ac422021-12-22 11:29:05 +01004214 found_in_dcid = 1;
4215
4216 end:
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004217 if (qc)
4218 quic_conn_take(qc);
Amaury Denoyelle250ac422021-12-22 11:29:05 +01004219 HA_RWLOCK_RDUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004220
4221 /* If found in DCIDs tree, remove the quic_conn from the ODCIDs tree.
4222 * If already done, this is a noop.
Amaury Denoyelle250ac422021-12-22 11:29:05 +01004223 *
4224 * node.leaf_p is first checked to avoid unnecessary locking.
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004225 */
Amaury Denoyellec6fab982021-12-23 16:27:56 +01004226 if (qc && found_in_dcid && qc->odcid_node.node.leaf_p) {
Amaury Denoyelle250ac422021-12-22 11:29:05 +01004227 HA_RWLOCK_WRLOCK(QUIC_LOCK, &l->rx.cids_lock);
4228 ebmb_delete(&qc->odcid_node);
4229 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
4230 }
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004231
4232 return qc;
4233}
4234
Amaury Denoyelle5ff1c972022-01-11 14:11:32 +01004235/* Parse the Retry token from buffer <token> whose size is <token_len>. This
4236 * will extract the parameters stored in the token: <odcid>.
4237 *
4238 * Returns 0 on success else non-zero.
4239 */
4240static int parse_retry_token(const unsigned char *token, uint64_t token_len,
4241 struct quic_cid *odcid)
4242{
4243 uint64_t odcid_len;
4244
4245 if (!quic_dec_int(&odcid_len, &token, token + token_len))
4246 return 1;
4247
4248 memcpy(odcid->data, token, odcid_len);
4249 odcid->len = odcid_len;
4250
4251 return 0;
4252}
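
/* Illustrative sketch, not part of the original source: the token built by
 * generate_retry_token() is a one-byte DCID length followed by the DCID bytes.
 * Since a CID length never exceeds QUIC_CID_MAXLEN, well below 64, that single
 * byte is also a valid one-byte QUIC variable-length integer, which is why
 * parse_retry_token() can decode it with quic_dec_int(). The hypothetical helper
 * below simply round-trips the DCID of <pkt> through both functions and returns
 * 1 on success, 0 on failure.
 */
__attribute__((unused))
static int quic_retry_token_roundtrip(struct quic_rx_packet *pkt)
{
	unsigned char token[1 + QUIC_CID_MAXLEN];
	struct quic_cid odcid = { .len = 0 };
	int token_len;

	token_len = generate_retry_token(token, sizeof token, pkt);
	if (!token_len)
		return 0;

	if (parse_retry_token(token, token_len, &odcid))
		return 0;

	return odcid.len == pkt->dcid.len &&
	       memcmp(odcid.data, pkt->dcid.data, odcid.len) == 0;
}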
4253
Amaury Denoyelle33ac3462022-01-18 16:44:34 +01004254/* Try to allocate the <*ssl> SSL session object for <qc> QUIC connection
4255 * with <ssl_ctx> as the SSL context from which the settings are inherited. Also
4256 * set the transport parameters of this session.
4257 * It is the responsibility of the caller to check the validity of all the
4258 * pointers passed as parameter to this function.
4259 * Return 0 if succeeded, -1 if not. If failed, sets the ->err_code member of <qc->conn> to
4260 * CO_ER_SSL_NO_MEM.
4261 */
4262static int qc_ssl_sess_init(struct quic_conn *qc, SSL_CTX *ssl_ctx, SSL **ssl,
4263 unsigned char *params, size_t params_len)
4264{
4265 int retry;
4266
4267 retry = 1;
4268 retry:
4269 *ssl = SSL_new(ssl_ctx);
4270 if (!*ssl) {
4271 if (!retry--)
4272 goto err;
4273
4274 pool_gc(NULL);
4275 goto retry;
4276 }
4277
4278 if (!SSL_set_quic_method(*ssl, &ha_quic_method) ||
4279 !SSL_set_ex_data(*ssl, ssl_qc_app_data_index, qc) ||
4280 !SSL_set_quic_transport_params(*ssl, qc->enc_params, qc->enc_params_len)) {
4283 SSL_free(*ssl);
4284 *ssl = NULL;
4285 if (!retry--)
4286 goto err;
4287
4288 pool_gc(NULL);
4289 goto retry;
4290 }
4291
4292 return 0;
4293
4294 err:
4295 qc->conn->err_code = CO_ER_SSL_NO_MEM;
4296 return -1;
4297}
4298
4299/* Allocate the ssl_sock_ctx from connection <qc>. This creates the tasklet
4300 * used to process <qc> received packets. The allocated context is stored in
4301 * <qc.xprt_ctx>.
4302 *
4303 * Returns 0 on success else non-zero.
4304 */
4305int qc_conn_alloc_ssl_ctx(struct quic_conn *qc)
4306{
4307 struct bind_conf *bc = qc->li->bind_conf;
4308 struct ssl_sock_ctx *ctx = NULL;
4309
4310 ctx = pool_zalloc(pool_head_quic_conn_ctx);
4311 if (!ctx)
4312 goto err;
4313
4314 ctx->wait_event.tasklet = tasklet_new();
4315 if (!ctx->wait_event.tasklet)
4316 goto err;
4317
4318 ctx->wait_event.tasklet->process = quic_conn_io_cb;
4319 ctx->wait_event.tasklet->context = ctx;
4320 ctx->wait_event.events = 0;
4321 ctx->subs = NULL;
4322 ctx->xprt_ctx = NULL;
4323 ctx->qc = qc;
4324
4325 /* Set tasklet tid based on the SCID selected by us for this
4326 * connection. The upper layer will also be bound to the same thread.
4327 */
4328 qc->tid = ctx->wait_event.tasklet->tid = quic_get_cid_tid(&qc->scid);
4329
4330 if (qc_is_listener(qc)) {
4331 if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl,
4332 qc->enc_params, qc->enc_params_len) == -1) {
4333 goto err;
4334 }
4335
4336 /* Enabling 0-RTT */
4337 if (bc->ssl_conf.early_data)
4338 SSL_set_quic_early_data_enabled(ctx->ssl, 1);
4339
4340 SSL_set_accept_state(ctx->ssl);
4341 }
4342
4343 ctx->xprt = xprt_get(XPRT_QUIC);
4344
4345 /* Store the allocated context in <qc>. */
4346 HA_ATOMIC_STORE(&qc->xprt_ctx, ctx);
4347
4348 return 0;
4349
4350 err:
4351 if (ctx && ctx->wait_event.tasklet)
4352 tasklet_free(ctx->wait_event.tasklet);
4353 pool_free(pool_head_quic_conn_ctx, ctx);
4354
4355 return 1;
4356}
4357
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004358static ssize_t qc_lstnr_pkt_rcv(unsigned char *buf, const unsigned char *end,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004359 struct quic_rx_packet *pkt,
4360 struct quic_dgram_ctx *dgram_ctx,
4361 struct sockaddr_storage *saddr)
4362{
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004363 unsigned char *beg, *payload;
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004364 struct quic_conn *qc, *qc_to_purge = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004365 struct listener *l;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02004366 struct ssl_sock_ctx *conn_ctx;
Frédéric Lécaille6b663152022-01-04 17:03:11 +01004367 int long_header = 0, io_cb_wakeup = 0;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004368 size_t b_cspace;
4369 struct quic_enc_level *qel;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004370
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004371 beg = buf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004372 qc = NULL;
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02004373 conn_ctx = NULL;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +01004374 qel = NULL;
Frédéric Lécaille8678eb02021-12-16 18:03:52 +01004375 TRACE_ENTER(QUIC_EV_CONN_LPKT);
4376 /* This is done only for the benefit of traces, to distinguish packets
4377 * with a parsed packet number from the others.
4378 */
4379 pkt->pn_node.key = (uint64_t)-1;
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004380 if (end <= buf)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004381 goto err;
4382
4383 /* Fixed bit */
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004384 if (!(*buf & QUIC_PACKET_FIXED_BIT)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004385 /* XXX TO BE DISCARDED */
4386 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4387 goto err;
4388 }
4389
4390 l = dgram_ctx->owner;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004391 /* Header form */
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004392 qc_parse_hd_form(pkt, *buf++, &long_header);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004393 if (long_header) {
Frédéric Lécaille2c15a662021-12-22 20:39:12 +01004394 uint64_t len;
4395
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004396 if (!quic_packet_read_long_header(&buf, end, pkt)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004397 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4398 goto err;
4399 }
4400
Frédéric Lécaille2c15a662021-12-22 20:39:12 +01004401 /* Retry and Version Negotiation packets are only sent by servers */
4402 if (pkt->type == QUIC_PACKET_TYPE_RETRY || !pkt->version) {
4403 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4404 goto err;
4405 }
4406
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004407 /* RFC9000 6. Version Negotiation */
4408 if (!qc_pkt_is_supported_version(pkt)) {
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004409 /* unsupported version, send Negotiation packet */
Amaury Denoyelled6b16672021-12-23 10:37:19 +01004410 if (send_version_negotiation(l->rx.fd, saddr, pkt)) {
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004411 TRACE_PROTO("Error on Version Negotiation sending", QUIC_EV_CONN_LPKT);
4412 goto err;
4413 }
4414
4415 TRACE_PROTO("Unsupported QUIC version, send Version Negotiation packet", QUIC_EV_CONN_LPKT);
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004416 goto err;
Amaury Denoyellea22d8602021-11-10 15:17:56 +01004417 }
4418
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004419 /* For Initial packets, and for servers (QUIC client connections),
4420 * there is no Initial connection IDs storage.
4421 */
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004422 if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02004423 uint64_t token_len;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02004424
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004425 if (!quic_dec_int(&token_len, (const unsigned char **)&buf, end) ||
4426 end - buf < token_len) {
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004427 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4428 goto err;
Frédéric Lécaillea5da31d2021-12-14 19:44:14 +01004429 }
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004430
4431 /* XXX TO DO XXX 0 value means "the token is not present".
4432 * A server which sends an Initial packet must not set the token.
4433 * So, a client which receives an Initial packet with a token
4434 * MUST discard the packet or generate a connection error with
4435 * PROTOCOL_VIOLATION as type.
4436 * The token must be provided in a Retry packet or NEW_TOKEN frame.
4437 */
4438 pkt->token_len = token_len;
Amaury Denoyelleb76ae692022-01-11 14:16:37 +01004439
4440 /* TODO Retry should be automatically activated if
4441 * suspect network usage is detected.
4442 */
4443 if (!token_len && l->bind_conf->quic_force_retry) {
4444 TRACE_PROTO("Initial without token, sending retry", QUIC_EV_CONN_LPKT);
4445 if (send_retry(l->rx.fd, saddr, pkt)) {
4446 TRACE_PROTO("Error during Retry generation", QUIC_EV_CONN_LPKT);
4447 goto err;
4448 }
4449
4450 goto err;
4451 }
4452 else {
Amaury Denoyelle5ff1c972022-01-11 14:11:32 +01004453 pkt->token = buf;
4454 buf += pkt->token_len;
4455 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004456 }
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004457 else if (pkt->type != QUIC_PACKET_TYPE_0RTT) {
Amaury Denoyelled4962512021-12-14 17:17:28 +01004458 if (pkt->dcid.len != QUIC_HAP_CID_LEN) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004459 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4460 goto err;
4461 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004462 }
4463
Frédéric Lécaille2c15a662021-12-22 20:39:12 +01004464 if (!quic_dec_int(&len, (const unsigned char **)&buf, end) ||
4465 end - buf < len) {
4466 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4467 goto err;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02004468 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004469
Frédéric Lécaille2c15a662021-12-22 20:39:12 +01004470 payload = buf;
4471 pkt->len = len + payload - beg;
4472
Amaury Denoyelled6b16672021-12-23 10:37:19 +01004473 qc = retrieve_qc_conn_from_cid(pkt, l, saddr);
Amaury Denoyelle8efe0322021-12-15 16:32:56 +01004474 if (!qc) {
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004475 int ipv4;
4476 struct quic_cid *odcid;
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02004477 struct ebmb_node *n = NULL;
Frédéric Lécaille2fc76cf2021-08-31 19:10:40 +02004478 const unsigned char *salt = initial_salt_v1;
4479 size_t salt_len = sizeof initial_salt_v1;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004480
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004481 if (pkt->type != QUIC_PACKET_TYPE_INITIAL) {
Amaury Denoyelle47e1f6d2021-12-17 10:58:05 +01004482 TRACE_PROTO("Non Initial packet", QUIC_EV_CONN_LPKT);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004483 goto err;
4484 }
4485
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004486 pkt->saddr = *saddr;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004487 ipv4 = saddr->ss_family == AF_INET;
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01004488 qc = qc_new_conn(pkt->version, ipv4,
4489 pkt->dcid.data, pkt->dcid.len, pkt->dcid.addrlen,
Frédéric Lécaille6b197642021-07-06 16:25:08 +02004490 pkt->scid.data, pkt->scid.len, 1, l);
Frédéric Lécaille6de72872021-06-11 15:44:24 +02004491 if (qc == NULL)
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004492 goto err;
4493
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01004494 memcpy(&qc->peer_addr, &pkt->saddr, sizeof(pkt->saddr));
4495
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004496 odcid = &qc->rx.params.original_destination_connection_id;
4497 /* Copy the transport parameters. */
4498 qc->rx.params = l->bind_conf->quic_params;
Amaury Denoyelle5ff1c972022-01-11 14:11:32 +01004499
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004500 /* Copy original_destination_connection_id transport parameter. */
Amaury Denoyelle5ff1c972022-01-11 14:11:32 +01004501 if (pkt->token_len) {
4502 if (parse_retry_token(pkt->token, pkt->token_len, odcid)) {
4503 TRACE_PROTO("Error during Initial token parsing", QUIC_EV_CONN_LPKT, qc);
4504 goto err;
4505 }
Amaury Denoyellec3b6f4d2022-01-11 12:03:09 +01004506 /* Copy retry_source_connection_id transport parameter. */
4507 quic_cid_cpy(&qc->rx.params.retry_source_connection_id,
4508 &pkt->dcid);
Amaury Denoyelle5ff1c972022-01-11 14:11:32 +01004509 }
4510 else {
4511 memcpy(odcid->data, &pkt->dcid.data, pkt->dcid.len);
4512 odcid->len = pkt->dcid.len;
4513 }
4514
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02004515 /* Copy the initial source connection ID. */
4516 quic_cid_cpy(&qc->rx.params.initial_source_connection_id, &qc->scid);
4517 qc->enc_params_len =
4518 quic_transport_params_encode(qc->enc_params,
4519 qc->enc_params + sizeof qc->enc_params,
4520 &qc->rx.params, 1);
4521 if (!qc->enc_params_len)
4522 goto err;
4523
Amaury Denoyelle33ac3462022-01-18 16:44:34 +01004524 if (qc_conn_alloc_ssl_ctx(qc))
4525 goto err;
4526
Frédéric Lécaille497fa782021-05-31 15:16:13 +02004527 /* NOTE: the socket address has been concatenated to the destination ID
4528 * chosen by the client for Initial packets.
4529 */
Frédéric Lécaille2fc76cf2021-08-31 19:10:40 +02004530 if (pkt->version == QUIC_PROTOCOL_VERSION_DRAFT_29) {
4531 salt = initial_salt_draft_29;
4532 salt_len = sizeof initial_salt_draft_29;
4533 }
4534 if (!qc_new_isecs(qc, salt, salt_len,
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01004535 pkt->dcid.data, pkt->dcid.len, 1)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004536 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc);
Frédéric Lécaille497fa782021-05-31 15:16:13 +02004537 goto err;
4538 }
4539
Frédéric Lécailleb0006ee2021-11-03 19:01:31 +01004540 HA_RWLOCK_WRLOCK(QUIC_LOCK, &l->rx.cids_lock);
Frédéric Lécaillef3d078d2021-06-14 14:18:10 +02004541 /* Insert the DCID the QUIC client has chosen (only for listeners) */
Amaury Denoyellec92cbfc2021-12-14 17:20:59 +01004542 n = ebmb_insert(&l->rx.odcids, &qc->odcid_node,
4543 qc->odcid.len + qc->odcid.addrlen);
Frédéric Lécaille8370c932021-11-08 17:01:46 +01004544
Amaury Denoyelleaff4ec82021-11-24 15:16:08 +01004545 /* If the insertion failed, it means that another
4546 * thread has already allocated a QUIC connection for
4547 * the same CID. Release our allocated connection.
4548 */
4549 if (unlikely(n != &qc->odcid_node)) {
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004550 qc_to_purge = qc;
4551
Amaury Denoyelleaff4ec82021-11-24 15:16:08 +01004552 qc = ebmb_entry(n, struct quic_conn, odcid_node);
4553 pkt->qc = qc;
4554 }
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004555
4556 quic_conn_take(qc);
4557 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &l->rx.cids_lock);
4558
4559 if (likely(!qc_to_purge)) {
Frédéric Lécaille8370c932021-11-08 17:01:46 +01004560 /* Enqueue this packet. */
Frédéric Lécaillef67b3562021-11-15 16:21:40 +01004561 pkt->qc = qc;
Frédéric Lécaille8370c932021-11-08 17:01:46 +01004562 }
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004563 else {
4564 quic_conn_drop(qc_to_purge);
4565 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004566 }
4567 else {
Frédéric Lécaille8370c932021-11-08 17:01:46 +01004568 pkt->qc = qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004569 }
4570 }
4571 else {
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004572 if (end - buf < QUIC_HAP_CID_LEN) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004573 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
4574 goto err;
4575 }
4576
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004577 memcpy(pkt->dcid.data, buf, QUIC_HAP_CID_LEN);
Amaury Denoyelleadb22762021-12-14 15:04:14 +01004578 pkt->dcid.len = QUIC_HAP_CID_LEN;
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004579 buf += QUIC_HAP_CID_LEN;
4580
4581 /* A short packet is the last one of a UDP datagram. */
4582 payload = buf;
4583 pkt->len = end - beg;
Amaury Denoyelleadb22762021-12-14 15:04:14 +01004584
Amaury Denoyelled6b16672021-12-23 10:37:19 +01004585 qc = retrieve_qc_conn_from_cid(pkt, l, saddr);
Frédéric Lécaille4d118d62021-12-21 14:48:58 +01004586 if (!qc) {
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004587 size_t pktlen = end - buf;
Frédéric Lécaille4d118d62021-12-21 14:48:58 +01004588 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, NULL, pkt, &pktlen);
4589 goto err;
4590 }
4591
Frédéric Lécaille8370c932021-11-08 17:01:46 +01004592 pkt->qc = qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004593 }
4594
Amaury Denoyelleadb22762021-12-14 15:04:14 +01004595
4596 /* When multiple QUIC packets are coalesced on the same UDP datagram,
4597 * they must have the same DCID.
4598 *
4599 * This check must be done after the final update to pkt.len to
4600 * properly drop the packet on failure.
4601 */
4602 if (!dgram_ctx->dcid.len) {
4603 memcpy(dgram_ctx->dcid.data, pkt->dcid.data, pkt->dcid.len);
Frédéric Lécaille6b663152022-01-04 17:03:11 +01004604 if (!quic_peer_validated_addr(qc) &&
4605 HA_ATOMIC_LOAD(&qc->flags) & QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED) {
4606 TRACE_PROTO("PTO timer must be armed after anti-amplification was reached",
4607 QUIC_EV_CONN_LPKT, qc);
4608 /* Reset the anti-amplification bit. It will be set again
4609 * when sending the next packet if reached again.
4610 */
4611 HA_ATOMIC_BTR(&qc->flags, QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED_BIT);
4612 HA_ATOMIC_OR(&qc->flags, QUIC_FL_CONN_IO_CB_WAKEUP_BIT);
4613 io_cb_wakeup = 1;
4614 }
Amaury Denoyelleadb22762021-12-14 15:04:14 +01004615 }
4616 else if (memcmp(dgram_ctx->dcid.data, pkt->dcid.data, pkt->dcid.len)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004617 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004618 goto err;
4619 }
Amaury Denoyelleadb22762021-12-14 15:04:14 +01004620 dgram_ctx->qc = qc;
4621
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004622
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01004623 if (HA_ATOMIC_LOAD(&qc->err_code)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004624 TRACE_PROTO("Connection error", QUIC_EV_CONN_LPKT, qc);
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01004625 goto out;
4626 }
4627
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004628 pkt->raw_len = pkt->len;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004629 HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Frédéric Lécailled61bc8d2021-12-02 14:46:19 +01004630 quic_rx_pkts_del(qc);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004631 b_cspace = b_contig_space(&qc->rx.buf);
4632 if (b_cspace < pkt->len) {
4633 /* Let us consume the remaining contiguous space. */
Frédéric Lécailled61bc8d2021-12-02 14:46:19 +01004634 if (b_cspace) {
4635 b_putchr(&qc->rx.buf, 0x00);
4636 b_cspace--;
4637 }
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004638 b_add(&qc->rx.buf, b_cspace);
4639 if (b_contig_space(&qc->rx.buf) < pkt->len) {
4640 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004641 TRACE_PROTO("Too big packet", QUIC_EV_CONN_LPKT, qc, pkt, &pkt->len);
Frédéric Lécaille91ac6c32021-12-17 16:11:54 +01004642 qc_list_all_rx_pkts(qc);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004643 goto err;
4644 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004645 }
4646
Amaury Denoyellee81fed92021-12-22 11:06:34 +01004647 if (!qc_try_rm_hp(qc, pkt, payload, beg, end, &qel)) {
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004648 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004649 TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc);
Frédéric Lécaille1a5e88c2021-05-31 18:04:07 +02004650 goto err;
4651 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004652
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004653 HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004654 TRACE_PROTO("New packet", QUIC_EV_CONN_LPKT, qc, pkt);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01004655 if (pkt->aad_len)
4656 qc_pkt_insert(pkt, qel);
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01004657 out:
Frédéric Lécaille01abc462021-07-21 09:34:27 +02004658 /* Wake up the connection packet handler task from here only if all
4659 * the contexts have been initialized, especially the mux context
4660 * conn_ctx->conn->ctx. Note that it is the ->start xprt callback which
4661 * will start it if these contexts for the connection are not already
4662 * initialized.
4663 */
Amaury Denoyelle7ca7c842021-12-22 18:20:38 +01004664 conn_ctx = HA_ATOMIC_LOAD(&qc->xprt_ctx);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01004665 if (conn_ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004666 tasklet_wakeup(conn_ctx->wait_event.tasklet);
Frédéric Lécailled24c2ec2021-05-31 10:24:49 +02004667
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004668 TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc ? qc : NULL, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004669
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004670 if (qc)
4671 quic_conn_drop(qc);
4672
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004673 return pkt->len;
4674
4675 err:
Frédéric Lécaille6b663152022-01-04 17:03:11 +01004676 /* Wakeup the I/O handler callback if the PTO timer must be armed.
4677 * This cannot be done by this thread.
4678 */
4679 if (io_cb_wakeup) {
4680 conn_ctx = HA_ATOMIC_LOAD(&qc->xprt_ctx);
4681 if (conn_ctx && conn_ctx->wait_event.tasklet)
4682 tasklet_wakeup(conn_ctx->wait_event.tasklet);
4683 }
Frédéric Lécaillef7ef9762021-12-31 16:37:58 +01004684 /* If the packet length could not be parsed, consume the entire datagram */
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01004685 if (!pkt->len)
4686 pkt->len = end - beg;
Frédéric Lécaille6c1e36c2020-12-23 17:17:37 +01004687 TRACE_DEVEL("Leaving in error", QUIC_EV_CONN_LPKT,
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004688 qc ? qc : NULL, pkt);
Amaury Denoyelle76f47ca2021-12-23 10:02:50 +01004689
4690 if (qc)
4691 quic_conn_drop(qc);
4692
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004693 return -1;
4694}
4695
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004696/* This function builds into <buf> buffer a QUIC long packet header.
4697 * Return 1 if enough room to build this header, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004698 */
4699static int quic_build_packet_long_header(unsigned char **buf, const unsigned char *end,
4700 int type, size_t pn_len, struct quic_conn *conn)
4701{
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004702 if (end - *buf < sizeof conn->version + conn->dcid.len + conn->scid.len + 3)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004703 return 0;
4704
4705 /* #0 byte flags */
4706 *(*buf)++ = QUIC_PACKET_FIXED_BIT | QUIC_PACKET_LONG_HEADER_BIT |
4707 (type << QUIC_PACKET_TYPE_SHIFT) | (pn_len - 1);
4708 /* Version */
4709 quic_write_uint32(buf, end, conn->version);
4710 *(*buf)++ = conn->dcid.len;
4711 /* Destination connection ID */
4712 if (conn->dcid.len) {
4713 memcpy(*buf, conn->dcid.data, conn->dcid.len);
4714 *buf += conn->dcid.len;
4715 }
4716 /* Source connection ID */
4717 *(*buf)++ = conn->scid.len;
4718 if (conn->scid.len) {
4719 memcpy(*buf, conn->scid.data, conn->scid.len);
4720 *buf += conn->scid.len;
4721 }
4722
4723 return 1;
4724}
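
/* Hypothetical helper, not part of the original source: number of bytes emitted
 * by quic_build_packet_long_header() above, i.e. the first byte, the 32-bit
 * version and both length-prefixed connection IDs. Any further fields (such as
 * the Length and packet number) are appended by the caller and are not accounted
 * for here.
 */
__attribute__((unused))
static size_t quic_long_header_size(const struct quic_conn *conn)
{
	return 1 + sizeof conn->version + 1 + conn->dcid.len + 1 + conn->scid.len;
}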
4725
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004726/* This function builds into <buf> buffer a QUIC short packet header.
4727 * Return 1 if enough room to build this header, 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004728 */
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004729static int quic_build_packet_short_header(unsigned char **buf, const unsigned char *end,
4730 size_t pn_len, struct quic_conn *conn,
4731 unsigned char tls_flags)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004732{
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004733 if (end - *buf < 1 + conn->dcid.len)
4734 return 0;
4735
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004736 /* #0 byte flags */
Frédéric Lécaillea7d2c092021-11-30 11:18:18 +01004737 *(*buf)++ = QUIC_PACKET_FIXED_BIT |
4738 ((tls_flags & QUIC_FL_TLS_KP_BIT_SET) ? QUIC_PACKET_KEY_PHASE_BIT : 0) | (pn_len - 1);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004739 /* Destination connection ID */
4740 if (conn->dcid.len) {
4741 memcpy(*buf, conn->dcid.data, conn->dcid.len);
4742 *buf += conn->dcid.len;
4743 }
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004744
4745 return 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004746}
4747
4748/* Apply QUIC header protection to the packet with <buf> as first byte address,
4749 * <pn> as address of the Packet number field, <pnlen> being this field length
4750 * with <aead> as AEAD cipher and <key> as secret key.
4751 * Returns 1 if succeeded or 0 if failed.
4752 */
4753static int quic_apply_header_protection(unsigned char *buf, unsigned char *pn, size_t pnlen,
4754 const EVP_CIPHER *aead, const unsigned char *key)
4755{
4756 int i, ret, outlen;
4757 EVP_CIPHER_CTX *ctx;
4758 * We need a mask of at least 5 bytes: one byte for byte #0
4759 * and at most 4 bytes for the packet number
4760 */
4761 unsigned char mask[5] = {0};
4762
4763 ret = 0;
4764 ctx = EVP_CIPHER_CTX_new();
4765 if (!ctx)
4766 return 0;
4767
4768 if (!EVP_EncryptInit_ex(ctx, aead, NULL, key, pn + QUIC_PACKET_PN_MAXLEN) ||
4769 !EVP_EncryptUpdate(ctx, mask, &outlen, mask, sizeof mask) ||
4770 !EVP_EncryptFinal_ex(ctx, mask, &outlen))
4771 goto out;
4772
4773 *buf ^= mask[0] & (*buf & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
4774 for (i = 0; i < pnlen; i++)
4775 pn[i] ^= mask[i + 1];
4776
4777 ret = 1;
4778
4779 out:
4780 EVP_CIPHER_CTX_free(ctx);
4781
4782 return ret;
4783}
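
/* Illustrative sketch, not part of the original source: header protection is a
 * pure XOR with a mask derived only from <key> and from the sample located at
 * pn + QUIC_PACKET_PN_MAXLEN, bytes which quic_apply_header_protection() never
 * modifies. Applying it twice with the same key therefore restores the original
 * first byte and packet number. The hypothetical check below relies on this
 * property and assumes <pnlen> is at most QUIC_PACKET_PN_MAXLEN, as guaranteed
 * by the protocol.
 */
__attribute__((unused))
static int quic_hp_is_involutive(unsigned char *byte0, unsigned char *pn, size_t pnlen,
                                 const EVP_CIPHER *aead, const unsigned char *key)
{
	unsigned char saved0 = *byte0;
	unsigned char saved_pn[QUIC_PACKET_PN_MAXLEN];

	memcpy(saved_pn, pn, pnlen);
	if (!quic_apply_header_protection(byte0, pn, pnlen, aead, key) ||
	    !quic_apply_header_protection(byte0, pn, pnlen, aead, key))
		return 0;

	return *byte0 == saved0 && memcmp(saved_pn, pn, pnlen) == 0;
}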
4784
4785/* Reduce the encoded size of <ack_frm> ACK frame removing the last
4786 * ACK ranges if needed to a value below <limit> in bytes.
4787 * Returns the new size of this ACK frame if succeeded, 0 if not.
4788 */
4789static int quic_ack_frm_reduce_sz(struct quic_frame *ack_frm, size_t limit)
4790{
4791 size_t room, ack_delay_sz;
4792
4793 ack_delay_sz = quic_int_getsize(ack_frm->tx_ack.ack_delay);
4794 /* A frame is made of 1 byte for the frame type. */
4795 room = limit - ack_delay_sz - 1;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01004796 if (!quic_rm_last_ack_ranges(ack_frm->tx_ack.arngs, room))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004797 return 0;
4798
Frédéric Lécaille8090b512020-11-30 16:19:22 +01004799 return 1 + ack_delay_sz + ack_frm->tx_ack.arngs->enc_sz;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004800}
4801
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004802/* Prepare as many CRYPTO or STREAM frames as possible from their prebuilt frames
4803 * for <qel> encryption level to be encoded in a buffer with <room> as available room,
Frédéric Lécailleea604992020-12-24 13:01:37 +01004804 * and <*len> the packet Length field initialized with the number of bytes already present
4805 * in this buffer which must be taken into account for the Length packet field value.
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004806 * <headlen> is the number of bytes already present in this packet before building frames.
4807 *
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004808 * Consequently update <*len> to reflect the size of these frames built
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004809 * by this function. Also attach these frames to <l> frame list.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004810 * Return 1 if succeeded, 0 if not.
4811 */
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004812static inline int qc_build_frms(struct list *l,
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004813 size_t room, size_t *len, size_t headlen,
4814 struct quic_enc_level *qel,
Amaury Denoyelle17a74162021-12-21 14:45:39 +01004815 struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004816{
Frédéric Lécailleea604992020-12-24 13:01:37 +01004817 int ret;
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01004818 struct quic_frame *cf, *cfbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004819
Frédéric Lécailleea604992020-12-24 13:01:37 +01004820 ret = 0;
Frédéric Lécaillef4e5a7c2022-01-17 17:56:20 +01004821 if (*len > room)
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02004822 return 0;
4823
Frédéric Lécailleea604992020-12-24 13:01:37 +01004824 /* If we are not probing we must take into account the congestion
4825 * control window.
4826 */
Frédéric Lécaillef4e5a7c2022-01-17 17:56:20 +01004827 if (!qel->pktns->tx.pto_probe) {
4828 size_t remain = quic_path_prep_data(qc->path);
4829
4830 if (headlen > remain)
4831 return 0;
4832
4833 room = QUIC_MIN(room, remain - headlen);
4834 }
4835
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004836 TRACE_PROTO("************** frames build (headlen)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004837 QUIC_EV_CONN_BCFRMS, qc, &headlen);
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01004838 list_for_each_entry_safe(cf, cfbak, &qel->pktns->tx.frms, list) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004839 /* header length, data length, frame length. */
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004840 size_t hlen, dlen, dlen_sz, avail_room, flen;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004841
Frédéric Lécailleea604992020-12-24 13:01:37 +01004842 if (!room)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004843 break;
4844
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004845 switch (cf->type) {
4846 case QUIC_FT_CRYPTO:
4847 TRACE_PROTO(" New CRYPTO frame build (room, len)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004848 QUIC_EV_CONN_BCFRMS, qc, &room, len);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004849 /* Compute the length of this CRYPTO frame header */
4850 hlen = 1 + quic_int_getsize(cf->crypto.offset);
4851 /* Compute the data length of this CRYPTO frame. */
4852 dlen = max_stream_data_size(room, *len + hlen, cf->crypto.len);
4853 TRACE_PROTO(" CRYPTO data length (hlen, crypto.len, dlen)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004854 QUIC_EV_CONN_BCFRMS, qc, &hlen, &cf->crypto.len, &dlen);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004855 if (!dlen)
4856 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004857
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004858 /* CRYPTO frame length. */
4859 flen = hlen + quic_int_getsize(dlen) + dlen;
4860 TRACE_PROTO(" CRYPTO frame length (flen)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004861 QUIC_EV_CONN_BCFRMS, qc, &flen);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004862 /* Add the CRYPTO data length and its encoded length to the packet
4863 * length and the length of this length.
4864 */
4865 *len += flen;
4866 room -= flen;
4867 if (dlen == cf->crypto.len) {
4868 /* <cf> CRYPTO data have been consumed. */
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01004869 LIST_DELETE(&cf->list);
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004870 LIST_APPEND(l, &cf->list);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004871 }
4872 else {
4873 struct quic_frame *new_cf;
4874
4875 new_cf = pool_alloc(pool_head_quic_frame);
4876 if (!new_cf) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004877 TRACE_PROTO("No memory for new crypto frame", QUIC_EV_CONN_BCFRMS, qc);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004878 return 0;
4879 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004880
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004881 new_cf->type = QUIC_FT_CRYPTO;
4882 new_cf->crypto.len = dlen;
4883 new_cf->crypto.offset = cf->crypto.offset;
4884 new_cf->crypto.qel = qel;
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004885 LIST_APPEND(l, &new_cf->list);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004886 /* Consume <dlen> bytes of the current frame. */
4887 cf->crypto.len -= dlen;
4888 cf->crypto.offset += dlen;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004889 }
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004890 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004891
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004892 case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004893 /* Note that these frames are accepted in short packets only without
4894 * "Length" packet field. Here, <*len> is used only to compute the
4895 * sum of the lengths of the already built frames for this packet.
Frédéric Lécailled8b84432021-12-10 15:18:36 +01004896 *
4897 * Compute the length of this STREAM frame "header" made of all the fields
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004898 * excepting the variable ones. Note that +1 is for the type of this frame.
4899 */
4900 hlen = 1 + quic_int_getsize(cf->stream.id) +
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02004901 ((cf->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT) ? quic_int_getsize(cf->stream.offset.key) : 0);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004902 /* Compute the data length of this STREAM frame. */
4903 avail_room = room - hlen - *len;
4904 if ((ssize_t)avail_room <= 0)
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02004905 break;
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004906
Frédéric Lécailled8b84432021-12-10 15:18:36 +01004907 TRACE_PROTO(" New STREAM frame build (room, len)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004908 QUIC_EV_CONN_BCFRMS, qc, &room, len);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004909 if (cf->type & QUIC_STREAM_FRAME_TYPE_LEN_BIT) {
4910 dlen = max_available_room(avail_room, &dlen_sz);
4911 if (dlen > cf->stream.len) {
4912 dlen = cf->stream.len;
4913 }
4914 dlen_sz = quic_int_getsize(dlen);
4915 flen = hlen + dlen_sz + dlen;
4916 }
4917 else {
4918 dlen = QUIC_MIN(avail_room, cf->stream.len);
4919 flen = hlen + dlen;
4920 }
4921 TRACE_PROTO(" STREAM data length (hlen, stream.len, dlen)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004922 QUIC_EV_CONN_BCFRMS, qc, &hlen, &cf->stream.len, &dlen);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004923 TRACE_PROTO(" STREAM frame length (flen)",
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004924 QUIC_EV_CONN_BCFRMS, qc, &flen);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004925 /* Add the STREAM data length and its encoded length to the packet
4926 * length and the length of this length.
4927 */
4928 *len += flen;
4929 room -= flen;
4930 if (dlen == cf->stream.len) {
4931 /* <cf> STREAM data have been consumed. */
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01004932 LIST_DELETE(&cf->list);
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004933 LIST_APPEND(l, &cf->list);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004934 }
4935 else {
4936 struct quic_frame *new_cf;
4937
4938 new_cf = pool_zalloc(pool_head_quic_frame);
4939 if (!new_cf) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01004940 TRACE_PROTO("No memory for new STREAM frame", QUIC_EV_CONN_BCFRMS, qc);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004941 return 0;
4942 }
4943
4944 new_cf->type = cf->type;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02004945 new_cf->stream.qcs = cf->stream.qcs;
4946 new_cf->stream.buf = cf->stream.buf;
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004947 new_cf->stream.id = cf->stream.id;
4948 if (cf->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT)
4949 new_cf->stream.offset = cf->stream.offset;
4950 new_cf->stream.len = dlen;
4951 new_cf->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
4952 /* FIN bit reset */
4953 new_cf->type &= ~QUIC_STREAM_FRAME_TYPE_FIN_BIT;
4954 new_cf->stream.data = cf->stream.data;
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004955 LIST_APPEND(l, &new_cf->list);
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004956 cf->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
4957 /* Consume <dlen> bytes of the current frame. */
4958 cf->stream.len -= dlen;
Frédéric Lécaille785d3bd2021-09-10 09:13:39 +02004959 cf->stream.offset.key += dlen;
Frédéric Lécaillea5b1b892021-08-25 17:56:22 +02004960 cf->stream.data += dlen;
4961 }
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004962 break;
4963
4964 default:
4965 flen = qc_frm_len(cf);
4966 BUG_ON(!flen);
4967 if (flen > room)
4968 continue;
4969
4970 *len += flen;
4971 room -= flen;
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01004972 LIST_DELETE(&cf->list);
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01004973 LIST_APPEND(l, &cf->list);
Frédéric Lécaille0ac38512021-08-03 16:38:49 +02004974 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004975 }
Frédéric Lécailleea604992020-12-24 13:01:37 +01004976 ret = 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004977 }
4978
Frédéric Lécailleea604992020-12-24 13:01:37 +01004979 return ret;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004980}
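
/* Worked sketch of the CRYPTO frame size arithmetic used above, assuming
 * quic_int_getsize() returns the QUIC variable-length integer encoded size
 * (1, 2, 4 or 8 bytes). For instance, with offset 1200 and 800 bytes of
 * data this gives 1 + 2 + 2 + 800 = 805 bytes on the wire.
 */
static inline size_t example_crypto_frame_size(uint64_t offset, size_t dlen)
{
        /* frame type + Offset field */
        size_t hlen = 1 + quic_int_getsize(offset);

        /* + Length field + data */
        return hlen + quic_int_getsize(dlen) + dlen;
}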
4981
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02004982/* This function builds a clear packet from <pkt> information (its type)
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004983 * into a buffer with <pos> as position pointer and <qel> as QUIC TLS encryption
4984 * level for <qc> QUIC connection,
4985 * filling the buffer with as many frames as possible.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004986 * The trailing QUIC_TLS_TAG_LEN bytes of this packet are not built. But they are
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02004987 * reserved to ensure there is enough room to build this AEAD TAG after
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02004988 * having returned from this function.
4989 * This function also updates the value of <buf_pn> pointer to point to the packet
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004990 * number field in this packet. <pn_len> will also have the packet number
4991 * length as value.
4992 *
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004993 * Return 1 if succeeded (enough room to build this packet), 0 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01004994 */
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01004995static int qc_do_build_pkt(unsigned char *pos, const unsigned char *end,
4996 size_t dglen, struct quic_tx_packet *pkt,
4997 int64_t pn, size_t *pn_len, unsigned char **buf_pn,
Frédéric Lécaillece6602d2022-01-17 11:06:10 +01004998 int ack, int padding, int cc, int probe,
Amaury Denoyelle17a74162021-12-21 14:45:39 +01004999 struct quic_enc_level *qel, struct quic_conn *qc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005000{
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005001 unsigned char *beg;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005002 size_t len, len_sz, len_frms, padding_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005003 struct quic_frame frm = { .type = QUIC_FT_CRYPTO, };
5004 struct quic_frame ack_frm = { .type = QUIC_FT_ACK, };
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005005 struct quic_frame cc_frm = { . type = QUIC_FT_CONNECTION_CLOSE, };
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005006 size_t ack_frm_len, head_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005007 int64_t largest_acked_pn;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005008 int add_ping_frm;
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005009 struct list frm_list = LIST_HEAD_INIT(frm_list);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005010
Frédéric Lécailleea604992020-12-24 13:01:37 +01005011 /* Length field value with CRYPTO frames if present. */
5012 len_frms = 0;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005013 beg = pos;
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01005014 /* When not probing and not acking, and no immediate close is required,
5015 * reduce the size of this buffer to respect
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005016 * the congestion controller window. So, we do not limit the size of this
5017 * packet if we have an ACK frame to send because an ACK frame is not
5018 * ack-eliciting. This size will be limited if we have ack-eliciting
5019 * frames to send from qel->pktns->tx.frms.
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005020 */
Frédéric Lécaillece6602d2022-01-17 11:06:10 +01005021 if (!probe && !ack && !cc) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005022 size_t path_room;
5023
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005024 path_room = quic_path_prep_data(qc->path);
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005025 if (end - beg > path_room)
5026 end = beg + path_room;
5027 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005028
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005029 /* Ensure there is enough room for the TLS encryption tag and a zero token
5030 * length field if any.
5031 */
5032 if (end - pos < QUIC_TLS_TAG_LEN +
5033 (pkt->type == QUIC_PACKET_TYPE_INITIAL ? 1 : 0))
5034 goto no_room;
5035
5036 end -= QUIC_TLS_TAG_LEN;
Frédéric Lécaillee1aa0d32021-08-03 16:03:09 +02005037 largest_acked_pn = HA_ATOMIC_LOAD(&qel->pktns->tx.largest_acked_pn);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005038 /* packet number length */
5039 *pn_len = quic_packet_number_length(pn, largest_acked_pn);
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005040 /* Build the header */
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005041 if ((pkt->type == QUIC_PACKET_TYPE_SHORT &&
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005042 !quic_build_packet_short_header(&pos, end, *pn_len, qc, qel->tls_ctx.flags)) ||
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005043 (pkt->type != QUIC_PACKET_TYPE_SHORT &&
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005044 !quic_build_packet_long_header(&pos, end, pkt->type, *pn_len, qc)))
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005045 goto no_room;
5046
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005047 /* XXX FIXME XXX Encode the token length (0) for an Initial packet. */
5048 if (pkt->type == QUIC_PACKET_TYPE_INITIAL)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005049 *pos++ = 0;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005050 head_len = pos - beg;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005051 /* Build an ACK frame if required. */
5052 ack_frm_len = 0;
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01005053 if (!cc && ack && !eb_is_empty(&qel->pktns->rx.arngs.root)) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005054 ack_frm.tx_ack.ack_delay = 0;
Frédéric Lécaille8090b512020-11-30 16:19:22 +01005055 ack_frm.tx_ack.arngs = &qel->pktns->rx.arngs;
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005056 /* XXX BE CAREFUL XXX : here we reserved at least one byte for the
5057 * smallest frame (PING) and <*pn_len> more for the packet number. Note
5058 * that from here, we do not know if we will have to send a PING frame.
5059 * This will be decided after having computed the ack-eliciting frames
5060 * to be added to this packet.
5061 */
5062 ack_frm_len = quic_ack_frm_reduce_sz(&ack_frm, end - 1 - *pn_len - pos);
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005063 if (!ack_frm_len)
5064 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005065 }
5066
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005067 /* Length field value without the ack-eliciting frames. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005068 len = ack_frm_len + *pn_len;
Frédéric Lécaille1fc5e162021-11-22 14:25:57 +01005069 len_frms = 0;
Frédéric Lécaille82468ea2022-01-14 20:23:22 +01005070 if (!cc && !LIST_ISEMPTY(&qel->pktns->tx.frms)) {
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005071 ssize_t room = end - pos;
Frédéric Lécailleea604992020-12-24 13:01:37 +01005072
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005073 /* Initialize the length of the frames built below to <len>.
5074 * If any frame could be successfully built by qc_build_frms(),
5075 * we will have len_frms > len.
5076 */
5077 len_frms = len;
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005078 if (!qc_build_frms(&frm_list, end - pos, &len_frms, pos - beg, qel, qc)) {
Frédéric Lécailleea604992020-12-24 13:01:37 +01005079 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005080 qc, NULL, NULL, &room);
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005081 }
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005082 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005083
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005084 /* Length (of the remaining data). Must not fail because the buffer size
5085 * has been checked above. Note that we have reserved QUIC_TLS_TAG_LEN bytes
5086 * for the encryption tag. It must be taken into account for the length
5087 * of this packet.
5088 */
5089 if (len_frms)
5090 len = len_frms + QUIC_TLS_TAG_LEN;
5091 else
5092 len += QUIC_TLS_TAG_LEN;
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005093 /* CONNECTION_CLOSE frame */
5094 if (cc) {
5095 struct quic_connection_close *cc = &cc_frm.connection_close;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005096
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005097 cc->error_code = qc->err_code;
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005098 len += qc_frm_len(&cc_frm);
5099 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005100 add_ping_frm = 0;
5101 padding_len = 0;
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005102 len_sz = quic_int_getsize(len);
5103 /* Add this packet size to <dglen> */
5104 dglen += head_len + len_sz + len;
5105 if (padding && dglen < QUIC_INITIAL_PACKET_MINLEN) {
5106 /* This is a maximum padding size */
5107 padding_len = QUIC_INITIAL_PACKET_MINLEN - dglen;
5108 /* The length field value of this packet is <len> + <padding_len>
5109 * the size of which may be greater than the initial computed size
Ilya Shipitsin5e87bcf2021-12-25 11:45:52 +05005110 * <len_sz>. So, let's deduct the difference between these two packet
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005111 * sizes from <padding_len>.
5112 */
5113 padding_len -= quic_int_getsize(len + padding_len) - len_sz;
5114 len += padding_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005115 }
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005116 else if (LIST_ISEMPTY(&frm_list) || len_frms == len) {
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005117 if (qel->pktns->tx.pto_probe) {
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005118 /* If we cannot send a frame, we send a PING frame. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005119 add_ping_frm = 1;
5120 len += 1;
5121 }
5122 /* If there is no frame at all to follow, add at least a PADDING frame. */
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005123 if (!ack_frm_len && !cc)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005124 len += padding_len = QUIC_PACKET_PN_MAXLEN - *pn_len;
5125 }
5126
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005127 if (pkt->type != QUIC_PACKET_TYPE_SHORT && !quic_enc_int(&pos, end, len))
5128 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005129
5130 /* Packet number field address. */
5131 *buf_pn = pos;
5132
5133 /* Packet number encoding. */
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005134 if (!quic_packet_number_encode(&pos, end, pn, *pn_len))
5135 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005136
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005137 if (ack_frm_len && !qc_build_frm(&pos, end, &ack_frm, pkt, qc))
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005138 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005139
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005140 /* Ack-eliciting frames */
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005141 if (!LIST_ISEMPTY(&frm_list)) {
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02005142 struct quic_frame *cf;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005143
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005144 list_for_each_entry(cf, &frm_list, list) {
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005145 if (!qc_build_frm(&pos, end, cf, pkt, qc)) {
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01005146 ssize_t room = end - pos;
5147 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT,
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005148 qc, NULL, NULL, &room);
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005149 break;
Frédéric Lécaille133e8a72020-12-18 09:33:27 +01005150 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005151 }
5152 }
5153
5154 /* Build a PING frame if needed. */
5155 if (add_ping_frm) {
5156 frm.type = QUIC_FT_PING;
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005157 if (!qc_build_frm(&pos, end, &frm, pkt, qc))
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005158 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005159 }
5160
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005161 /* Build a CONNECTION_CLOSE frame if needed. */
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005162 if (cc && !qc_build_frm(&pos, end, &cc_frm, pkt, qc))
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005163 goto no_room;
Frédéric Lécaille66cbb822021-11-17 11:56:21 +01005164
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005165 /* Build a PADDING frame if needed. */
5166 if (padding_len) {
5167 frm.type = QUIC_FT_PADDING;
5168 frm.padding.len = padding_len;
Amaury Denoyelle17a74162021-12-21 14:45:39 +01005169 if (!qc_build_frm(&pos, end, &frm, pkt, qc))
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005170 goto no_room;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005171 }
5172
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01005173 /* If this packet is ack-eliciting and we are probing, let's
5174 * decrement the PTO probe counter.
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005175 */
Frédéric Lécaille466e9da2021-12-29 12:04:13 +01005176 if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING &&
5177 qel->pktns->tx.pto_probe)
5178 qel->pktns->tx.pto_probe--;
5179
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005180 pkt->len = pos - beg;
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005181 LIST_SPLICE(&pkt->frms, &frm_list);
5182 TRACE_PROTO("Ack eliciting frame", QUIC_EV_CONN_HPKT, qc, pkt);
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005183
5184 return 1;
5185
5186 no_room:
Frédéric Lécaillecba4cd42022-01-14 20:39:18 +01005187 /* Replace the pre-built frames which could not be added to this packet */
5188 LIST_SPLICE(&qel->pktns->tx.frms, &frm_list);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005189 TRACE_PROTO("Not enough room", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005190 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005191}
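
/* Worked sketch of the Initial padding computation performed above, under
 * the assumption that QUIC_INITIAL_PACKET_MINLEN is the 1200-byte datagram
 * floor required for Initial packets: with 300 bytes already in the
 * datagram, <padding_len> starts at 900, then is reduced by however much
 * the Length field encoding grows once the padding is accounted for.
 */
static inline size_t example_initial_padding(size_t dglen, size_t len, size_t len_sz)
{
        size_t padding_len;

        if (dglen >= QUIC_INITIAL_PACKET_MINLEN)
                return 0;

        padding_len = QUIC_INITIAL_PACKET_MINLEN - dglen;
        /* The Length field itself may need more bytes once padding is added. */
        padding_len -= quic_int_getsize(len + padding_len) - len_sz;

        return padding_len;
}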
5192
Frédéric Lécaille0e50e1b2021-08-03 15:03:35 +02005193static inline void quic_tx_packet_init(struct quic_tx_packet *pkt, int type)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005194{
Frédéric Lécaille0e50e1b2021-08-03 15:03:35 +02005195 pkt->type = type;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005196 pkt->len = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005197 pkt->in_flight_len = 0;
Frédéric Lécaille0371cd52021-12-13 12:30:54 +01005198 pkt->pn_node.key = (uint64_t)-1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005199 LIST_INIT(&pkt->frms);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005200 pkt->next = NULL;
5201 pkt->refcnt = 1;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005202}
5203
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05005204/* Free <pkt> TX packet which has not already been attached to any tree. */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005205static inline void free_quic_tx_packet(struct quic_tx_packet *pkt)
5206{
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02005207 struct quic_frame *frm, *frmbak;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005208
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005209 if (!pkt)
5210 return;
5211
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005212 list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
Willy Tarreau2b718102021-04-21 07:32:39 +02005213 LIST_DELETE(&frm->list);
Frédéric Lécaille0ad04582021-07-27 14:51:54 +02005214 pool_free(pool_head_quic_frame, frm);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005215 }
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005216 quic_tx_packet_refdec(pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005217}
5218
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02005219/* Build a packet into the buffer delimited by <pos> and <buf_end> with <pkt_type>
5220 * as packet type for <qc> QUIC connection from <qel> encryption level.
5221 * Return the built packet, or NULL with <*err> set to -2 if the packet could not be
5222 * allocated or encrypted for any reason, or to -1 if there was not enough room to build it.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005223 */
Frédéric Lécaille9445abc2021-08-04 10:49:51 +02005224static struct quic_tx_packet *qc_build_pkt(unsigned char **pos,
5225 const unsigned char *buf_end,
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005226 struct quic_enc_level *qel,
Frédéric Lécaille28f51fa2021-11-09 14:12:12 +01005227 struct quic_conn *qc, size_t dglen, int padding,
Frédéric Lécaillece6602d2022-01-17 11:06:10 +01005228 int pkt_type, int ack, int probe, int cc, int *err)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005229{
5230 /* The pointer to the packet number field. */
5231 unsigned char *buf_pn;
5232 unsigned char *beg, *end, *payload;
5233 int64_t pn;
5234 size_t pn_len, payload_len, aad_len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005235 struct quic_tls_ctx *tls_ctx;
5236 struct quic_tx_packet *pkt;
5237
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005238 TRACE_ENTER(QUIC_EV_CONN_HPKT, qc, NULL, qel);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005239 *err = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005240 pkt = pool_alloc(pool_head_quic_tx_packet);
5241 if (!pkt) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005242 TRACE_DEVEL("Not enough memory for a new packet", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005243 *err = -2;
5244 goto err;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005245 }
5246
Frédéric Lécaille0e50e1b2021-08-03 15:03:35 +02005247 quic_tx_packet_init(pkt, pkt_type);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005248 beg = *pos;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005249 pn_len = 0;
5250 buf_pn = NULL;
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005251
5252 pn = qel->pktns->tx.next_pn + 1;
5253 if (!qc_do_build_pkt(*pos, buf_end, dglen, pkt, pn, &pn_len, &buf_pn,
Frédéric Lécaillece6602d2022-01-17 11:06:10 +01005254 ack, padding, cc, probe, qel, qc)) {
Frédéric Lécaille4436cb62021-08-16 12:06:46 +02005255 *err = -1;
5256 goto err;
5257 }
5258
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005259 end = beg + pkt->len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005260 payload = buf_pn + pn_len;
5261 payload_len = end - payload;
5262 aad_len = payload - beg;
5263
5264 tls_ctx = &qel->tls_ctx;
Frédéric Lécaille5f7f1182022-01-10 11:00:16 +01005265 if (!quic_packet_encrypt(payload, payload_len, beg, aad_len, pn, tls_ctx, qc)) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005266 *err = -2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005267 goto err;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005268 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005269
5270 end += QUIC_TLS_TAG_LEN;
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005271 pkt->len += QUIC_TLS_TAG_LEN;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005272 if (!quic_apply_header_protection(beg, buf_pn, pn_len,
5273 tls_ctx->tx.hp, tls_ctx->tx.hp_key)) {
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005274 TRACE_DEVEL("Could not apply the header protection", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005275 *err = -2;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005276 goto err;
5277 }
5278
Frédéric Lécaille73dcc6e2021-12-07 15:27:44 +01005279 /* Consume a packet number */
5280 qel->pktns->tx.next_pn++;
Frédéric Lécailleca98a7f2021-11-10 17:30:15 +01005281 qc->tx.prep_bytes += pkt->len;
Frédéric Lécaille41a07602022-01-04 16:57:37 +01005282 if (qc->tx.prep_bytes >= 3 * qc->rx.bytes)
5283 HA_ATOMIC_OR(&qc->flags, QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005284 /* Now that a correct packet is built, let us consume <*pos> buffer. */
5285 *pos = end;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005286 /* Attach the built packet to its tree. */
Frédéric Lécaillea7348f62021-08-03 16:50:14 +02005287 pkt->pn_node.key = pn;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005288 /* Set the packet in flight length for in flight packets only. */
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005289 if (pkt->flags & QUIC_FL_TX_PACKET_IN_FLIGHT) {
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005290 pkt->in_flight_len = pkt->len;
5291 qc->path->prep_in_flight += pkt->len;
Frédéric Lécaille04ffb662020-12-08 15:58:39 +01005292 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005293 pkt->pktns = qel->pktns;
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005294 TRACE_LEAVE(QUIC_EV_CONN_HPKT, qc, pkt);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005295
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005296 return pkt;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005297
5298 err:
5299 free_quic_tx_packet(pkt);
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005300 TRACE_DEVEL("leaving in error", QUIC_EV_CONN_HPKT, qc);
Frédéric Lécaillec5b0c932021-07-06 16:35:52 +02005301 return NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005302}
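
/* Sketch of the anti-amplification check used above: before the peer
 * address is validated, a server is assumed to be allowed to send at most
 * three times the number of bytes received from the client, which is what
 * the 3 * qc->rx.bytes comparison enforces when setting
 * QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED.
 */
static inline int example_amplification_limit_reached(const struct quic_conn *qc)
{
        return qc->tx.prep_bytes >= 3 * qc->rx.bytes;
}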
5303
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01005304/* Copy up to <count> bytes from connection <conn> internal stream storage into buffer <buf>.
5305 * Return the number of bytes which have been copied.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005306 */
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01005307static size_t quic_conn_to_buf(struct connection *conn, void *xprt_ctx,
5308 struct buffer *buf, size_t count, int flags)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005309{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005310 size_t try, done = 0;
5311
5312 if (!conn_ctrl_ready(conn))
5313 return 0;
5314
5315 if (!fd_recv_ready(conn->handle.fd))
5316 return 0;
5317
5318 conn->flags &= ~CO_FL_WAIT_ROOM;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005319
5320 /* read the largest possible block. For this, we perform only one call
5321 * to recv() unless the buffer wraps and we exactly fill the first hunk,
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01005322 * in which case we are willing to do it once again.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005323 */
5324 while (count > 0) {
5325 try = b_contig_space(buf);
5326 if (!try)
5327 break;
5328
5329 if (try > count)
5330 try = count;
5331
Frédéric Lécaillefbe3b772021-03-03 16:23:44 +01005332 b_add(buf, try);
5333 done += try;
5334 count -= try;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005335 }
5336
5337 if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
5338 conn->flags &= ~CO_FL_WAIT_L4_CONN;
5339
5340 leave:
5341 return done;
5342
5343 read0:
5344 conn_sock_read0(conn);
5345 conn->flags &= ~CO_FL_WAIT_L4_CONN;
5346
5347 /* Now a final check for a possible asynchronous low-level error
5348 * report. This can happen when a connection receives a reset
5349 * after a shutdown, both POLL_HUP and POLL_ERR are queued, and
5350 * we might have come from there by just checking POLL_HUP instead
5351 * of recv()'s return value 0, so we have no way to tell there was
5352 * an error without checking.
5353 */
Willy Tarreauf5090652021-04-06 17:23:40 +02005354 if (unlikely(fdtab[conn->handle.fd].state & FD_POLL_ERR))
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005355 conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
5356 goto leave;
5357}
5358
5359
5360/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
5361 * socket. <flags> may contain some CO_SFL_* flags to hint the system about
5362 * other pending data for example, but this flag is ignored at the moment.
5363 * Only one call to send() is performed, unless the buffer wraps, in which case
5364 * a second call may be performed. The connection's flags are updated with
5365 * whatever special event is detected (error, empty). The caller is responsible
5366 * for taking care of those events and avoiding the call if inappropriate. The
5367 * function does not call the connection's polling update function, so the caller
5368 * is responsible for this. It's up to the caller to update the buffer's contents
5369 * based on the return value.
5370 */
5371static size_t quic_conn_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
5372{
5373 ssize_t ret;
5374 size_t try, done;
5375 int send_flag;
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01005376 struct quic_conn *qc = ((struct ssl_sock_ctx *)xprt_ctx)->qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005377
5378 done = 0;
5379 /* send the largest possible block. For this we perform only one call
5380 * to send() unless the buffer wraps and we exactly fill the first hunk,
5381 * in which case we are willing to do it once again.
5382 */
5383 while (count) {
5384 try = b_contig_data(buf, done);
5385 if (try > count)
5386 try = count;
5387
5388 send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
5389 if (try < count || flags & CO_SFL_MSG_MORE)
5390 send_flag |= MSG_MORE;
5391
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01005392 ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag,
5393 (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005394 if (ret > 0) {
5395 count -= ret;
5396 done += ret;
5397
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005398 if (ret < try)
5399 break;
5400 }
5401 else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01005402 ABORT_NOW();
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005403 }
5404 else if (errno != EINTR) {
Amaury Denoyelle9fa15e52022-01-19 15:54:23 +01005405 ABORT_NOW();
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005406 }
5407 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005408
5409 if (done > 0) {
5410 /* we count the total bytes sent, and the send rate for 32-byte
5411 * blocks. The reason for the latter is that freq_ctr are
5412 * limited to 4GB and that it's not enough per second.
5413 */
5414 _HA_ATOMIC_ADD(&global.out_bytes, done);
5415 update_freq_ctr(&global.out_32bps, (done + 16) / 32);
5416 }
5417 return done;
5418}
5419
Frédéric Lécaille422a39c2021-03-03 17:28:34 +01005420/* Called from the upper layer, to subscribe <es> to events <event_type>. The
5421 * event subscriber <es> is not allowed to change from a previous call as long
5422 * as at least one event is still subscribed. The <event_type> must only be a
5423 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
5424 */
5425static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
5426{
Frédéric Lécaille513b4f22021-09-20 15:23:17 +02005427 struct qcc *qcc = conn->qc->qcc;
5428
5429 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
5430 BUG_ON(qcc->subs && qcc->subs != es);
5431
5432 es->events |= event_type;
5433 qcc->subs = es;
5434
5435 if (event_type & SUB_RETRY_RECV)
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005436 TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, conn->qc, qcc);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +02005437
5438 if (event_type & SUB_RETRY_SEND)
Amaury Denoyelle7aaeb5b2021-12-21 14:29:15 +01005439 TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, conn->qc, qcc);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +02005440
5441 return 0;
Frédéric Lécaille422a39c2021-03-03 17:28:34 +01005442}
5443
5444/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
5445 * The <es> pointer is not allowed to differ from the one passed to the
5446 * subscribe() call. It always returns zero.
5447 */
5448static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
5449{
5450 return conn_unsubscribe(conn, xprt_ctx, event_type, es);
5451}
5452
Amaury Denoyelle33ac3462022-01-18 16:44:34 +01005453/* Store in <xprt_ctx> the context attached to <conn>.
5454 * Always returns 0.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005455 */
5456static int qc_conn_init(struct connection *conn, void **xprt_ctx)
5457{
Amaury Denoyelle7ca7c842021-12-22 18:20:38 +01005458 struct quic_conn *qc = NULL;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005459
5460 TRACE_ENTER(QUIC_EV_CONN_NEW, conn);
5461
Amaury Denoyelle33ac3462022-01-18 16:44:34 +01005462 /* do not store the context if already set */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005463 if (*xprt_ctx)
5464 goto out;
5465
Amaury Denoyelle33ac3462022-01-18 16:44:34 +01005466 HA_ATOMIC_STORE(xprt_ctx, conn->qc->xprt_ctx);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005467
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005468 out:
Frédéric Lécaillefde2a982021-12-27 15:12:09 +01005469 TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005470
5471 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005472}
5473
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02005474/* Start the QUIC transport layer */
5475static int qc_xprt_start(struct connection *conn, void *ctx)
5476{
5477 struct quic_conn *qc;
Frédéric Lécaille1eaec332021-06-04 14:59:59 +02005478 struct ssl_sock_ctx *qctx = ctx;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02005479
5480 qc = conn->qc;
5481 if (!quic_conn_init_timer(qc)) {
Frédéric Lécaillefde2a982021-12-27 15:12:09 +01005482 TRACE_PROTO("Non initialized timer", QUIC_EV_CONN_LPKT, qc);
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02005483 return 0;
5484 }
5485
Amaury Denoyellecfa2d562022-01-19 16:01:05 +01005486 quic_mux_transport_params_update(qc->qcc);
5487 if (qcc_install_app_ops(qc->qcc, qc->app_ops)) {
5488 TRACE_PROTO("Cannot install app layer", QUIC_EV_CONN_LPKT, qc);
5489 return 0;
5490 }
5491
5492 /* mux-quic can now be considered ready. */
5493 qc->mux_state = QC_MUX_READY;
5494
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02005495 tasklet_wakeup(qctx->wait_event.tasklet);
5496 return 1;
5497}
5498
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005499/* transport-layer operations for QUIC connections. */
5500static struct xprt_ops ssl_quic = {
Amaury Denoyelle414cac52021-09-22 11:14:37 +02005501 .close = quic_close,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005502 .snd_buf = quic_conn_from_buf,
5503 .rcv_buf = quic_conn_to_buf,
Frédéric Lécaille422a39c2021-03-03 17:28:34 +01005504 .subscribe = quic_conn_subscribe,
5505 .unsubscribe = quic_conn_unsubscribe,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005506 .init = qc_conn_init,
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +02005507 .start = qc_xprt_start,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005508 .prepare_bind_conf = ssl_sock_prepare_bind_conf,
5509 .destroy_bind_conf = ssl_sock_destroy_bind_conf,
Amaury Denoyelle71e588c2021-11-12 11:23:29 +01005510 .get_alpn = ssl_sock_get_alpn,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005511 .name = "QUIC",
5512};
5513
5514__attribute__((constructor))
5515static void __quic_conn_init(void)
5516{
5517 ha_quic_meth = BIO_meth_new(0x666, "ha QUIC methods");
5518 xprt_register(XPRT_QUIC, &ssl_quic);
5519}
5520
5521__attribute__((destructor))
5522static void __quic_conn_deinit(void)
5523{
5524 BIO_meth_free(ha_quic_meth);
5525}
5526
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01005527/* Read all the QUIC packets found in <buf> with <owner>
5528 * as owner, calling <func> function for each of them.
Ilya Shipitsin1e9a6662021-01-05 22:10:46 +05005529 * Return the number of bytes read if succeeded, -1 if not.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005530 */
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01005531static ssize_t quic_dgram_read(struct buffer *buf, size_t len, void *owner,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005532 struct sockaddr_storage *saddr, qpkt_read_func *func)
5533{
5534 unsigned char *pos;
5535 const unsigned char *end;
5536 struct quic_dgram_ctx dgram_ctx = {
Amaury Denoyelleadb22762021-12-14 15:04:14 +01005537 .qc = NULL,
5538 .dcid.len = 0,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005539 .owner = owner,
5540 };
5541
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01005542 pos = (unsigned char *)b_head(buf);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005543 end = pos + len;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005544 do {
5545 int ret;
5546 struct quic_rx_packet *pkt;
5547
Willy Tarreaue4498932021-03-22 21:13:05 +01005548 pkt = pool_zalloc(pool_head_quic_rx_packet);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005549 if (!pkt)
5550 goto err;
5551
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005552 quic_rx_packet_refinc(pkt);
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01005553 ret = func(pos, end, pkt, &dgram_ctx, saddr);
5554 pos += pkt->len;
Frédéric Lécaille310d1bd2021-09-22 15:10:49 +02005555 quic_rx_packet_refdec(pkt);
Frédéric Lécaille01cfec72021-12-22 10:17:01 +01005556 if (ret == -1)
Frédéric Lécaille865b0782021-09-23 07:33:20 +02005557 /* If the packet length could not be found, we cannot continue. */
5558 break;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005559 } while (pos < end);
5560
5561 /* Increase the received bytes counter by the UDP datagram length
5562 * if this datagram could be associated with a connection.
5563 */
5564 if (dgram_ctx.qc)
5565 dgram_ctx.qc->rx.bytes += len;
5566
Amaury Denoyellece340fe2022-01-11 14:20:46 +01005567 return pos - (unsigned char *)b_head(buf);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005568
5569 err:
5570 return -1;
5571}
5572
Frédéric Lécaille324ecda2021-11-02 10:14:44 +01005573ssize_t quic_lstnr_dgram_read(struct buffer *buf, size_t len, void *owner,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005574 struct sockaddr_storage *saddr)
5575{
5576 return quic_dgram_read(buf, len, owner, saddr, qc_lstnr_pkt_rcv);
5577}
5578
Amaury Denoyelle118b2cb2021-11-25 16:05:16 +01005579/* Function to automatically activate QUIC traces on stdout.
5580 * Activated via the compilation flag -DENABLE_QUIC_STDOUT_TRACES.
5581 * Main use for now is in the docker image for QUIC interop testing.
5582 */
5583static void quic_init_stdout_traces(void)
5584{
5585#ifdef ENABLE_QUIC_STDOUT_TRACES
5586 trace_quic.sink = sink_find("stdout");
5587 trace_quic.level = TRACE_LEVEL_DEVELOPER;
Amaury Denoyelle118b2cb2021-11-25 16:05:16 +01005588 trace_quic.state = TRACE_STATE_RUNNING;
5589#endif
5590}
5591INITCALL0(STG_INIT, quic_init_stdout_traces);
5592
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01005593/*
5594 * Local variables:
5595 * c-indent-level: 8
5596 * c-basic-offset: 8
5597 * End:
5598 */