blob: 55920d2f5207eba3dd48b0738fa2f7c10c393a63 [file] [log] [blame]
Frédéric Lécaille70da8892020-11-06 15:49:49 +01001/*
2 * QUIC socket management.
3 *
Willy Tarreau3dfb7da2022-03-02 22:33:39 +01004 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
Frédéric Lécaille70da8892020-11-06 15:49:49 +01005 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +020013#define _GNU_SOURCE /* required for struct in6_pktinfo */
Frédéric Lécaille70da8892020-11-06 15:49:49 +010014#include <errno.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020015#include <stdlib.h>
16#include <string.h>
Frédéric Lécaille70da8892020-11-06 15:49:49 +010017
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +020018#include <netinet/in.h>
Frédéric Lécaille70da8892020-11-06 15:49:49 +010019#include <sys/socket.h>
20#include <sys/types.h>
21
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020022#include <haproxy/api.h>
23#include <haproxy/buf.h>
Frédéric Lécaille70da8892020-11-06 15:49:49 +010024#include <haproxy/connection.h>
Amaury Denoyelle7c9fdd92022-11-16 11:01:02 +010025#include <haproxy/dynbuf.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020026#include <haproxy/fd.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020027#include <haproxy/global-t.h>
28#include <haproxy/list.h>
Frédéric Lécaille70da8892020-11-06 15:49:49 +010029#include <haproxy/listener.h>
Amaury Denoyelle40909df2022-10-24 17:08:43 +020030#include <haproxy/log.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020031#include <haproxy/pool.h>
Frédéric Lécaille6492e662022-05-17 17:23:16 +020032#include <haproxy/proto_quic.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020033#include <haproxy/proxy-t.h>
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020034#include <haproxy/quic_conn.h>
Amaury Denoyelle4d295042022-01-19 16:18:44 +010035#include <haproxy/quic_sock.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020036#include <haproxy/quic_tp-t.h>
Amaury Denoyelleeb01f592021-10-07 16:44:05 +020037#include <haproxy/session.h>
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020038#include <haproxy/stats-t.h>
39#include <haproxy/task.h>
Amaury Denoyelle8687b632022-09-27 14:22:09 +020040#include <haproxy/trace.h>
Amaury Denoyelle777969c2022-03-24 16:06:26 +010041#include <haproxy/tools.h>
Amaury Denoyelle5b414862022-10-24 17:40:37 +020042#include <haproxy/trace.h>
43
44#define TRACE_SOURCE &trace_quic
Frédéric Lécaille026a7922020-11-23 15:46:36 +010045
Amaury Denoyelle8687b632022-09-27 14:22:09 +020046#define TRACE_SOURCE &trace_quic
47
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020048/* Retrieve a connection's source address. Returns -1 on failure. */
49int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len)
50{
51 struct quic_conn *qc;
52
Willy Tarreau784b8682022-04-11 14:18:10 +020053 if (!conn || !conn->handle.qc)
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020054 return -1;
55
Willy Tarreau784b8682022-04-11 14:18:10 +020056 qc = conn->handle.qc;
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020057 if (conn_is_back(conn)) {
58 /* no source address defined for outgoing connections for now */
59 return -1;
60 } else {
61 /* front connection, return the peer's address */
62 if (len > sizeof(qc->peer_addr))
63 len = sizeof(qc->peer_addr);
64 memcpy(addr, &qc->peer_addr, len);
65 return 0;
66 }
67}
68
69/* Retrieve a connection's destination address. Returns -1 on failure. */
70int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len)
71{
72 struct quic_conn *qc;
73
Willy Tarreau784b8682022-04-11 14:18:10 +020074 if (!conn || !conn->handle.qc)
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020075 return -1;
76
Willy Tarreau784b8682022-04-11 14:18:10 +020077 qc = conn->handle.qc;
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020078 if (conn_is_back(conn)) {
79 /* back connection, return the peer's address */
80 if (len > sizeof(qc->peer_addr))
81 len = sizeof(qc->peer_addr);
82 memcpy(addr, &qc->peer_addr, len);
83 } else {
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +020084 struct sockaddr_storage *from;
85
86 /* Return listener address if IP_PKTINFO or friends are not
87 * supported by the socket.
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020088 */
89 BUG_ON(!qc->li);
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +020090 from = is_addr(&qc->local_addr) ? &qc->local_addr :
91 &qc->li->rx.addr;
92 if (len > sizeof(*from))
93 len = sizeof(*from);
94 memcpy(addr, from, len);
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020095 }
96 return 0;
97}
98
/*
 * Inspired from session_accept_fd().
 * Instantiate a new connection (connection struct) to be attached to <qc>
 * QUIC connection of <l> listener, with <saddr> as source address.
 * On success <qc->conn> points to the new connection and the connection's
 * handle refers back to <qc>. Returns 1 if succeeded, 0 if not.
 */
static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
                             struct sockaddr_storage *saddr)
{
	struct connection *cli_conn;

	/* Allocate the connection object tied to the listener. */
	if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
		goto out;

	/* Duplicate the source address into the connection. */
	if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
		goto out_free_conn;

	/* Mark the connection as not backed by its own file descriptor. */
	cli_conn->flags |= CO_FL_FDLESS;
	/* Cross-link connection and quic_conn. */
	qc->conn = cli_conn;
	cli_conn->handle.qc = qc;

	cli_conn->target = &l->obj_type;

	return 1;

 out_free_conn:
	/* Undo the cross-link before tearing the connection down.
	 * NOTE(review): qc->conn is reset here even though it is only set
	 * after this failure point on the success path — harmless, kept as-is.
	 */
	qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return 0;
}
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100133
/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible.
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	/* A QUIC receiver is always able to accept incoming connections;
	 * <rx> is not inspected.
	 */
	return 1;
}
141
142/* Accept an incoming connection from listener <l>, and return it, as well as
143 * a CO_AC_* status code into <status> if not null. Null is returned on error.
144 * <l> must be a valid listener with a valid frontend.
145 */
146struct connection *quic_sock_accept_conn(struct listener *l, int *status)
147{
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100148 struct quic_conn *qc;
Willy Tarreau6a4d48b2023-04-21 10:46:45 +0200149 struct li_per_thread *lthr = &l->per_thr[ti->ltid];
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100150
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100151 qc = MT_LIST_POP(&lthr->quic_accept.conns, struct quic_conn *, accept_list);
Amaury Denoyelle987812b2023-04-17 09:31:16 +0200152 if (!qc || qc->flags & (QUIC_FL_CONN_CLOSING|QUIC_FL_CONN_DRAINING))
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100153 goto done;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100154
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100155 if (!new_quic_cli_conn(qc, l, &qc->peer_addr))
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100156 goto err;
157
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100158 done:
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100159 *status = CO_AC_DONE;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100160 return qc ? qc->conn : NULL;
161
162 err:
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100163 /* in case of error reinsert the element to process it later. */
164 MT_LIST_INSERT(&lthr->quic_accept.conns, &qc->accept_list);
165
166 *status = CO_AC_PAUSE;
167 return NULL;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100168}
169
/* QUIC datagrams handler task. Pops datagrams from the handler's MT list and
 * parses them, bounded by a per-wakeup budget of tune.maxpollevents datagrams.
 * If the budget is exhausted while work remains, the tasklet re-arms itself.
 * Always returns <t>.
 */
struct task *quic_lstnr_dghdlr(struct task *t, void *ctx, unsigned int state)
{
	struct quic_dghdlr *dghdlr = ctx;
	struct quic_dgram *dgram;
	/* work budget for this wakeup */
	int max_dgrams = global.tune.maxpollevents;

	TRACE_ENTER(QUIC_EV_CONN_LPKT);

	while ((dgram = MT_LIST_POP(&dghdlr->dgrams, typeof(dgram), handler_list))) {
		/* Non-zero return means the datagram could not be processed. */
		if (quic_dgram_parse(dgram, NULL, dgram->owner)) {
			/* TODO should we requeue the datagram ? */
			break;
		}

		if (--max_dgrams <= 0)
			goto stop_here;
	}

	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return t;

 stop_here:
	/* too much work done at once, come back here later */
	if (!MT_LIST_ISEMPTY(&dghdlr->dgrams))
		tasklet_wakeup((struct tasklet *)t);

	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return t;
}
200
/* Retrieve the DCID from the datagram found at <pos> position and deliver it to the
 * correct datagram handler.
 * <owner> is the receiving listener, <saddr>/<daddr> the datagram source and
 * destination addresses. <new_dgram> may carry a reusable quic_dgram instance,
 * otherwise one is allocated; on failure <new_dgram> is freed.
 * The datagram is appended both to <dgrams> (the receiver buffer list) and to
 * the MT list of the handler owning the DCID's thread.
 * Return 1 if a correct datagram could be found, 0 if not.
 */
static int quic_lstnr_dgram_dispatch(unsigned char *pos, size_t len, void *owner,
                                     struct sockaddr_storage *saddr,
                                     struct sockaddr_storage *daddr,
                                     struct quic_dgram *new_dgram, struct list *dgrams)
{
	struct quic_dgram *dgram;
	unsigned char *dcid;
	size_t dcid_len;
	int cid_tid;

	/* An empty or DCID-less datagram cannot be dispatched. */
	if (!len || !quic_get_dgram_dcid(pos, pos + len, &dcid, &dcid_len))
		goto err;

	/* Reuse the caller-provided datagram if any, else allocate one. */
	dgram = new_dgram ? new_dgram : pool_alloc(pool_head_quic_dgram);
	if (!dgram)
		goto err;

	if ((cid_tid = quic_get_cid_tid(dcid, dcid_len, saddr, pos, len)) < 0) {
		/* Use the current thread if CID not found. If a client opens
		 * a connection with multiple packets, it is possible that
		 * several threads will deal with datagrams sharing the same
		 * CID. For this reason, the CID tree insertion will be
		 * conducted as an atomic operation and the datagram ultimately
		 * redispatched by the late thread.
		 */
		cid_tid = tid;
	}

	/* All the members must be initialized! */
	dgram->owner = owner;
	dgram->buf = pos;
	dgram->len = len;
	dgram->dcid = dcid;
	dgram->dcid_len = dcid_len;
	dgram->saddr = *saddr;
	dgram->daddr = *daddr;
	dgram->qc = NULL;

	/* Attach the datagram to its quic_receiver_buf and quic_dghdlrs. */
	LIST_APPEND(dgrams, &dgram->recv_list);
	MT_LIST_APPEND(&quic_dghdlrs[cid_tid].dgrams, &dgram->handler_list);

	/* typically quic_lstnr_dghdlr() */
	tasklet_wakeup(quic_dghdlrs[cid_tid].task);

	return 1;

 err:
	/* pool_free() is a no-op on NULL. */
	pool_free(pool_head_quic_dgram, new_dgram);
	return 0;
}
256
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200257/* This function is responsible to remove unused datagram attached in front of
258 * <buf>. Each instances will be freed until a not yet consumed datagram is
259 * found or end of the list is hit. The last unused datagram found is not freed
260 * and is instead returned so that the caller can reuse it if needed.
261 *
Ilya Shipitsin4a689da2022-10-29 09:34:32 +0500262 * Returns the last unused datagram or NULL if no occurrence found.
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200263 */
Frédéric Lécaille7d23e8d2023-04-24 15:49:36 +0200264static struct quic_dgram *quic_rxbuf_purge_dgrams(struct quic_receiver_buf *rbuf)
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200265{
266 struct quic_dgram *cur, *prev = NULL;
267
Frédéric Lécaille7d23e8d2023-04-24 15:49:36 +0200268 while (!LIST_ISEMPTY(&rbuf->dgram_list)) {
269 cur = LIST_ELEM(rbuf->dgram_list.n, struct quic_dgram *, recv_list);
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200270
271 /* Loop until a not yet consumed datagram is found. */
Amaury Denoyelle0b13e942022-10-25 11:38:21 +0200272 if (HA_ATOMIC_LOAD(&cur->buf))
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200273 break;
274
275 /* Clear buffer of current unused datagram. */
276 LIST_DELETE(&cur->recv_list);
Frédéric Lécaille7d23e8d2023-04-24 15:49:36 +0200277 b_del(&rbuf->buf, cur->len);
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200278
279 /* Free last found unused datagram. */
Tim Duesterhusc18e2442023-04-22 17:47:33 +0200280 pool_free(pool_head_quic_dgram, prev);
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200281 prev = cur;
282 }
283
284 /* Return last unused datagram found. */
285 return prev;
286}
287
/* Receive data from datagram socket <fd>. Data are placed in <out> buffer of
 * length <len>.
 *
 * Datagram addresses will be returned via the next arguments. <from> will be
 * the peer address and <to> the reception one. Note that <to> can only be
 * retrieved if the socket supports IP_PKTINFO or affiliated options. If not,
 * <to> will be set as AF_UNSPEC. The caller must specify <dst_port> to ensure
 * that <to> address is completely filled (the kernel does not report the
 * destination port in the ancillary data).
 *
 * Returns value from recvmsg syscall.
 */
static ssize_t quic_recv(int fd, void *out, size_t len,
                         struct sockaddr *from, socklen_t from_len,
                         struct sockaddr *to, socklen_t to_len,
                         uint16_t dst_port)
{
	/* Sized so that <cdata> can hold the largest supported ancillary
	 * payload among IP_PKTINFO/IP_RECVDSTADDR/IPV6_RECVPKTINFO.
	 */
	union pktinfo {
#ifdef IP_PKTINFO
		struct in_pktinfo in;
#else /* !IP_PKTINFO */
		struct in_addr addr;
#endif
#ifdef IPV6_RECVPKTINFO
		struct in6_pktinfo in6;
#endif
	};
	char cdata[CMSG_SPACE(sizeof(union pktinfo))];
	struct msghdr msg;
	struct iovec vec;
	struct cmsghdr *cmsg;
	ssize_t ret;

	vec.iov_base = out;
	vec.iov_len = len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = from;
	msg.msg_namelen = from_len;
	msg.msg_iov = &vec;
	msg.msg_iovlen = 1;
	msg.msg_control = &cdata;
	msg.msg_controllen = sizeof(cdata);

	/* Reset <to> to AF_UNSPEC; only overwritten below if the kernel
	 * reported the destination address via ancillary data.
	 */
	clear_addr((struct sockaddr_storage *)to);

	/* Restart on signal interruption. */
	do {
		ret = recvmsg(fd, &msg, 0);
	} while (ret < 0 && errno == EINTR);

	/* TODO handle errno. On EAGAIN/EWOULDBLOCK use fd_cant_recv() if
	 * using dedicated connection socket.
	 */

	if (ret < 0)
		goto end;

	/* Walk the control messages to extract the destination address. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		switch (cmsg->cmsg_level) {
		case IPPROTO_IP:
#if defined(IP_PKTINFO)
			if (cmsg->cmsg_type == IP_PKTINFO) {
				struct sockaddr_in *in = (struct sockaddr_in *)to;
				struct in_pktinfo *info = (struct in_pktinfo *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in)) {
					in->sin_family = AF_INET;
					in->sin_addr = info->ipi_addr;
					in->sin_port = dst_port;
				}
			}
#elif defined(IP_RECVDSTADDR)
			/* BSD variant: the payload is a bare struct in_addr. */
			if (cmsg->cmsg_type == IP_RECVDSTADDR) {
				struct sockaddr_in *in = (struct sockaddr_in *)to;
				struct in_addr *info = (struct in_addr *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in)) {
					in->sin_family = AF_INET;
					in->sin_addr.s_addr = info->s_addr;
					in->sin_port = dst_port;
				}
			}
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
			break;

		case IPPROTO_IPV6:
#ifdef IPV6_RECVPKTINFO
			if (cmsg->cmsg_type == IPV6_PKTINFO) {
				struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)to;
				struct in6_pktinfo *info6 = (struct in6_pktinfo *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in6)) {
					in6->sin6_family = AF_INET6;
					memcpy(&in6->sin6_addr, &info6->ipi6_addr, sizeof(in6->sin6_addr));
					in6->sin6_port = dst_port;
				}
			}
#endif
			break;
		}
	}

 end:
	return ret;
}
392
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100393/* Function called on a read event from a listening socket. It tries
394 * to handle as many connections as possible.
395 */
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200396void quic_lstnr_sock_fd_iocb(int fd)
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100397{
398 ssize_t ret;
Amaury Denoyelle1cba8d62022-10-06 15:16:22 +0200399 struct quic_receiver_buf *rxbuf;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100400 struct buffer *buf;
401 struct listener *l = objt_listener(fdtab[fd].owner);
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100402 struct quic_transport_params *params;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100403 /* Source address */
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +0200404 struct sockaddr_storage saddr = {0}, daddr = {0};
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100405 size_t max_sz, cspace;
Frédéric Lécaille2bed1f12022-06-23 21:05:05 +0200406 struct quic_dgram *new_dgram;
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100407 unsigned char *dgram_buf;
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200408 int max_dgrams;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100409
Tim Duesterhus16554242021-09-15 13:58:49 +0200410 BUG_ON(!l);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100411
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200412 new_dgram = NULL;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100413 if (!l)
414 return;
415
Willy Tarreauf5090652021-04-06 17:23:40 +0200416 if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd))
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100417 return;
418
Amaury Denoyelle1cba8d62022-10-06 15:16:22 +0200419 rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);
Amaury Denoyelleee72a432021-11-19 15:49:29 +0100420 if (!rxbuf)
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100421 goto out;
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100422
Amaury Denoyelleee72a432021-11-19 15:49:29 +0100423 buf = &rxbuf->buf;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100424
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200425 max_dgrams = global.tune.maxpollevents;
426 start:
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500427 /* Try to reuse an existing dgram. Note that there is always at
Frédéric Lécaille2bed1f12022-06-23 21:05:05 +0200428 * least one datagram to pick, except the first time we enter
429 * this function for this <rxbuf> buffer.
430 */
Amaury Denoyelle91b23052022-10-06 14:45:09 +0200431 new_dgram = quic_rxbuf_purge_dgrams(rxbuf);
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100432
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100433 params = &l->bind_conf->quic_params;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100434 max_sz = params->max_udp_payload_size;
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100435 cspace = b_contig_space(buf);
436 if (cspace < max_sz) {
Amaury Denoyelle735b44f2022-10-27 17:56:27 +0200437 struct proxy *px = l->bind_conf->frontend;
438 struct quic_counters *prx_counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, &quic_stats_module);
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100439 struct quic_dgram *dgram;
440
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200441 /* Do no mark <buf> as full, and do not try to consume it
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200442 * if the contiguous remaining space is not at the end
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200443 */
Amaury Denoyelle735b44f2022-10-27 17:56:27 +0200444 if (b_tail(buf) + cspace < b_wrap(buf)) {
445 HA_ATOMIC_INC(&prx_counters->rxbuf_full);
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200446 goto out;
Amaury Denoyelle735b44f2022-10-27 17:56:27 +0200447 }
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200448
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100449 /* Allocate a fake datagram, without data to locate
450 * the end of the RX buffer (required during purging).
451 */
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200452 dgram = pool_alloc(pool_head_quic_dgram);
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100453 if (!dgram)
454 goto out;
455
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200456 /* Initialize only the useful members of this fake datagram. */
457 dgram->buf = NULL;
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100458 dgram->len = cspace;
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200459 /* Append this datagram only to the RX buffer list. It will
460 * not be treated by any datagram handler.
461 */
Amaury Denoyelle1cba8d62022-10-06 15:16:22 +0200462 LIST_APPEND(&rxbuf->dgram_list, &dgram->recv_list);
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200463
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100464 /* Consume the remaining space */
465 b_add(buf, cspace);
Amaury Denoyelle735b44f2022-10-27 17:56:27 +0200466 if (b_contig_space(buf) < max_sz) {
467 HA_ATOMIC_INC(&prx_counters->rxbuf_full);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100468 goto out;
Amaury Denoyelle735b44f2022-10-27 17:56:27 +0200469 }
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100470 }
471
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100472 dgram_buf = (unsigned char *)b_tail(buf);
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +0200473 ret = quic_recv(fd, dgram_buf, max_sz,
474 (struct sockaddr *)&saddr, sizeof(saddr),
475 (struct sockaddr *)&daddr, sizeof(daddr),
476 get_net_port(&l->rx.addr));
477 if (ret <= 0)
478 goto out;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100479
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100480 b_add(buf, ret);
Amaury Denoyelle97ecc7a2022-09-23 17:15:58 +0200481 if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr, &daddr,
Amaury Denoyelle1cba8d62022-10-06 15:16:22 +0200482 new_dgram, &rxbuf->dgram_list)) {
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100483 /* If wrong, consume this datagram */
Amaury Denoyelle9875f022022-11-24 15:24:38 +0100484 b_sub(buf, ret);
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100485 }
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200486 new_dgram = NULL;
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200487 if (--max_dgrams > 0)
488 goto start;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100489 out:
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200490 pool_free(pool_head_quic_dgram, new_dgram);
Amaury Denoyelle1cba8d62022-10-06 15:16:22 +0200491 MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200492}
493
494/* FD-owned quic-conn socket callback. */
Willy Tarreau8f6da642023-03-10 12:04:02 +0100495void quic_conn_sock_fd_iocb(int fd)
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200496{
Amaury Denoyelle7c9fdd92022-11-16 11:01:02 +0100497 struct quic_conn *qc = fdtab[fd].owner;
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200498
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200499 TRACE_ENTER(QUIC_EV_CONN_RCV, qc);
500
Amaury Denoyellee1a0ee32023-02-28 15:11:09 +0100501 if (fd_send_active(fd) && fd_send_ready(fd)) {
502 TRACE_DEVEL("send ready", QUIC_EV_CONN_RCV, qc);
503 fd_stop_send(fd);
504 tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
Amaury Denoyellecaa16542023-02-28 15:11:26 +0100505 qc_notify_send(qc);
Amaury Denoyellee1a0ee32023-02-28 15:11:09 +0100506 }
507
508 if (fd_recv_ready(fd)) {
509 tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
510 fd_stop_recv(fd);
511 }
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200512
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200513 TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100514}
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100515
/* Send a datagram stored into <buf> buffer with <sz> as size.
 * The caller must ensure there is at least <sz> bytes in this buffer.
 *
 * Returns the total bytes sent over the socket. 0 is returned if a transient
 * error is encountered which allows the send to be retried later. A negative
 * value is used for a fatal error which guarantees that all future send
 * operations for this connection will fail.
 *
 * TODO standardize this function for a generic UDP sendto wrapper. This can be
 * done by removing the <qc> arg and replace it with address/port.
 */
int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t sz,
               int flags)
{
	ssize_t ret;

	/* Restart on signal interruption (EINTR). */
	do {
		/* Preferred path: the connection owns a dedicated socket. */
		if (qc_test_fd(qc)) {
			if (!fd_send_ready(qc->fd))
				return 0;

			ret = send(qc->fd, b_peek(buf, b_head_ofs(buf)), sz,
			           MSG_DONTWAIT | MSG_NOSIGNAL);
		}
#if defined(IP_PKTINFO) || defined(IP_RECVDSTADDR) || defined(IPV6_RECVPKTINFO)
		/* Shared listener socket with a known local address: use
		 * sendmsg() with ancillary data so the kernel emits the
		 * datagram from the correct source address.
		 */
		else if (is_addr(&qc->local_addr)) {
			struct msghdr msg = { 0 };
			struct iovec vec;
			struct cmsghdr *cmsg;
#ifdef IP_PKTINFO
			struct in_pktinfo in;
#endif /* IP_PKTINFO */
#ifdef IPV6_RECVPKTINFO
			struct in6_pktinfo in6;
#endif /* IPV6_RECVPKTINFO */
			/* Control buffer sized for whichever ancillary payload
			 * the platform supports; <align> enforces alignment.
			 */
			union {
#ifdef IP_PKTINFO
				char buf[CMSG_SPACE(sizeof(in))];
#endif /* IP_PKTINFO */
#ifdef IPV6_RECVPKTINFO
				char buf6[CMSG_SPACE(sizeof(in6))];
#endif /* IPV6_RECVPKTINFO */
				char bufaddr[CMSG_SPACE(sizeof(struct in_addr))];
				struct cmsghdr align;
			} u;

			vec.iov_base = b_peek(buf, b_head_ofs(buf));
			vec.iov_len = sz;
			msg.msg_name = &qc->peer_addr;
			msg.msg_namelen = get_addr_len(&qc->peer_addr);
			msg.msg_iov = &vec;
			msg.msg_iovlen = 1;

			switch (qc->local_addr.ss_family) {
			case AF_INET:
#if defined(IP_PKTINFO)
				memset(&in, 0, sizeof(in));
				memcpy(&in.ipi_spec_dst,
				       &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
				       sizeof(struct in_addr));

				msg.msg_control = u.buf;
				msg.msg_controllen = sizeof(u.buf);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IP;
				cmsg->cmsg_type = IP_PKTINFO;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
				memcpy(CMSG_DATA(cmsg), &in, sizeof(in));
#elif defined(IP_RECVDSTADDR)
				/* BSD variant: source address passed as a bare
				 * struct in_addr with IP_SENDSRCADDR.
				 */
				msg.msg_control = u.bufaddr;
				msg.msg_controllen = sizeof(u.bufaddr);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IP;
				cmsg->cmsg_type = IP_SENDSRCADDR;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
				memcpy(CMSG_DATA(cmsg),
				       &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
				       sizeof(struct in_addr));
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
				break;

			case AF_INET6:
#ifdef IPV6_RECVPKTINFO
				memset(&in6, 0, sizeof(in6));
				memcpy(&in6.ipi6_addr,
				       &((struct sockaddr_in6 *)&qc->local_addr)->sin6_addr,
				       sizeof(struct in6_addr));

				msg.msg_control = u.buf6;
				msg.msg_controllen = sizeof(u.buf6);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IPV6;
				cmsg->cmsg_type = IPV6_PKTINFO;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
				memcpy(CMSG_DATA(cmsg), &in6, sizeof(in6));
#endif /* IPV6_RECVPKTINFO */
				break;

			default:
				break;
			}

			ret = sendmsg(qc->li->rx.fd, &msg,
			              MSG_DONTWAIT|MSG_NOSIGNAL);
		}
#endif /* IP_PKTINFO || IP_RECVDSTADDR || IPV6_RECVPKTINFO */
		/* Fallback: plain sendto() on the listener socket. */
		else {
			ret = sendto(qc->li->rx.fd, b_peek(buf, b_head_ofs(buf)), sz,
			             MSG_DONTWAIT|MSG_NOSIGNAL,
			             (struct sockaddr *)&qc->peer_addr,
			             get_addr_len(&qc->peer_addr));
		}
	} while (ret < 0 && errno == EINTR);

	if (ret < 0) {
		struct proxy *prx = qc->li->bind_conf->frontend;
		struct quic_counters *prx_counters =
			EXTRA_COUNTERS_GET(prx->extra_counters_fe,
			                   &quic_stats_module);

		/* Classify the error: these errno values are retryable. */
		if (errno == EAGAIN || errno == EWOULDBLOCK ||
		    errno == ENOTCONN || errno == EINPROGRESS) {
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				HA_ATOMIC_INC(&prx_counters->socket_full);
			else
				HA_ATOMIC_INC(&prx_counters->sendto_err);

			/* transient error: subscribe for a send-ready event */
			fd_want_send(qc->fd);
			fd_cant_send(qc->fd);
			TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
			             "UDP send failure errno=%d (%s)", errno, strerror(errno));
			return 0;
		}
		else {
			/* unrecoverable error */
			HA_ATOMIC_INC(&prx_counters->sendto_err_unknown);
			TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
			             "UDP send failure errno=%d (%s)", errno, strerror(errno));
			return -1;
		}
	}

	/* A short write is reported as a transient error (retry later). */
	if (ret != sz)
		return 0;

	return ret;
}
667
/* Receive datagrams on <qc> FD-owned socket and parse them, looping as long
 * as reads succeed. Datagrams whose DCID does not match <qc> are requeued on
 * the listener receiver buffers so they can be dispatched to their own
 * connection.
 *
 * Returns the total number of bytes read or a negative value on error.
 */
int qc_rcv_buf(struct quic_conn *qc)
{
	struct sockaddr_storage saddr = {0}, daddr = {0};
	struct quic_transport_params *params;
	struct quic_dgram *new_dgram = NULL;
	struct buffer buf = BUF_NULL;
	size_t max_sz;
	unsigned char *dgram_buf;
	struct listener *l;
	ssize_t ret = 0;

	/* Do not call this if quic-conn FD is uninitialized. */
	BUG_ON(qc->fd < 0);

	TRACE_ENTER(QUIC_EV_CONN_RCV, qc);
	l = qc->li;

	/* A single datagram cannot exceed the advertised max UDP payload size. */
	params = &l->bind_conf->quic_params;
	max_sz = params->max_udp_payload_size;

	do {
		if (!b_alloc(&buf))
			break; /* TODO subscribe for memory again available. */

		/* The buffer is reused on each iteration, so always read into
		 * its (reset) head; it must be able to hold a full datagram.
		 */
		b_reset(&buf);
		BUG_ON(b_contig_space(&buf) < max_sz);

		/* Allocate datagram on first loop or after requeuing. */
		if (!new_dgram && !(new_dgram = pool_alloc(pool_head_quic_dgram)))
			break; /* TODO subscribe for memory again available. */

		dgram_buf = (unsigned char *)b_tail(&buf);
		ret = quic_recv(qc->fd, dgram_buf, max_sz,
		                (struct sockaddr *)&saddr, sizeof(saddr),
		                (struct sockaddr *)&daddr, sizeof(daddr),
		                get_net_port(&qc->local_addr));
		if (ret <= 0) {
			/* Subscribe FD for future reception. */
			fd_want_recv(qc->fd);
			break;
		}

		b_add(&buf, ret);

		/* Fill the datagram descriptor; DCID fields are set below by
		 * quic_get_dgram_dcid() when the header can be parsed.
		 */
		new_dgram->buf = dgram_buf;
		new_dgram->len = ret;
		new_dgram->dcid_len = 0;
		new_dgram->dcid = NULL;
		new_dgram->saddr = saddr;
		new_dgram->daddr = daddr;
		new_dgram->qc = NULL; /* set later via quic_dgram_parse() */

		TRACE_DEVEL("read datagram", QUIC_EV_CONN_RCV, qc, new_dgram);

		/* Unparseable header : drop the datagram and reuse <new_dgram>
		 * on the next iteration.
		 */
		if (!quic_get_dgram_dcid(new_dgram->buf,
		                         new_dgram->buf + new_dgram->len,
		                         &new_dgram->dcid, &new_dgram->dcid_len)) {
			continue;
		}

		if (!qc_check_dcid(qc, new_dgram->dcid, new_dgram->dcid_len)) {
			/* Datagram received in error on the connection FD, dispatch it
			 * to its associated quic-conn.
			 *
			 * TODO count redispatch datagrams.
			 */
			struct quic_receiver_buf *rxbuf;
			struct quic_dgram *tmp_dgram;
			unsigned char *rxbuf_tail;

			TRACE_STATE("datagram for other connection on quic-conn socket, requeue it", QUIC_EV_CONN_RCV, qc);

			/* NOTE(review): assumes rxbuf_list always has at least one
			 * entry here — confirm against listener init.
			 */
			rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);

			/* Reclaim space from already-consumed datagrams; returns NULL
			 * when nothing could be purged (pool_free(NULL) is a no-op).
			 */
			tmp_dgram = quic_rxbuf_purge_dgrams(rxbuf);
			pool_free(pool_head_quic_dgram, tmp_dgram);

			if (b_contig_space(&rxbuf->buf) < new_dgram->len) {
				/* TODO count lost datagrams */
				MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
				continue;
			}

			/* Copy the datagram payload into the receiver buffer before
			 * dispatching it to the owning connection.
			 */
			rxbuf_tail = (unsigned char *)b_tail(&rxbuf->buf);
			__b_putblk(&rxbuf->buf, (char *)dgram_buf, new_dgram->len);
			if (!quic_lstnr_dgram_dispatch(rxbuf_tail, ret, l, &qc->peer_addr, &daddr,
			                               new_dgram, &rxbuf->dgram_list)) {
				/* TODO count lost datagrams. */
				b_sub(&buf, ret);
			}
			else {
				/* datagram must not be freed as it was requeued. */
				new_dgram = NULL;
			}

			MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
			continue;
		}

		quic_dgram_parse(new_dgram, qc, qc->li);
		/* A datagram must always be consumed after quic_dgram_parse(). */
		BUG_ON(new_dgram->buf);
	} while (ret > 0);

	/* Free the spare datagram descriptor, if any (pool_free(NULL) is safe). */
	pool_free(pool_head_quic_dgram, new_dgram);

	if (b_size(&buf)) {
		b_free(&buf);
		offer_buffers(NULL, 1);
	}

	TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
	return ret;
}
786
/* Allocate a socket file-descriptor specific for QUIC connection <qc>.
 * Endpoint addresses are specified by the two following arguments : <src> is
 * the local address and <dst> is the remote one.
 *
 * On success <qc->fd> is set to the new FD which is registered in the fd
 * table. On error, nothing is returned and the socket is left marked as
 * uninitialized (see qc_init_fd()) so callers must test it via qc_test_fd().
 */
void qc_alloc_fd(struct quic_conn *qc, const struct sockaddr_storage *src,
                 const struct sockaddr_storage *dst)
{
	struct proxy *p = qc->li->bind_conf->frontend;
	int fd = -1;
	int ret;

	/* Must not happen. */
	BUG_ON(src->ss_family != dst->ss_family);

	qc_init_fd(qc);

	fd = socket(src->ss_family, SOCK_DGRAM, 0);
	if (fd < 0)
		goto err;

	if (fd >= global.maxsock) {
		send_log(p, LOG_EMERG,
		         "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
		         p->id);
		goto err;
	}

	/* Allow binding the same local address as the listener socket. */
	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (ret < 0)
		goto err;

	/* Request ancillary data carrying the destination address of each
	 * received datagram, as consumed by quic_recv()'s <daddr> output.
	 */
	switch (src->ss_family) {
	case AF_INET:
#if defined(IP_PKTINFO)
		ret = setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
#elif defined(IP_RECVDSTADDR)
		ret = setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one));
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
		break;
	case AF_INET6:
#ifdef IPV6_RECVPKTINFO
		ret = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one));
#endif
		break;
	}
	/* If no pktinfo option is available above, <ret> still holds the
	 * SO_REUSEADDR result, so this check stays valid.
	 */
	if (ret < 0)
		goto err;

	ret = bind(fd, (struct sockaddr *)src, get_addr_len(src));
	if (ret < 0)
		goto err;

	/* Connect the UDP socket so only datagrams from <dst> are delivered. */
	ret = connect(fd, (struct sockaddr *)dst, get_addr_len(dst));
	if (ret < 0)
		goto err;

	qc->fd = fd;
	fd_set_nonblock(fd);
	fd_insert(fd, qc, quic_conn_sock_fd_iocb, tgid, ti->ltid_bit);
	fd_want_recv(fd);

	return;

 err:
	if (fd >= 0)
		close(fd);
}
857
Amaury Denoyelled3083c92022-12-01 16:20:06 +0100858/* Release socket file-descriptor specific for QUIC connection <qc>. Set
859 * <reinit> if socket should be reinitialized after address migration.
860 */
861void qc_release_fd(struct quic_conn *qc, int reinit)
Amaury Denoyelle40909df2022-10-24 17:08:43 +0200862{
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200863 if (qc_test_fd(qc)) {
864 fd_delete(qc->fd);
Amaury Denoyelle40909df2022-10-24 17:08:43 +0200865 qc->fd = DEAD_FD_MAGIC;
Amaury Denoyelled3083c92022-12-01 16:20:06 +0100866
867 if (reinit)
868 qc_init_fd(qc);
Amaury Denoyelle5b414862022-10-24 17:40:37 +0200869 }
Amaury Denoyelle40909df2022-10-24 17:08:43 +0200870}
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100871
Amaury Denoyelle739de3f2023-04-11 14:42:31 +0200872/* Wrapper for fd_want_recv(). Safe even if connection does not used its owned
873 * socket.
874 */
875void qc_want_recv(struct quic_conn *qc)
876{
877 if (qc_test_fd(qc))
878 fd_want_recv(qc->fd);
879}
880
/*********************** QUIC accept queue management ***********************/
/* per-thread accept queues, one entry per thread (indexed by tid); allocated
 * at startup by quic_alloc_accept_queues()
 */
struct quic_accept_queue *quic_accept_queues;
884
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100885/* Install <qc> on the queue ready to be accepted. The queue task is then woken
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100886 * up. If <qc> accept is already scheduled or done, nothing is done.
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100887 */
888void quic_accept_push_qc(struct quic_conn *qc)
889{
Amaury Denoyelle66947282023-04-13 11:48:38 +0200890 struct quic_accept_queue *queue = &quic_accept_queues[tid];
Willy Tarreau6a4d48b2023-04-21 10:46:45 +0200891 struct li_per_thread *lthr = &qc->li->per_thr[ti->ltid];
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100892
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100893 /* early return if accept is already in progress/done for this
894 * connection
895 */
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200896 if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100897 return;
898
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100899 BUG_ON(MT_LIST_INLIST(&qc->accept_list));
900
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200901 qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100902 /* 1. insert the listener in the accept queue
903 *
904 * Use TRY_APPEND as there is a possible race even with INLIST if
905 * multiple threads try to add the same listener instance from several
906 * quic_conn.
907 */
908 if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
909 MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));
910
911 /* 2. insert the quic_conn in the listener per-thread queue. */
912 MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);
913
914 /* 3. wake up the queue tasklet */
Amaury Denoyelle66947282023-04-13 11:48:38 +0200915 tasklet_wakeup(quic_accept_queues[tid].tasklet);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100916}
917
/* Tasklet handler to accept QUIC connections. Call listener_accept on every
 * listener instances registered in the accept queue.
 *
 * Always returns NULL (the tasklet is not requeued through its return value;
 * it re-arms itself via tasklet_wakeup() when work remains).
 */
struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
{
	struct li_per_thread *lthr;
	struct mt_list *elt1, elt2;
	struct quic_accept_queue *queue = &quic_accept_queues[tid];

	mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
		listener_accept(lthr->li);
		/* If connections remain queued, listener_accept() presumably
		 * stopped before draining them (TODO confirm under which
		 * limits) : reschedule ourselves instead of unlinking. The
		 * listener is only removed once its per-thread queue is empty.
		 */
		if (!MT_LIST_ISEMPTY(&lthr->quic_accept.conns))
			tasklet_wakeup((struct tasklet*)t);
		else
			MT_LIST_DELETE_SAFE(elt1);
	}

	return NULL;
}
937
938static int quic_alloc_accept_queues(void)
939{
940 int i;
941
Tim Duesterhus9fb57e82022-06-01 21:58:37 +0200942 quic_accept_queues = calloc(global.nbthread,
943 sizeof(*quic_accept_queues));
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100944 if (!quic_accept_queues) {
945 ha_alert("Failed to allocate the quic accept queues.\n");
946 return 0;
947 }
948
949 for (i = 0; i < global.nbthread; ++i) {
950 struct tasklet *task;
951 if (!(task = tasklet_new())) {
952 ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i);
953 return 0;
954 }
955
956 tasklet_set_tid(task, i);
957 task->process = quic_accept_run;
958 quic_accept_queues[i].tasklet = task;
959
960 MT_LIST_INIT(&quic_accept_queues[i].listeners);
961 }
962
963 return 1;
964}
965REGISTER_POST_CHECK(quic_alloc_accept_queues);
966
967static int quic_deallocate_accept_queues(void)
968{
969 int i;
970
971 if (quic_accept_queues) {
972 for (i = 0; i < global.nbthread; ++i)
973 tasklet_free(quic_accept_queues[i].tasklet);
974 free(quic_accept_queues);
975 }
976
977 return 1;
978}
979REGISTER_POST_DEINIT(quic_deallocate_accept_queues);