/*
 * QUIC socket management.
 *
 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE /* required for struct in6_pktinfo */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/connection.h>
#include <haproxy/dynbuf.h>
#include <haproxy/fd.h>
#include <haproxy/global-t.h>
#include <haproxy/list.h>
#include <haproxy/listener.h>
#include <haproxy/log.h>
#include <haproxy/pool.h>
#include <haproxy/protocol-t.h>
#include <haproxy/proto_quic.h>
#include <haproxy/proxy-t.h>
#include <haproxy/quic_conn.h>
#include <haproxy/quic_sock.h>
#include <haproxy/quic_tp-t.h>
#include <haproxy/session.h>
#include <haproxy/stats-t.h>
#include <haproxy/task.h>
#include <haproxy/trace.h>
#include <haproxy/tools.h>

#define TRACE_SOURCE &trace_quic

/* Retrieve a connection's source address. Returns -1 on failure. */
int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len)
{
	struct quic_conn *qc;

	if (!conn || !conn->handle.qc)
		return -1;

	qc = conn->handle.qc;
	if (conn_is_back(conn)) {
		/* no source address defined for outgoing connections for now */
		return -1;
	} else {
		/* front connection, return the peer's address */
		if (len > sizeof(qc->peer_addr))
			len = sizeof(qc->peer_addr);
		memcpy(addr, &qc->peer_addr, len);
		return 0;
	}
}

/* Retrieve a connection's destination address. Returns -1 on failure. */
int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len)
{
	struct quic_conn *qc;

	if (!conn || !conn->handle.qc)
		return -1;

	qc = conn->handle.qc;
	if (conn_is_back(conn)) {
		/* back connection, return the peer's address */
		if (len > sizeof(qc->peer_addr))
			len = sizeof(qc->peer_addr);
		memcpy(addr, &qc->peer_addr, len);
	} else {
		struct sockaddr_storage *from;

		/* Return listener address if IP_PKTINFO or friends are not
		 * supported by the socket.
		 */
		BUG_ON(!qc->li);
		from = is_addr(&qc->local_addr) ? &qc->local_addr :
		                                  &qc->li->rx.addr;
		if (len > sizeof(*from))
			len = sizeof(*from);
		memcpy(addr, from, len);
	}
	return 0;
}

/*
 * Inspired by session_accept_fd().
 * Instantiate a new connection (connection struct) to be attached to <qc>
 * QUIC connection of <l> listener.
 * Returns 1 if succeeded, 0 if not.
 */
static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
                             struct sockaddr_storage *saddr)
{
	struct connection *cli_conn;

	if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
		goto out;

	if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
		goto out_free_conn;

	cli_conn->flags |= CO_FL_FDLESS;
	qc->conn = cli_conn;
	cli_conn->handle.qc = qc;

	cli_conn->target = &l->obj_type;

	return 1;

 out_free_conn:
	qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return 0;
}

/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	return 1;
}

/* Accept an incoming connection from listener <l>, and return it, as well as
 * a CO_AC_* status code into <status> if not null. NULL is returned on error.
 * <l> must be a valid listener with a valid frontend.
 */
struct connection *quic_sock_accept_conn(struct listener *l, int *status)
{
	struct quic_conn *qc;
	struct li_per_thread *lthr = &l->per_thr[ti->ltid];

	qc = MT_LIST_POP(&lthr->quic_accept.conns, struct quic_conn *, accept_list);
	if (!qc || qc->flags & (QUIC_FL_CONN_CLOSING|QUIC_FL_CONN_DRAINING))
		goto done;

	if (!new_quic_cli_conn(qc, l, &qc->peer_addr))
		goto err;

 done:
	*status = CO_AC_DONE;
	return qc ? qc->conn : NULL;

 err:
	/* in case of error reinsert the element to process it later. */
	MT_LIST_INSERT(&lthr->quic_accept.conns, &qc->accept_list);

	*status = CO_AC_PAUSE;
	return NULL;
}

/* QUIC datagrams handler task. */
struct task *quic_lstnr_dghdlr(struct task *t, void *ctx, unsigned int state)
{
	struct quic_dghdlr *dghdlr = ctx;
	struct quic_dgram *dgram;
	int max_dgrams = global.tune.maxpollevents;
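	/* Handle at most <max_dgrams> datagrams per wakeup so that too much
	 * work is not done at once; if more remain, the tasklet re-schedules
	 * itself (see stop_here below).
	 */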

	TRACE_ENTER(QUIC_EV_CONN_LPKT);

	while ((dgram = MT_LIST_POP(&dghdlr->dgrams, typeof(dgram), handler_list))) {
		if (quic_dgram_parse(dgram, NULL, dgram->owner)) {
			/* TODO should we requeue the datagram ? */
			break;
		}

		if (--max_dgrams <= 0)
			goto stop_here;
	}

	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return t;

 stop_here:
	/* too much work done at once, come back here later */
	if (!MT_LIST_ISEMPTY(&dghdlr->dgrams))
		tasklet_wakeup((struct tasklet *)t);

	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return t;
}

/* Retrieve the DCID from the datagram found at <pos> position and deliver it
 * to the correct datagram handler.
 * Return 1 if a correct datagram could be found, 0 if not.
 */
static int quic_lstnr_dgram_dispatch(unsigned char *pos, size_t len, void *owner,
                                     struct sockaddr_storage *saddr,
                                     struct sockaddr_storage *daddr,
                                     struct quic_dgram *new_dgram, struct list *dgrams)
{
	struct quic_dgram *dgram;
	unsigned char *dcid;
	size_t dcid_len;
	int cid_tid;

	if (!len || !quic_get_dgram_dcid(pos, pos + len, &dcid, &dcid_len))
		goto err;

	dgram = new_dgram ? new_dgram : pool_alloc(pool_head_quic_dgram);
	if (!dgram)
		goto err;

	if ((cid_tid = quic_get_cid_tid(dcid, dcid_len, saddr, pos, len)) < 0) {
		/* Use the current thread if the CID is not found. If a client
		 * opens a connection with multiple packets, it is possible
		 * that several threads will deal with datagrams sharing the
		 * same CID. For this reason, the CID tree insertion will be
		 * conducted as an atomic operation and the datagram ultimately
		 * redispatched by the late thread.
		 */
		cid_tid = tid;
	}

	/* All the members must be initialized! */
	dgram->owner = owner;
	dgram->buf = pos;
	dgram->len = len;
	dgram->dcid = dcid;
	dgram->dcid_len = dcid_len;
	dgram->saddr = *saddr;
	dgram->daddr = *daddr;
	dgram->qc = NULL;

	/* Attach the datagram to its quic_receiver_buf and quic_dghdlrs. */
	LIST_APPEND(dgrams, &dgram->recv_list);
	MT_LIST_APPEND(&quic_dghdlrs[cid_tid].dgrams, &dgram->handler_list);

	/* typically quic_lstnr_dghdlr() */
	tasklet_wakeup(quic_dghdlrs[cid_tid].task);

	return 1;

 err:
	pool_free(pool_head_quic_dgram, new_dgram);
	return 0;
}

/* This function is responsible for removing unused datagrams attached in front
 * of <rbuf>. Each instance will be freed until a not yet consumed datagram is
 * found or the end of the list is hit. The last unused datagram found is not
 * freed and is instead returned so that the caller can reuse it if needed.
 *
 * Returns the last unused datagram or NULL if no occurrence was found.
 */
static struct quic_dgram *quic_rxbuf_purge_dgrams(struct quic_receiver_buf *rbuf)
{
	struct quic_dgram *cur, *prev = NULL;

	while (!LIST_ISEMPTY(&rbuf->dgram_list)) {
		cur = LIST_ELEM(rbuf->dgram_list.n, struct quic_dgram *, recv_list);

		/* Loop until a not yet consumed datagram is found. */
		if (HA_ATOMIC_LOAD(&cur->buf))
			break;

		/* Clear buffer of current unused datagram. */
		LIST_DELETE(&cur->recv_list);
		b_del(&rbuf->buf, cur->len);

		/* Free last found unused datagram. */
		pool_free(pool_head_quic_dgram, prev);
		prev = cur;
	}

	/* Return last unused datagram found. */
	return prev;
}

/* Receive data from datagram socket <fd>. Data are placed in <out> buffer of
 * length <len>.
 *
 * Datagram addresses will be returned via the next arguments. <from> will be
 * the peer address and <to> the reception one. Note that <to> can only be
 * retrieved if the socket supports IP_PKTINFO or affiliated options. If not,
 * <to> will be set as AF_UNSPEC. The caller must specify <dst_port> to ensure
 * that the <to> address is completely filled.
 *
 * Returns the value from the recvmsg syscall.
 */
static ssize_t quic_recv(int fd, void *out, size_t len,
                         struct sockaddr *from, socklen_t from_len,
                         struct sockaddr *to, socklen_t to_len,
                         uint16_t dst_port)
{
	union pktinfo {
#ifdef IP_PKTINFO
		struct in_pktinfo in;
#else /* !IP_PKTINFO */
		struct in_addr addr;
#endif
#ifdef IPV6_RECVPKTINFO
		struct in6_pktinfo in6;
#endif
	};
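	/* Control buffer sized via the union above so it can hold the largest
	 * of the supported ancillary messages (IP_PKTINFO / IP_RECVDSTADDR /
	 * IPV6_PKTINFO) used to retrieve the destination address.
	 */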
	char cdata[CMSG_SPACE(sizeof(union pktinfo))];
	struct msghdr msg;
	struct iovec vec;
	struct cmsghdr *cmsg;
	ssize_t ret;

	vec.iov_base = out;
	vec.iov_len = len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = from;
	msg.msg_namelen = from_len;
	msg.msg_iov = &vec;
	msg.msg_iovlen = 1;
	msg.msg_control = &cdata;
	msg.msg_controllen = sizeof(cdata);

	clear_addr((struct sockaddr_storage *)to);

	do {
		ret = recvmsg(fd, &msg, 0);
	} while (ret < 0 && errno == EINTR);

	/* TODO handle errno. On EAGAIN/EWOULDBLOCK use fd_cant_recv() if
	 * using dedicated connection socket.
	 */

	if (ret < 0)
		goto end;

	if (unlikely(port_is_restricted((struct sockaddr_storage *)from, HA_PROTO_QUIC))) {
		ret = -1;
		goto end;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		switch (cmsg->cmsg_level) {
		case IPPROTO_IP:
#if defined(IP_PKTINFO)
			if (cmsg->cmsg_type == IP_PKTINFO) {
				struct sockaddr_in *in = (struct sockaddr_in *)to;
				struct in_pktinfo *info = (struct in_pktinfo *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in)) {
					in->sin_family = AF_INET;
					in->sin_addr = info->ipi_addr;
					in->sin_port = dst_port;
				}
			}
#elif defined(IP_RECVDSTADDR)
			if (cmsg->cmsg_type == IP_RECVDSTADDR) {
				struct sockaddr_in *in = (struct sockaddr_in *)to;
				struct in_addr *info = (struct in_addr *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in)) {
					in->sin_family = AF_INET;
					in->sin_addr.s_addr = info->s_addr;
					in->sin_port = dst_port;
				}
			}
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
			break;

		case IPPROTO_IPV6:
#ifdef IPV6_RECVPKTINFO
			if (cmsg->cmsg_type == IPV6_PKTINFO) {
				struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)to;
				struct in6_pktinfo *info6 = (struct in6_pktinfo *)CMSG_DATA(cmsg);

				if (to_len >= sizeof(struct sockaddr_in6)) {
					in6->sin6_family = AF_INET6;
					memcpy(&in6->sin6_addr, &info6->ipi6_addr, sizeof(in6->sin6_addr));
					in6->sin6_port = dst_port;
				}
			}
#endif
			break;
		}
	}

 end:
	return ret;
}

/* Function called on a read event from a listening socket. It tries
 * to handle as many datagrams as possible.
 */
void quic_lstnr_sock_fd_iocb(int fd)
{
	ssize_t ret;
	struct quic_receiver_buf *rxbuf;
	struct buffer *buf;
	struct listener *l = objt_listener(fdtab[fd].owner);
	struct quic_transport_params *params;
	/* Source and destination addresses */
	struct sockaddr_storage saddr = {0}, daddr = {0};
	size_t max_sz, cspace;
	struct quic_dgram *new_dgram;
	unsigned char *dgram_buf;
	int max_dgrams;

	BUG_ON(!l);

	new_dgram = NULL;
	if (!l)
		return;

	if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd))
		return;

	rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);
	if (!rxbuf)
		goto out;

	buf = &rxbuf->buf;

	max_dgrams = global.tune.maxpollevents;
 start:
	/* Try to reuse an existing dgram. Note that there is always at
	 * least one datagram to pick, except the first time we enter
	 * this function for this <rxbuf> buffer.
	 */
	new_dgram = quic_rxbuf_purge_dgrams(rxbuf);

	params = &l->bind_conf->quic_params;
	max_sz = params->max_udp_payload_size;
	cspace = b_contig_space(buf);
	if (cspace < max_sz) {
		struct proxy *px = l->bind_conf->frontend;
		struct quic_counters *prx_counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, &quic_stats_module);
		struct quic_dgram *dgram;

		/* Do not mark <buf> as full, and do not try to consume it
		 * if the contiguous remaining space is not at the end.
		 */
		if (b_tail(buf) + cspace < b_wrap(buf)) {
			HA_ATOMIC_INC(&prx_counters->rxbuf_full);
			goto out;
		}

		/* Allocate a fake datagram, without data to locate
		 * the end of the RX buffer (required during purging).
		 */
		dgram = pool_alloc(pool_head_quic_dgram);
		if (!dgram)
			goto out;

		/* Initialize only the useful members of this fake datagram. */
		dgram->buf = NULL;
		dgram->len = cspace;
		/* Append this datagram only to the RX buffer list. It will
		 * not be treated by any datagram handler.
		 */
		LIST_APPEND(&rxbuf->dgram_list, &dgram->recv_list);

		/* Consume the remaining space */
		b_add(buf, cspace);
		if (b_contig_space(buf) < max_sz) {
			HA_ATOMIC_INC(&prx_counters->rxbuf_full);
			goto out;
		}
	}

	dgram_buf = (unsigned char *)b_tail(buf);
	ret = quic_recv(fd, dgram_buf, max_sz,
	                (struct sockaddr *)&saddr, sizeof(saddr),
	                (struct sockaddr *)&daddr, sizeof(daddr),
	                get_net_port(&l->rx.addr));
	if (ret <= 0)
		goto out;

	b_add(buf, ret);
	if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr, &daddr,
	                               new_dgram, &rxbuf->dgram_list)) {
		/* If wrong, consume this datagram */
		b_sub(buf, ret);
	}
	new_dgram = NULL;
	if (--max_dgrams > 0)
		goto start;
 out:
	pool_free(pool_head_quic_dgram, new_dgram);
	MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
}

/* FD-owned quic-conn socket callback. */
void quic_conn_sock_fd_iocb(int fd)
{
	struct quic_conn *qc = fdtab[fd].owner;

	TRACE_ENTER(QUIC_EV_CONN_RCV, qc);

	if (fd_send_active(fd) && fd_send_ready(fd)) {
		TRACE_DEVEL("send ready", QUIC_EV_CONN_RCV, qc);
		fd_stop_send(fd);
		tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
		qc_notify_send(qc);
	}

	if (fd_recv_ready(fd)) {
		tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
		fd_stop_recv(fd);
	}

	TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
}

/* Send a datagram stored into <buf> buffer with <sz> as size.
 * The caller must ensure there are at least <sz> bytes in this buffer.
 *
 * Returns the total bytes sent over the socket. 0 is returned if a transient
 * error is encountered which allows the send to be retried later. A negative
 * value is used for a fatal error which guarantees that all future send
 * operations for this connection will fail.
 *
 * TODO standardize this function for a generic UDP sendto wrapper. This can be
 * done by removing the <qc> arg and replacing it with address/port.
 */
int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t sz,
               int flags)
{
	ssize_t ret;

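	/* Three send paths are used below: send() on the connection's own
	 * socket when one is allocated, sendmsg() on the listener socket with
	 * the source address passed as ancillary data when the local address
	 * is known, and plain sendto() on the listener socket otherwise.
	 */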
	do {
		if (qc_test_fd(qc)) {
			if (!fd_send_ready(qc->fd))
				return 0;

			ret = send(qc->fd, b_peek(buf, b_head_ofs(buf)), sz,
			           MSG_DONTWAIT | MSG_NOSIGNAL);
		}
#if defined(IP_PKTINFO) || defined(IP_RECVDSTADDR) || defined(IPV6_RECVPKTINFO)
		else if (is_addr(&qc->local_addr)) {
			struct msghdr msg = { 0 };
			struct iovec vec;
			struct cmsghdr *cmsg;
#ifdef IP_PKTINFO
			struct in_pktinfo in;
#endif /* IP_PKTINFO */
#ifdef IPV6_RECVPKTINFO
			struct in6_pktinfo in6;
#endif /* IPV6_RECVPKTINFO */
			union {
#ifdef IP_PKTINFO
				char buf[CMSG_SPACE(sizeof(in))];
#endif /* IP_PKTINFO */
#ifdef IPV6_RECVPKTINFO
				char buf6[CMSG_SPACE(sizeof(in6))];
#endif /* IPV6_RECVPKTINFO */
				char bufaddr[CMSG_SPACE(sizeof(struct in_addr))];
				struct cmsghdr align;
			} u;

			vec.iov_base = b_peek(buf, b_head_ofs(buf));
			vec.iov_len = sz;
			msg.msg_name = &qc->peer_addr;
			msg.msg_namelen = get_addr_len(&qc->peer_addr);
			msg.msg_iov = &vec;
			msg.msg_iovlen = 1;

			switch (qc->local_addr.ss_family) {
			case AF_INET:
#if defined(IP_PKTINFO)
				memset(&in, 0, sizeof(in));
				memcpy(&in.ipi_spec_dst,
				       &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
				       sizeof(struct in_addr));

				msg.msg_control = u.buf;
				msg.msg_controllen = sizeof(u.buf);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IP;
				cmsg->cmsg_type = IP_PKTINFO;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
				memcpy(CMSG_DATA(cmsg), &in, sizeof(in));
#elif defined(IP_RECVDSTADDR)
				msg.msg_control = u.bufaddr;
				msg.msg_controllen = sizeof(u.bufaddr);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IP;
				cmsg->cmsg_type = IP_SENDSRCADDR;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
				memcpy(CMSG_DATA(cmsg),
				       &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
				       sizeof(struct in_addr));
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
				break;

			case AF_INET6:
#ifdef IPV6_RECVPKTINFO
				memset(&in6, 0, sizeof(in6));
				memcpy(&in6.ipi6_addr,
				       &((struct sockaddr_in6 *)&qc->local_addr)->sin6_addr,
				       sizeof(struct in6_addr));

				msg.msg_control = u.buf6;
				msg.msg_controllen = sizeof(u.buf6);

				cmsg = CMSG_FIRSTHDR(&msg);
				cmsg->cmsg_level = IPPROTO_IPV6;
				cmsg->cmsg_type = IPV6_PKTINFO;
				cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
				memcpy(CMSG_DATA(cmsg), &in6, sizeof(in6));
#endif /* IPV6_RECVPKTINFO */
				break;

			default:
				break;
			}

			ret = sendmsg(qc->li->rx.fd, &msg,
			              MSG_DONTWAIT|MSG_NOSIGNAL);
		}
#endif /* IP_PKTINFO || IP_RECVDSTADDR || IPV6_RECVPKTINFO */
		else {
			ret = sendto(qc->li->rx.fd, b_peek(buf, b_head_ofs(buf)), sz,
			             MSG_DONTWAIT|MSG_NOSIGNAL,
			             (struct sockaddr *)&qc->peer_addr,
			             get_addr_len(&qc->peer_addr));
		}
	} while (ret < 0 && errno == EINTR);

	if (ret < 0) {
		if (errno == EAGAIN || errno == EWOULDBLOCK ||
		    errno == ENOTCONN || errno == EINPROGRESS) {
			/* transient error */
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				qc->cntrs.socket_full++;
			else
				qc->cntrs.sendto_err++;

			if (qc_test_fd(qc)) {
				fd_want_send(qc->fd);
				fd_cant_send(qc->fd);
			}
			TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
			             "UDP send failure errno=%d (%s)", errno, strerror(errno));
			return 0;
		}
		else {
			/* unrecoverable error */
			qc->cntrs.sendto_err_unknown++;
			TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
			             "UDP send failure errno=%d (%s)", errno, strerror(errno));
			return -1;
		}
	}

	if (ret != sz)
		return 0;

	return ret;
}

/* Receive datagram on <qc> FD-owned socket.
 *
 * Returns the total number of bytes read or a negative value on error.
 */
int qc_rcv_buf(struct quic_conn *qc)
{
	struct sockaddr_storage saddr = {0}, daddr = {0};
	struct quic_transport_params *params;
	struct quic_dgram *new_dgram = NULL;
	struct buffer buf = BUF_NULL;
	size_t max_sz;
	unsigned char *dgram_buf;
	struct listener *l;
	ssize_t ret = 0;

	/* Do not call this if quic-conn FD is uninitialized. */
	BUG_ON(qc->fd < 0);

	TRACE_ENTER(QUIC_EV_CONN_RCV, qc);
	l = qc->li;

	params = &l->bind_conf->quic_params;
	max_sz = params->max_udp_payload_size;

	do {
		if (!b_alloc(&buf))
			break; /* TODO subscribe for memory again available. */

		b_reset(&buf);
		BUG_ON(b_contig_space(&buf) < max_sz);

		/* Allocate datagram on first loop or after requeuing. */
		if (!new_dgram && !(new_dgram = pool_alloc(pool_head_quic_dgram)))
			break; /* TODO subscribe for memory again available. */

		dgram_buf = (unsigned char *)b_tail(&buf);
		ret = quic_recv(qc->fd, dgram_buf, max_sz,
		                (struct sockaddr *)&saddr, sizeof(saddr),
		                (struct sockaddr *)&daddr, sizeof(daddr),
		                get_net_port(&qc->local_addr));
		if (ret <= 0) {
			/* Subscribe FD for future reception. */
			if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOTCONN)
				fd_want_recv(qc->fd);
			/* TODO handle other error codes as fatal on the connection. */
			break;
		}

		b_add(&buf, ret);

		new_dgram->buf = dgram_buf;
		new_dgram->len = ret;
		new_dgram->dcid_len = 0;
		new_dgram->dcid = NULL;
		new_dgram->saddr = saddr;
		new_dgram->daddr = daddr;
		new_dgram->qc = NULL; /* set later via quic_dgram_parse() */

		TRACE_DEVEL("read datagram", QUIC_EV_CONN_RCV, qc, new_dgram);

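		/* If the DCID cannot be parsed, drop this datagram; <new_dgram>
		 * stays allocated and is reused on the next loop iteration.
		 */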
		if (!quic_get_dgram_dcid(new_dgram->buf,
		                         new_dgram->buf + new_dgram->len,
		                         &new_dgram->dcid, &new_dgram->dcid_len)) {
			continue;
		}

		if (!qc_check_dcid(qc, new_dgram->dcid, new_dgram->dcid_len)) {
			/* Datagram received by mistake on the connection FD;
			 * dispatch it to its associated quic-conn.
			 *
			 * TODO count redispatched datagrams.
			 */
			struct quic_receiver_buf *rxbuf;
			struct quic_dgram *tmp_dgram;
			unsigned char *rxbuf_tail;
			size_t cspace;

			TRACE_STATE("datagram for other connection on quic-conn socket, requeue it", QUIC_EV_CONN_RCV, qc);

			rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);
			ALREADY_CHECKED(rxbuf);
			cspace = b_contig_space(&rxbuf->buf);

			tmp_dgram = quic_rxbuf_purge_dgrams(rxbuf);
			pool_free(pool_head_quic_dgram, tmp_dgram);

			/* Insert a fake datagram if space wraps to consume it. */
			if (cspace < new_dgram->len && b_space_wraps(&rxbuf->buf)) {
				struct quic_dgram *fake_dgram = pool_alloc(pool_head_quic_dgram);
				if (!fake_dgram) {
					/* TODO count lost datagrams */
					MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
					continue;
				}

				fake_dgram->buf = NULL;
				fake_dgram->len = cspace;
				LIST_APPEND(&rxbuf->dgram_list, &fake_dgram->recv_list);
				b_add(&rxbuf->buf, cspace);
			}

			/* Recheck contig space after fake datagram insert. */
			if (b_contig_space(&rxbuf->buf) < new_dgram->len) {
				/* TODO count lost datagrams */
				MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
				continue;
			}

			rxbuf_tail = (unsigned char *)b_tail(&rxbuf->buf);
			__b_putblk(&rxbuf->buf, (char *)dgram_buf, new_dgram->len);
			if (!quic_lstnr_dgram_dispatch(rxbuf_tail, ret, l, &saddr, &daddr,
			                               new_dgram, &rxbuf->dgram_list)) {
				/* TODO count lost datagrams. */
				b_sub(&buf, ret);
			}
			else {
				/* datagram must not be freed as it was requeued. */
				new_dgram = NULL;
			}

			MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
			continue;
		}

		quic_dgram_parse(new_dgram, qc, qc->li);
		/* A datagram must always be consumed after quic_dgram_parse(). */
		BUG_ON(new_dgram->buf);
	} while (ret > 0);

	pool_free(pool_head_quic_dgram, new_dgram);

	if (b_size(&buf)) {
		b_free(&buf);
		offer_buffers(NULL, 1);
	}

	TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
	return ret;
}

/* Allocate a socket file-descriptor specific to QUIC connection <qc>.
 * Endpoint addresses are specified by the two following arguments: <src> is
 * the local address and <dst> is the remote one.
 *
 * The allocated FD is stored into <qc> on success. On error, the socket is
 * left marked as uninitialized.
 */
void qc_alloc_fd(struct quic_conn *qc, const struct sockaddr_storage *src,
                 const struct sockaddr_storage *dst)
{
	struct proxy *p = qc->li->bind_conf->frontend;
	int fd = -1;
	int ret;

	/* Must not happen. */
	BUG_ON(src->ss_family != dst->ss_family);

	qc_init_fd(qc);

	fd = socket(src->ss_family, SOCK_DGRAM, 0);
	if (fd < 0)
		goto err;

	if (fd >= global.maxsock) {
		send_log(p, LOG_EMERG,
		         "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
		         p->id);
		goto err;
	}

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (ret < 0)
		goto err;

	switch (src->ss_family) {
	case AF_INET:
#if defined(IP_PKTINFO)
		ret = setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
#elif defined(IP_RECVDSTADDR)
		ret = setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one));
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
		break;
	case AF_INET6:
#ifdef IPV6_RECVPKTINFO
		ret = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one));
#endif
		break;
	}
	if (ret < 0)
		goto err;

	ret = bind(fd, (struct sockaddr *)src, get_addr_len(src));
	if (ret < 0)
		goto err;

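	/* connect() the UDP socket to the peer so that only datagrams from
	 * this 4-tuple are delivered to it and datagrams can be emitted
	 * without specifying a destination address.
	 */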
	ret = connect(fd, (struct sockaddr *)dst, get_addr_len(dst));
	if (ret < 0)
		goto err;

	qc->fd = fd;
	fd_set_nonblock(fd);
	fd_insert(fd, qc, quic_conn_sock_fd_iocb, tgid, ti->ltid_bit);
	fd_want_recv(fd);

	return;

 err:
	if (fd >= 0)
		close(fd);
}

/* Release the socket file-descriptor specific to QUIC connection <qc>. Set
 * <reinit> if the socket should be reinitialized after address migration.
 */
void qc_release_fd(struct quic_conn *qc, int reinit)
{
	if (qc_test_fd(qc)) {
		fd_delete(qc->fd);
		qc->fd = DEAD_FD_MAGIC;

		if (reinit)
			qc_init_fd(qc);
	}
}

/* Wrapper for fd_want_recv(). Safe even if the connection does not use its
 * own socket.
 */
void qc_want_recv(struct quic_conn *qc)
{
	if (qc_test_fd(qc))
		fd_want_recv(qc->fd);
}

Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100905/*********************** QUIC accept queue management ***********************/
906/* per-thread accept queues */
907struct quic_accept_queue *quic_accept_queues;
908
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100909/* Install <qc> on the queue ready to be accepted. The queue task is then woken
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100910 * up. If <qc> accept is already scheduled or done, nothing is done.
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100911 */
912void quic_accept_push_qc(struct quic_conn *qc)
913{
Amaury Denoyelle66947282023-04-13 11:48:38 +0200914 struct quic_accept_queue *queue = &quic_accept_queues[tid];
Willy Tarreau6a4d48b2023-04-21 10:46:45 +0200915 struct li_per_thread *lthr = &qc->li->per_thr[ti->ltid];
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100916
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100917 /* early return if accept is already in progress/done for this
918 * connection
919 */
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200920 if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100921 return;
922
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100923 BUG_ON(MT_LIST_INLIST(&qc->accept_list));
924
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200925 qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100926 /* 1. insert the listener in the accept queue
927 *
928 * Use TRY_APPEND as there is a possible race even with INLIST if
929 * multiple threads try to add the same listener instance from several
930 * quic_conn.
931 */
932 if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
933 MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));
934
935 /* 2. insert the quic_conn in the listener per-thread queue. */
936 MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);
937
938 /* 3. wake up the queue tasklet */
Amaury Denoyelle66947282023-04-13 11:48:38 +0200939 tasklet_wakeup(quic_accept_queues[tid].tasklet);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100940}
941
/* Tasklet handler to accept QUIC connections. Call listener_accept on every
 * listener instance registered in the accept queue.
 */
struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
{
	struct li_per_thread *lthr;
	struct mt_list *elt1, elt2;
	struct quic_accept_queue *queue = &quic_accept_queues[tid];

	mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
		listener_accept(lthr->li);
		if (!MT_LIST_ISEMPTY(&lthr->quic_accept.conns))
			tasklet_wakeup((struct tasklet*)t);
		else
			MT_LIST_DELETE_SAFE(elt1);
	}

	return NULL;
}

static int quic_alloc_accept_queues(void)
{
	int i;

	quic_accept_queues = calloc(global.nbthread,
	                            sizeof(*quic_accept_queues));
	if (!quic_accept_queues) {
		ha_alert("Failed to allocate the quic accept queues.\n");
		return 0;
	}

	for (i = 0; i < global.nbthread; ++i) {
		struct tasklet *task;
		if (!(task = tasklet_new())) {
			ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i);
			return 0;
		}

		tasklet_set_tid(task, i);
		task->process = quic_accept_run;
		quic_accept_queues[i].tasklet = task;

		MT_LIST_INIT(&quic_accept_queues[i].listeners);
	}

	return 1;
}
REGISTER_POST_CHECK(quic_alloc_accept_queues);

static int quic_deallocate_accept_queues(void)
{
	int i;

	if (quic_accept_queues) {
		for (i = 0; i < global.nbthread; ++i)
			tasklet_free(quic_accept_queues[i].tasklet);
		free(quic_accept_queues);
	}

	return 1;
}
REGISTER_POST_DEINIT(quic_deallocate_accept_queues);