blob: 8d197568710fa14a799d59ed878c9babf7283bf5 [file] [log] [blame]
Frédéric Lécaille70da8892020-11-06 15:49:49 +01001/*
2 * QUIC socket management.
3 *
Willy Tarreau3dfb7da2022-03-02 22:33:39 +01004 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
Frédéric Lécaille70da8892020-11-06 15:49:49 +01005 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <errno.h>
14
15#include <sys/socket.h>
16#include <sys/types.h>
17
18#include <haproxy/connection.h>
19#include <haproxy/listener.h>
Frédéric Lécaille6492e662022-05-17 17:23:16 +020020#include <haproxy/proto_quic.h>
Amaury Denoyelle4d295042022-01-19 16:18:44 +010021#include <haproxy/quic_sock.h>
Amaury Denoyelleeb01f592021-10-07 16:44:05 +020022#include <haproxy/session.h>
Amaury Denoyelle777969c2022-03-24 16:06:26 +010023#include <haproxy/tools.h>
Frédéric Lécaille026a7922020-11-23 15:46:36 +010024#include <haproxy/xprt_quic.h>
25
/* This function is called from the protocol layer accept() in order to
 * instantiate a new session on behalf of a given listener and frontend. It
 * returns a positive value upon success, 0 if the connection can be ignored,
 * or a negative value upon critical failure. The accepted connection is
 * closed if we return <= 0. If no handshake is needed, it immediately tries
 * to instantiate a new stream. The connection must already have been filled
 * with the incoming connection handle (a fd), a target (the listener) and a
 * source address.
 */
int quic_session_accept(struct connection *cli_conn)
{
	struct listener *l = __objt_listener(cli_conn->target);
	struct proxy *p = l->bind_conf->frontend;
	struct session *sess;

	/* inherit the listener's network namespace for this connection */
	cli_conn->proxy_netns = l->rx.settings->netns;
	/* This flag is ordinarily set by conn_ctrl_init() which cannot
	 * be called for now.
	 */
	cli_conn->flags |= CO_FL_CTRL_READY;

	/* wait for a PROXY protocol header */
	if (l->options & LI_O_ACC_PROXY)
		cli_conn->flags |= CO_FL_ACCEPT_PROXY;

	/* wait for a NetScaler client IP insertion protocol header */
	if (l->options & LI_O_ACC_CIP)
		cli_conn->flags |= CO_FL_ACCEPT_CIP;

	/* Add the handshake pseudo-XPRT */
	if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) {
		if (xprt_add_hs(cli_conn) != 0)
			goto out_free_conn;
	}

	sess = session_new(p, l, &cli_conn->obj_type);
	if (!sess)
		goto out_free_conn;

	/* the session now owns nothing yet; the connection keeps no task/tasklet */
	conn_set_owner(cli_conn, sess, NULL);

	if (conn_complete_session(cli_conn) < 0)
		goto out_free_sess;

	if (conn_xprt_start(cli_conn) < 0) {
		/* conn_complete_session has succeeded : conn is the owner of
		 * the session and the MUX is initialized.
		 * Let the MUX free all resources on error.
		 */
		cli_conn->mux->destroy(cli_conn->ctx);
		return -1;
	}

	return 1;

 out_free_sess:
	/* prevent call to listener_release during session_free. It will be
	 * done below, for all errors. */
	sess->listener = NULL;
	session_free(sess);
 out_free_conn:
	/* detach the quic_conn from the connection being released so it does
	 * not keep a dangling pointer to the freed <cli_conn>
	 */
	cli_conn->handle.qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return -1;
}
95
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +020096/* Retrieve a connection's source address. Returns -1 on failure. */
97int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len)
98{
99 struct quic_conn *qc;
100
Willy Tarreau784b8682022-04-11 14:18:10 +0200101 if (!conn || !conn->handle.qc)
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +0200102 return -1;
103
Willy Tarreau784b8682022-04-11 14:18:10 +0200104 qc = conn->handle.qc;
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +0200105 if (conn_is_back(conn)) {
106 /* no source address defined for outgoing connections for now */
107 return -1;
108 } else {
109 /* front connection, return the peer's address */
110 if (len > sizeof(qc->peer_addr))
111 len = sizeof(qc->peer_addr);
112 memcpy(addr, &qc->peer_addr, len);
113 return 0;
114 }
115}
116
117/* Retrieve a connection's destination address. Returns -1 on failure. */
118int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len)
119{
120 struct quic_conn *qc;
121
Willy Tarreau784b8682022-04-11 14:18:10 +0200122 if (!conn || !conn->handle.qc)
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +0200123 return -1;
124
Willy Tarreau784b8682022-04-11 14:18:10 +0200125 qc = conn->handle.qc;
Willy Tarreaucdf7c8e2022-04-11 16:20:00 +0200126 if (conn_is_back(conn)) {
127 /* back connection, return the peer's address */
128 if (len > sizeof(qc->peer_addr))
129 len = sizeof(qc->peer_addr);
130 memcpy(addr, &qc->peer_addr, len);
131 } else {
132 /* FIXME: front connection, no local address for now, we'll
133 * return the listener's address instead.
134 */
135 BUG_ON(!qc->li);
136
137 if (len > sizeof(qc->li->rx.addr))
138 len = sizeof(qc->li->rx.addr);
139 memcpy(addr, &qc->li->rx.addr, len);
140 }
141 return 0;
142}
143
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100144/*
145 * Inspired from session_accept_fd().
146 * Instantiate a new connection (connection struct) to be attached to <qc>
147 * QUIC connection of <l> listener.
148 * Returns 1 if succeeded, 0 if not.
149 */
150static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
151 struct sockaddr_storage *saddr)
152{
153 struct connection *cli_conn;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100154
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100155 if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
156 goto out;
157
Willy Tarreau9cc88c32022-04-08 14:34:31 +0200158 if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100159 goto out_free_conn;
160
Willy Tarreau030b3e62022-05-02 17:47:46 +0200161 cli_conn->flags |= CO_FL_FDLESS;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100162 qc->conn = cli_conn;
Willy Tarreau784b8682022-04-11 14:18:10 +0200163 cli_conn->handle.qc = qc;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100164
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100165 cli_conn->target = &l->obj_type;
166
Frédéric Lécaille01ab6612021-06-14 10:31:43 +0200167 /* We need the xprt context before accepting (->accept()) the connection:
168 * we may receive packet before this connection acception.
169 */
170 if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0)
171 goto out_free_conn;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100172
173 return 1;
174
175 out_free_conn:
Frédéric Lécaille01ab6612021-06-14 10:31:43 +0200176 qc->conn = NULL;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100177 conn_stop_tracking(cli_conn);
178 conn_xprt_close(cli_conn);
179 conn_free(cli_conn);
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100180 out:
181
182 return 0;
183}
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100184
/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	/* a QUIC receiver is always ready to accept connections */
	return 1;
}
192
193/* Accept an incoming connection from listener <l>, and return it, as well as
194 * a CO_AC_* status code into <status> if not null. Null is returned on error.
195 * <l> must be a valid listener with a valid frontend.
196 */
197struct connection *quic_sock_accept_conn(struct listener *l, int *status)
198{
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100199 struct quic_conn *qc;
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100200 struct li_per_thread *lthr = &l->per_thr[tid];
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100201
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100202 qc = MT_LIST_POP(&lthr->quic_accept.conns, struct quic_conn *, accept_list);
203 if (!qc)
204 goto done;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100205
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100206 if (!new_quic_cli_conn(qc, l, &qc->peer_addr))
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100207 goto err;
208
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100209 done:
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100210 *status = CO_AC_DONE;
Frédéric Lécaille026a7922020-11-23 15:46:36 +0100211 return qc ? qc->conn : NULL;
212
213 err:
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100214 /* in case of error reinsert the element to process it later. */
215 MT_LIST_INSERT(&lthr->quic_accept.conns, &qc->accept_list);
216
217 *status = CO_AC_PAUSE;
218 return NULL;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100219}
220
Frédéric Lécaille6492e662022-05-17 17:23:16 +0200221/* Retrieve the DCID from the datagram found in <buf> and deliver it to the
222 * correct datagram handler.
223 * Return 1 if a correct datagram could be found, 0 if not.
224 */
225static int quic_lstnr_dgram_dispatch(unsigned char *buf, size_t len, void *owner,
226 struct sockaddr_storage *saddr,
227 struct quic_dgram *new_dgram, struct list *dgrams)
228{
229 struct quic_dgram *dgram;
230 unsigned char *dcid;
231 size_t dcid_len;
232 int cid_tid;
233
234 if (!len || !quic_get_dgram_dcid(buf, buf + len, &dcid, &dcid_len))
235 goto err;
236
237 dgram = new_dgram ? new_dgram : pool_alloc(pool_head_quic_dgram);
238 if (!dgram)
239 goto err;
240
241 cid_tid = quic_get_cid_tid(dcid);
242
243 /* All the members must be initialized! */
244 dgram->owner = owner;
245 dgram->buf = buf;
246 dgram->len = len;
247 dgram->dcid = dcid;
248 dgram->dcid_len = dcid_len;
249 dgram->saddr = *saddr;
250 dgram->qc = NULL;
251 LIST_APPEND(dgrams, &dgram->list);
252 MT_LIST_APPEND(&quic_dghdlrs[cid_tid].dgrams, &dgram->mt_list);
253
Willy Tarreauf9d4a7d2022-08-05 08:45:56 +0200254 /* typically quic_lstnr_dghdlr() */
Frédéric Lécaille6492e662022-05-17 17:23:16 +0200255 tasklet_wakeup(quic_dghdlrs[cid_tid].task);
256
257 return 1;
258
259 err:
Frédéric Lécaillebfb077a2022-08-12 11:55:20 +0200260 pool_free(pool_head_quic_dgram, new_dgram);
Frédéric Lécaille6492e662022-05-17 17:23:16 +0200261 return 0;
262}
263
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100264/* Function called on a read event from a listening socket. It tries
265 * to handle as many connections as possible.
266 */
267void quic_sock_fd_iocb(int fd)
268{
269 ssize_t ret;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100270 struct rxbuf *rxbuf;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100271 struct buffer *buf;
272 struct listener *l = objt_listener(fdtab[fd].owner);
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100273 struct quic_transport_params *params;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100274 /* Source address */
275 struct sockaddr_storage saddr = {0};
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100276 size_t max_sz, cspace;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100277 socklen_t saddrlen;
Frédéric Lécaille2bed1f12022-06-23 21:05:05 +0200278 struct quic_dgram *new_dgram;
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100279 unsigned char *dgram_buf;
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200280 int max_dgrams;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100281
Tim Duesterhus16554242021-09-15 13:58:49 +0200282 BUG_ON(!l);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100283
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200284 new_dgram = NULL;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100285 if (!l)
286 return;
287
Willy Tarreauf5090652021-04-06 17:23:40 +0200288 if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd))
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100289 return;
290
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100291 rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), mt_list);
Amaury Denoyelleee72a432021-11-19 15:49:29 +0100292 if (!rxbuf)
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100293 goto out;
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100294
Amaury Denoyelleee72a432021-11-19 15:49:29 +0100295 buf = &rxbuf->buf;
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100296
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200297 max_dgrams = global.tune.maxpollevents;
298 start:
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500299 /* Try to reuse an existing dgram. Note that there is always at
Frédéric Lécaille2bed1f12022-06-23 21:05:05 +0200300 * least one datagram to pick, except the first time we enter
301 * this function for this <rxbuf> buffer.
302 */
303 if (!LIST_ISEMPTY(&rxbuf->dgrams)) {
304 struct quic_dgram *dg =
305 LIST_ELEM(rxbuf->dgrams.n, struct quic_dgram *, list);
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100306
Frédéric Lécaille2bed1f12022-06-23 21:05:05 +0200307 if (!dg->buf) {
308 LIST_DELETE(&dg->list);
309 b_del(buf, dg->len);
310 new_dgram = dg;
311 }
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100312 }
313
Frédéric Lécaillec4becf52021-11-08 11:23:17 +0100314 params = &l->bind_conf->quic_params;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100315 max_sz = params->max_udp_payload_size;
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100316 cspace = b_contig_space(buf);
317 if (cspace < max_sz) {
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100318 struct quic_dgram *dgram;
319
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200320 /* Do no mark <buf> as full, and do not try to consume it
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200321 * if the contiguous remaining space is not at the end
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200322 */
323 if (b_tail(buf) + cspace < b_wrap(buf))
324 goto out;
325
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100326 /* Allocate a fake datagram, without data to locate
327 * the end of the RX buffer (required during purging).
328 */
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200329 dgram = pool_alloc(pool_head_quic_dgram);
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100330 if (!dgram)
331 goto out;
332
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200333 /* Initialize only the useful members of this fake datagram. */
334 dgram->buf = NULL;
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100335 dgram->len = cspace;
Frédéric Lécailleba19acd2022-08-08 21:10:58 +0200336 /* Append this datagram only to the RX buffer list. It will
337 * not be treated by any datagram handler.
338 */
Frédéric Lécaille1712b1d2022-01-28 13:10:24 +0100339 LIST_APPEND(&rxbuf->dgrams, &dgram->list);
Frédéric Lécaille0c535682022-06-23 17:47:10 +0200340
Frédéric Lécaille320744b2022-01-27 12:19:28 +0100341 /* Consume the remaining space */
342 b_add(buf, cspace);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100343 if (b_contig_space(buf) < max_sz)
344 goto out;
345 }
346
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100347 dgram_buf = (unsigned char *)b_tail(buf);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100348 saddrlen = sizeof saddr;
349 do {
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100350 ret = recvfrom(fd, dgram_buf, max_sz, 0,
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100351 (struct sockaddr *)&saddr, &saddrlen);
Willy Tarreauacef5e22022-04-25 20:32:15 +0200352 if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
Frédéric Lécaille439c4642022-02-02 14:33:10 +0100353 fd_cant_recv(fd);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100354 goto out;
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100355 }
Frédéric Lécaille439c4642022-02-02 14:33:10 +0100356 } while (ret < 0 && errno == EINTR);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100357
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100358 b_add(buf, ret);
Frédéric Lécaillef6f75202022-02-02 09:44:22 +0100359 if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr,
360 new_dgram, &rxbuf->dgrams)) {
Frédéric Lécaille37ae5052022-01-27 11:31:50 +0100361 /* If wrong, consume this datagram */
362 b_del(buf, ret);
363 }
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200364 new_dgram = NULL;
Frédéric Lécaille1b0707f2022-06-30 11:28:56 +0200365 if (--max_dgrams > 0)
366 goto start;
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100367 out:
Frédéric Lécaille19ef6362022-06-23 18:00:37 +0200368 pool_free(pool_head_quic_dgram, new_dgram);
Frédéric Lécaille324ecda2021-11-02 10:14:44 +0100369 MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->mt_list);
Frédéric Lécaille70da8892020-11-06 15:49:49 +0100370}
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100371
Frédéric Lécaille48bb8752022-08-03 20:52:20 +0200372/* Send a datagram stored into <buf> buffer with <sz> as size.
373 * The caller must ensure there is at least <sz> bytes in this buffer.
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200374 *
375 * Returns 0 on success else non-zero.
376 *
Frédéric Lécaille48bb8752022-08-03 20:52:20 +0200377 * TODO standardize this function for a generic UDP sendto wrapper. This can be
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100378 * done by removing the <qc> arg and replace it with address/port.
379 */
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200380int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t sz,
381 int flags)
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100382{
383 ssize_t ret;
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100384
Frédéric Lécaille48bb8752022-08-03 20:52:20 +0200385 do {
386 ret = sendto(qc->li->rx.fd, b_peek(buf, b_head_ofs(buf)), sz,
387 MSG_DONTWAIT | MSG_NOSIGNAL,
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100388 (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
Frédéric Lécaille48bb8752022-08-03 20:52:20 +0200389 } while (ret < 0 && errno == EINTR);
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100390
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200391 if (ret < 0 || ret != sz) {
392 /* TODO adjust errno for UDP context. */
393 if (errno == EAGAIN || errno == EWOULDBLOCK ||
394 errno == ENOTCONN || errno == EINPROGRESS || errno == EBADF) {
395 struct proxy *prx = qc->li->bind_conf->frontend;
396 struct quic_counters *prx_counters =
397 EXTRA_COUNTERS_GET(prx->extra_counters_fe,
398 &quic_stats_module);
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100399
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200400 if (errno == EAGAIN || errno == EWOULDBLOCK)
401 HA_ATOMIC_INC(&prx_counters->socket_full);
402 else
403 HA_ATOMIC_INC(&prx_counters->sendto_err);
404 }
405 else if (errno) {
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500406 /* TODO unlisted errno : handle it explicitly. */
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200407 ABORT_NOW();
408 }
409
410 return 1;
Frédéric Lécaille48bb8752022-08-03 20:52:20 +0200411 }
412
Amaury Denoyelle6715cbf2022-08-05 11:56:36 +0200413 /* we count the total bytes sent, and the send rate for 32-byte blocks.
414 * The reason for the latter is that freq_ctr are limited to 4GB and
415 * that it's not enough per second.
416 */
417 _HA_ATOMIC_ADD(&global.out_bytes, ret);
418 update_freq_ctr(&global.out_32bps, (ret + 16) / 32);
419
420 return 0;
Amaury Denoyelle58a77042022-02-09 15:43:07 +0100421}
422
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100423
/*********************** QUIC accept queue management ***********************/
/* per-thread accept queues, one slot per thread; allocated at startup by
 * quic_alloc_accept_queues() and released by quic_deallocate_accept_queues()
 */
struct quic_accept_queue *quic_accept_queues;
427
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100428/* Install <qc> on the queue ready to be accepted. The queue task is then woken
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100429 * up. If <qc> accept is already scheduled or done, nothing is done.
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100430 */
431void quic_accept_push_qc(struct quic_conn *qc)
432{
433 struct quic_accept_queue *queue = &quic_accept_queues[qc->tid];
434 struct li_per_thread *lthr = &qc->li->per_thr[qc->tid];
435
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100436 /* early return if accept is already in progress/done for this
437 * connection
438 */
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200439 if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
Frédéric Lécaille91f083a2022-01-28 21:43:48 +0100440 return;
441
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100442 BUG_ON(MT_LIST_INLIST(&qc->accept_list));
443
Frédéric Lécaillefc790062022-03-28 17:10:31 +0200444 qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100445 /* 1. insert the listener in the accept queue
446 *
447 * Use TRY_APPEND as there is a possible race even with INLIST if
448 * multiple threads try to add the same listener instance from several
449 * quic_conn.
450 */
451 if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
452 MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));
453
454 /* 2. insert the quic_conn in the listener per-thread queue. */
455 MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);
456
457 /* 3. wake up the queue tasklet */
458 tasklet_wakeup(quic_accept_queues[qc->tid].tasklet);
459}
460
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100461/* Tasklet handler to accept QUIC connections. Call listener_accept on every
462 * listener instances registered in the accept queue.
463 */
464static struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
465{
466 struct li_per_thread *lthr;
467 struct mt_list *elt1, elt2;
468 struct quic_accept_queue *queue = &quic_accept_queues[tid];
469
470 mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
471 listener_accept(lthr->li);
472 MT_LIST_DELETE_SAFE(elt1);
473 }
474
475 return NULL;
476}
477
478static int quic_alloc_accept_queues(void)
479{
480 int i;
481
Tim Duesterhus9fb57e82022-06-01 21:58:37 +0200482 quic_accept_queues = calloc(global.nbthread,
483 sizeof(*quic_accept_queues));
Amaury Denoyelle2ce99fe2022-01-19 15:46:11 +0100484 if (!quic_accept_queues) {
485 ha_alert("Failed to allocate the quic accept queues.\n");
486 return 0;
487 }
488
489 for (i = 0; i < global.nbthread; ++i) {
490 struct tasklet *task;
491 if (!(task = tasklet_new())) {
492 ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i);
493 return 0;
494 }
495
496 tasklet_set_tid(task, i);
497 task->process = quic_accept_run;
498 quic_accept_queues[i].tasklet = task;
499
500 MT_LIST_INIT(&quic_accept_queues[i].listeners);
501 }
502
503 return 1;
504}
505REGISTER_POST_CHECK(quic_alloc_accept_queues);
506
507static int quic_deallocate_accept_queues(void)
508{
509 int i;
510
511 if (quic_accept_queues) {
512 for (i = 0; i < global.nbthread; ++i)
513 tasklet_free(quic_accept_queues[i].tasklet);
514 free(quic_accept_queues);
515 }
516
517 return 1;
518}
519REGISTER_POST_DEINIT(quic_deallocate_accept_queues);