/*
 * QUIC socket management.
 *
 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <errno.h>

#include <sys/socket.h>
#include <sys/types.h>

#include <haproxy/connection.h>
#include <haproxy/listener.h>
#include <haproxy/quic_sock.h>
#include <haproxy/session.h>
#include <haproxy/tools.h>
#include <haproxy/xprt_quic.h>

/* This function is called from the protocol layer accept() in order to
 * instantiate a new session on behalf of a given listener and frontend. It
 * returns a positive value upon success, 0 if the connection can be ignored,
 * or a negative value upon critical failure. The accepted connection is
 * closed if we return <= 0. If no handshake is needed, it immediately tries
 * to instantiate a new stream. The connection must already have been filled
 * with the incoming connection handle (a fd), a target (the listener) and a
 * source address.
 */
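/* Note: for QUIC the incoming connection handle is not a file descriptor but
 * the quic_conn itself: new_quic_cli_conn() below marks the connection with
 * CO_FL_FDLESS and stores the quic_conn into cli_conn->handle.qc.
 */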
int quic_session_accept(struct connection *cli_conn)
{
	struct listener *l = __objt_listener(cli_conn->target);
	struct proxy *p = l->bind_conf->frontend;
	struct session *sess;

	cli_conn->proxy_netns = l->rx.settings->netns;
	/* This flag is ordinarily set by conn_ctrl_init() which cannot
	 * be called for now.
	 */
	cli_conn->flags |= CO_FL_CTRL_READY;

	/* wait for a PROXY protocol header */
	if (l->options & LI_O_ACC_PROXY)
		cli_conn->flags |= CO_FL_ACCEPT_PROXY;

	/* wait for a NetScaler client IP insertion protocol header */
	if (l->options & LI_O_ACC_CIP)
		cli_conn->flags |= CO_FL_ACCEPT_CIP;

	/* Add the handshake pseudo-XPRT */
	if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) {
		if (xprt_add_hs(cli_conn) != 0)
			goto out_free_conn;
	}

	sess = session_new(p, l, &cli_conn->obj_type);
	if (!sess)
		goto out_free_conn;

	conn_set_owner(cli_conn, sess, NULL);

	if (conn_complete_session(cli_conn) < 0)
		goto out_free_sess;

	if (conn_xprt_start(cli_conn) >= 0)
		return 1;

 out_free_sess:
	/* prevent call to listener_release during session_free. It will be
	 * done below, for all errors. */
	sess->listener = NULL;
	session_free(sess);
 out_free_conn:
	cli_conn->handle.qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return -1;
}

/* Retrieve a connection's source address. Returns -1 on failure. */
int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len)
{
	struct quic_conn *qc;

	if (!conn || !conn->handle.qc)
		return -1;

	qc = conn->handle.qc;
	if (conn_is_back(conn)) {
		/* no source address defined for outgoing connections for now */
		return -1;
	} else {
		/* front connection, return the peer's address */
		if (len > sizeof(qc->peer_addr))
			len = sizeof(qc->peer_addr);
		memcpy(addr, &qc->peer_addr, len);
		return 0;
	}
}

/* Retrieve a connection's destination address. Returns -1 on failure. */
int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len)
{
	struct quic_conn *qc;

	if (!conn || !conn->handle.qc)
		return -1;

	qc = conn->handle.qc;
	if (conn_is_back(conn)) {
		/* back connection, return the peer's address */
		if (len > sizeof(qc->peer_addr))
			len = sizeof(qc->peer_addr);
		memcpy(addr, &qc->peer_addr, len);
	} else {
		/* FIXME: front connection, no local address for now, we'll
		 * return the listener's address instead.
		 */
		BUG_ON(!qc->li);

		if (len > sizeof(qc->li->rx.addr))
			len = sizeof(qc->li->rx.addr);
		memcpy(addr, &qc->li->rx.addr, len);
	}
	return 0;
}
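
/* Note on the two helpers above: <len> is clamped because callers may pass a
 * buffer smaller than a sockaddr_storage. They look like the QUIC protocol
 * layer's address retrieval callbacks (->get_src/->get_dst), but that wiring
 * is an assumption based on their naming; it is not visible in this file.
 */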

/*
 * Inspired by session_accept_fd().
 * Instantiate a new connection (connection struct) to be attached to the <qc>
 * QUIC connection of the <l> listener.
 * Returns 1 on success, 0 on failure.
 */
static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
                             struct sockaddr_storage *saddr)
{
	struct connection *cli_conn;

	if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
		goto out;

	if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
		goto out_free_conn;

	cli_conn->flags |= CO_FL_ADDR_FROM_SET | CO_FL_FDLESS;
	qc->conn = cli_conn;
	cli_conn->handle.qc = qc;

	cli_conn->target = &l->obj_type;

	/* We need the xprt context before accepting (->accept()) the connection:
	 * we may receive packets before this connection is accepted.
	 */
	if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0)
		goto out_free_conn;

	return 1;

 out_free_conn:
	qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return 0;
}

/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible.
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	return 1;
}

/* Accept an incoming connection from listener <l>, and return it, as well as
 * a CO_AC_* status code into <status> if not null. Null is returned on error.
 * <l> must be a valid listener with a valid frontend.
 */
struct connection *quic_sock_accept_conn(struct listener *l, int *status)
{
	struct quic_conn *qc;
	struct li_per_thread *lthr = &l->per_thr[tid];

	qc = MT_LIST_POP(&lthr->quic_accept.conns, struct quic_conn *, accept_list);
	if (!qc)
		goto done;

	if (!new_quic_cli_conn(qc, l, &qc->peer_addr))
		goto err;

 done:
	*status = CO_AC_DONE;
	return qc ? qc->conn : NULL;

 err:
	/* In case of error, reinsert the element to process it later. */
	MT_LIST_INSERT(&lthr->quic_accept.conns, &qc->accept_list);

	*status = CO_AC_PAUSE;
	return NULL;
}

/* Function called on a read event from a listening socket. It tries
 * to handle as many connections as possible.
 */
void quic_sock_fd_iocb(int fd)
{
	ssize_t ret;
	struct rxbuf *rxbuf;
	struct buffer *buf;
	struct listener *l = objt_listener(fdtab[fd].owner);
	struct quic_transport_params *params;
	/* Source address */
	struct sockaddr_storage saddr = {0};
	size_t max_sz, cspace;
	socklen_t saddrlen;
	struct quic_dgram *dgram, *dgramp, *new_dgram;
	unsigned char *dgram_buf;

	BUG_ON(!l);

	if (!l)
		return;

	if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd))
		return;

	rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), mt_list);
	if (!rxbuf)
		goto out;

	buf = &rxbuf->buf;

	new_dgram = NULL;
	/* Remove all consumed datagrams from this buffer */
	list_for_each_entry_safe(dgram, dgramp, &rxbuf->dgrams, list) {
		if (HA_ATOMIC_LOAD(&dgram->buf))
			break;

		LIST_DELETE(&dgram->list);
		b_del(buf, dgram->len);
		if (!new_dgram)
			new_dgram = dgram;
		else
			pool_free(pool_head_quic_dgram, dgram);
	}
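
	/* At this point (illustration, not a strict invariant check), <buf>
	 * only holds in-flight datagrams, i.e. those whose ->buf is still
	 * set, laid out contiguously from head to tail:
	 *
	 *    head                                          tail
	 *     |<-- dgram A -->|<-- dgram B -->| free space |
	 *
	 * New data is always appended at b_tail() so that each quic_dgram
	 * maps to a contiguous area of the buffer.
	 */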

	params = &l->bind_conf->quic_params;
	max_sz = params->max_udp_payload_size;
	cspace = b_contig_space(buf);
	if (cspace < max_sz) {
		struct quic_dgram *dgram;

		/* Allocate a fake datagram, without data, to mark
		 * the end of the RX buffer (required during purging).
		 */
		dgram = pool_zalloc(pool_head_quic_dgram);
		if (!dgram)
			goto out;

		dgram->len = cspace;
		LIST_APPEND(&rxbuf->dgrams, &dgram->list);
		/* Consume the remaining space */
		b_add(buf, cspace);
		if (b_contig_space(buf) < max_sz)
			goto out;

	}
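
	/* Sketch of the wrap-around above: when fewer than <max_sz> contiguous
	 * bytes remain before the end of the storage area, that hole is
	 * covered by the zero-data "fake" datagram so the purge loop can
	 * release it like any other, and b_tail() wraps back to the start:
	 *
	 *    |<-- in-flight dgrams -->|<-- fake (cspace) -->|
	 *    ^new tail after wrap                            ^end of storage
	 */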

	dgram_buf = (unsigned char *)b_tail(buf);
	saddrlen = sizeof saddr;
	do {
		ret = recvfrom(fd, dgram_buf, max_sz, 0,
		               (struct sockaddr *)&saddr, &saddrlen);
		if (ret < 0 && errno == EAGAIN) {
			fd_cant_recv(fd);
			goto out;
		}
	} while (ret < 0 && errno == EINTR);

	b_add(buf, ret);
	if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr,
	                               new_dgram, &rxbuf->dgrams)) {
		/* If the datagram could not be dispatched, consume it */
		b_del(buf, ret);
	}
 out:
	MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->mt_list);
}

/* TODO: standardize this function into a generic UDP sendto wrapper. This can
 * be done by removing the <qc> arg and replacing it with an address/port.
 */
size_t qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
                  int flags)
{
	ssize_t ret;
	size_t try, done;
	int send_flag;

	done = 0;
	/* send the largest possible block. For this we perform only one call
	 * to send() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once more.
	 */
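	/* Example (sketch): if the buffer has wrapped so that the pending data
	 * is split into a hunk at the physical end of the storage followed by
	 * a hunk at its beginning, b_contig_data() first returns the ending
	 * hunk, which is sent with MSG_MORE set, and the next loop iteration
	 * sends the remaining hunk.
	 */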
	while (count) {
		try = b_contig_data(buf, done);
		if (try > count)
			try = count;

		send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
		if (try < count || flags & CO_SFL_MSG_MORE)
			send_flag |= MSG_MORE;

		ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag,
		             (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
		if (ret > 0) {
			/* TODO remove partial sending support for UDP */
			count -= ret;
			done += ret;

			if (ret < try)
				break;
		}
		else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
			/* TODO: must be handled properly. Is this justified for UDP? */
			ABORT_NOW();
		}
		else if (errno != EINTR) {
			/* TODO: must be handled properly. Is this justified for UDP? */
			ABORT_NOW();
		}
	}

	if (done > 0) {
		/* we count the total bytes sent, and the send rate for 32-byte
		 * blocks. The reason for the latter is that freq_ctr are
		 * limited to 4GB and that's not enough per second.
		 */
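		/* e.g. (done + 16) / 32 rounds to the nearest 32-byte block:
		 * done = 47 counts as 1 block, done = 49 as 2.
		 */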
		_HA_ATOMIC_ADD(&global.out_bytes, done);
		update_freq_ctr(&global.out_32bps, (done + 16) / 32);
	}
	return done;
}


/*********************** QUIC accept queue management ***********************/
/* per-thread accept queues */
struct quic_accept_queue *quic_accept_queues;

/* Install <qc> on the queue ready to be accepted. The queue task is then woken
 * up. If the accept of <qc> is already scheduled or done, nothing is done.
 */
void quic_accept_push_qc(struct quic_conn *qc)
{
	struct quic_accept_queue *queue = &quic_accept_queues[qc->tid];
	struct li_per_thread *lthr = &qc->li->per_thr[qc->tid];

	/* early return if accept is already in progress/done for this
	 * connection
	 */
	if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
		return;

	BUG_ON(MT_LIST_INLIST(&qc->accept_list));

	qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
	/* 1. insert the listener in the accept queue
	 *
	 * Use TRY_APPEND as there is a possible race even with INLIST if
	 * multiple threads try to add the same listener instance from several
	 * quic_conns.
	 */
	if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
		MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));

	/* 2. insert the quic_conn in the listener per-thread queue. */
	MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);

	/* 3. wake up the queue tasklet */
	tasklet_wakeup(quic_accept_queues[qc->tid].tasklet);
}

/* Tasklet handler to accept QUIC connections. Call listener_accept on every
 * listener instance registered in the accept queue.
 */
static struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
{
	struct li_per_thread *lthr;
	struct mt_list *elt1, elt2;
	struct quic_accept_queue *queue = &quic_accept_queues[tid];

	mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
		listener_accept(lthr->li);
		MT_LIST_DELETE_SAFE(elt1);
	}

	return NULL;
}

static int quic_alloc_accept_queues(void)
{
	int i;

	quic_accept_queues = calloc(global.nbthread, sizeof(struct quic_accept_queue));
	if (!quic_accept_queues) {
		ha_alert("Failed to allocate the quic accept queues.\n");
		return 0;
	}

	for (i = 0; i < global.nbthread; ++i) {
		struct tasklet *task;
		if (!(task = tasklet_new())) {
			ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i);
			return 0;
		}

		tasklet_set_tid(task, i);
		task->process = quic_accept_run;
		quic_accept_queues[i].tasklet = task;

		MT_LIST_INIT(&quic_accept_queues[i].listeners);
	}

	return 1;
}
REGISTER_POST_CHECK(quic_alloc_accept_queues);
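/* Note: as their names suggest, REGISTER_POST_CHECK() should run the
 * allocator once at startup after the configuration has been checked, and
 * REGISTER_POST_DEINIT() below should release the queues at shutdown. These
 * registration macros are defined elsewhere, so this is an inference from
 * their naming rather than something visible in this file.
 */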

static int quic_deallocate_accept_queues(void)
{
	int i;

	if (quic_accept_queues) {
		for (i = 0; i < global.nbthread; ++i)
			tasklet_free(quic_accept_queues[i].tasklet);
		free(quic_accept_queues);
	}

	return 1;
}
REGISTER_POST_DEINIT(quic_deallocate_accept_queues);