Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 1 | /* |
| 2 | * QUIC socket management. |
| 3 | * |
Willy Tarreau | 3dfb7da | 2022-03-02 22:33:39 +0100 | [diff] [blame] | 4 | * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com> |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | */ |
| 12 | |
| 13 | #include <errno.h> |
| 14 | |
| 15 | #include <sys/socket.h> |
| 16 | #include <sys/types.h> |
| 17 | |
| 18 | #include <haproxy/connection.h> |
| 19 | #include <haproxy/listener.h> |
Frédéric Lécaille | 6492e66 | 2022-05-17 17:23:16 +0200 | [diff] [blame] | 20 | #include <haproxy/proto_quic.h> |
Amaury Denoyelle | 4d29504 | 2022-01-19 16:18:44 +0100 | [diff] [blame] | 21 | #include <haproxy/quic_sock.h> |
Amaury Denoyelle | eb01f59 | 2021-10-07 16:44:05 +0200 | [diff] [blame] | 22 | #include <haproxy/session.h> |
Amaury Denoyelle | 777969c | 2022-03-24 16:06:26 +0100 | [diff] [blame] | 23 | #include <haproxy/tools.h> |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 24 | #include <haproxy/xprt_quic.h> |
| 25 | |
| 26 | /* This function is called from the protocol layer accept() in order to |
| 27 | * instantiate a new session on behalf of a given listener and frontend. It |
| 28 | * returns a positive value upon success, 0 if the connection can be ignored, |
| 29 | * or a negative value upon critical failure. The accepted connection is |
| 30 | * closed if we return <= 0. If no handshake is needed, it immediately tries |
| 31 | * to instantiate a new stream. The connection must already have been filled |
| 32 | * with the incoming connection handle (a fd), a target (the listener) and a |
| 33 | * source address. |
| 34 | */ |
| 35 | int quic_session_accept(struct connection *cli_conn) |
| 36 | { |
| 37 | struct listener *l = __objt_listener(cli_conn->target); |
| 38 | struct proxy *p = l->bind_conf->frontend; |
| 39 | struct session *sess; |
| 40 | |
| 41 | cli_conn->proxy_netns = l->rx.settings->netns; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 42 | /* This flag is ordinarily set by conn_ctrl_init() which cannot |
| 43 | * be called for now. |
| 44 | */ |
| 45 | cli_conn->flags |= CO_FL_CTRL_READY; |
| 46 | |
| 47 | /* wait for a PROXY protocol header */ |
| 48 | if (l->options & LI_O_ACC_PROXY) |
| 49 | cli_conn->flags |= CO_FL_ACCEPT_PROXY; |
| 50 | |
| 51 | /* wait for a NetScaler client IP insertion protocol header */ |
| 52 | if (l->options & LI_O_ACC_CIP) |
| 53 | cli_conn->flags |= CO_FL_ACCEPT_CIP; |
| 54 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 55 | /* Add the handshake pseudo-XPRT */ |
| 56 | if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) { |
| 57 | if (xprt_add_hs(cli_conn) != 0) |
| 58 | goto out_free_conn; |
| 59 | } |
Olivier Houchard | 1b3c931 | 2021-03-05 23:37:48 +0100 | [diff] [blame] | 60 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 61 | sess = session_new(p, l, &cli_conn->obj_type); |
| 62 | if (!sess) |
| 63 | goto out_free_conn; |
| 64 | |
| 65 | conn_set_owner(cli_conn, sess, NULL); |
| 66 | |
Frédéric Lécaille | ecb5872 | 2021-05-27 17:12:36 +0200 | [diff] [blame] | 67 | if (conn_complete_session(cli_conn) < 0) |
| 68 | goto out_free_sess; |
| 69 | |
Amaury Denoyelle | 622ec41 | 2022-04-13 16:58:26 +0200 | [diff] [blame] | 70 | if (conn_xprt_start(cli_conn) < 0) { |
| 71 | /* conn_complete_session has succeeded : conn is the owner of |
| 72 | * the session and the MUX is initialized. |
| 73 | * Let the MUX free all resources on error. |
| 74 | */ |
| 75 | cli_conn->mux->destroy(cli_conn->ctx); |
| 76 | return -1; |
| 77 | } |
| 78 | |
| 79 | return 1; |
Frédéric Lécaille | 27faba7 | 2021-03-03 16:21:00 +0100 | [diff] [blame] | 80 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 81 | out_free_sess: |
| 82 | /* prevent call to listener_release during session_free. It will be |
| 83 | * done below, for all errors. */ |
| 84 | sess->listener = NULL; |
| 85 | session_free(sess); |
| 86 | out_free_conn: |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 87 | cli_conn->handle.qc->conn = NULL; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 88 | conn_stop_tracking(cli_conn); |
| 89 | conn_xprt_close(cli_conn); |
| 90 | conn_free(cli_conn); |
| 91 | out: |
| 92 | |
Frédéric Lécaille | e8139f3 | 2021-03-11 17:06:30 +0100 | [diff] [blame] | 93 | return -1; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 94 | } |
| 95 | |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 96 | /* Retrieve a connection's source address. Returns -1 on failure. */ |
| 97 | int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len) |
| 98 | { |
| 99 | struct quic_conn *qc; |
| 100 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 101 | if (!conn || !conn->handle.qc) |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 102 | return -1; |
| 103 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 104 | qc = conn->handle.qc; |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 105 | if (conn_is_back(conn)) { |
| 106 | /* no source address defined for outgoing connections for now */ |
| 107 | return -1; |
| 108 | } else { |
| 109 | /* front connection, return the peer's address */ |
| 110 | if (len > sizeof(qc->peer_addr)) |
| 111 | len = sizeof(qc->peer_addr); |
| 112 | memcpy(addr, &qc->peer_addr, len); |
| 113 | return 0; |
| 114 | } |
| 115 | } |
| 116 | |
| 117 | /* Retrieve a connection's destination address. Returns -1 on failure. */ |
| 118 | int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len) |
| 119 | { |
| 120 | struct quic_conn *qc; |
| 121 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 122 | if (!conn || !conn->handle.qc) |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 123 | return -1; |
| 124 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 125 | qc = conn->handle.qc; |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 126 | if (conn_is_back(conn)) { |
| 127 | /* back connection, return the peer's address */ |
| 128 | if (len > sizeof(qc->peer_addr)) |
| 129 | len = sizeof(qc->peer_addr); |
| 130 | memcpy(addr, &qc->peer_addr, len); |
| 131 | } else { |
| 132 | /* FIXME: front connection, no local address for now, we'll |
| 133 | * return the listener's address instead. |
| 134 | */ |
| 135 | BUG_ON(!qc->li); |
| 136 | |
| 137 | if (len > sizeof(qc->li->rx.addr)) |
| 138 | len = sizeof(qc->li->rx.addr); |
| 139 | memcpy(addr, &qc->li->rx.addr, len); |
| 140 | } |
| 141 | return 0; |
| 142 | } |
| 143 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 144 | /* |
| 145 | * Inspired from session_accept_fd(). |
| 146 | * Instantiate a new connection (connection struct) to be attached to <qc> |
| 147 | * QUIC connection of <l> listener. |
| 148 | * Returns 1 if succeeded, 0 if not. |
| 149 | */ |
| 150 | static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l, |
| 151 | struct sockaddr_storage *saddr) |
| 152 | { |
| 153 | struct connection *cli_conn; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 154 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 155 | if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL)) |
| 156 | goto out; |
| 157 | |
Willy Tarreau | 9cc88c3 | 2022-04-08 14:34:31 +0200 | [diff] [blame] | 158 | if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr)) |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 159 | goto out_free_conn; |
| 160 | |
Willy Tarreau | 030b3e6 | 2022-05-02 17:47:46 +0200 | [diff] [blame] | 161 | cli_conn->flags |= CO_FL_FDLESS; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 162 | qc->conn = cli_conn; |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 163 | cli_conn->handle.qc = qc; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 164 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 165 | cli_conn->target = &l->obj_type; |
| 166 | |
Frédéric Lécaille | 01ab661 | 2021-06-14 10:31:43 +0200 | [diff] [blame] | 167 | /* We need the xprt context before accepting (->accept()) the connection: |
| 168 | * we may receive packet before this connection acception. |
| 169 | */ |
| 170 | if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0) |
| 171 | goto out_free_conn; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 172 | |
| 173 | return 1; |
| 174 | |
| 175 | out_free_conn: |
Frédéric Lécaille | 01ab661 | 2021-06-14 10:31:43 +0200 | [diff] [blame] | 176 | qc->conn = NULL; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 177 | conn_stop_tracking(cli_conn); |
| 178 | conn_xprt_close(cli_conn); |
| 179 | conn_free(cli_conn); |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 180 | out: |
| 181 | |
| 182 | return 0; |
| 183 | } |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 184 | |
/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	/* a QUIC receiver is always able to accept */
	return 1;
}
| 192 | |
| 193 | /* Accept an incoming connection from listener <l>, and return it, as well as |
| 194 | * a CO_AC_* status code into <status> if not null. Null is returned on error. |
| 195 | * <l> must be a valid listener with a valid frontend. |
| 196 | */ |
| 197 | struct connection *quic_sock_accept_conn(struct listener *l, int *status) |
| 198 | { |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 199 | struct quic_conn *qc; |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 200 | struct li_per_thread *lthr = &l->per_thr[tid]; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 201 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 202 | qc = MT_LIST_POP(<hr->quic_accept.conns, struct quic_conn *, accept_list); |
| 203 | if (!qc) |
| 204 | goto done; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 205 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 206 | if (!new_quic_cli_conn(qc, l, &qc->peer_addr)) |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 207 | goto err; |
| 208 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 209 | done: |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 210 | *status = CO_AC_DONE; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 211 | return qc ? qc->conn : NULL; |
| 212 | |
| 213 | err: |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 214 | /* in case of error reinsert the element to process it later. */ |
| 215 | MT_LIST_INSERT(<hr->quic_accept.conns, &qc->accept_list); |
| 216 | |
| 217 | *status = CO_AC_PAUSE; |
| 218 | return NULL; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 219 | } |
| 220 | |
Frédéric Lécaille | 6492e66 | 2022-05-17 17:23:16 +0200 | [diff] [blame] | 221 | /* Retrieve the DCID from the datagram found in <buf> and deliver it to the |
| 222 | * correct datagram handler. |
| 223 | * Return 1 if a correct datagram could be found, 0 if not. |
| 224 | */ |
| 225 | static int quic_lstnr_dgram_dispatch(unsigned char *buf, size_t len, void *owner, |
| 226 | struct sockaddr_storage *saddr, |
| 227 | struct quic_dgram *new_dgram, struct list *dgrams) |
| 228 | { |
| 229 | struct quic_dgram *dgram; |
| 230 | unsigned char *dcid; |
| 231 | size_t dcid_len; |
| 232 | int cid_tid; |
| 233 | |
| 234 | if (!len || !quic_get_dgram_dcid(buf, buf + len, &dcid, &dcid_len)) |
| 235 | goto err; |
| 236 | |
| 237 | dgram = new_dgram ? new_dgram : pool_alloc(pool_head_quic_dgram); |
| 238 | if (!dgram) |
| 239 | goto err; |
| 240 | |
| 241 | cid_tid = quic_get_cid_tid(dcid); |
| 242 | |
| 243 | /* All the members must be initialized! */ |
| 244 | dgram->owner = owner; |
| 245 | dgram->buf = buf; |
| 246 | dgram->len = len; |
| 247 | dgram->dcid = dcid; |
| 248 | dgram->dcid_len = dcid_len; |
| 249 | dgram->saddr = *saddr; |
| 250 | dgram->qc = NULL; |
| 251 | LIST_APPEND(dgrams, &dgram->list); |
| 252 | MT_LIST_APPEND(&quic_dghdlrs[cid_tid].dgrams, &dgram->mt_list); |
| 253 | |
| 254 | tasklet_wakeup(quic_dghdlrs[cid_tid].task); |
| 255 | |
| 256 | return 1; |
| 257 | |
| 258 | err: |
| 259 | return 0; |
| 260 | } |
| 261 | |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 262 | /* Function called on a read event from a listening socket. It tries |
| 263 | * to handle as many connections as possible. |
| 264 | */ |
| 265 | void quic_sock_fd_iocb(int fd) |
| 266 | { |
| 267 | ssize_t ret; |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 268 | struct rxbuf *rxbuf; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 269 | struct buffer *buf; |
| 270 | struct listener *l = objt_listener(fdtab[fd].owner); |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 271 | struct quic_transport_params *params; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 272 | /* Source address */ |
| 273 | struct sockaddr_storage saddr = {0}; |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 274 | size_t max_sz, cspace; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 275 | socklen_t saddrlen; |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 276 | struct quic_dgram *dgram, *dgramp, *new_dgram; |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 277 | unsigned char *dgram_buf; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 278 | |
Tim Duesterhus | 1655424 | 2021-09-15 13:58:49 +0200 | [diff] [blame] | 279 | BUG_ON(!l); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 280 | |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 281 | if (!l) |
| 282 | return; |
| 283 | |
Willy Tarreau | f509065 | 2021-04-06 17:23:40 +0200 | [diff] [blame] | 284 | if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd)) |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 285 | return; |
| 286 | |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 287 | rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), mt_list); |
Amaury Denoyelle | ee72a43 | 2021-11-19 15:49:29 +0100 | [diff] [blame] | 288 | if (!rxbuf) |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 289 | goto out; |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 290 | |
Amaury Denoyelle | ee72a43 | 2021-11-19 15:49:29 +0100 | [diff] [blame] | 291 | buf = &rxbuf->buf; |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 292 | |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 293 | new_dgram = NULL; |
| 294 | /* Remove all consumed datagrams of this buffer */ |
| 295 | list_for_each_entry_safe(dgram, dgramp, &rxbuf->dgrams, list) { |
| 296 | if (HA_ATOMIC_LOAD(&dgram->buf)) |
| 297 | break; |
| 298 | |
| 299 | LIST_DELETE(&dgram->list); |
| 300 | b_del(buf, dgram->len); |
| 301 | if (!new_dgram) |
| 302 | new_dgram = dgram; |
| 303 | else |
| 304 | pool_free(pool_head_quic_dgram, dgram); |
| 305 | } |
| 306 | |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 307 | params = &l->bind_conf->quic_params; |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 308 | max_sz = params->max_udp_payload_size; |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 309 | cspace = b_contig_space(buf); |
| 310 | if (cspace < max_sz) { |
Frédéric Lécaille | 1712b1d | 2022-01-28 13:10:24 +0100 | [diff] [blame] | 311 | struct quic_dgram *dgram; |
| 312 | |
Frédéric Lécaille | 0c53568 | 2022-06-23 17:47:10 +0200 | [diff] [blame^] | 313 | /* Do no mark <buf> as full, and do not try to consume it |
| 314 | * if the contiguous remmaining space is not at the end |
| 315 | */ |
| 316 | if (b_tail(buf) + cspace < b_wrap(buf)) |
| 317 | goto out; |
| 318 | |
Frédéric Lécaille | 1712b1d | 2022-01-28 13:10:24 +0100 | [diff] [blame] | 319 | /* Allocate a fake datagram, without data to locate |
| 320 | * the end of the RX buffer (required during purging). |
| 321 | */ |
| 322 | dgram = pool_zalloc(pool_head_quic_dgram); |
| 323 | if (!dgram) |
| 324 | goto out; |
| 325 | |
| 326 | dgram->len = cspace; |
| 327 | LIST_APPEND(&rxbuf->dgrams, &dgram->list); |
Frédéric Lécaille | 0c53568 | 2022-06-23 17:47:10 +0200 | [diff] [blame^] | 328 | |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 329 | /* Consume the remaining space */ |
| 330 | b_add(buf, cspace); |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 331 | if (b_contig_space(buf) < max_sz) |
| 332 | goto out; |
| 333 | } |
| 334 | |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 335 | dgram_buf = (unsigned char *)b_tail(buf); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 336 | saddrlen = sizeof saddr; |
| 337 | do { |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 338 | ret = recvfrom(fd, dgram_buf, max_sz, 0, |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 339 | (struct sockaddr *)&saddr, &saddrlen); |
Willy Tarreau | acef5e2 | 2022-04-25 20:32:15 +0200 | [diff] [blame] | 340 | if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) { |
Frédéric Lécaille | 439c464 | 2022-02-02 14:33:10 +0100 | [diff] [blame] | 341 | fd_cant_recv(fd); |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 342 | goto out; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 343 | } |
Frédéric Lécaille | 439c464 | 2022-02-02 14:33:10 +0100 | [diff] [blame] | 344 | } while (ret < 0 && errno == EINTR); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 345 | |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 346 | b_add(buf, ret); |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 347 | if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr, |
| 348 | new_dgram, &rxbuf->dgrams)) { |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 349 | /* If wrong, consume this datagram */ |
| 350 | b_del(buf, ret); |
| 351 | } |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 352 | out: |
| 353 | MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->mt_list); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 354 | } |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 355 | |
Amaury Denoyelle | 58a7704 | 2022-02-09 15:43:07 +0100 | [diff] [blame] | 356 | /* TODO standardize this function for a generic UDP sendto wrapper. This can be |
| 357 | * done by removing the <qc> arg and replace it with address/port. |
| 358 | */ |
| 359 | size_t qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count, |
| 360 | int flags) |
| 361 | { |
| 362 | ssize_t ret; |
| 363 | size_t try, done; |
| 364 | int send_flag; |
| 365 | |
| 366 | done = 0; |
| 367 | /* send the largest possible block. For this we perform only one call |
| 368 | * to send() unless the buffer wraps and we exactly fill the first hunk, |
| 369 | * in which case we accept to do it once again. |
| 370 | */ |
| 371 | while (count) { |
| 372 | try = b_contig_data(buf, done); |
| 373 | if (try > count) |
| 374 | try = count; |
| 375 | |
| 376 | send_flag = MSG_DONTWAIT | MSG_NOSIGNAL; |
| 377 | if (try < count || flags & CO_SFL_MSG_MORE) |
| 378 | send_flag |= MSG_MORE; |
| 379 | |
| 380 | ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag, |
| 381 | (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr)); |
| 382 | if (ret > 0) { |
| 383 | /* TODO remove partial sending support for UDP */ |
| 384 | count -= ret; |
| 385 | done += ret; |
| 386 | |
| 387 | if (ret < try) |
| 388 | break; |
| 389 | } |
Amaury Denoyelle | ad5df38 | 2022-05-18 18:26:13 +0200 | [diff] [blame] | 390 | else if (errno == EINTR) { |
| 391 | /* try again */ |
| 392 | continue; |
| 393 | } |
Amaury Denoyelle | 3dde0d8 | 2022-05-19 11:53:56 +0200 | [diff] [blame] | 394 | else if (ret == 0 || errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOTCONN || errno == EINPROGRESS || errno == EBADF) { |
Amaury Denoyelle | 58a7704 | 2022-02-09 15:43:07 +0100 | [diff] [blame] | 395 | /* TODO must be handle properly. It is justified for UDP ? */ |
Frédéric Lécaille | 8726d63 | 2022-05-03 10:32:21 +0200 | [diff] [blame] | 396 | qc->sendto_err++; |
Amaury Denoyelle | 8fa6666 | 2022-05-18 18:14:12 +0200 | [diff] [blame] | 397 | break; |
Amaury Denoyelle | 58a7704 | 2022-02-09 15:43:07 +0100 | [diff] [blame] | 398 | } |
Amaury Denoyelle | ad5df38 | 2022-05-18 18:26:13 +0200 | [diff] [blame] | 399 | else if (errno) { |
| 400 | /* TODO unlisted errno : handle it explicitely. */ |
| 401 | ABORT_NOW(); |
Amaury Denoyelle | 58a7704 | 2022-02-09 15:43:07 +0100 | [diff] [blame] | 402 | } |
| 403 | } |
| 404 | |
| 405 | if (done > 0) { |
| 406 | /* we count the total bytes sent, and the send rate for 32-byte |
| 407 | * blocks. The reason for the latter is that freq_ctr are |
| 408 | * limited to 4GB and that it's not enough per second. |
| 409 | */ |
| 410 | _HA_ATOMIC_ADD(&global.out_bytes, done); |
| 411 | update_freq_ctr(&global.out_32bps, (done + 16) / 32); |
| 412 | } |
| 413 | return done; |
| 414 | } |
| 415 | |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 416 | |
| 417 | /*********************** QUIC accept queue management ***********************/ |
| 418 | /* per-thread accept queues */ |
| 419 | struct quic_accept_queue *quic_accept_queues; |
| 420 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 421 | /* Install <qc> on the queue ready to be accepted. The queue task is then woken |
Frédéric Lécaille | 91f083a | 2022-01-28 21:43:48 +0100 | [diff] [blame] | 422 | * up. If <qc> accept is already scheduled or done, nothing is done. |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 423 | */ |
| 424 | void quic_accept_push_qc(struct quic_conn *qc) |
| 425 | { |
| 426 | struct quic_accept_queue *queue = &quic_accept_queues[qc->tid]; |
| 427 | struct li_per_thread *lthr = &qc->li->per_thr[qc->tid]; |
| 428 | |
Frédéric Lécaille | 91f083a | 2022-01-28 21:43:48 +0100 | [diff] [blame] | 429 | /* early return if accept is already in progress/done for this |
| 430 | * connection |
| 431 | */ |
Frédéric Lécaille | fc79006 | 2022-03-28 17:10:31 +0200 | [diff] [blame] | 432 | if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED) |
Frédéric Lécaille | 91f083a | 2022-01-28 21:43:48 +0100 | [diff] [blame] | 433 | return; |
| 434 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 435 | BUG_ON(MT_LIST_INLIST(&qc->accept_list)); |
| 436 | |
Frédéric Lécaille | fc79006 | 2022-03-28 17:10:31 +0200 | [diff] [blame] | 437 | qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED; |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 438 | /* 1. insert the listener in the accept queue |
| 439 | * |
| 440 | * Use TRY_APPEND as there is a possible race even with INLIST if |
| 441 | * multiple threads try to add the same listener instance from several |
| 442 | * quic_conn. |
| 443 | */ |
| 444 | if (!MT_LIST_INLIST(&(lthr->quic_accept.list))) |
| 445 | MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list)); |
| 446 | |
| 447 | /* 2. insert the quic_conn in the listener per-thread queue. */ |
| 448 | MT_LIST_APPEND(<hr->quic_accept.conns, &qc->accept_list); |
| 449 | |
| 450 | /* 3. wake up the queue tasklet */ |
| 451 | tasklet_wakeup(quic_accept_queues[qc->tid].tasklet); |
| 452 | } |
| 453 | |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 454 | /* Tasklet handler to accept QUIC connections. Call listener_accept on every |
| 455 | * listener instances registered in the accept queue. |
| 456 | */ |
| 457 | static struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i) |
| 458 | { |
| 459 | struct li_per_thread *lthr; |
| 460 | struct mt_list *elt1, elt2; |
| 461 | struct quic_accept_queue *queue = &quic_accept_queues[tid]; |
| 462 | |
| 463 | mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) { |
| 464 | listener_accept(lthr->li); |
| 465 | MT_LIST_DELETE_SAFE(elt1); |
| 466 | } |
| 467 | |
| 468 | return NULL; |
| 469 | } |
| 470 | |
| 471 | static int quic_alloc_accept_queues(void) |
| 472 | { |
| 473 | int i; |
| 474 | |
Tim Duesterhus | 9fb57e8 | 2022-06-01 21:58:37 +0200 | [diff] [blame] | 475 | quic_accept_queues = calloc(global.nbthread, |
| 476 | sizeof(*quic_accept_queues)); |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 477 | if (!quic_accept_queues) { |
| 478 | ha_alert("Failed to allocate the quic accept queues.\n"); |
| 479 | return 0; |
| 480 | } |
| 481 | |
| 482 | for (i = 0; i < global.nbthread; ++i) { |
| 483 | struct tasklet *task; |
| 484 | if (!(task = tasklet_new())) { |
| 485 | ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i); |
| 486 | return 0; |
| 487 | } |
| 488 | |
| 489 | tasklet_set_tid(task, i); |
| 490 | task->process = quic_accept_run; |
| 491 | quic_accept_queues[i].tasklet = task; |
| 492 | |
| 493 | MT_LIST_INIT(&quic_accept_queues[i].listeners); |
| 494 | } |
| 495 | |
| 496 | return 1; |
| 497 | } |
| 498 | REGISTER_POST_CHECK(quic_alloc_accept_queues); |
| 499 | |
| 500 | static int quic_deallocate_accept_queues(void) |
| 501 | { |
| 502 | int i; |
| 503 | |
| 504 | if (quic_accept_queues) { |
| 505 | for (i = 0; i < global.nbthread; ++i) |
| 506 | tasklet_free(quic_accept_queues[i].tasklet); |
| 507 | free(quic_accept_queues); |
| 508 | } |
| 509 | |
| 510 | return 1; |
| 511 | } |
| 512 | REGISTER_POST_DEINIT(quic_deallocate_accept_queues); |