Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 1 | /* |
| 2 | * QUIC socket management. |
| 3 | * |
Willy Tarreau | 3dfb7da | 2022-03-02 22:33:39 +0100 | [diff] [blame] | 4 | * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com> |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | */ |
| 12 | |
| 13 | #include <errno.h> |
| 14 | |
| 15 | #include <sys/socket.h> |
| 16 | #include <sys/types.h> |
| 17 | |
| 18 | #include <haproxy/connection.h> |
| 19 | #include <haproxy/listener.h> |
Amaury Denoyelle | 4d29504 | 2022-01-19 16:18:44 +0100 | [diff] [blame] | 20 | #include <haproxy/quic_sock.h> |
Amaury Denoyelle | eb01f59 | 2021-10-07 16:44:05 +0200 | [diff] [blame] | 21 | #include <haproxy/session.h> |
Amaury Denoyelle | 777969c | 2022-03-24 16:06:26 +0100 | [diff] [blame] | 22 | #include <haproxy/tools.h> |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 23 | #include <haproxy/xprt_quic.h> |
| 24 | |
| 25 | /* This function is called from the protocol layer accept() in order to |
| 26 | * instantiate a new session on behalf of a given listener and frontend. It |
| 27 | * returns a positive value upon success, 0 if the connection can be ignored, |
| 28 | * or a negative value upon critical failure. The accepted connection is |
| 29 | * closed if we return <= 0. If no handshake is needed, it immediately tries |
| 30 | * to instantiate a new stream. The connection must already have been filled |
| 31 | * with the incoming connection handle (a fd), a target (the listener) and a |
| 32 | * source address. |
| 33 | */ |
| 34 | int quic_session_accept(struct connection *cli_conn) |
| 35 | { |
| 36 | struct listener *l = __objt_listener(cli_conn->target); |
| 37 | struct proxy *p = l->bind_conf->frontend; |
| 38 | struct session *sess; |
| 39 | |
| 40 | cli_conn->proxy_netns = l->rx.settings->netns; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 41 | /* This flag is ordinarily set by conn_ctrl_init() which cannot |
| 42 | * be called for now. |
| 43 | */ |
| 44 | cli_conn->flags |= CO_FL_CTRL_READY; |
| 45 | |
| 46 | /* wait for a PROXY protocol header */ |
| 47 | if (l->options & LI_O_ACC_PROXY) |
| 48 | cli_conn->flags |= CO_FL_ACCEPT_PROXY; |
| 49 | |
| 50 | /* wait for a NetScaler client IP insertion protocol header */ |
| 51 | if (l->options & LI_O_ACC_CIP) |
| 52 | cli_conn->flags |= CO_FL_ACCEPT_CIP; |
| 53 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 54 | /* Add the handshake pseudo-XPRT */ |
| 55 | if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) { |
| 56 | if (xprt_add_hs(cli_conn) != 0) |
| 57 | goto out_free_conn; |
| 58 | } |
Olivier Houchard | 1b3c931 | 2021-03-05 23:37:48 +0100 | [diff] [blame] | 59 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 60 | sess = session_new(p, l, &cli_conn->obj_type); |
| 61 | if (!sess) |
| 62 | goto out_free_conn; |
| 63 | |
| 64 | conn_set_owner(cli_conn, sess, NULL); |
| 65 | |
Frédéric Lécaille | ecb5872 | 2021-05-27 17:12:36 +0200 | [diff] [blame] | 66 | if (conn_complete_session(cli_conn) < 0) |
| 67 | goto out_free_sess; |
| 68 | |
| 69 | if (conn_xprt_start(cli_conn) >= 0) |
Frédéric Lécaille | 27faba7 | 2021-03-03 16:21:00 +0100 | [diff] [blame] | 70 | return 1; |
| 71 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 72 | out_free_sess: |
| 73 | /* prevent call to listener_release during session_free. It will be |
| 74 | * done below, for all errors. */ |
| 75 | sess->listener = NULL; |
| 76 | session_free(sess); |
| 77 | out_free_conn: |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 78 | cli_conn->handle.qc->conn = NULL; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 79 | conn_stop_tracking(cli_conn); |
| 80 | conn_xprt_close(cli_conn); |
| 81 | conn_free(cli_conn); |
| 82 | out: |
| 83 | |
Frédéric Lécaille | e8139f3 | 2021-03-11 17:06:30 +0100 | [diff] [blame] | 84 | return -1; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 85 | } |
| 86 | |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 87 | /* Retrieve a connection's source address. Returns -1 on failure. */ |
| 88 | int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len) |
| 89 | { |
| 90 | struct quic_conn *qc; |
| 91 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 92 | if (!conn || !conn->handle.qc) |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 93 | return -1; |
| 94 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 95 | qc = conn->handle.qc; |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 96 | if (conn_is_back(conn)) { |
| 97 | /* no source address defined for outgoing connections for now */ |
| 98 | return -1; |
| 99 | } else { |
| 100 | /* front connection, return the peer's address */ |
| 101 | if (len > sizeof(qc->peer_addr)) |
| 102 | len = sizeof(qc->peer_addr); |
| 103 | memcpy(addr, &qc->peer_addr, len); |
| 104 | return 0; |
| 105 | } |
| 106 | } |
| 107 | |
| 108 | /* Retrieve a connection's destination address. Returns -1 on failure. */ |
| 109 | int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len) |
| 110 | { |
| 111 | struct quic_conn *qc; |
| 112 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 113 | if (!conn || !conn->handle.qc) |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 114 | return -1; |
| 115 | |
Willy Tarreau | 784b868 | 2022-04-11 14:18:10 +0200 | [diff] [blame] | 116 | qc = conn->handle.qc; |
Willy Tarreau | cdf7c8e | 2022-04-11 16:20:00 +0200 | [diff] [blame] | 117 | if (conn_is_back(conn)) { |
| 118 | /* back connection, return the peer's address */ |
| 119 | if (len > sizeof(qc->peer_addr)) |
| 120 | len = sizeof(qc->peer_addr); |
| 121 | memcpy(addr, &qc->peer_addr, len); |
| 122 | } else { |
| 123 | /* FIXME: front connection, no local address for now, we'll |
| 124 | * return the listener's address instead. |
| 125 | */ |
| 126 | BUG_ON(!qc->li); |
| 127 | |
| 128 | if (len > sizeof(qc->li->rx.addr)) |
| 129 | len = sizeof(qc->li->rx.addr); |
| 130 | memcpy(addr, &qc->li->rx.addr, len); |
| 131 | } |
| 132 | return 0; |
| 133 | } |
| 134 | |
/*
 * Inspired from session_accept_fd().
 * Instantiate a new connection (connection struct) to be attached to <qc>
 * QUIC connection of <l> listener.
 * Returns 1 if succeeded, 0 if not.
 */
static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
                             struct sockaddr_storage *saddr)
{
	struct connection *cli_conn;

	if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
		goto out;

	/* duplicate the peer address into the connection's own storage */
	if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
		goto out_free_conn;

	/* CO_FL_FDLESS: a QUIC connection does not own a file descriptor of
	 * its own, datagrams flow through the shared listener socket.
	 */
	cli_conn->flags |= CO_FL_ADDR_FROM_SET | CO_FL_FDLESS;
	/* cross-link the quic_conn and the connection */
	qc->conn = cli_conn;
	cli_conn->handle.qc = qc;

	cli_conn->target = &l->obj_type;

	/* We need the xprt context before accepting (->accept()) the connection:
	 * we may receive packet before this connection acception.
	 */
	if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0)
		goto out_free_conn;

	return 1;

 out_free_conn:
	/* undo the qc <-> conn attachment (harmless no-op when the failure
	 * occurred before the link was established)
	 */
	qc->conn = NULL;
	conn_stop_tracking(cli_conn);
	conn_xprt_close(cli_conn);
	conn_free(cli_conn);
 out:

	return 0;
}
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 175 | |
/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible
 */
int quic_sock_accepting_conn(const struct receiver *rx)
{
	/* a QUIC receiver is always able to accept: connections are queued by
	 * the datagram handler, not by the kernel accept queue.
	 */
	return 1;
}
| 183 | |
| 184 | /* Accept an incoming connection from listener <l>, and return it, as well as |
| 185 | * a CO_AC_* status code into <status> if not null. Null is returned on error. |
| 186 | * <l> must be a valid listener with a valid frontend. |
| 187 | */ |
| 188 | struct connection *quic_sock_accept_conn(struct listener *l, int *status) |
| 189 | { |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 190 | struct quic_conn *qc; |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 191 | struct li_per_thread *lthr = &l->per_thr[tid]; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 192 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 193 | qc = MT_LIST_POP(<hr->quic_accept.conns, struct quic_conn *, accept_list); |
| 194 | if (!qc) |
| 195 | goto done; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 196 | |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 197 | if (!new_quic_cli_conn(qc, l, &qc->peer_addr)) |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 198 | goto err; |
| 199 | |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 200 | done: |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 201 | *status = CO_AC_DONE; |
Frédéric Lécaille | 026a792 | 2020-11-23 15:46:36 +0100 | [diff] [blame] | 202 | return qc ? qc->conn : NULL; |
| 203 | |
| 204 | err: |
Amaury Denoyelle | cfa2d56 | 2022-01-19 16:01:05 +0100 | [diff] [blame] | 205 | /* in case of error reinsert the element to process it later. */ |
| 206 | MT_LIST_INSERT(<hr->quic_accept.conns, &qc->accept_list); |
| 207 | |
| 208 | *status = CO_AC_PAUSE; |
| 209 | return NULL; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 210 | } |
| 211 | |
| 212 | /* Function called on a read event from a listening socket. It tries |
| 213 | * to handle as many connections as possible. |
| 214 | */ |
| 215 | void quic_sock_fd_iocb(int fd) |
| 216 | { |
| 217 | ssize_t ret; |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 218 | struct rxbuf *rxbuf; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 219 | struct buffer *buf; |
| 220 | struct listener *l = objt_listener(fdtab[fd].owner); |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 221 | struct quic_transport_params *params; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 222 | /* Source address */ |
| 223 | struct sockaddr_storage saddr = {0}; |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 224 | size_t max_sz, cspace; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 225 | socklen_t saddrlen; |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 226 | struct quic_dgram *dgram, *dgramp, *new_dgram; |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 227 | unsigned char *dgram_buf; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 228 | |
Tim Duesterhus | 1655424 | 2021-09-15 13:58:49 +0200 | [diff] [blame] | 229 | BUG_ON(!l); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 230 | |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 231 | if (!l) |
| 232 | return; |
| 233 | |
Willy Tarreau | f509065 | 2021-04-06 17:23:40 +0200 | [diff] [blame] | 234 | if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd)) |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 235 | return; |
| 236 | |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 237 | rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), mt_list); |
Amaury Denoyelle | ee72a43 | 2021-11-19 15:49:29 +0100 | [diff] [blame] | 238 | if (!rxbuf) |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 239 | goto out; |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 240 | |
Amaury Denoyelle | ee72a43 | 2021-11-19 15:49:29 +0100 | [diff] [blame] | 241 | buf = &rxbuf->buf; |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 242 | |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 243 | new_dgram = NULL; |
| 244 | /* Remove all consumed datagrams of this buffer */ |
| 245 | list_for_each_entry_safe(dgram, dgramp, &rxbuf->dgrams, list) { |
| 246 | if (HA_ATOMIC_LOAD(&dgram->buf)) |
| 247 | break; |
| 248 | |
| 249 | LIST_DELETE(&dgram->list); |
| 250 | b_del(buf, dgram->len); |
| 251 | if (!new_dgram) |
| 252 | new_dgram = dgram; |
| 253 | else |
| 254 | pool_free(pool_head_quic_dgram, dgram); |
| 255 | } |
| 256 | |
Frédéric Lécaille | c4becf5 | 2021-11-08 11:23:17 +0100 | [diff] [blame] | 257 | params = &l->bind_conf->quic_params; |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 258 | max_sz = params->max_udp_payload_size; |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 259 | cspace = b_contig_space(buf); |
| 260 | if (cspace < max_sz) { |
Frédéric Lécaille | 1712b1d | 2022-01-28 13:10:24 +0100 | [diff] [blame] | 261 | struct quic_dgram *dgram; |
| 262 | |
| 263 | /* Allocate a fake datagram, without data to locate |
| 264 | * the end of the RX buffer (required during purging). |
| 265 | */ |
| 266 | dgram = pool_zalloc(pool_head_quic_dgram); |
| 267 | if (!dgram) |
| 268 | goto out; |
| 269 | |
| 270 | dgram->len = cspace; |
| 271 | LIST_APPEND(&rxbuf->dgrams, &dgram->list); |
Frédéric Lécaille | 320744b | 2022-01-27 12:19:28 +0100 | [diff] [blame] | 272 | /* Consume the remaining space */ |
| 273 | b_add(buf, cspace); |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 274 | if (b_contig_space(buf) < max_sz) |
| 275 | goto out; |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 276 | |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 277 | } |
| 278 | |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 279 | dgram_buf = (unsigned char *)b_tail(buf); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 280 | saddrlen = sizeof saddr; |
| 281 | do { |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 282 | ret = recvfrom(fd, dgram_buf, max_sz, 0, |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 283 | (struct sockaddr *)&saddr, &saddrlen); |
Frédéric Lécaille | 439c464 | 2022-02-02 14:33:10 +0100 | [diff] [blame] | 284 | if (ret < 0 && errno == EAGAIN) { |
| 285 | fd_cant_recv(fd); |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 286 | goto out; |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 287 | } |
Frédéric Lécaille | 439c464 | 2022-02-02 14:33:10 +0100 | [diff] [blame] | 288 | } while (ret < 0 && errno == EINTR); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 289 | |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 290 | b_add(buf, ret); |
Frédéric Lécaille | f6f7520 | 2022-02-02 09:44:22 +0100 | [diff] [blame] | 291 | if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr, |
| 292 | new_dgram, &rxbuf->dgrams)) { |
Frédéric Lécaille | 37ae505 | 2022-01-27 11:31:50 +0100 | [diff] [blame] | 293 | /* If wrong, consume this datagram */ |
| 294 | b_del(buf, ret); |
| 295 | } |
Frédéric Lécaille | 324ecda | 2021-11-02 10:14:44 +0100 | [diff] [blame] | 296 | out: |
| 297 | MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->mt_list); |
Frédéric Lécaille | 70da889 | 2020-11-06 15:49:49 +0100 | [diff] [blame] | 298 | } |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 299 | |
/* Send up to <count> bytes from <buf> to the peer of QUIC connection <qc>
 * through the listener's UDP socket. Returns the number of bytes actually
 * sent. <flags> is a CO_SFL_* bitfield (only CO_SFL_MSG_MORE is examined).
 *
 * TODO standardize this function for a generic UDP sendto wrapper. This can be
 * done by removing the <qc> arg and replace it with address/port.
 */
size_t qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
                  int flags)
{
	ssize_t ret;
	size_t try, done;
	int send_flag;

	done = 0;
	/* send the largest possible block. For this we perform only one call
	 * to send() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again.
	 */
	while (count) {
		/* largest contiguous chunk available at offset <done> */
		try = b_contig_data(buf, done);
		if (try > count)
			try = count;

		send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
		/* more data follows (wrap or caller hint): let the kernel
		 * coalesce if it can
		 */
		if (try < count || flags & CO_SFL_MSG_MORE)
			send_flag |= MSG_MORE;

		ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag,
		             (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
		if (ret > 0) {
			/* TODO remove partial sending support for UDP */
			count -= ret;
			done += ret;

			/* short write: stop rather than loop */
			if (ret < try)
				break;
		}
		else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
			/* TODO must be handle properly. It is justified for UDP ? */
			ABORT_NOW();
		}
		else if (errno != EINTR) {
			/* TODO must be handle properly. It is justified for UDP ? */
			ABORT_NOW();
		}
		/* EINTR: retry the same chunk */
	}

	if (done > 0) {
		/* we count the total bytes sent, and the send rate for 32-byte
		 * blocks. The reason for the latter is that freq_ctr are
		 * limited to 4GB and that it's not enough per second.
		 */
		_HA_ATOMIC_ADD(&global.out_bytes, done);
		update_freq_ctr(&global.out_32bps, (done + 16) / 32);
	}
	return done;
}
| 354 | |
Amaury Denoyelle | 2ce99fe | 2022-01-19 15:46:11 +0100 | [diff] [blame] | 355 | |
| 356 | /*********************** QUIC accept queue management ***********************/ |
| 357 | /* per-thread accept queues */ |
| 358 | struct quic_accept_queue *quic_accept_queues; |
| 359 | |
/* Install <qc> on the queue ready to be accepted. The queue task is then woken
 * up. If <qc> accept is already scheduled or done, nothing is done.
 */
void quic_accept_push_qc(struct quic_conn *qc)
{
	/* per-thread accept queue and per-thread listener context, both
	 * selected with the connection's owning thread id
	 */
	struct quic_accept_queue *queue = &quic_accept_queues[qc->tid];
	struct li_per_thread *lthr = &qc->li->per_thr[qc->tid];

	/* early return if accept is already in progress/done for this
	 * connection
	 */
	if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
		return;

	BUG_ON(MT_LIST_INLIST(&qc->accept_list));

	/* mark the accept as scheduled BEFORE queuing so a concurrent call
	 * hits the early-return above
	 */
	qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
	/* 1. insert the listener in the accept queue
	 *
	 * Use TRY_APPEND as there is a possible race even with INLIST if
	 * multiple threads try to add the same listener instance from several
	 * quic_conn.
	 */
	if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
		MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));

	/* 2. insert the quic_conn in the listener per-thread queue. */
	MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);

	/* 3. wake up the queue tasklet */
	tasklet_wakeup(quic_accept_queues[qc->tid].tasklet);
}
| 392 | |
/* Tasklet handler to accept QUIC connections. Call listener_accept on every
 * listener instances registered in the accept queue.
 */
static struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
{
	struct li_per_thread *lthr;
	/* elt1/elt2 are the backup cells required by the deletion-safe
	 * mt_list iterator below
	 */
	struct mt_list *elt1, elt2;
	struct quic_accept_queue *queue = &quic_accept_queues[tid];

	mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
		listener_accept(lthr->li);
		/* unregister the listener; it will be re-queued by
		 * quic_accept_push_qc() when new connections arrive
		 */
		MT_LIST_DELETE_SAFE(elt1);
	}

	/* one-shot processing: the tasklet is not requeued */
	return NULL;
}
| 409 | |
| 410 | static int quic_alloc_accept_queues(void) |
| 411 | { |
| 412 | int i; |
| 413 | |
| 414 | quic_accept_queues = calloc(global.nbthread, sizeof(struct quic_accept_queue)); |
| 415 | if (!quic_accept_queues) { |
| 416 | ha_alert("Failed to allocate the quic accept queues.\n"); |
| 417 | return 0; |
| 418 | } |
| 419 | |
| 420 | for (i = 0; i < global.nbthread; ++i) { |
| 421 | struct tasklet *task; |
| 422 | if (!(task = tasklet_new())) { |
| 423 | ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i); |
| 424 | return 0; |
| 425 | } |
| 426 | |
| 427 | tasklet_set_tid(task, i); |
| 428 | task->process = quic_accept_run; |
| 429 | quic_accept_queues[i].tasklet = task; |
| 430 | |
| 431 | MT_LIST_INIT(&quic_accept_queues[i].listeners); |
| 432 | } |
| 433 | |
| 434 | return 1; |
| 435 | } |
| 436 | REGISTER_POST_CHECK(quic_alloc_accept_queues); |
| 437 | |
| 438 | static int quic_deallocate_accept_queues(void) |
| 439 | { |
| 440 | int i; |
| 441 | |
| 442 | if (quic_accept_queues) { |
| 443 | for (i = 0; i < global.nbthread; ++i) |
| 444 | tasklet_free(quic_accept_queues[i].tasklet); |
| 445 | free(quic_accept_queues); |
| 446 | } |
| 447 | |
| 448 | return 1; |
| 449 | } |
| 450 | REGISTER_POST_DEINIT(quic_deallocate_accept_queues); |