/*
 * RAW transport layer over SOCK_STREAM sockets.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <netinet/tcp.h>

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/connection.h>
#include <haproxy/errors.h>
#include <haproxy/fd.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/global.h>
#include <haproxy/pipe.h>
#include <haproxy/stream_interface.h>
#include <haproxy/tools.h>


#if defined(USE_LINUX_SPLICE)

/* A pipe contains 16 segments max, and it's common to see segments of 1448 bytes
 * because of timestamps. Use this as a hint to avoid looping on splice().
 */
#define SPLICE_FULL_HINT	16*1448
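
/* Sanity check of that hint (worked figures, assuming a typical Ethernet
 * path): a 1500-byte MTU minus 20 (IP) minus 20 (TCP) minus 12 (TCP
 * timestamp option) leaves 1448 bytes per segment, and a default pipe
 * holds 16 such segments, hence 16 * 1448 = 23168 bytes.
 */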

/* how much data we attempt to splice at once when the buffer is configured
 * for infinite forwarding */
#define MAX_SPLICE_AT_ONCE	(1<<30)

/* Returns :
 *   -1 if splice() is not supported
 *   >= 0 to report the number of spliced bytes.
 * The connection flags are updated (error, read0, wait_room, wait_data).
 * The caller must have previously allocated the pipe.
 */
int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count)
{
	int ret;
	int retval = 0;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!fd_recv_ready(conn->handle.fd))
		return 0;

	conn->flags &= ~CO_FL_WAIT_ROOM;
	errno = 0;

	/* Under Linux, if FD_POLL_HUP is set, we have reached the end.
	 * Since older splice() implementations were buggy and returned
	 * EAGAIN on end of read, let's bypass the call to splice() now.
	 */
	if (unlikely(!(fdtab[conn->handle.fd].state & FD_POLL_IN))) {
		/* stop here if we reached the end of data */
		if ((fdtab[conn->handle.fd].state & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
			goto out_read0;

		/* report error on POLL_ERR before connection establishment */
		if ((fdtab[conn->handle.fd].state & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			errno = 0; /* let the caller do a getsockopt() if it wants it */
			goto leave;
		}
	}

	while (count) {
		if (count > MAX_SPLICE_AT_ONCE)
			count = MAX_SPLICE_AT_ONCE;

		ret = splice(conn->handle.fd, NULL, pipe->prod, NULL, count,
			     SPLICE_F_MOVE|SPLICE_F_NONBLOCK);

		if (ret <= 0) {
			if (ret == 0)
				goto out_read0;

			if (errno == EAGAIN) {
				/* there are two reasons for EAGAIN :
				 *   - nothing in the socket buffer (standard)
				 *   - pipe is full
				 * We cannot tell these two situations apart
				 * here. Since we don't know whether the pipe
				 * is full, we stop as soon as the pipe is not
				 * empty. In practice we will almost always
				 * either fill or empty the pipe.
				 */
				if (pipe->data) {
					/* always stop reading until the pipe is flushed */
					conn->flags |= CO_FL_WAIT_ROOM;
					break;
				}
				/* socket buffer exhausted */
				fd_cant_recv(conn->handle.fd);
				break;
			}
			else if (errno == ENOSYS || errno == EINVAL || errno == EBADF) {
				/* splice not supported on this end, disable it.
				 * We can safely return -1 since there is no
				 * chance that any data has been piped yet.
				 */
				retval = -1;
				goto leave;
			}
			else if (errno == EINTR) {
				/* try again */
				continue;
			}
			/* here we have another error */
			conn->flags |= CO_FL_ERROR;
			break;
		} /* ret <= 0 */

		retval += ret;
		pipe->data += ret;
		count -= ret;

		if (pipe->data >= SPLICE_FULL_HINT || ret >= global.tune.recv_enough) {
			/* We've read enough of it for this time, let's stop before
			 * being asked to poll.
			 */
			conn->flags |= CO_FL_WAIT_ROOM;
			break;
		}
	} /* while */

	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && retval)
		conn->flags &= ~CO_FL_WAIT_L4_CONN;

 leave:
	if (retval > 0) {
		/* we count the total bytes sent, and the send rate for 32-byte
		 * blocks. The reason for the latter is that freq_ctr counters
		 * are limited to 4GB and that is not always enough per second.
		 */
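		/* Worked figures: forwarding 1 GiB in one second accounts for
		 * (2^30 + 16) / 32, i.e. about 33.5 million 32-byte blocks, so
		 * the counter's ~4G ticks/s ceiling corresponds to roughly
		 * 128 GiB/s instead of 4 GiB/s when counting raw bytes.
		 */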
		_HA_ATOMIC_ADD(&global.out_bytes, retval);
		_HA_ATOMIC_ADD(&global.spliced_out_bytes, retval);
		update_freq_ctr(&global.out_32bps, (retval + 16) / 32);
	}
	return retval;

 out_read0:
	conn_sock_read0(conn);
	conn->flags &= ~CO_FL_WAIT_L4_CONN;
	goto leave;
}

/* Send as many bytes as possible from the pipe to the connection's socket.
 */
int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe)
{
	int ret, done;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!fd_send_ready(conn->handle.fd))
		return 0;

	if (conn->flags & CO_FL_SOCK_WR_SH) {
		/* it's already closed */
		conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
		errno = EPIPE;
		return 0;
	}

	done = 0;
	while (pipe->data) {
		ret = splice(pipe->cons, NULL, conn->handle.fd, NULL, pipe->data,
			     SPLICE_F_MOVE|SPLICE_F_NONBLOCK);

		if (ret <= 0) {
			if (ret == 0 || errno == EAGAIN) {
				fd_cant_send(conn->handle.fd);
				break;
			}
			else if (errno == EINTR)
				continue;

			/* here we have another error */
			conn->flags |= CO_FL_ERROR;
			break;
		}

		done += ret;
		pipe->data -= ret;
	}
	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
		conn->flags &= ~CO_FL_WAIT_L4_CONN;
	}

	return done;
}
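
/* Usage sketch (illustrative only, not part of the transport layer): a
 * caller forwarding between two raw_sock connections with zero copy could,
 * assuming a pipe was obtained from the pool with get_pipe(), do roughly:
 *
 *	int in = raw_sock_to_pipe(src_conn, NULL, pipe, MAX_SPLICE_AT_ONCE);
 *	if (in > 0)
 *		raw_sock_from_pipe(dst_conn, NULL, pipe);
 *	if (!pipe->data)
 *		put_pipe(pipe);
 *
 * Real callers go through the xprt_ops rcv_pipe/snd_pipe pointers instead
 * and keep the pipe across calls while CO_FL_WAIT_ROOM is set.
 */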

#endif /* USE_LINUX_SPLICE */


/* Receive up to <count> bytes from connection <conn>'s socket and store them
 * into buffer <buf>. Only one call to recv() is performed, unless the
 * buffer wraps, in which case a second call may be performed. The connection's
 * flags are updated with whatever special event is detected (error, read0,
 * empty). The caller is responsible for taking care of those events and
 * avoiding the call if inappropriate. The function does not call the
 * connection's polling update function, so the caller is responsible for this.
 * errno is cleared before starting so that if the caller spots an error while
 * errno is still zero, it knows the error is pending and can be retrieved via
 * getsockopt(SO_ERROR).
 */
static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
{
	ssize_t ret;
	size_t try, done = 0;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!fd_recv_ready(conn->handle.fd))
		return 0;

	conn->flags &= ~CO_FL_WAIT_ROOM;
	errno = 0;

	if (unlikely(!(fdtab[conn->handle.fd].state & FD_POLL_IN))) {
		/* stop here if we reached the end of data */
		if ((fdtab[conn->handle.fd].state & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
			goto read0;

		/* report error on POLL_ERR before connection establishment */
		if ((fdtab[conn->handle.fd].state & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			goto leave;
		}
	}

	/* read the largest possible block. For this, we perform only one call
	 * to recv() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again. A new attempt is made on
	 * EINTR too.
	 */
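	/* Worked example of the wrap (illustrative figures): in a 16-byte
	 * buffer whose 2 bytes of data start at offset 10, the tail is at
	 * offset 12, so b_contig_space() first reports 4 free bytes (offsets
	 * 12-15). If recv() fills exactly those 4 bytes, the loop runs once
	 * more with the tail wrapped to offset 0, which is why up to two
	 * recv() calls may be needed per invocation.
	 */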
	while (count > 0) {
		try = b_contig_space(buf);
		if (!try)
			break;

		if (try > count)
			try = count;

		ret = recv(conn->handle.fd, b_tail(buf), try, 0);

		if (ret > 0) {
			b_add(buf, ret);
			done += ret;
			if (ret < try) {
				/* socket buffer exhausted */
				fd_cant_recv(conn->handle.fd);

				/* unfortunately, on level-triggered events, POLL_HUP
				 * is generally delivered AFTER the system buffer is
				 * empty, unless the poller supports POLL_RDHUP. If
				 * we know this is the case, we don't try to read more
				 * as we know there's no more available. Similarly, if
				 * there's no problem with lingering we don't even try
				 * to read an unlikely close from the client since we'll
				 * close first anyway.
				 */
				if (fdtab[conn->handle.fd].state & FD_POLL_HUP)
					goto read0;

				if (!(fdtab[conn->handle.fd].state & FD_LINGER_RISK) ||
				    (cur_poller.flags & HAP_POLL_F_RDHUP)) {
					break;
				}
			}
			count -= ret;

			if (flags & CO_RFL_READ_ONCE)
				break;
		}
		else if (ret == 0) {
			goto read0;
		}
		else if (errno == EAGAIN || errno == ENOTCONN) {
			/* socket buffer exhausted */
			fd_cant_recv(conn->handle.fd);
			break;
		}
		else if (errno != EINTR) {
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			break;
		}
	}

	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
		conn->flags &= ~CO_FL_WAIT_L4_CONN;

 leave:
	return done;

 read0:
	conn_sock_read0(conn);
	conn->flags &= ~CO_FL_WAIT_L4_CONN;

	/* Now a final check for a possible asynchronous low-level error
	 * report. This can happen when a connection receives a reset
	 * after a shutdown, both POLL_HUP and POLL_ERR are queued, and
	 * we might have come from there by just checking POLL_HUP instead
	 * of recv()'s return value 0, so we have no way to tell there was
	 * an error without checking.
	 */
	if (unlikely(fdtab[conn->handle.fd].state & FD_POLL_ERR))
		conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
	goto leave;
}


/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
 * socket. <flags> may contain some CO_SFL_* flags to hint the system about
 * other pending data, for example CO_SFL_MSG_MORE which is translated into
 * MSG_MORE below. Only one call to send() is performed, unless the buffer
 * wraps, in which case a second call may be performed. The connection's flags
 * are updated with whatever special event is detected (error, empty). The
 * caller is responsible for taking care of those events and avoiding the call
 * if inappropriate. The function does not call the connection's polling update
 * function, so the caller is responsible for this. It's up to the caller to
 * update the buffer's contents based on the return value.
 */
static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
{
	ssize_t ret;
	size_t try, done;
	int send_flag;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!fd_send_ready(conn->handle.fd))
		return 0;

	if (conn->flags & CO_FL_SOCK_WR_SH) {
		/* it's already closed */
		conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
		errno = EPIPE;
		return 0;
	}

	done = 0;
	/* send the largest possible block. For this we perform only one call
	 * to send() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again.
	 */
	while (count) {
		try = b_contig_data(buf, done);
		if (try > count)
			try = count;

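		/* When the buffer wraps, the first hunk is sent with MSG_MORE so
		 * that the kernel may coalesce it with the following hunk instead
		 * of emitting a short TCP segment; the same flag is set when the
		 * caller passes CO_SFL_MSG_MORE to announce more pending data.
		 */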
		send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
		if (try < count || flags & CO_SFL_MSG_MORE)
			send_flag |= MSG_MORE;

		ret = send(conn->handle.fd, b_peek(buf, done), try, send_flag);

		if (ret > 0) {
			count -= ret;
			done += ret;

			/* if the system buffer is full, don't insist */
			if (ret < try) {
				fd_cant_send(conn->handle.fd);
				break;
			}
			if (!count)
				fd_stop_send(conn->handle.fd);
		}
		else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
			/* nothing written, we need to poll for write first */
			fd_cant_send(conn->handle.fd);
			break;
		}
		else if (errno != EINTR) {
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			break;
		}
	}
	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
		conn->flags &= ~CO_FL_WAIT_L4_CONN;
	}

	if (done > 0) {
		/* we count the total bytes sent, and the send rate for 32-byte
		 * blocks. The reason for the latter is that freq_ctr counters
		 * are limited to 4GB and that is not always enough per second.
		 */
		_HA_ATOMIC_ADD(&global.out_bytes, done);
		update_freq_ctr(&global.out_32bps, (done + 16) / 32);
	}
	return done;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int raw_sock_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	return conn_subscribe(conn, xprt_ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int raw_sock_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	return conn_unsubscribe(conn, xprt_ctx, event_type, es);
}

/* Closing callback for the raw transport: simply drops any remaining event
 * subscription.
 */
static void raw_sock_close(struct connection *conn, void *xprt_ctx)
{
	if (conn->subs != NULL) {
		conn_unsubscribe(conn, NULL, conn->subs->events, conn->subs);
	}
}

/* We can't have an underlying XPRT, so just return -1 to signify failure */
static int raw_sock_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx)
{
	/* This is the lowest xprt we can have, so if we get here it means we
	 * didn't find the xprt we wanted to remove, which is a bug.
	 */
	BUG_ON(1);
	return -1;
}

/* transport-layer operations for RAW sockets */
static struct xprt_ops raw_sock = {
	.snd_buf  = raw_sock_from_buf,
	.rcv_buf  = raw_sock_to_buf,
	.subscribe = raw_sock_subscribe,
	.unsubscribe = raw_sock_unsubscribe,
	.remove_xprt = raw_sock_remove_xprt,
#if defined(USE_LINUX_SPLICE)
	.rcv_pipe = raw_sock_to_pipe,
	.snd_pipe = raw_sock_from_pipe,
#endif
	.shutr    = NULL,
	.shutw    = NULL,
	.close    = raw_sock_close,
	.name     = "RAW",
};


__attribute__((constructor))
static void __raw_sock_init(void)
{
	xprt_register(XPRT_RAW, &raw_sock);
}
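
/* Usage sketch (illustrative, assuming the xprt_get() lookup helper from
 * haproxy/connection.h): once registered, the operations are looked up by
 * identifier rather than referenced directly, e.g.:
 *
 *	const struct xprt_ops *ops = xprt_get(XPRT_RAW);
 *	if (ops && ops->snd_buf)
 *		ops->snd_buf(conn, NULL, buf, b_data(buf), 0);
 *
 * which keeps the upper layers agnostic of whether the transport underneath
 * is RAW or SSL.
 */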

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */