/*
 * RAW transport layer over SOCK_STREAM sockets.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <netinet/tcp.h>

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/connection.h>
#include <haproxy/errors.h>
#include <haproxy/fd.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/global.h>
#include <haproxy/pipe.h>
#include <haproxy/stream_interface.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
#include <haproxy/tools.h>


#if defined(USE_LINUX_SPLICE)

/* A pipe contains 16 segments max, and it's common to see segments of 1448 bytes
 * because of timestamps. Use this as a hint for not looping on splice().
 */
#define SPLICE_FULL_HINT        16*1448

/* how much data we attempt to splice at once when the buffer is configured for
 * infinite forwarding */
#define MAX_SPLICE_AT_ONCE      (1<<30)

/* Returns :
 *   -1 if splice() is not supported
 *   >= 0 to report the amount of spliced bytes.
 * connection flags are updated (error, read0, wait_room, wait_data).
 * The caller must have previously allocated the pipe.
 */
int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count)
{
        int ret;
        int retval = 0;


        if (!conn_ctrl_ready(conn))
                return 0;

        if (!fd_recv_ready(conn->handle.fd))
                return 0;

        conn->flags &= ~CO_FL_WAIT_ROOM;
        errno = 0;

        /* Under Linux, if FD_POLL_HUP is set, we have reached the end.
         * Since older splice() implementations were buggy and returned
         * EAGAIN on end of read, let's bypass the call to splice() now.
         */
        if (unlikely(!(fdtab[conn->handle.fd].ev & FD_POLL_IN))) {
                /* stop here if we reached the end of data */
                if ((fdtab[conn->handle.fd].ev & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
                        goto out_read0;

                /* report error on POLL_ERR before connection establishment */
                if ((fdtab[conn->handle.fd].ev & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
                        conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
                        errno = 0; /* let the caller do a getsockopt() if it wants it */
                        goto leave;
                }
        }

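        /* forward at most MAX_SPLICE_AT_ONCE bytes per call; splice() will
         * only take what fits into the pipe anyway.
         */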
        while (count) {
                if (count > MAX_SPLICE_AT_ONCE)
                        count = MAX_SPLICE_AT_ONCE;

                ret = splice(conn->handle.fd, NULL, pipe->prod, NULL, count,
                             SPLICE_F_MOVE|SPLICE_F_NONBLOCK);

                if (ret <= 0) {
                        if (ret == 0)
                                goto out_read0;

                        if (errno == EAGAIN) {
                                /* there are two reasons for EAGAIN :
                                 *   - nothing in the socket buffer (standard)
                                 *   - pipe is full
                                 * Telling these two situations apart is
                                 * problematic. Since we don't know if the
                                 * pipe is full, we'll stop if the pipe is not
                                 * empty. Anyway, we will almost always fill or
                                 * empty the pipe.
                                 */
                                if (pipe->data) {
                                        /* always stop reading until the pipe is flushed */
                                        conn->flags |= CO_FL_WAIT_ROOM;
                                        break;
                                }
                                /* socket buffer exhausted */
                                fd_cant_recv(conn->handle.fd);
                                break;
                        }
                        else if (errno == ENOSYS || errno == EINVAL || errno == EBADF) {
                                /* splice not supported on this end, disable it.
                                 * We can safely return -1 since there is no
                                 * chance that any data has been piped yet.
                                 */
                                retval = -1;
                                goto leave;
                        }
                        else if (errno == EINTR) {
                                /* try again */
                                continue;
                        }
                        /* here we have another error */
                        conn->flags |= CO_FL_ERROR;
                        break;
                } /* ret <= 0 */

                retval += ret;
                pipe->data += ret;
                count -= ret;

                if (pipe->data >= SPLICE_FULL_HINT || ret >= global.tune.recv_enough) {
                        /* We've read enough of it for this time, let's stop before
                         * being asked to poll.
                         */
                        conn->flags |= CO_FL_WAIT_ROOM;
                        break;
                }
        } /* while */

        if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && retval)
                conn->flags &= ~CO_FL_WAIT_L4_CONN;

 leave:
        if (retval > 0) {
                /* we count the total bytes sent, and the send rate for 32-byte
                 * blocks. The reason for the latter is that freq_ctr values are
                 * limited to 4GB, which is not enough per second.
                 */
                _HA_ATOMIC_ADD(&global.out_bytes, retval);
                update_freq_ctr(&global.out_32bps, (retval + 16) / 32);
        }
        return retval;

 out_read0:
        conn_sock_read0(conn);
        conn->flags &= ~CO_FL_WAIT_L4_CONN;
        goto leave;
}

/* Send as many bytes as possible from the pipe to the connection's socket.
 * Returns the number of bytes effectively sent, and updates the connection
 * flags on error.
 */
int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe)
{
        int ret, done;

        if (!conn_ctrl_ready(conn))
                return 0;

        if (!fd_send_ready(conn->handle.fd))
                return 0;

        if (conn->flags & CO_FL_SOCK_WR_SH) {
                /* it's already closed */
                conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
                errno = EPIPE;
                return 0;
        }

        done = 0;
        while (pipe->data) {
                ret = splice(pipe->cons, NULL, conn->handle.fd, NULL, pipe->data,
                             SPLICE_F_MOVE|SPLICE_F_NONBLOCK);

                if (ret <= 0) {
                        if (ret == 0 || errno == EAGAIN) {
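                                /* nothing sent, we need to poll for write first */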
                                fd_cant_send(conn->handle.fd);
                                break;
                        }
                        else if (errno == EINTR)
                                continue;

                        /* here we have another error */
                        conn->flags |= CO_FL_ERROR;
                        break;
                }

                done += ret;
                pipe->data -= ret;
        }
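        /* if anything was sent, the L4 connection is necessarily established */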
        if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
                conn->flags &= ~CO_FL_WAIT_L4_CONN;
        }

        return done;
}

#endif /* USE_LINUX_SPLICE */


/* Receive up to <count> bytes from connection <conn>'s socket and store them
 * into buffer <buf>. Only one call to recv() is performed, unless the
 * buffer wraps, in which case a second call may be performed. The connection's
 * flags are updated with whatever special event is detected (error, read0,
 * empty). The caller is responsible for taking care of those events and
 * avoiding the call if inappropriate. The function does not call the
 * connection's polling update function, so the caller is responsible for this.
 * errno is cleared before starting, so that if the caller spots an error while
 * errno is still zero, it knows the error is pending and can retrieve it via
 * getsockopt(SO_ERROR).
 */
static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
{
        ssize_t ret;
        size_t try, done = 0;

        if (!conn_ctrl_ready(conn))
                return 0;

        if (!fd_recv_ready(conn->handle.fd))
                return 0;

        conn->flags &= ~CO_FL_WAIT_ROOM;
        errno = 0;

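        /* no input readiness was reported by the poller: check whether this is
         * an end of stream or an early error before attempting to read.
         */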
        if (unlikely(!(fdtab[conn->handle.fd].ev & FD_POLL_IN))) {
                /* stop here if we reached the end of data */
                if ((fdtab[conn->handle.fd].ev & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
                        goto read0;

                /* report error on POLL_ERR before connection establishment */
                if ((fdtab[conn->handle.fd].ev & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
                        conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
                        goto leave;
                }
        }

        /* read the largest possible block. For this, we perform only one call
         * to recv() unless the buffer wraps and we exactly fill the first hunk,
         * in which case we accept to do it once again. A new attempt is made on
         * EINTR too.
         */
        while (count > 0) {
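                /* amount of contiguous free space at the buffer's tail */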
                try = b_contig_space(buf);
                if (!try)
                        break;

                if (try > count)
                        try = count;

                ret = recv(conn->handle.fd, b_tail(buf), try, 0);

                if (ret > 0) {
                        b_add(buf, ret);
                        done += ret;
                        if (ret < try) {
                                /* socket buffer exhausted */
                                fd_cant_recv(conn->handle.fd);

                                /* unfortunately, on level-triggered events, POLL_HUP
                                 * is generally delivered AFTER the system buffer is
                                 * empty, unless the poller supports POLL_RDHUP. If
                                 * we know this is the case, we don't try to read more
                                 * as we know there's no more available. Similarly, if
                                 * there's no problem with lingering we don't even try
                                 * to read an unlikely close from the client since we'll
                                 * close first anyway.
                                 */
                                if (fdtab[conn->handle.fd].ev & FD_POLL_HUP)
                                        goto read0;

                                if ((!fdtab[conn->handle.fd].linger_risk) ||
                                    (cur_poller.flags & HAP_POLL_F_RDHUP)) {
                                        break;
                                }
                        }
                        count -= ret;

                        if (flags & CO_RFL_READ_ONCE)
                                break;
                }
                else if (ret == 0) {
                        goto read0;
                }
                else if (errno == EAGAIN || errno == ENOTCONN) {
                        /* socket buffer exhausted */
                        fd_cant_recv(conn->handle.fd);
                        break;
                }
                else if (errno != EINTR) {
                        conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
                        break;
                }
        }

        if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
                conn->flags &= ~CO_FL_WAIT_L4_CONN;

 leave:
        return done;

 read0:
        conn_sock_read0(conn);
        conn->flags &= ~CO_FL_WAIT_L4_CONN;

        /* Now a final check for a possible asynchronous low-level error
         * report. This can happen when a connection receives a reset
         * after a shutdown, both POLL_HUP and POLL_ERR are queued, and
         * we might have come from there by just checking POLL_HUP instead
         * of recv()'s return value 0, so we have no way to tell there was
         * an error without checking.
         */
        if (unlikely(fdtab[conn->handle.fd].ev & FD_POLL_ERR))
                conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
        goto leave;
}


/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
 * socket. <flags> may contain some CO_SFL_* flags to hint the system about
 * other pending data, for example CO_SFL_MSG_MORE which is turned into MSG_MORE.
 * Only one call to send() is performed, unless the buffer wraps, in which case
 * a second call may be performed. The connection's flags are updated with
 * whatever special event is detected (error, empty). The caller is responsible
 * for taking care of those events and avoiding the call if inappropriate. The
 * function does not call the connection's polling update function, so the caller
 * is responsible for this. It's up to the caller to update the buffer's contents
 * based on the return value.
 */
static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
{
        ssize_t ret;
        size_t try, done;
        int send_flag;

        if (!conn_ctrl_ready(conn))
                return 0;

        if (!fd_send_ready(conn->handle.fd))
                return 0;

        if (conn->flags & CO_FL_SOCK_WR_SH) {
                /* it's already closed */
                conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
                errno = EPIPE;
                return 0;
        }

        done = 0;
        /* send the largest possible block. For this we perform only one call
         * to send() unless the buffer wraps and we exactly fill the first hunk,
         * in which case we accept to do it once again.
         */
        while (count) {
                try = b_contig_data(buf, done);
                if (try > count)
                        try = count;

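                /* ask the kernel to coalesce this block with following data
                 * when we know more is coming (wrapped buffer or caller's
                 * CO_SFL_MSG_MORE hint).
                 */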
                send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
                if (try < count || flags & CO_SFL_MSG_MORE)
                        send_flag |= MSG_MORE;

                ret = send(conn->handle.fd, b_peek(buf, done), try, send_flag);

                if (ret > 0) {
                        count -= ret;
                        done += ret;

                        /* if the system buffer is full, don't insist */
                        if (ret < try) {
                                fd_cant_send(conn->handle.fd);
                                break;
                        }
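                        /* everything was sent, no need to keep polling for write */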
                        if (!count)
                                fd_stop_send(conn->handle.fd);
                }
                else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
                        /* nothing written, we need to poll for write first */
                        fd_cant_send(conn->handle.fd);
                        break;
                }
                else if (errno != EINTR) {
                        conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
                        break;
                }
        }
        if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
                conn->flags &= ~CO_FL_WAIT_L4_CONN;
        }

        if (done > 0) {
                /* we count the total bytes sent, and the send rate for 32-byte
                 * blocks. The reason for the latter is that freq_ctr values are
                 * limited to 4GB, which is not enough per second.
                 */
                _HA_ATOMIC_ADD(&global.out_bytes, done);
                update_freq_ctr(&global.out_32bps, (done + 16) / 32);
        }
        return done;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int raw_sock_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
        return conn_subscribe(conn, xprt_ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int raw_sock_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
        return conn_unsubscribe(conn, xprt_ctx, event_type, es);
}

/* We can't have an underlying XPRT, so just return -1 to signify failure */
static int raw_sock_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx)
{
        /* This is the lowest xprt we can have, so if we get here we didn't
         * find the xprt we wanted to remove, which is a bug.
         */
        BUG_ON(1);
        return -1;
}

/* transport-layer operations for RAW sockets */
static struct xprt_ops raw_sock = {
        .snd_buf = raw_sock_from_buf,
        .rcv_buf = raw_sock_to_buf,
        .subscribe = raw_sock_subscribe,
        .unsubscribe = raw_sock_unsubscribe,
        .remove_xprt = raw_sock_remove_xprt,
#if defined(USE_LINUX_SPLICE)
        .rcv_pipe = raw_sock_to_pipe,
        .snd_pipe = raw_sock_from_pipe,
#endif
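        /* no transport-specific shutdown/close handling is needed at the raw
         * level, so these are left NULL and the generic connection code takes
         * care of the underlying socket.
         */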
        .shutr = NULL,
        .shutw = NULL,
        .close = NULL,
        .name = "RAW",
};


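/* registers the raw transport layer under XPRT_RAW at program startup */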
__attribute__((constructor))
static void __raw_sock_init(void)
{
        xprt_register(XPRT_RAW, &raw_sock);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */