/*
 * Generic code for native (BSD-compatible) sockets
 *
 * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <net/if.h>

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/listener.h>
#include <haproxy/log.h>
#include <haproxy/namespace.h>
#include <haproxy/sock.h>
#include <haproxy/sock_inet.h>
#include <haproxy/tools.h>

/* the list of remaining sockets transferred from an older process */
struct xfer_sock_list *xfer_sock_list = NULL;


/* Accept an incoming connection from listener <l>, and return it, as well as
 * a CO_AC_* status code into <status> if not null. NULL is returned on error.
 * <l> must be a valid listener with a valid frontend.
 */
struct connection *sock_accept_conn(struct listener *l, int *status)
{
#ifdef USE_ACCEPT4
        static int accept4_broken;
#endif
        struct proxy *p = l->bind_conf->frontend;
        struct connection *conn = NULL;
        struct sockaddr_storage *addr = NULL;
        socklen_t laddr;
        int ret;
        int cfd;

        if (!sockaddr_alloc(&addr, NULL, 0))
                goto fail_addr;

        /* accept() will mark all accepted FDs O_NONBLOCK and the ones accepted
         * in the master process as FD_CLOEXEC. It's not done for workers
         * because 1) workers are not supposed to execute anything so there's
         * no reason for uselessly slowing down everything, and 2) that would
         * prevent us from implementing fd passing in the future.
         */
#ifdef USE_ACCEPT4
        laddr = sizeof(*conn->src);

        /* only call accept4() if it's known to be safe, otherwise fall back to
         * the legacy accept() + fcntl().
         */
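        /* Note: the assignment in the condition below latches accept4_broken
         * to 1 the first time accept4() fails with ENOSYS/EINVAL/EBADF, so
         * that all subsequent calls skip accept4() entirely and go straight
         * to the accept() + fcntl() path.
         */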
        if (unlikely(accept4_broken) ||
            (((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
                             SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == -1) &&
             (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
             (accept4_broken = 1)))
#endif
        {
                laddr = sizeof(*conn->src);
                if ((cfd = accept(l->rx.fd, (struct sockaddr*)addr, &laddr)) != -1) {
                        fcntl(cfd, F_SETFL, O_NONBLOCK);
                        if (master)
                                fcntl(cfd, F_SETFD, FD_CLOEXEC);
                }
        }

        if (likely(cfd != -1)) {
                /* Perfect, the connection was accepted */
                conn = conn_new(&l->obj_type);
                if (!conn)
                        goto fail_conn;

                conn->src = addr;
                conn->handle.fd = cfd;
                conn->flags |= CO_FL_ADDR_FROM_SET;
                ret = CO_AC_DONE;
                goto done;
        }

        /* error conditions below */
        sockaddr_free(&addr);

        switch (errno) {
        case EAGAIN:
                ret = CO_AC_DONE; /* nothing more to accept */
                if (fdtab[l->rx.fd].ev & (FD_POLL_HUP|FD_POLL_ERR)) {
                        /* the listening socket might have been disabled in a shared
                         * process and we're a collateral victim. We'll just pause for
                         * a while in case it comes back. In the meantime, we need to
                         * clear this sticky flag.
                         */
                        _HA_ATOMIC_AND(&fdtab[l->rx.fd].ev, ~(FD_POLL_HUP|FD_POLL_ERR));
                        ret = CO_AC_PAUSE;
                }
                fd_cant_recv(l->rx.fd);
                break;

        case EINVAL:
                /* might be trying to accept on a shut fd (eg: soft stop) */
                ret = CO_AC_PAUSE;
                break;

        case EINTR:
        case ECONNABORTED:
                ret = CO_AC_RETRY;
                break;

        case ENFILE:
                if (p)
                        send_log(p, LOG_EMERG,
                                 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
                                 p->id, global.maxsock);
                ret = CO_AC_PAUSE;
                break;

        case EMFILE:
                if (p)
                        send_log(p, LOG_EMERG,
                                 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
                                 p->id, global.maxsock);
                ret = CO_AC_PAUSE;
                break;

        case ENOBUFS:
        case ENOMEM:
                if (p)
                        send_log(p, LOG_EMERG,
                                 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
                                 p->id, global.maxsock);
                ret = CO_AC_PAUSE;
                break;

        default:
                /* unexpected result, let's give up and let other tasks run */
                ret = CO_AC_YIELD;
        }
 done:
        if (status)
                *status = ret;
        return conn;

 fail_conn:
        sockaddr_free(&addr);
        /* The accept call already succeeded by the time we try to allocate
         * the connection, so we need to close the FD in case of failure. */
        close(cfd);
 fail_addr:
        ret = CO_AC_PAUSE;
        goto done;
}

/* Create a socket to connect to the server in conn->dst (which MUST be valid),
 * using the configured namespace if needed, or the one passed by the proxy
 * protocol if required to do so. It ultimately calls socket() or socketat()
 * and returns the FD or error code.
 */
int sock_create_server_socket(struct connection *conn)
{
        const struct netns_entry *ns = NULL;

#ifdef USE_NS
        if (objt_server(conn->target)) {
                if (__objt_server(conn->target)->flags & SRV_F_USE_NS_FROM_PP)
                        ns = conn->proxy_netns;
                else
                        ns = __objt_server(conn->target)->netns;
        }
#endif
        return my_socketat(ns, conn->dst->ss_family, SOCK_STREAM, 0);
}

/* Enables receiving on receiver <rx> once already bound. */
void sock_enable(struct receiver *rx)
{
        if (rx->flags & RX_F_BOUND)
                fd_want_recv_safe(rx->fd);
}

/* Disables receiving on receiver <rx> once already bound. */
void sock_disable(struct receiver *rx)
{
        if (rx->flags & RX_F_BOUND)
                fd_stop_recv(rx->fd);
}

/* stops, unbinds and possibly closes the FD associated with receiver rx */
void sock_unbind(struct receiver *rx)
{
        /* There are a number of situations where we prefer to keep the FD and
         * not to close it (unless we're stopping, of course):
         * - worker process unbinding from a worker's FD with socket transfer enabled => keep
         * - master process unbinding from a master's inherited FD => keep
         * - master process unbinding from a master's FD => close
         * - master process unbinding from a worker's inherited FD => keep
         * - master process unbinding from a worker's FD => close
         * - worker process unbinding from a master's FD => close
         * - worker process unbinding from a worker's FD => close
         */
        if (rx->flags & RX_F_BOUND)
                rx->proto->rx_disable(rx);

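        /* The first test below covers the "keep" case for a worker whose
         * sockets may be transferred to the next process, the second one the
         * "keep" cases for the master with inherited FDs; every other
         * combination from the table above falls through and closes the FD.
         */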
        if (!stopping && !master &&
            !(rx->flags & RX_F_MWORKER) &&
            (global.tune.options & GTUNE_SOCKET_TRANSFER))
                return;

        if (!stopping && master &&
            rx->flags & RX_F_INHERITED)
                return;

        rx->flags &= ~RX_F_BOUND;
        if (rx->fd != -1)
                fd_delete(rx->fd);
        rx->fd = -1;
}

/*
 * Retrieves the source address for the socket <fd>, with <dir> indicating
 * if we're a listener (=0) or an initiator (!=0). It returns 0 in case of
 * success, -1 in case of error. The socket's source address is stored in
 * <sa> for <salen> bytes.
 */
int sock_get_src(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
        if (dir)
                return getsockname(fd, sa, &salen);
        else
                return getpeername(fd, sa, &salen);
}

/*
 * Retrieves the original destination address for the socket <fd>, with <dir>
 * indicating if we're a listener (=0) or an initiator (!=0). It returns 0 in
 * case of success, -1 in case of error. The socket's original destination
 * address is stored in <sa> for <salen> bytes.
 */
int sock_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
        if (dir)
                return getpeername(fd, sa, &salen);
        else
                return getsockname(fd, sa, &salen);
}

/* Try to retrieve exported sockets from the worker at CLI <unixsocket>. They
 * will be placed into the xfer_sock_list for later use by the function
 * sock_find_compatible_fd(). Returns 0 on success, -1 on failure.
 */
int sock_get_old_sockets(const char *unixsocket)
{
        char *cmsgbuf = NULL, *tmpbuf = NULL;
        int *tmpfd = NULL;
        struct sockaddr_un addr;
        struct cmsghdr *cmsg;
        struct msghdr msghdr;
        struct iovec iov;
        struct xfer_sock_list *xfer_sock = NULL;
        struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
        int sock = -1;
        int ret = -1;
        int ret2 = -1;
        int fd_nb;
        int got_fd = 0;
        int cur_fd = 0;
        size_t maxoff = 0, curoff = 0;

        memset(&msghdr, 0, sizeof(msghdr));
        cmsgbuf = malloc(CMSG_SPACE(sizeof(int)) * MAX_SEND_FD);
        if (!cmsgbuf) {
                ha_warning("Failed to allocate memory to receive sockets\n");
                goto out;
        }

        sock = socket(PF_UNIX, SOCK_STREAM, 0);
        if (sock < 0) {
                ha_warning("Failed to create a socket to connect to the old process socket '%s'\n", unixsocket);
                goto out;
        }

        strncpy(addr.sun_path, unixsocket, sizeof(addr.sun_path) - 1);
        addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
        addr.sun_family = PF_UNIX;

        ret = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
        if (ret < 0) {
                ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
                goto out;
        }

        setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv));
        iov.iov_base = &fd_nb;
        iov.iov_len = sizeof(fd_nb);
        msghdr.msg_iov = &iov;
        msghdr.msg_iovlen = 1;

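        /* Ask the old process to transfer its listening sockets: send the
         * "_getsocks\n" CLI command, read back the number of FDs, then
         * receive them in batches of up to MAX_SEND_FD descriptors passed
         * via SCM_RIGHTS ancillary data, acknowledging each batch.
         */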
        if (send(sock, "_getsocks\n", strlen("_getsocks\n"), 0) != strlen("_getsocks\n")) {
                ha_warning("Failed to get the number of sockets to be transferred !\n");
                goto out;
        }

        /* First, get the number of file descriptors to be received */
        if (recvmsg(sock, &msghdr, MSG_WAITALL) != sizeof(fd_nb)) {
                ha_warning("Failed to get the number of sockets to be transferred !\n");
                goto out;
        }

        if (fd_nb == 0) {
                ret2 = 0;
                goto out;
        }

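        /* Each transferred socket is described in tmpbuf by a one-byte
         * namespace name length followed by the name, a one-byte interface
         * name length followed by the name, and a 32-bit options word
         * (formerly the listener options, now ignored).
         */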
        tmpbuf = malloc(fd_nb * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int)));
        if (tmpbuf == NULL) {
                ha_warning("Failed to allocate memory while receiving sockets\n");
                goto out;
        }

        tmpfd = malloc(fd_nb * sizeof(int));
        if (tmpfd == NULL) {
                ha_warning("Failed to allocate memory while receiving sockets\n");
                goto out;
        }

        msghdr.msg_control = cmsgbuf;
        msghdr.msg_controllen = CMSG_SPACE(sizeof(int)) * MAX_SEND_FD;
        iov.iov_len = MAX_SEND_FD * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int));

        do {
                int ret3;

                iov.iov_base = tmpbuf + curoff;

                ret = recvmsg(sock, &msghdr, 0);

                if (ret == -1 && errno == EINTR)
                        continue;

                if (ret <= 0)
                        break;

                /* Send an ack to let the sender know we got the sockets
                 * and it can send some more
                 */
                do {
                        ret3 = send(sock, &got_fd, sizeof(got_fd), 0);
                } while (ret3 == -1 && errno == EINTR);

                for (cmsg = CMSG_FIRSTHDR(&msghdr); cmsg != NULL; cmsg = CMSG_NXTHDR(&msghdr, cmsg)) {
                        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
                                size_t totlen = cmsg->cmsg_len - CMSG_LEN(0);

                                if (totlen / sizeof(int) + got_fd > fd_nb) {
                                        ha_warning("Got too many sockets !\n");
                                        goto out;
                                }

                                /*
                                 * Be paranoid and use memcpy() to avoid any
                                 * potential alignment issue.
                                 */
                                memcpy(&tmpfd[got_fd], CMSG_DATA(cmsg), totlen);
                                got_fd += totlen / sizeof(int);
                        }
                }
                curoff += ret;
        } while (got_fd < fd_nb);

        if (got_fd != fd_nb) {
                ha_warning("We didn't get the expected number of sockets (expecting %d got %d)\n",
                           fd_nb, got_fd);
                goto out;
        }

        maxoff = curoff;
        curoff = 0;

        for (cur_fd = 0; cur_fd < got_fd; cur_fd++) {
                int fd = tmpfd[cur_fd];
                socklen_t socklen;
                int val;
                int len;

                xfer_sock = calloc(1, sizeof(*xfer_sock));
                if (!xfer_sock) {
                        ha_warning("Failed to allocate memory in sock_get_old_sockets() !\n");
                        break;
                }
                xfer_sock->fd = -1;

                socklen = sizeof(xfer_sock->addr);
                if (getsockname(fd, (struct sockaddr *)&xfer_sock->addr, &socklen) != 0) {
                        ha_warning("Failed to get socket address\n");
                        ha_free(&xfer_sock);
                        continue;
                }

                if (curoff >= maxoff) {
                        ha_warning("Inconsistency while transferring sockets\n");
                        goto out;
                }

                len = tmpbuf[curoff++];
                if (len > 0) {
                        /* We have a namespace */
                        if (curoff + len > maxoff) {
                                ha_warning("Inconsistency while transferring sockets\n");
                                goto out;
                        }
                        xfer_sock->namespace = malloc(len + 1);
                        if (!xfer_sock->namespace) {
                                ha_warning("Failed to allocate memory while transferring sockets\n");
                                goto out;
                        }
                        memcpy(xfer_sock->namespace, &tmpbuf[curoff], len);
                        xfer_sock->namespace[len] = 0;
                        xfer_sock->ns_namelen = len;
                        curoff += len;
                }

                if (curoff >= maxoff) {
                        ha_warning("Inconsistency while transferring sockets\n");
                        goto out;
                }

                len = tmpbuf[curoff++];
                if (len > 0) {
                        /* We have an interface */
                        if (curoff + len > maxoff) {
                                ha_warning("Inconsistency while transferring sockets\n");
                                goto out;
                        }
                        xfer_sock->iface = malloc(len + 1);
                        if (!xfer_sock->iface) {
                                ha_warning("Failed to allocate memory while transferring sockets\n");
                                goto out;
                        }
                        memcpy(xfer_sock->iface, &tmpbuf[curoff], len);
                        xfer_sock->iface[len] = 0;
                        xfer_sock->if_namelen = len;
                        curoff += len;
                }

                if (curoff + sizeof(int) > maxoff) {
                        ha_warning("Inconsistency while transferring sockets\n");
                        goto out;
                }

                /* we used to have 32 bits of listener options here but we don't
                 * use them anymore.
                 */
                curoff += sizeof(int);

                /* determine the foreign status directly from the socket itself */
                if (sock_inet_is_foreign(fd, xfer_sock->addr.ss_family))
                        xfer_sock->options |= SOCK_XFER_OPT_FOREIGN;

                socklen = sizeof(val);
                if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &socklen) == 0 && val == SOCK_DGRAM)
                        xfer_sock->options |= SOCK_XFER_OPT_DGRAM;

#if defined(IPV6_V6ONLY)
                /* keep only the v6only flag depending on what's currently
                 * active on the socket, and always drop the v4v6 one.
                 */
                socklen = sizeof(val);
                if (xfer_sock->addr.ss_family == AF_INET6 &&
                    getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, &socklen) == 0 && val > 0)
                        xfer_sock->options |= SOCK_XFER_OPT_V6ONLY;
#endif

                xfer_sock->fd = fd;
                if (xfer_sock_list)
                        xfer_sock_list->prev = xfer_sock;
                xfer_sock->next = xfer_sock_list;
                xfer_sock->prev = NULL;
                xfer_sock_list = xfer_sock;
                xfer_sock = NULL;
        }

        ret2 = 0;
out:
        /* If we failed midway make sure to close the remaining
         * file descriptors
         */
        if (tmpfd != NULL && cur_fd < got_fd) {
                for (; cur_fd < got_fd; cur_fd++) {
                        close(tmpfd[cur_fd]);
                }
        }

        free(tmpbuf);
        free(tmpfd);
        free(cmsgbuf);

        if (sock != -1)
                close(sock);

        if (xfer_sock) {
                free(xfer_sock->namespace);
                free(xfer_sock->iface);
                if (xfer_sock->fd != -1)
                        close(xfer_sock->fd);
                free(xfer_sock);
        }
        return (ret2);
}

/* When binding the receivers, check if a socket has been sent to us by the
 * previous process that we could reuse, instead of creating a new one. Note
 * that some address family-specific options are checked on the listener and
 * on the socket. Typically for AF_INET and AF_INET6, we check for transparent
 * mode, and for AF_INET6 we also check for "v4v6" or "v6only". The reused
 * socket is automatically removed from the list so that it's not proposed
 * anymore.
 */
int sock_find_compatible_fd(const struct receiver *rx)
{
        struct xfer_sock_list *xfer_sock = xfer_sock_list;
        int options = 0;
        int if_namelen = 0;
        int ns_namelen = 0;
        int ret = -1;

        if (!rx->proto->fam->addrcmp)
                return -1;

        if (rx->proto->sock_type == SOCK_DGRAM)
                options |= SOCK_XFER_OPT_DGRAM;

        if (rx->settings->options & RX_O_FOREIGN)
                options |= SOCK_XFER_OPT_FOREIGN;

        if (rx->addr.ss_family == AF_INET6) {
                /* Prepare to match the v6only option against what we really want. Note
                 * that sadly the two options are not exclusive to each other and that
                 * v6only is stronger than v4v6.
                 */
                if ((rx->settings->options & RX_O_V6ONLY) ||
                    (sock_inet6_v6only_default && !(rx->settings->options & RX_O_V4V6)))
                        options |= SOCK_XFER_OPT_V6ONLY;
        }

        if (rx->settings->interface)
                if_namelen = strlen(rx->settings->interface);
#ifdef USE_NS
        if (rx->settings->netns)
                ns_namelen = rx->settings->netns->name_len;
#endif

        while (xfer_sock) {
                if ((options == xfer_sock->options) &&
                    (if_namelen == xfer_sock->if_namelen) &&
                    (ns_namelen == xfer_sock->ns_namelen) &&
                    (!if_namelen || strcmp(rx->settings->interface, xfer_sock->iface) == 0) &&
#ifdef USE_NS
                    (!ns_namelen || strcmp(rx->settings->netns->node.key, xfer_sock->namespace) == 0) &&
#endif
                    rx->proto->fam->addrcmp(&xfer_sock->addr, &rx->addr) == 0)
                        break;
                xfer_sock = xfer_sock->next;
        }

        if (xfer_sock != NULL) {
                ret = xfer_sock->fd;
                if (xfer_sock == xfer_sock_list)
                        xfer_sock_list = xfer_sock->next;
                if (xfer_sock->prev)
                        xfer_sock->prev->next = xfer_sock->next;
                if (xfer_sock->next)
                        xfer_sock->next->prev = xfer_sock->prev;
                free(xfer_sock->iface);
                free(xfer_sock->namespace);
                free(xfer_sock);
        }
        return ret;
}

/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible, negative if the socket is non-recoverable. The
 * rationale behind this is that inherited FDs may be broken and that shared
 * FDs might have been paused by another process.
 */
int sock_accepting_conn(const struct receiver *rx)
{
        int opt_val = 0;
        socklen_t opt_len = sizeof(opt_val);

        if (getsockopt(rx->fd, SOL_SOCKET, SO_ACCEPTCONN, &opt_val, &opt_len) == -1)
                return -1;

        return opt_val;
}

/* This is the FD handler IO callback for stream sockets configured for
 * accepting incoming connections. It's a pass-through to listener_accept()
 * which will iterate over the listener protocol's accept_conn() function.
 * The FD's owner must be a listener.
 */
void sock_accept_iocb(int fd)
{
        struct listener *l = fdtab[fd].owner;

        if (!l)
                return;

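        /* Consistency check: the master process must only accept on receivers
         * flagged RX_F_MWORKER, and workers only on receivers without it.
         */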
        BUG_ON(!!master != !!(l->rx.flags & RX_F_MWORKER));
        listener_accept(l);
}

/* This completes the initialization of connection <conn> by inserting its FD
 * into the fdtab, associating it with the regular connection handler. It will
 * be bound to the current thread only. This call cannot fail.
 */
void sock_conn_ctrl_init(struct connection *conn)
{
        fd_insert(conn->handle.fd, conn, sock_conn_iocb, tid_bit);
}

/* This completes the release of connection <conn> by removing its FD from the
 * fdtab and deleting it. The connection must not use the FD anymore past this
 * point. The FD may be modified in the connection.
 */
void sock_conn_ctrl_close(struct connection *conn)
{
        fd_delete(conn->handle.fd);
        conn->handle.fd = DEAD_FD_MAGIC;
}

/* This is the callback which is set when a connection establishment is pending
 * and we have nothing to send. It may update the FD polling status to indicate
 * !READY. It returns 0 if it fails in a fatal way or needs to poll to go
 * further, otherwise it returns non-zero and removes the CO_FL_WAIT_L4_CONN
 * flag from the connection's flags. In case of error, it sets CO_FL_ERROR and
 * leaves the error code in errno.
 */
int sock_conn_check(struct connection *conn)
{
        struct sockaddr_storage *addr;
        int fd = conn->handle.fd;

        if (conn->flags & CO_FL_ERROR)
                return 0;

        if (!conn_ctrl_ready(conn))
                return 0;

        if (!(conn->flags & CO_FL_WAIT_L4_CONN))
                return 1; /* strange we were called while ready */

        if (!fd_send_ready(fd))
                return 0;

        /* Here we have 2 cases :
         * - modern pollers, able to report ERR/HUP. If these ones return any
         *   of these flags then it's likely a failure, otherwise it possibly
         *   is a success (i.e. there may have been data received just before
         *   the error was reported).
         * - select, which doesn't report these and with which it's always
         *   necessary either to try connect() again or to check for SO_ERROR.
         * In order to simplify everything, we double-check using connect() as
         * soon as we meet either of these delicate situations. Note that
         * SO_ERROR would clear the error after reporting it!
         */
        if (cur_poller.flags & HAP_POLL_F_ERRHUP) {
                /* modern poller, able to report ERR/HUP */
                if ((fdtab[fd].ev & (FD_POLL_IN|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_IN)
                        goto done;
                if ((fdtab[fd].ev & (FD_POLL_OUT|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_OUT)
                        goto done;
                if (!(fdtab[fd].ev & (FD_POLL_ERR|FD_POLL_HUP)))
                        goto wait;
                /* error present, fall through common error check path */
        }

        /* Use connect() to check the state of the socket. This has the double
         * advantage of *not* clearing the error (so that health checks can
         * still use getsockopt(SO_ERROR)) and giving us the following info :
         * - error
         * - connecting (EALREADY, EINPROGRESS)
         * - connected (EISCONN, 0)
         */
        addr = conn->dst;
        if ((conn->flags & CO_FL_SOCKS4) && obj_type(conn->target) == OBJ_TYPE_SERVER)
                addr = &objt_server(conn->target)->socks4_addr;

        if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
                if (errno == EALREADY || errno == EINPROGRESS)
                        goto wait;

                if (errno && errno != EISCONN)
                        goto out_error;
        }

 done:
        /* The FD is ready now, we'll mark the connection as complete and
         * forward the event to the transport layer which will notify the
         * data layer.
         */
        conn->flags &= ~CO_FL_WAIT_L4_CONN;
        fd_may_send(fd);
        fd_cond_recv(fd);
        errno = 0; // make health checks happy
        return 1;

 out_error:
        /* Write error on the file descriptor. Report it to the connection
         * and disable polling on this FD.
         */
        fdtab[fd].linger_risk = 0;
        conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
        fd_stop_both(fd);
        return 0;

 wait:
        fd_cant_send(fd);
        fd_want_send(fd);
        return 0;
}

/* I/O callback for fd-based connections. It calls the read/write handlers
 * provided by the connection's sock_ops, which must be valid.
 */
void sock_conn_iocb(int fd)
{
        struct connection *conn = fdtab[fd].owner;
        unsigned int flags;
        int need_wake = 0;

        if (unlikely(!conn)) {
                activity[tid].conn_dead++;
                return;
        }

        flags = conn->flags & ~CO_FL_ERROR; /* ensure to call the wake handler upon error */

        if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) &&
            ((fd_send_ready(fd) && fd_send_active(fd)) ||
             (fd_recv_ready(fd) && fd_recv_active(fd)))) {
                /* Still waiting for a connection to establish and nothing was
                 * attempted yet to probe the connection. This will clear the
                 * CO_FL_WAIT_L4_CONN flag on success.
                 */
                if (!sock_conn_check(conn))
                        goto leave;
                need_wake = 1;
        }

        if (fd_send_ready(fd) && fd_send_active(fd)) {
                /* force reporting of activity by clearing the previous flags :
                 * we'll have at least ERROR or CONNECTED at the end of an I/O,
                 * both of which will be detected below.
                 */
                flags = 0;
                if (conn->subs && conn->subs->events & SUB_RETRY_SEND) {
                        need_wake = 0; // wake will be called after this I/O
                        tasklet_wakeup(conn->subs->tasklet);
                        conn->subs->events &= ~SUB_RETRY_SEND;
                        if (!conn->subs->events)
                                conn->subs = NULL;
                }
                fd_stop_send(fd);
        }

        /* The data transfer starts here and stops on error and handshakes. Note
         * that we must absolutely test conn->xprt at each step in case it suddenly
         * changes due to a quick unexpected close().
         */
        if (fd_recv_ready(fd) && fd_recv_active(fd)) {
                /* force reporting of activity by clearing the previous flags :
                 * we'll have at least ERROR or CONNECTED at the end of an I/O,
                 * both of which will be detected below.
                 */
                flags = 0;
                if (conn->subs && conn->subs->events & SUB_RETRY_RECV) {
                        need_wake = 0; // wake will be called after this I/O
                        tasklet_wakeup(conn->subs->tasklet);
                        conn->subs->events &= ~SUB_RETRY_RECV;
                        if (!conn->subs->events)
                                conn->subs = NULL;
                }
                fd_stop_recv(fd);
        }

 leave:
        /* we may have to finish installing a mux or to wake it up based on
         * what was just done above. It may kill the connection so we have to
         * be prepared not to use it anymore.
         */
        if (conn_notify_mux(conn, flags, need_wake) < 0)
                return;

        /* commit polling changes in case of error.
         * WT: it seems that the last case where this could still be relevant
         * is if a mux wake function above reports a connection error but does
         * not stop polling. Shouldn't we enforce this into the mux instead of
         * having to deal with this ?
         */
        if (unlikely(conn->flags & CO_FL_ERROR)) {
                if (conn_ctrl_ready(conn))
                        fd_stop_both(fd);
        }
}

/* Drains possibly pending incoming data on the file descriptor attached to the
 * connection. This is used to know whether we need to disable lingering on
 * close. Returns non-zero if it is safe to close without disabling lingering,
 * otherwise zero.
 */
int sock_drain(struct connection *conn)
{
        int turns = 2;
        int fd = conn->handle.fd;
        int len;

        if (fdtab[fd].ev & (FD_POLL_ERR|FD_POLL_HUP))
                goto shut;

        if (!fd_recv_ready(fd))
                return 0;

        /* no drain function defined, use the generic one */

        while (turns) {
#ifdef MSG_TRUNC_CLEARS_INPUT
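                /* MSG_TRUNC with a NULL buffer asks the kernel to discard the
                 * pending data without copying it anywhere; if the kernel
                 * rejects the NULL buffer with EFAULT, we fall back to a
                 * regular read into the trash buffer below.
                 */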
                len = recv(fd, NULL, INT_MAX, MSG_DONTWAIT | MSG_NOSIGNAL | MSG_TRUNC);
                if (len == -1 && errno == EFAULT)
#endif
                        len = recv(fd, trash.area, trash.size, MSG_DONTWAIT | MSG_NOSIGNAL);

                if (len == 0)
                        goto shut;

                if (len < 0) {
                        if (errno == EAGAIN) {
                                /* connection not closed yet */
                                fd_cant_recv(fd);
                                break;
                        }
                        if (errno == EINTR)  /* oops, try again */
                                continue;
                        /* other errors indicate a dead connection, fine. */
                        goto shut;
                }
                /* OK we read some data, let's try again once */
                turns--;
        }

        /* some data are still present, give up */
        return 0;

 shut:
        /* we're certain the connection was shut down */
        fdtab[fd].linger_risk = 0;
        return 1;
}

/* Checks the connection's FD for readiness of events <event_type>, which may
 * only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND. Those which are
 * ready are returned. The ones that are not ready are enabled. The caller is
 * expected to do what is needed to handle ready events and to deal with
 * subsequent wakeups caused by the requested events' readiness.
 */
int sock_check_events(struct connection *conn, int event_type)
{
        int ret = 0;

        if (event_type & SUB_RETRY_RECV) {
                if (fd_recv_ready(conn->handle.fd))
                        ret |= SUB_RETRY_RECV;
                else
                        fd_want_recv(conn->handle.fd);
        }

        if (event_type & SUB_RETRY_SEND) {
                if (fd_send_ready(conn->handle.fd))
                        ret |= SUB_RETRY_SEND;
                else
                        fd_want_send(conn->handle.fd);
        }

        return ret;
}

/* Ignore readiness events from the connection's FD for events of types
 * <event_type>, which may only be a combination of SUB_RETRY_RECV and
 * SUB_RETRY_SEND.
 */
void sock_ignore_events(struct connection *conn, int event_type)
{
        if (event_type & SUB_RETRY_RECV)
                fd_stop_recv(conn->handle.fd);

        if (event_type & SUB_RETRY_SEND)
                fd_stop_send(conn->handle.fd);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */