/*
 * Generic code for native (BSD-compatible) sockets
 *
 * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <net/if.h>

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/listener.h>
#include <haproxy/log.h>
#include <haproxy/namespace.h>
#include <haproxy/sock.h>
#include <haproxy/sock_inet.h>
#include <haproxy/tools.h>

#define SOCK_XFER_OPT_FOREIGN 0x000000001
#define SOCK_XFER_OPT_V6ONLY  0x000000002
#define SOCK_XFER_OPT_DGRAM   0x000000004

/* the list of remaining sockets transferred from an older process */
struct xfer_sock_list {
	int fd;
	int options;   /* socket options as SOCK_XFER_OPT_* */
	char *iface;
	char *namespace;
	int if_namelen;
	int ns_namelen;
	struct xfer_sock_list *prev;
	struct xfer_sock_list *next;
	struct sockaddr_storage addr;
};

static struct xfer_sock_list *xfer_sock_list;


/* Accept an incoming connection from listener <l>, and return it, as well as
 * a CO_AC_* status code into <status> if not null. Null is returned on error.
 * <l> must be a valid listener with a valid frontend.
 */
struct connection *sock_accept_conn(struct listener *l, int *status)
{
#ifdef USE_ACCEPT4
	static int accept4_broken;
#endif
	struct proxy *p = l->bind_conf->frontend;
	struct connection *conn = NULL;
	struct sockaddr_storage *addr = NULL;
	socklen_t laddr;
	int ret;
	int cfd;

	if (!sockaddr_alloc(&addr, NULL, 0))
		goto fail_addr;

	/* accept() will mark all accepted FDs O_NONBLOCK and the ones accepted
	 * in the master process as FD_CLOEXEC. It's not done for workers
	 * because 1) workers are not supposed to execute anything so there's
	 * no reason for uselessly slowing down everything, and 2) that would
	 * prevent us from implementing fd passing in the future.
	 */
#ifdef USE_ACCEPT4
	laddr = sizeof(*conn->src);

	/* only call accept4() if it's known to be safe, otherwise fallback to
	 * the legacy accept() + fcntl().
	 */
	if (unlikely(accept4_broken) ||
	    (((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
	                     SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == -1) &&
	     (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
	     (accept4_broken = 1)))
#endif
	{
		laddr = sizeof(*conn->src);
		if ((cfd = accept(l->rx.fd, (struct sockaddr*)addr, &laddr)) != -1) {
			fcntl(cfd, F_SETFL, O_NONBLOCK);
			if (master)
				fcntl(cfd, F_SETFD, FD_CLOEXEC);
		}
	}

	if (likely(cfd != -1)) {
		/* Perfect, the connection was accepted */
		conn = conn_new(&l->obj_type);
		if (!conn)
			goto fail_conn;

		conn->src = addr;
		conn->handle.fd = cfd;
		conn->flags |= CO_FL_ADDR_FROM_SET;
		ret = CO_AC_DONE;
		goto done;
	}

	/* error conditions below */
	sockaddr_free(&addr);

	switch (errno) {
	case EAGAIN:
		ret = CO_AC_DONE; /* nothing more to accept */
		if (fdtab[l->rx.fd].state & (FD_POLL_HUP|FD_POLL_ERR)) {
			/* the listening socket might have been disabled in a shared
			 * process and we're a collateral victim. We'll just pause for
			 * a while in case it comes back. In the meantime, we need to
			 * clear this sticky flag.
			 */
			_HA_ATOMIC_AND(&fdtab[l->rx.fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
			ret = CO_AC_PAUSE;
		}
		fd_cant_recv(l->rx.fd);
		break;

	case EINVAL:
		/* might be trying to accept on a shut fd (eg: soft stop) */
		ret = CO_AC_PAUSE;
		break;

	case EINTR:
	case ECONNABORTED:
		ret = CO_AC_RETRY;
		break;

	case ENFILE:
		if (p)
			send_log(p, LOG_EMERG,
			         "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
			         p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	case EMFILE:
		if (p)
			send_log(p, LOG_EMERG,
			         "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
			         p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	case ENOBUFS:
	case ENOMEM:
		if (p)
			send_log(p, LOG_EMERG,
			         "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
			         p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	default:
		/* unexpected result, let's give up and let other tasks run */
		ret = CO_AC_YIELD;
	}
 done:
	if (status)
		*status = ret;
	return conn;

 fail_conn:
	sockaddr_free(&addr);
	/* The accept call already succeeded by the time we try to allocate the
	 * connection, so we need to close it in case of failure. */
	close(cfd);
 fail_addr:
	ret = CO_AC_PAUSE;
	goto done;
}

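/* Illustrative sketch (not part of the original file): how a caller such as a
 * protocol's accept path might drive sock_accept_conn() and interpret the
 * CO_AC_* codes. This is a simplified assumption of what the listener-level
 * code does, not a copy of it; accept_one() is a hypothetical helper standing
 * in for the real per-connection processing.
 *
 *	void example_accept_loop(struct listener *l)
 *	{
 *		int st;
 *
 *		for (;;) {
 *			struct connection *conn = sock_accept_conn(l, &st);
 *
 *			if (conn)
 *				accept_one(conn);   // hand off to upper layers
 *			else if (st == CO_AC_RETRY)
 *				continue;           // transient error (EINTR, ECONNABORTED)
 *			else
 *				break;              // DONE / PAUSE / YIELD: stop for now
 *		}
 *	}
 */
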
/* Create a socket to connect to the server in conn->dst (which MUST be valid),
 * using the configured namespace if needed, or the one passed by the proxy
 * protocol if required to do so. It ultimately calls socket() or socketat()
 * and returns the FD or error code.
 */
int sock_create_server_socket(struct connection *conn)
{
	const struct netns_entry *ns = NULL;

#ifdef USE_NS
	if (objt_server(conn->target)) {
		if (__objt_server(conn->target)->flags & SRV_F_USE_NS_FROM_PP)
			ns = conn->proxy_netns;
		else
			ns = __objt_server(conn->target)->netns;
	}
#endif
	return my_socketat(ns, conn->dst->ss_family, SOCK_STREAM, 0);
}

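/* Illustrative sketch (not part of the original file): the typical shape of an
 * outgoing non-blocking connect using the FD returned above. It assumes
 * <conn>->dst was already set and is not the real protocol-level connect code.
 *
 *	int example_connect(struct connection *conn)
 *	{
 *		int fd = sock_create_server_socket(conn);
 *
 *		if (fd < 0)
 *			return -1;
 *		fcntl(fd, F_SETFL, O_NONBLOCK);
 *		if (connect(fd, (struct sockaddr *)conn->dst, get_addr_len(conn->dst)) == -1 &&
 *		    errno != EINPROGRESS && errno != EALREADY) {
 *			close(fd);
 *			return -1;
 *		}
 *		conn->handle.fd = fd;
 *		conn->flags |= CO_FL_WAIT_L4_CONN; // completion checked later by sock_conn_check()
 *		return 0;
 *	}
 */
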
/* Enables receiving on receiver <rx> once already bound. */
void sock_enable(struct receiver *rx)
{
	if (rx->flags & RX_F_BOUND)
		fd_want_recv_safe(rx->fd);
}

/* Disables receiving on receiver <rx> once already bound. */
void sock_disable(struct receiver *rx)
{
	if (rx->flags & RX_F_BOUND)
		fd_stop_recv(rx->fd);
}

/* stops, unbinds and possibly closes the FD associated with receiver rx */
void sock_unbind(struct receiver *rx)
{
	/* There are a number of situations where we prefer to keep the FD and
	 * not to close it (unless we're stopping, of course):
	 *   - worker process unbinding from a worker's non-suspendable FD (ABNS) => close
	 *   - worker process unbinding from a worker's FD with socket transfer enabled => keep
	 *   - master process unbinding from a master's inherited FD => keep
	 *   - master process unbinding from a master's FD => close
	 *   - master process unbinding from a worker's inherited FD => keep
	 *   - master process unbinding from a worker's FD => close
	 *   - worker process unbinding from a master's FD => close
	 *   - worker process unbinding from a worker's FD => close
	 */
	if (rx->flags & RX_F_BOUND)
		rx->proto->rx_disable(rx);

	if (!stopping && !master &&
	    !(rx->flags & RX_F_MWORKER) &&
	    !(rx->flags & RX_F_NON_SUSPENDABLE) &&
	    (global.tune.options & GTUNE_SOCKET_TRANSFER))
		return;

	if (!stopping && master &&
	    rx->flags & RX_F_INHERITED)
		return;

	rx->flags &= ~RX_F_BOUND;
	if (rx->fd != -1)
		fd_delete(rx->fd);
	rx->fd = -1;
}

/*
 * Retrieves the source address for the socket <fd>, with <dir> indicating
 * if we're a listener (=0) or an initiator (!=0). It returns 0 in case of
 * success, -1 in case of error. The socket's source address is stored in
 * <sa> for <salen> bytes.
 */
int sock_get_src(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
	if (dir)
		return getsockname(fd, sa, &salen);
	else
		return getpeername(fd, sa, &salen);
}

/*
 * Retrieves the original destination address for the socket <fd>, with <dir>
 * indicating if we're a listener (=0) or an initiator (!=0). It returns 0 in
 * case of success, -1 in case of error. The socket's destination address is
 * stored in <sa> for <salen> bytes.
 */
int sock_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
	if (dir)
		return getpeername(fd, sa, &salen);
	else
		return getsockname(fd, sa, &salen);
}

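/* Illustrative sketch (not part of the original file): retrieving both
 * addresses of an accepted connection (listener side, so dir=0) with the two
 * helpers above. Error handling is kept minimal on purpose.
 *
 *	struct sockaddr_storage src, dst;
 *
 *	if (sock_get_src(fd, (struct sockaddr *)&src, sizeof(src), 0) < 0 ||
 *	    sock_get_dst(fd, (struct sockaddr *)&dst, sizeof(dst), 0) < 0)
 *		return -1;
 *	// src now holds the client's address (getpeername), dst the local
 *	// address the client connected to (getsockname).
 */
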
/* Try to retrieve exported sockets from the worker at CLI <unixsocket>. They
 * will be placed into the xfer_sock_list for later use by function
 * sock_find_compatible_fd(). Returns 0 on success, -1 on failure.
 */
int sock_get_old_sockets(const char *unixsocket)
{
	char *cmsgbuf = NULL, *tmpbuf = NULL;
	int *tmpfd = NULL;
	struct sockaddr_un addr;
	struct cmsghdr *cmsg;
	struct msghdr msghdr;
	struct iovec iov;
	struct xfer_sock_list *xfer_sock = NULL;
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
	int sock = -1;
	int ret = -1;
	int ret2 = -1;
	int fd_nb;
	int got_fd = 0;
	int cur_fd = 0;
	size_t maxoff = 0, curoff = 0;

	memset(&msghdr, 0, sizeof(msghdr));
	cmsgbuf = malloc(CMSG_SPACE(sizeof(int)) * MAX_SEND_FD);
	if (!cmsgbuf) {
		ha_warning("Failed to allocate memory to send sockets\n");
		goto out;
	}

	sock = socket(PF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
		ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
		goto out;
	}

	strncpy(addr.sun_path, unixsocket, sizeof(addr.sun_path) - 1);
	addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
	addr.sun_family = PF_UNIX;

	ret = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0) {
		ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
		goto out;
	}

	setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv));
	iov.iov_base = &fd_nb;
	iov.iov_len = sizeof(fd_nb);
	msghdr.msg_iov = &iov;
	msghdr.msg_iovlen = 1;

	if (send(sock, "_getsocks\n", strlen("_getsocks\n"), 0) != strlen("_getsocks\n")) {
		ha_warning("Failed to get the number of sockets to be transferred !\n");
		goto out;
	}

	/* First, get the number of file descriptors to be received */
	if (recvmsg(sock, &msghdr, MSG_WAITALL) != sizeof(fd_nb)) {
		ha_warning("Failed to get the number of sockets to be transferred !\n");
		goto out;
	}

	if (fd_nb == 0) {
		ret2 = 0;
		goto out;
	}

	tmpbuf = malloc(fd_nb * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int)));
	if (tmpbuf == NULL) {
		ha_warning("Failed to allocate memory while receiving sockets\n");
		goto out;
	}

	tmpfd = malloc(fd_nb * sizeof(int));
	if (tmpfd == NULL) {
		ha_warning("Failed to allocate memory while receiving sockets\n");
		goto out;
	}

	msghdr.msg_control = cmsgbuf;
	msghdr.msg_controllen = CMSG_SPACE(sizeof(int)) * MAX_SEND_FD;
	iov.iov_len = MAX_SEND_FD * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int));

	do {
		int ret3;

		iov.iov_base = tmpbuf + curoff;

		ret = recvmsg(sock, &msghdr, 0);

		if (ret == -1 && errno == EINTR)
			continue;

		if (ret <= 0)
			break;

		/* Send an ack to let the sender know we got the sockets
		 * and it can send some more
		 */
		do {
			ret3 = send(sock, &got_fd, sizeof(got_fd), 0);
		} while (ret3 == -1 && errno == EINTR);

		for (cmsg = CMSG_FIRSTHDR(&msghdr); cmsg != NULL; cmsg = CMSG_NXTHDR(&msghdr, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
				size_t totlen = cmsg->cmsg_len - CMSG_LEN(0);

				if (totlen / sizeof(int) + got_fd > fd_nb) {
					ha_warning("Got too many sockets!\n");
					goto out;
				}

				/*
				 * Be paranoid and use memcpy() to avoid any
				 * potential alignment issue.
				 */
				memcpy(&tmpfd[got_fd], CMSG_DATA(cmsg), totlen);
				got_fd += totlen / sizeof(int);
			}
		}
		curoff += ret;
	} while (got_fd < fd_nb);

	if (got_fd != fd_nb) {
		ha_warning("We didn't get the expected number of sockets (expecting %d got %d)\n",
		           fd_nb, got_fd);
		goto out;
	}

	maxoff = curoff;
	curoff = 0;

	for (cur_fd = 0; cur_fd < got_fd; cur_fd++) {
		int fd = tmpfd[cur_fd];
		socklen_t socklen;
		int val;
		int len;

		xfer_sock = calloc(1, sizeof(*xfer_sock));
		if (!xfer_sock) {
			ha_warning("Failed to allocate memory in get_old_sockets() !\n");
			break;
		}
		xfer_sock->fd = -1;

		socklen = sizeof(xfer_sock->addr);
		if (getsockname(fd, (struct sockaddr *)&xfer_sock->addr, &socklen) != 0) {
			ha_warning("Failed to get socket address\n");
			ha_free(&xfer_sock);
			continue;
		}

		if (curoff >= maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		len = tmpbuf[curoff++];
		if (len > 0) {
			/* We have a namespace */
			if (curoff + len > maxoff) {
				ha_warning("Inconsistency while transferring sockets\n");
				goto out;
			}
			xfer_sock->namespace = malloc(len + 1);
			if (!xfer_sock->namespace) {
				ha_warning("Failed to allocate memory while transferring sockets\n");
				goto out;
			}
			memcpy(xfer_sock->namespace, &tmpbuf[curoff], len);
			xfer_sock->namespace[len] = 0;
			xfer_sock->ns_namelen = len;
			curoff += len;
		}

		if (curoff >= maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		len = tmpbuf[curoff++];
		if (len > 0) {
			/* We have an interface */
			if (curoff + len > maxoff) {
				ha_warning("Inconsistency while transferring sockets\n");
				goto out;
			}
			xfer_sock->iface = malloc(len + 1);
			if (!xfer_sock->iface) {
				ha_warning("Failed to allocate memory while transferring sockets\n");
				goto out;
			}
			memcpy(xfer_sock->iface, &tmpbuf[curoff], len);
			xfer_sock->iface[len] = 0;
			xfer_sock->if_namelen = len;
			curoff += len;
		}

		if (curoff + sizeof(int) > maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		/* we used to have 32 bits of listener options here but we don't
		 * use them anymore.
		 */
		curoff += sizeof(int);

		/* determine the foreign status directly from the socket itself */
		if (sock_inet_is_foreign(fd, xfer_sock->addr.ss_family))
			xfer_sock->options |= SOCK_XFER_OPT_FOREIGN;

		socklen = sizeof(val);
		if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &socklen) == 0 && val == SOCK_DGRAM)
			xfer_sock->options |= SOCK_XFER_OPT_DGRAM;

#if defined(IPV6_V6ONLY)
		/* keep only the v6only flag depending on what's currently
		 * active on the socket, and always drop the v4v6 one.
		 */
		socklen = sizeof(val);
		if (xfer_sock->addr.ss_family == AF_INET6 &&
		    getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, &socklen) == 0 && val > 0)
			xfer_sock->options |= SOCK_XFER_OPT_V6ONLY;
#endif

		xfer_sock->fd = fd;
		if (xfer_sock_list)
			xfer_sock_list->prev = xfer_sock;
		xfer_sock->next = xfer_sock_list;
		xfer_sock->prev = NULL;
		xfer_sock_list = xfer_sock;
		xfer_sock = NULL;
	}

	ret2 = 0;
out:
	/* If we failed midway make sure to close the remaining
	 * file descriptors
	 */
	if (tmpfd != NULL && cur_fd < got_fd) {
		for (; cur_fd < got_fd; cur_fd++) {
			close(tmpfd[cur_fd]);
		}
	}

	free(tmpbuf);
	free(tmpfd);
	free(cmsgbuf);

	if (sock != -1)
		close(sock);

	if (xfer_sock) {
		free(xfer_sock->namespace);
		free(xfer_sock->iface);
		if (xfer_sock->fd != -1)
			close(xfer_sock->fd);
		free(xfer_sock);
	}
	return (ret2);
}

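/* Illustrative sketch (not part of the original file): the per-socket payload
 * layout that the parsing loop above expects in <tmpbuf>, i.e. what the old
 * process is assumed to send along with each SCM_RIGHTS file descriptor:
 *
 *	offset 0       : 1 byte  - namespace name length (0 if none)
 *	offset 1       : N bytes - namespace name (no trailing NUL)
 *	offset 1+N     : 1 byte  - interface name length (0 if none)
 *	offset 2+N     : M bytes - interface name (no trailing NUL)
 *	offset 2+N+M   : sizeof(int) bytes - legacy listener options, now ignored
 *
 * A hypothetical sender-side encoder for one socket could thus look like:
 *
 *	size_t encode_one(char *buf, const char *ns, const char *itf)
 *	{
 *		size_t off = 0;
 *		int opts = 0;                        // legacy field, ignored by the receiver
 *		size_t nslen = ns ? strlen(ns) : 0;
 *		size_t iflen = itf ? strlen(itf) : 0;
 *
 *		buf[off++] = (char)nslen;
 *		if (nslen) {
 *			memcpy(buf + off, ns, nslen);
 *			off += nslen;
 *		}
 *		buf[off++] = (char)iflen;
 *		if (iflen) {
 *			memcpy(buf + off, itf, iflen);
 *			off += iflen;
 *		}
 *		memcpy(buf + off, &opts, sizeof(opts));
 *		off += sizeof(opts);
 *		return off;
 *	}
 */
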
/* When binding the receivers, check if a socket has been sent to us by the
 * previous process that we could reuse, instead of creating a new one. Note
 * that some address family-specific options are checked on the listener and
 * on the socket. Typically for AF_INET and AF_INET6, we check for transparent
 * mode, and for AF_INET6 we also check for "v4v6" or "v6only". The reused
 * socket is automatically removed from the list so that it's not proposed
 * anymore.
 */
int sock_find_compatible_fd(const struct receiver *rx)
{
	struct xfer_sock_list *xfer_sock = xfer_sock_list;
	int options = 0;
	int if_namelen = 0;
	int ns_namelen = 0;
	int ret = -1;

	if (!rx->proto->fam->addrcmp)
		return -1;

	if (rx->proto->sock_type == SOCK_DGRAM)
		options |= SOCK_XFER_OPT_DGRAM;

	if (rx->settings->options & RX_O_FOREIGN)
		options |= SOCK_XFER_OPT_FOREIGN;

	if (rx->addr.ss_family == AF_INET6) {
		/* Prepare to match the v6only option against what we really want. Note
		 * that sadly the two options are not exclusive to each other and that
		 * v6only is stronger than v4v6.
		 */
		if ((rx->settings->options & RX_O_V6ONLY) ||
		    (sock_inet6_v6only_default && !(rx->settings->options & RX_O_V4V6)))
			options |= SOCK_XFER_OPT_V6ONLY;
	}

	if (rx->settings->interface)
		if_namelen = strlen(rx->settings->interface);
#ifdef USE_NS
	if (rx->settings->netns)
		ns_namelen = rx->settings->netns->name_len;
#endif

	while (xfer_sock) {
		if ((options == xfer_sock->options) &&
		    (if_namelen == xfer_sock->if_namelen) &&
		    (ns_namelen == xfer_sock->ns_namelen) &&
		    (!if_namelen || strcmp(rx->settings->interface, xfer_sock->iface) == 0) &&
#ifdef USE_NS
		    (!ns_namelen || strcmp(rx->settings->netns->node.key, xfer_sock->namespace) == 0) &&
#endif
		    rx->proto->fam->addrcmp(&xfer_sock->addr, &rx->addr) == 0)
			break;
		xfer_sock = xfer_sock->next;
	}

	if (xfer_sock != NULL) {
		ret = xfer_sock->fd;
		if (xfer_sock == xfer_sock_list)
			xfer_sock_list = xfer_sock->next;
		if (xfer_sock->prev)
			xfer_sock->prev->next = xfer_sock->next;
		if (xfer_sock->next)
			xfer_sock->next->prev = xfer_sock->prev;
		free(xfer_sock->iface);
		free(xfer_sock->namespace);
		free(xfer_sock);
	}
	return ret;
}

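/* Illustrative sketch (not part of the original file): how a bind-time caller
 * might try to reuse a transferred FD before creating a fresh socket. The
 * create_and_bind() helper is hypothetical and stands for the receiver
 * protocol's normal socket()/bind()/listen() path.
 *
 *	int fd = sock_find_compatible_fd(rx);
 *
 *	if (fd != -1) {
 *		// reuse: the socket from the old process is already bound
 *		// (and listening for stream sockets), so the usual creation
 *		// steps can be skipped
 *		rx->fd = fd;
 *	} else {
 *		rx->fd = create_and_bind(rx);   // hypothetical normal path
 *	}
 */
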
/* After all protocols are bound, there may remain some old sockets that have
 * been removed between the previous config and the new one. These must be
 * dropped, otherwise they will remain open and may prevent a service from
 * restarting.
 */
void sock_drop_unused_old_sockets()
{
	while (xfer_sock_list != NULL) {
		struct xfer_sock_list *tmpxfer = xfer_sock_list->next;

		close(xfer_sock_list->fd);
		free(xfer_sock_list->iface);
		free(xfer_sock_list->namespace);
		free(xfer_sock_list);
		xfer_sock_list = tmpxfer;
	}
}

/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible, negative if the socket is non-recoverable. The
 * rationale behind this is that inherited FDs may be broken and that shared
 * FDs might have been paused by another process.
 */
int sock_accepting_conn(const struct receiver *rx)
{
	int opt_val = 0;
	socklen_t opt_len = sizeof(opt_val);

	if (getsockopt(rx->fd, SOL_SOCKET, SO_ACCEPTCONN, &opt_val, &opt_len) == -1)
		return -1;

	return opt_val;
}

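/* Illustrative sketch (not part of the original file): one plausible use of
 * sock_accepting_conn() when taking over an inherited or shared listening FD.
 * The backlog value and the rebind() helper are hypothetical.
 *
 *	int st = sock_accepting_conn(rx);
 *
 *	if (st > 0) {
 *		// SO_ACCEPTCONN is set: the socket is still listening, keep it
 *	} else if (st == 0) {
 *		// bound but not listening (e.g. paused elsewhere): try to resume
 *		if (listen(rx->fd, backlog) != 0)
 *			rebind(rx);
 *	} else {
 *		// getsockopt() failed: the FD is unusable, rebind from scratch
 *		rebind(rx);
 *	}
 */
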
/* This is the FD handler IO callback for stream sockets configured for
 * accepting incoming connections. It's a pass-through to listener_accept()
 * which will iterate over the listener protocol's accept_conn() function.
 * The FD's owner must be a listener.
 */
void sock_accept_iocb(int fd)
{
	struct listener *l = fdtab[fd].owner;

	if (!l)
		return;

	BUG_ON(!!master != !!(l->rx.flags & RX_F_MWORKER));
	listener_accept(l);
}

/* This completes the initialization of connection <conn> by inserting its FD
 * into the fdtab, associating it with the regular connection handler. It will
 * be bound to the current thread only. This call cannot fail.
 */
void sock_conn_ctrl_init(struct connection *conn)
{
	fd_insert(conn->handle.fd, conn, sock_conn_iocb, tid_bit);
}

/* This completes the release of connection <conn> by removing its FD from the
 * fdtab and deleting it. The connection must not use the FD anymore past this
 * point. The FD may be modified in the connection.
 */
void sock_conn_ctrl_close(struct connection *conn)
{
	fd_delete(conn->handle.fd);
	conn->handle.fd = DEAD_FD_MAGIC;
}

/* This is the callback which is set when a connection establishment is pending
 * and we have nothing to send. It may update the FD polling status to indicate
 * !READY. It returns 0 if it fails in a fatal way or needs to poll to go
 * further, otherwise it returns non-zero and removes the CO_FL_WAIT_L4_CONN
 * flag from the connection's flags. In case of error, it sets CO_FL_ERROR and
 * leaves the error code in errno.
 */
int sock_conn_check(struct connection *conn)
{
	struct sockaddr_storage *addr;
	int fd = conn->handle.fd;

	if (conn->flags & CO_FL_ERROR)
		return 0;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!(conn->flags & CO_FL_WAIT_L4_CONN))
		return 1; /* strange we were called while ready */

	if (!fd_send_ready(fd) && !(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
		return 0;

	/* Here we have 2 cases :
	 *   - modern pollers, able to report ERR/HUP. If they report any of
	 *     these flags then it's likely a failure, otherwise it is possibly
	 *     a success (i.e. there may have been data received just before
	 *     the error was reported).
	 *   - select, which doesn't report these and with which it's always
	 *     necessary either to try connect() again or to check for SO_ERROR.
	 * In order to simplify everything, we double-check using connect() as
	 * soon as we meet either of these delicate situations. Note that
	 * SO_ERROR would clear the error after reporting it!
	 */
	if (cur_poller.flags & HAP_POLL_F_ERRHUP) {
		/* modern poller, able to report ERR/HUP */
		if ((fdtab[fd].state & (FD_POLL_IN|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_IN)
			goto done;
		if ((fdtab[fd].state & (FD_POLL_OUT|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_OUT)
			goto done;
		if (!(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
			goto wait;
		/* error present, fall through common error check path */
	}

	/* Use connect() to check the state of the socket. This has the double
	 * advantage of *not* clearing the error (so that health checks can
	 * still use getsockopt(SO_ERROR)) and giving us the following info :
	 *   - error
	 *   - connecting (EALREADY, EINPROGRESS)
	 *   - connected (EISCONN, 0)
	 */
	addr = conn->dst;
	if ((conn->flags & CO_FL_SOCKS4) && obj_type(conn->target) == OBJ_TYPE_SERVER)
		addr = &objt_server(conn->target)->socks4_addr;

	if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
		if (errno == EALREADY || errno == EINPROGRESS)
			goto wait;

		if (errno && errno != EISCONN)
			goto out_error;
	}

 done:
	/* The FD is ready now, we'll mark the connection as complete and
	 * forward the event to the transport layer which will notify the
	 * data layer.
	 */
	conn->flags &= ~CO_FL_WAIT_L4_CONN;
	fd_may_send(fd);
	fd_cond_recv(fd);
	errno = 0; // make health checks happy
	return 1;

 out_error:
	/* Write error on the file descriptor. Report it to the connection
	 * and disable polling on this FD.
	 */
	conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
	fd_stop_both(fd);
	return 0;

 wait:
	fd_cant_send(fd);
	fd_want_send(fd);
	return 0;
}

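/* Illustrative sketch (not part of the original file): the connect()-based
 * completion check used above, shown in isolation on a plain non-blocking
 * socket. Calling connect() again reports a pending error without clearing it,
 * unlike getsockopt(SO_ERROR), which is why this file prefers it.
 *
 *	// Returns 1 when established, 0 when still in progress, -1 on error.
 *	int example_connect_status(int fd, const struct sockaddr *sa, socklen_t len)
 *	{
 *		if (connect(fd, sa, len) == 0 || errno == EISCONN)
 *			return 1;
 *		if (errno == EALREADY || errno == EINPROGRESS)
 *			return 0;
 *		return -1;
 *	}
 */
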
/* I/O callback for fd-based connections. It calls the read/write handlers
 * provided by the connection's sock_ops, which must be valid.
 */
void sock_conn_iocb(int fd)
{
	struct connection *conn = fdtab[fd].owner;
	unsigned int flags;
	int need_wake = 0;

	if (unlikely(!conn)) {
		activity[tid].conn_dead++;
		return;
	}

	flags = conn->flags & ~CO_FL_ERROR; /* ensure to call the wake handler upon error */

	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) &&
	    ((fd_send_ready(fd) && fd_send_active(fd)) ||
	     (fd_recv_ready(fd) && fd_recv_active(fd)))) {
		/* Still waiting for a connection to establish and nothing was
		 * attempted yet to probe the connection. This will clear the
		 * CO_FL_WAIT_L4_CONN flag on success.
		 */
		if (!sock_conn_check(conn))
			goto leave;
		need_wake = 1;
	}

	if (fd_send_ready(fd) && fd_send_active(fd)) {
		/* force reporting of activity by clearing the previous flags :
		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
		 * both of which will be detected below.
		 */
		flags = 0;
		if (conn->subs && conn->subs->events & SUB_RETRY_SEND) {
			need_wake = 0; // wake will be called after this I/O
			tasklet_wakeup(conn->subs->tasklet);
			conn->subs->events &= ~SUB_RETRY_SEND;
			if (!conn->subs->events)
				conn->subs = NULL;
		}
		fd_stop_send(fd);
	}

	/* The data transfer starts here and stops on error and handshakes. Note
	 * that we must absolutely test conn->xprt at each step in case it suddenly
	 * changes due to a quick unexpected close().
	 */
	if (fd_recv_ready(fd) && fd_recv_active(fd)) {
		/* force reporting of activity by clearing the previous flags :
		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
		 * both of which will be detected below.
		 */
		flags = 0;
		if (conn->subs && conn->subs->events & SUB_RETRY_RECV) {
			need_wake = 0; // wake will be called after this I/O
			tasklet_wakeup(conn->subs->tasklet);
			conn->subs->events &= ~SUB_RETRY_RECV;
			if (!conn->subs->events)
				conn->subs = NULL;
		}
		fd_stop_recv(fd);
	}

 leave:
	/* we may have to finish installing a mux or to wake it up based on
	 * what was just done above. It may kill the connection so we have to
	 * be prepared not to use it anymore.
	 */
	if (conn_notify_mux(conn, flags, need_wake) < 0)
		return;

	/* commit polling changes in case of error.
	 * WT: it seems that the last case where this could still be relevant
	 * is if a mux wake function above reports a connection error but does
	 * not stop polling. Shouldn't we enforce this into the mux instead of
	 * having to deal with this ?
	 */
	if (unlikely(conn->flags & CO_FL_ERROR)) {
		if (conn_ctrl_ready(conn))
			fd_stop_both(fd);

		if (conn->subs) {
			tasklet_wakeup(conn->subs->tasklet);
			if (!conn->subs->events)
				conn->subs = NULL;
		}
	}
}

/* Drains possibly pending incoming data on the file descriptor attached to the
 * connection. This is used to know whether we need to disable lingering on
 * close. Returns non-zero if it is safe to close without disabling lingering,
 * otherwise zero.
 */
int sock_drain(struct connection *conn)
{
	int turns = 2;
	int fd = conn->handle.fd;
	int len;

	if (fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP))
		goto shut;

	if (!(conn->flags & CO_FL_WANT_DRAIN) && !fd_recv_ready(fd))
		return 0;

	/* no drain function defined, use the generic one */

	while (turns) {
#ifdef MSG_TRUNC_CLEARS_INPUT
		len = recv(fd, NULL, INT_MAX, MSG_DONTWAIT | MSG_NOSIGNAL | MSG_TRUNC);
		if (len == -1 && errno == EFAULT)
#endif
			len = recv(fd, trash.area, trash.size, MSG_DONTWAIT | MSG_NOSIGNAL);

		if (len == 0)
			goto shut;

		if (len < 0) {
			if (errno == EAGAIN) {
				/* connection not closed yet */
				fd_cant_recv(fd);
				break;
			}
			if (errno == EINTR)  /* oops, try again */
				continue;
			/* other errors indicate a dead connection, fine. */
			goto shut;
		}
		/* OK we read some data, let's try again once */
		turns--;
	}

	/* some data are still present, give up */
	return 0;

 shut:
	/* we're certain the connection was shut down */
	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
	return 1;
}

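/* Illustrative sketch (not part of the original file): one plausible way a
 * close path could use sock_drain() to decide whether lingering should be
 * disabled before close(). With SO_LINGER set to zero, close() emits an RST
 * right away, which is mainly useful when unread input would otherwise cause
 * one later at an uncontrolled moment.
 *
 *	void example_close(struct connection *conn)
 *	{
 *		if (!sock_drain(conn)) {
 *			// unread data may remain: reset immediately rather than
 *			// letting the kernel do it after close()
 *			struct linger nolinger = { .l_onoff = 1, .l_linger = 0 };
 *
 *			setsockopt(conn->handle.fd, SOL_SOCKET, SO_LINGER,
 *			           &nolinger, sizeof(nolinger));
 *		}
 *		close(conn->handle.fd);
 *	}
 */
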
/* Checks the connection's FD for readiness of events <event_type>, which may
 * only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND. Those which are
 * ready are returned. The ones that are not ready are enabled. The caller is
 * expected to do what is needed to handle ready events and to deal with
 * subsequent wakeups caused by the requested events' readiness.
 */
int sock_check_events(struct connection *conn, int event_type)
{
	int ret = 0;

	if (event_type & SUB_RETRY_RECV) {
		if (fd_recv_ready(conn->handle.fd))
			ret |= SUB_RETRY_RECV;
		else
			fd_want_recv(conn->handle.fd);
	}

	if (event_type & SUB_RETRY_SEND) {
		if (fd_send_ready(conn->handle.fd))
			ret |= SUB_RETRY_SEND;
		else
			fd_want_send(conn->handle.fd);
	}

	return ret;
}

/* Ignore readiness events from connection's FD for events of types <event_type>
 * which may only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND.
 */
void sock_ignore_events(struct connection *conn, int event_type)
{
	if (event_type & SUB_RETRY_RECV)
		fd_stop_recv(conn->handle.fd);

	if (event_type & SUB_RETRY_SEND)
		fd_stop_send(conn->handle.fd);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */