/*
 * Generic code for native (BSD-compatible) sockets
 *
 * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <net/if.h>

#include <haproxy/api.h>
#include <haproxy/activity.h>
#include <haproxy/connection.h>
#include <haproxy/listener.h>
#include <haproxy/log.h>
#include <haproxy/namespace.h>
#include <haproxy/proto_sockpair.h>
#include <haproxy/sock.h>
#include <haproxy/sock_inet.h>
#include <haproxy/tools.h>

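/* options describing a socket inherited from a previous process, as stored in
 * xfer_sock_list->options below
 */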
#define SOCK_XFER_OPT_FOREIGN 0x000000001
#define SOCK_XFER_OPT_V6ONLY  0x000000002
#define SOCK_XFER_OPT_DGRAM   0x000000004

/* the list of remaining sockets transferred from an older process */
struct xfer_sock_list {
	int fd;
	int options;    /* socket options as SOCK_XFER_OPT_* */
	char *iface;
	char *namespace;
	int if_namelen;
	int ns_namelen;
	struct xfer_sock_list *prev;
	struct xfer_sock_list *next;
	struct sockaddr_storage addr;
};

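/* head of the list above, filled by sock_get_old_sockets(), consumed by
 * sock_find_compatible_fd() and sock_drop_unused_old_sockets()
 */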
static struct xfer_sock_list *xfer_sock_list;

/* Accept an incoming connection from listener <l>, and return it, as well as
 * a CO_AC_* status code into <status> if not null. Null is returned on error.
 * <l> must be a valid listener with a valid frontend.
 */
struct connection *sock_accept_conn(struct listener *l, int *status)
{
#ifdef USE_ACCEPT4
	static int accept4_broken;
#endif
	struct proxy *p = l->bind_conf->frontend;
	struct connection *conn = NULL;
	struct sockaddr_storage *addr = NULL;
	socklen_t laddr;
	int ret;
	int cfd;

	if (!sockaddr_alloc(&addr, NULL, 0))
		goto fail_addr;

	/* accept() will mark all accepted FDs O_NONBLOCK and the ones accepted
	 * in the master process as FD_CLOEXEC. It's not done for workers
	 * because 1) workers are not supposed to execute anything so there's
	 * no reason for uselessly slowing down everything, and 2) that would
	 * prevent us from implementing fd passing in the future.
	 */
#ifdef USE_ACCEPT4
	laddr = sizeof(*conn->src);

	/* only call accept4() if it's known to be safe, otherwise fall back
	 * to the legacy accept() + fcntl().
	 */
	if (unlikely(accept4_broken) ||
	    (((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
	                     SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == -1) &&
	     (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
	     ((accept4_broken = 1))))
#endif
	{
		laddr = sizeof(*conn->src);
		if ((cfd = accept(l->rx.fd, (struct sockaddr*)addr, &laddr)) != -1) {
			fd_set_nonblock(cfd);
			if (master)
				fd_set_cloexec(cfd);
		}
	}

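	/* at this point <cfd> is either a valid accepted FD already made
	 * non-blocking (and close-on-exec in the master), or -1 with errno
	 * set by accept()/accept4()
	 */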
	if (likely(cfd != -1)) {
		if (unlikely(cfd >= global.maxsock)) {
			send_log(p, LOG_EMERG,
				 "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
				 p->id);
			goto fail_conn;
		}

		/* Perfect, the connection was accepted */
		conn = conn_new(&l->obj_type);
		if (!conn)
			goto fail_conn;

		conn->src = addr;
		conn->handle.fd = cfd;
		ret = CO_AC_DONE;
		goto done;
	}

	/* error conditions below */
	sockaddr_free(&addr);

	switch (errno) {
#if defined(EWOULDBLOCK) && defined(EAGAIN) && EWOULDBLOCK != EAGAIN
	case EWOULDBLOCK:
#endif
	case EAGAIN:
		ret = CO_AC_DONE; /* nothing more to accept */
		if (fdtab[l->rx.fd].state & (FD_POLL_HUP|FD_POLL_ERR)) {
			/* the listening socket might have been disabled in a shared
			 * process and we're a collateral victim. We'll just pause for
			 * a while in case it comes back. In the mean time, we need to
			 * clear this sticky flag.
			 */
			_HA_ATOMIC_AND(&fdtab[l->rx.fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
			ret = CO_AC_PAUSE;
		}
		fd_cant_recv(l->rx.fd);
		break;

	case EINVAL:
		/* might be trying to accept on a shut fd (eg: soft stop) */
		ret = CO_AC_PAUSE;
		break;

	case EINTR:
	case ECONNABORTED:
		ret = CO_AC_RETRY;
		break;

	case ENFILE:
		if (p)
			send_log(p, LOG_EMERG,
				 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
				 p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	case EMFILE:
		if (p)
			send_log(p, LOG_EMERG,
				 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
				 p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	case ENOBUFS:
	case ENOMEM:
		if (p)
			send_log(p, LOG_EMERG,
				 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
				 p->id, global.maxsock);
		ret = CO_AC_PAUSE;
		break;

	default:
		/* unexpected result, let's give up and let other tasks run */
		ret = CO_AC_YIELD;
	}
 done:
	if (status)
		*status = ret;
	return conn;

 fail_conn:
	sockaddr_free(&addr);
	/* The accept call already succeeded by the time we try to allocate the connection,
	 * we need to close it in case of failure. */
	close(cfd);
 fail_addr:
	ret = CO_AC_PAUSE;
	goto done;
}

/* Create a socket to connect to the server in conn->dst (which MUST be valid),
 * using the configured namespace if needed, or the one passed by the proxy
 * protocol if required to do so. It ultimately calls socket() or socketat()
 * and returns the FD or error code.
 */
int sock_create_server_socket(struct connection *conn)
{
	const struct netns_entry *ns = NULL;

#ifdef USE_NS
	if (objt_server(conn->target)) {
		if (__objt_server(conn->target)->flags & SRV_F_USE_NS_FROM_PP)
			ns = conn->proxy_netns;
		else
			ns = __objt_server(conn->target)->netns;
	}
#endif
	return my_socketat(ns, conn->dst->ss_family, SOCK_STREAM, 0);
}

/* Enables receiving on receiver <rx> once already bound. */
void sock_enable(struct receiver *rx)
{
	if (rx->flags & RX_F_BOUND)
		fd_want_recv_safe(rx->fd);
}

/* Disables receiving on receiver <rx> once already bound. */
void sock_disable(struct receiver *rx)
{
	if (rx->flags & RX_F_BOUND)
		fd_stop_recv(rx->fd);
}

/* stops, unbinds and possibly closes the FD associated with receiver rx */
void sock_unbind(struct receiver *rx)
{
	/* There are a number of situations where we prefer to keep the FD and
	 * not to close it (unless we're stopping, of course):
	 *   - worker process unbinding from a worker's FD with socket transfer enabled => keep
	 *   - master process unbinding from a master's inherited FD => keep
	 *   - master process unbinding from a master's FD => close
	 *   - master process unbinding from a worker's inherited FD => keep
	 *   - master process unbinding from a worker's FD => close
	 *   - worker process unbinding from a master's FD => close
	 *   - worker process unbinding from a worker's FD => close
	 */
	if (rx->flags & RX_F_BOUND)
		rx->proto->rx_disable(rx);

	if (!stopping && !master &&
	    !(rx->flags & RX_F_MWORKER) &&
	    (global.tune.options & GTUNE_SOCKET_TRANSFER))
		return;

	if (!stopping && master &&
	    rx->flags & RX_F_INHERITED)
		return;

	rx->flags &= ~RX_F_BOUND;
	if (rx->fd != -1)
		fd_delete(rx->fd);
	rx->fd = -1;
}

/*
 * Retrieves the source address for the socket <fd>, with <dir> indicating
 * if we're a listener (=0) or an initiator (!=0). It returns 0 in case of
 * success, -1 in case of error. The socket's source address is stored in
 * <sa> for <salen> bytes.
 */
int sock_get_src(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
	if (dir)
		return getsockname(fd, sa, &salen);
	else
		return getpeername(fd, sa, &salen);
}

/*
 * Retrieves the original destination address for the socket <fd>, with <dir>
 * indicating if we're a listener (=0) or an initiator (!=0). It returns 0 in
 * case of success, -1 in case of error. The socket's destination address is
 * stored in <sa> for <salen> bytes.
 */
int sock_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir)
{
	if (dir)
		return getpeername(fd, sa, &salen);
	else
		return getsockname(fd, sa, &salen);
}

/* Try to retrieve exported sockets from worker at CLI <unixsocket>. These
 * ones will be placed into the xfer_sock_list for later use by function
 * sock_find_compatible_fd(). Returns 0 on success, -1 on failure.
 */
int sock_get_old_sockets(const char *unixsocket)
{
	char *cmsgbuf = NULL, *tmpbuf = NULL;
	int *tmpfd = NULL;
	struct sockaddr_un addr;
	struct cmsghdr *cmsg;
	struct msghdr msghdr;
	struct iovec iov;
	struct xfer_sock_list *xfer_sock = NULL;
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
	int sock = -1;
	int ret = -1;
	int ret2 = -1;
	int fd_nb;
	int got_fd = 0;
	int cur_fd = 0;
	size_t maxoff = 0, curoff = 0;

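	/* <unixsocket> may either be the path to the old process' CLI socket,
	 * or "sockpair@<fd>" when the sockets are exchanged over a socketpair
	 * in master-worker mode
	 */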
	if (strncmp("sockpair@", unixsocket, strlen("sockpair@")) == 0) {
		/* sockpair for master-worker usage */
		int sv[2];
		int dst_fd;

		dst_fd = strtoll(unixsocket + strlen("sockpair@"), NULL, 0);

		if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
			ha_warning("socketpair(): Cannot create socketpair. Giving up.\n");
			goto out;
		}

		if (send_fd_uxst(dst_fd, sv[0]) == -1) {
			ha_alert("socketpair: Cannot transfer the fd %d over sockpair@%d. Giving up.\n", sv[0], dst_fd);
			close(sv[0]);
			close(sv[1]);
			goto out;
		}

		close(sv[0]); /* we don't need this side anymore */
		sock = sv[1];

	} else {
		/* Unix socket */

		sock = socket(PF_UNIX, SOCK_STREAM, 0);
		if (sock < 0) {
			ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
			goto out;
		}

		strncpy(addr.sun_path, unixsocket, sizeof(addr.sun_path) - 1);
		addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
		addr.sun_family = PF_UNIX;

		ret = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
		if (ret < 0) {
			ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
			goto out;
		}

	}
	memset(&msghdr, 0, sizeof(msghdr));
	cmsgbuf = malloc(CMSG_SPACE(sizeof(int)) * MAX_SEND_FD);
	if (!cmsgbuf) {
		ha_warning("Failed to allocate memory to send sockets\n");
		goto out;
	}

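	/* don't wait more than one second for each answer from the old process */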
	setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv));
	iov.iov_base = &fd_nb;
	iov.iov_len = sizeof(fd_nb);
	msghdr.msg_iov = &iov;
	msghdr.msg_iovlen = 1;

	if (send(sock, "_getsocks\n", strlen("_getsocks\n"), 0) != strlen("_getsocks\n")) {
		ha_warning("Failed to get the number of sockets to be transferred !\n");
		goto out;
	}

	/* First, get the number of file descriptors to be received */
	if (recvmsg(sock, &msghdr, MSG_WAITALL) != sizeof(fd_nb)) {
		ha_warning("Failed to get the number of sockets to be transferred !\n");
		goto out;
	}

	if (fd_nb == 0) {
		ret2 = 0;
		goto out;
	}

	tmpbuf = malloc(fd_nb * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int)));
	if (tmpbuf == NULL) {
		ha_warning("Failed to allocate memory while receiving sockets\n");
		goto out;
	}

	tmpfd = malloc(fd_nb * sizeof(int));
	if (tmpfd == NULL) {
		ha_warning("Failed to allocate memory while receiving sockets\n");
		goto out;
	}

	msghdr.msg_control = cmsgbuf;
	msghdr.msg_controllen = CMSG_SPACE(sizeof(int)) * MAX_SEND_FD;
	iov.iov_len = MAX_SEND_FD * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int));

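	/* receive the FDs in batches of at most MAX_SEND_FD, each batch
	 * carrying the FDs as SCM_RIGHTS ancillary data and the per-socket
	 * metadata in the regular payload, and acknowledge each batch so the
	 * sender may continue
	 */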
	do {
		int ret3;

		iov.iov_base = tmpbuf + curoff;

		ret = recvmsg(sock, &msghdr, 0);

		if (ret == -1 && errno == EINTR)
			continue;

		if (ret <= 0)
			break;

		/* Send an ack to let the sender know we got the sockets
		 * and it can send some more
		 */
		do {
			ret3 = send(sock, &got_fd, sizeof(got_fd), 0);
		} while (ret3 == -1 && errno == EINTR);

		for (cmsg = CMSG_FIRSTHDR(&msghdr); cmsg != NULL; cmsg = CMSG_NXTHDR(&msghdr, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
				size_t totlen = cmsg->cmsg_len - CMSG_LEN(0);

				if (totlen / sizeof(int) + got_fd > fd_nb) {
					ha_warning("Got too many sockets !\n");
					goto out;
				}

				/*
				 * Be paranoid and use memcpy() to avoid any
				 * potential alignment issue.
				 */
				memcpy(&tmpfd[got_fd], CMSG_DATA(cmsg), totlen);
				got_fd += totlen / sizeof(int);
			}
		}
		curoff += ret;
	} while (got_fd < fd_nb);

	if (got_fd != fd_nb) {
		ha_warning("We didn't get the expected number of sockets (expecting %d got %d)\n",
			   fd_nb, got_fd);
		goto out;
	}

	maxoff = curoff;
	curoff = 0;

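	/* the payload describes each transferred socket with the following
	 * record: one length byte followed by the namespace name, one length
	 * byte followed by the interface name, then one int of (now unused)
	 * listener options
	 */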
	for (cur_fd = 0; cur_fd < got_fd; cur_fd++) {
		int fd = tmpfd[cur_fd];
		socklen_t socklen;
		int val;
		int len;

		xfer_sock = calloc(1, sizeof(*xfer_sock));
		if (!xfer_sock) {
			ha_warning("Failed to allocate memory in get_old_sockets() !\n");
			break;
		}
		xfer_sock->fd = -1;

		socklen = sizeof(xfer_sock->addr);
		if (getsockname(fd, (struct sockaddr *)&xfer_sock->addr, &socklen) != 0) {
			ha_warning("Failed to get socket address\n");
			ha_free(&xfer_sock);
			continue;
		}

		if (curoff >= maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		len = tmpbuf[curoff++];
		if (len > 0) {
			/* We have a namespace */
			if (curoff + len > maxoff) {
				ha_warning("Inconsistency while transferring sockets\n");
				goto out;
			}
			xfer_sock->namespace = malloc(len + 1);
			if (!xfer_sock->namespace) {
				ha_warning("Failed to allocate memory while transferring sockets\n");
				goto out;
			}
			memcpy(xfer_sock->namespace, &tmpbuf[curoff], len);
			xfer_sock->namespace[len] = 0;
			xfer_sock->ns_namelen = len;
			curoff += len;
		}

		if (curoff >= maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		len = tmpbuf[curoff++];
		if (len > 0) {
			/* We have an interface */
			if (curoff + len > maxoff) {
				ha_warning("Inconsistency while transferring sockets\n");
				goto out;
			}
			xfer_sock->iface = malloc(len + 1);
			if (!xfer_sock->iface) {
				ha_warning("Failed to allocate memory while transferring sockets\n");
				goto out;
			}
			memcpy(xfer_sock->iface, &tmpbuf[curoff], len);
			xfer_sock->iface[len] = 0;
			xfer_sock->if_namelen = len;
			curoff += len;
		}

		if (curoff + sizeof(int) > maxoff) {
			ha_warning("Inconsistency while transferring sockets\n");
			goto out;
		}

		/* we used to have 32 bits of listener options here but we don't
		 * use them anymore.
		 */
		curoff += sizeof(int);

		/* determine the foreign status directly from the socket itself */
		if (sock_inet_is_foreign(fd, xfer_sock->addr.ss_family))
			xfer_sock->options |= SOCK_XFER_OPT_FOREIGN;

		socklen = sizeof(val);
		if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &socklen) == 0 && val == SOCK_DGRAM)
			xfer_sock->options |= SOCK_XFER_OPT_DGRAM;

#if defined(IPV6_V6ONLY)
		/* keep only the v6only flag depending on what's currently
		 * active on the socket, and always drop the v4v6 one.
		 */
		socklen = sizeof(val);
		if (xfer_sock->addr.ss_family == AF_INET6 &&
		    getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, &socklen) == 0 && val > 0)
			xfer_sock->options |= SOCK_XFER_OPT_V6ONLY;
#endif

		xfer_sock->fd = fd;
		if (xfer_sock_list)
			xfer_sock_list->prev = xfer_sock;
		xfer_sock->next = xfer_sock_list;
		xfer_sock->prev = NULL;
		xfer_sock_list = xfer_sock;
		xfer_sock = NULL;
	}

	ret2 = 0;
out:
	/* If we failed midway make sure to close the remaining
	 * file descriptors
	 */
	if (tmpfd != NULL && cur_fd < got_fd) {
		for (; cur_fd < got_fd; cur_fd++) {
			close(tmpfd[cur_fd]);
		}
	}

	free(tmpbuf);
	free(tmpfd);
	free(cmsgbuf);

	if (sock != -1)
		close(sock);

	if (xfer_sock) {
		free(xfer_sock->namespace);
		free(xfer_sock->iface);
		if (xfer_sock->fd != -1)
			close(xfer_sock->fd);
		free(xfer_sock);
	}
	return (ret2);
}

/* When binding the receivers, check if a socket has been sent to us by the
 * previous process that we could reuse, instead of creating a new one. Note
 * that some address family-specific options are checked on the listener and
 * on the socket. Typically for AF_INET and AF_INET6, we check for transparent
 * mode, and for AF_INET6 we also check for "v4v6" or "v6only". The reused
 * socket is automatically removed from the list so that it's not proposed
 * anymore.
 */
int sock_find_compatible_fd(const struct receiver *rx)
{
	struct xfer_sock_list *xfer_sock = xfer_sock_list;
	int options = 0;
	int if_namelen = 0;
	int ns_namelen = 0;
	int ret = -1;

	if (!rx->proto->fam->addrcmp)
		return -1;

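	/* rebuild the set of SOCK_XFER_OPT_* flags this receiver expects, to be
	 * compared against what was collected from each inherited socket
	 */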
	if (rx->proto->proto_type == PROTO_TYPE_DGRAM)
		options |= SOCK_XFER_OPT_DGRAM;

	if (rx->settings->options & RX_O_FOREIGN)
		options |= SOCK_XFER_OPT_FOREIGN;

	if (rx->addr.ss_family == AF_INET6) {
		/* Prepare to match the v6only option against what we really want. Note
		 * that sadly the two options are not exclusive to each other and that
		 * v6only is stronger than v4v6.
		 */
		if ((rx->settings->options & RX_O_V6ONLY) ||
		    (sock_inet6_v6only_default && !(rx->settings->options & RX_O_V4V6)))
			options |= SOCK_XFER_OPT_V6ONLY;
	}

	if (rx->settings->interface)
		if_namelen = strlen(rx->settings->interface);
#ifdef USE_NS
	if (rx->settings->netns)
		ns_namelen = rx->settings->netns->name_len;
#endif

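	/* look for an inherited socket with exactly the same options,
	 * interface, namespace and address as the receiver
	 */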
	while (xfer_sock) {
		if ((options == xfer_sock->options) &&
		    (if_namelen == xfer_sock->if_namelen) &&
		    (ns_namelen == xfer_sock->ns_namelen) &&
		    (!if_namelen || strcmp(rx->settings->interface, xfer_sock->iface) == 0) &&
#ifdef USE_NS
		    (!ns_namelen || strcmp(rx->settings->netns->node.key, xfer_sock->namespace) == 0) &&
#endif
		    rx->proto->fam->addrcmp(&xfer_sock->addr, &rx->addr) == 0)
			break;
		xfer_sock = xfer_sock->next;
	}

	if (xfer_sock != NULL) {
		ret = xfer_sock->fd;
		if (xfer_sock == xfer_sock_list)
			xfer_sock_list = xfer_sock->next;
		if (xfer_sock->prev)
			xfer_sock->prev->next = xfer_sock->next;
		if (xfer_sock->next)
			xfer_sock->next->prev = xfer_sock->prev;
		free(xfer_sock->iface);
		free(xfer_sock->namespace);
		free(xfer_sock);
	}
	return ret;
}

/* After all protocols are bound, there may remain some old sockets that have
 * been removed between the previous config and the new one. These ones must
 * be dropped, otherwise they will remain open and may prevent a service from
 * restarting.
 */
void sock_drop_unused_old_sockets()
{
	while (xfer_sock_list != NULL) {
		struct xfer_sock_list *tmpxfer = xfer_sock_list->next;

		close(xfer_sock_list->fd);
		free(xfer_sock_list->iface);
		free(xfer_sock_list->namespace);
		free(xfer_sock_list);
		xfer_sock_list = tmpxfer;
	}
}

/* Tests if the receiver supports accepting connections. Returns positive on
 * success, 0 if not possible, negative if the socket is non-recoverable. The
 * rationale behind this is that inherited FDs may be broken and that shared
 * FDs might have been paused by another process.
 */
int sock_accepting_conn(const struct receiver *rx)
{
	int opt_val = 0;
	socklen_t opt_len = sizeof(opt_val);

	if (getsockopt(rx->fd, SOL_SOCKET, SO_ACCEPTCONN, &opt_val, &opt_len) == -1)
		return -1;

	return opt_val;
}

/* This is the FD handler IO callback for stream sockets configured for
 * accepting incoming connections. It's a pass-through to listener_accept()
 * which will iterate over the listener protocol's accept_conn() function.
 * The FD's owner must be a listener.
 */
void sock_accept_iocb(int fd)
{
	struct listener *l = fdtab[fd].owner;

	if (!l)
		return;

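	/* the master must only accept on RX_F_MWORKER listeners, and workers
	 * only on the other ones
	 */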
	BUG_ON(!!master != !!(l->rx.flags & RX_F_MWORKER));
	listener_accept(l);
}

/* This completes the initialization of connection <conn> by inserting its FD
 * into the fdtab, associating it with the regular connection handler. It will
 * be bound to the current thread only. This call cannot fail.
 */
void sock_conn_ctrl_init(struct connection *conn)
{
	BUG_ON(conn->flags & CO_FL_FDLESS);
	fd_insert(conn->handle.fd, conn, sock_conn_iocb, tgid, ti->ltid_bit);
}

/* This completes the release of connection <conn> by removing its FD from the
 * fdtab and deleting it. The connection must not use the FD anymore past this
 * point. The FD may be modified in the connection.
 */
void sock_conn_ctrl_close(struct connection *conn)
{
	BUG_ON(conn->flags & CO_FL_FDLESS);
	fd_delete(conn->handle.fd);
	conn->handle.fd = DEAD_FD_MAGIC;
}

/* This is the callback which is set when a connection establishment is pending
 * and we have nothing to send. It may update the FD polling status to indicate
 * !READY. It returns 0 if it fails in a fatal way or needs to poll to go
 * further, otherwise it returns non-zero and removes the CO_FL_WAIT_L4_CONN
 * flag from the connection's flags. In case of error, it sets CO_FL_ERROR and
 * leaves the error code in errno.
 */
int sock_conn_check(struct connection *conn)
{
	struct sockaddr_storage *addr;
	int fd = conn->handle.fd;

	if (conn->flags & CO_FL_ERROR)
		return 0;

	if (!conn_ctrl_ready(conn))
		return 0;

	if (!(conn->flags & CO_FL_WAIT_L4_CONN))
		return 1; /* strange we were called while ready */

	BUG_ON(conn->flags & CO_FL_FDLESS);

	if (!fd_send_ready(fd) && !(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
		return 0;

	/* Here we have 2 cases :
	 *  - modern pollers, able to report ERR/HUP. If these ones return any
	 *    of these flags then it's likely a failure, otherwise it possibly
	 *    is a success (i.e. there may have been data received just before
	 *    the error was reported).
	 *  - select, which doesn't report these and with which it's always
	 *    necessary either to try connect() again or to check for SO_ERROR.
	 * In order to simplify everything, we double-check using connect() as
	 * soon as we meet either of these delicate situations. Note that
	 * SO_ERROR would clear the error after reporting it!
	 */
	if (cur_poller.flags & HAP_POLL_F_ERRHUP) {
		/* modern poller, able to report ERR/HUP */
		if ((fdtab[fd].state & (FD_POLL_IN|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_IN)
			goto done;
		if ((fdtab[fd].state & (FD_POLL_OUT|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_OUT)
			goto done;
		if (!(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
			goto wait;
		/* error present, fall through common error check path */
	}

	/* Use connect() to check the state of the socket. This has the double
	 * advantage of *not* clearing the error (so that health checks can
	 * still use getsockopt(SO_ERROR)) and giving us the following info :
	 *  - error
	 *  - connecting (EALREADY, EINPROGRESS)
	 *  - connected (EISCONN, 0)
	 */
	addr = conn->dst;
	if ((conn->flags & CO_FL_SOCKS4) && obj_type(conn->target) == OBJ_TYPE_SERVER)
		addr = &objt_server(conn->target)->socks4_addr;

	if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
		if (errno == EALREADY || errno == EINPROGRESS)
			goto wait;

		if (errno && errno != EISCONN)
			goto out_error;
	}

 done:
	/* The FD is ready now, we'll mark the connection as complete and
	 * forward the event to the transport layer which will notify the
	 * data layer.
	 */
	conn->flags &= ~CO_FL_WAIT_L4_CONN;
	fd_may_send(fd);
	fd_cond_recv(fd);
	errno = 0; // make health checks happy
	return 1;

 out_error:
	/* Write error on the file descriptor. Report it to the connection
	 * and disable polling on this FD.
	 */
	conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
	fd_stop_both(fd);
	return 0;

 wait:
	fd_cant_send(fd);
	fd_want_send(fd);
	return 0;
}

/* I/O callback for fd-based connections. It calls the read/write handlers
 * provided by the connection's sock_ops, which must be valid.
 */
void sock_conn_iocb(int fd)
{
	struct connection *conn = fdtab[fd].owner;
	unsigned int flags;
	int need_wake = 0;
	struct tasklet *t;

	if (unlikely(!conn)) {
		activity[tid].conn_dead++;
		return;
	}

	flags = conn->flags & ~CO_FL_ERROR; /* ensure to call the wake handler upon error */

	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) &&
	    ((fd_send_ready(fd) && fd_send_active(fd)) ||
	     (fd_recv_ready(fd) && fd_recv_active(fd)))) {
		/* Still waiting for a connection to establish and nothing was
		 * attempted yet to probe the connection. This will clear the
		 * CO_FL_WAIT_L4_CONN flag on success.
		 */
		if (!sock_conn_check(conn))
			goto leave;
		need_wake = 1;
	}

	if (fd_send_ready(fd) && fd_send_active(fd)) {
		/* force reporting of activity by clearing the previous flags :
		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
		 * both of which will be detected below.
		 */
		flags = 0;
		if (conn->subs && conn->subs->events & SUB_RETRY_SEND) {
			t = conn->subs->tasklet;
			need_wake = 0; // wake will be called after this I/O
			conn->subs->events &= ~SUB_RETRY_SEND;
			if (!conn->subs->events)
				conn->subs = NULL;
			tasklet_wakeup(t);
		}
		fd_stop_send(fd);
	}

	/* The data transfer starts here and stops on error and handshakes. Note
	 * that we must absolutely test conn->xprt at each step in case it suddenly
	 * changes due to a quick unexpected close().
	 */
	if (fd_recv_ready(fd) && fd_recv_active(fd)) {
		/* force reporting of activity by clearing the previous flags :
		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
		 * both of which will be detected below.
		 */
		flags = 0;
		if (conn->subs && conn->subs->events & SUB_RETRY_RECV) {
			t = conn->subs->tasklet;
			need_wake = 0; // wake will be called after this I/O
			conn->subs->events &= ~SUB_RETRY_RECV;
			if (!conn->subs->events)
				conn->subs = NULL;
			tasklet_wakeup(t);
		}
		fd_stop_recv(fd);
	}

 leave:
	/* we may have to finish to install a mux or to wake it up based on
	 * what was just done above. It may kill the connection so we have to
	 * be prepared not to use it anymore.
	 */
	if (conn_notify_mux(conn, flags, need_wake) < 0)
		return;

	/* commit polling changes in case of error.
	 * WT: it seems that the last case where this could still be relevant
	 * is if a mux wake function above reports a connection error but does
	 * not stop polling. Shouldn't we enforce this into the mux instead of
	 * having to deal with this ?
	 */
	if (unlikely(conn->flags & CO_FL_ERROR)) {
		if (conn_ctrl_ready(conn))
			fd_stop_both(fd);
	}
}

/* Drains possibly pending incoming data on the file descriptor attached to the
 * connection. This is used to know whether we need to disable lingering on
 * close. Returns non-zero if it is safe to close without disabling lingering,
 * otherwise zero.
 */
int sock_drain(struct connection *conn)
{
	int turns = 2;
	int fd = conn->handle.fd;
	int len;

	BUG_ON(conn->flags & CO_FL_FDLESS);

	if (fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP))
		goto shut;

	if (!(conn->flags & CO_FL_WANT_DRAIN) && !fd_recv_ready(fd))
		return 0;

	/* no drain function defined, use the generic one */

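	/* when MSG_TRUNC is known to discard pending data without copying it
	 * (MSG_TRUNC_CLEARS_INPUT), try it first with a NULL buffer, and fall
	 * back to reading into the trash buffer otherwise
	 */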
	while (turns) {
#ifdef MSG_TRUNC_CLEARS_INPUT
		len = recv(fd, NULL, INT_MAX, MSG_DONTWAIT | MSG_NOSIGNAL | MSG_TRUNC);
		if (len == -1 && errno == EFAULT)
#endif
			len = recv(fd, trash.area, trash.size, MSG_DONTWAIT | MSG_NOSIGNAL);

		if (len == 0)
			goto shut;

		if (len < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK) {
				/* connection not closed yet */
				fd_cant_recv(fd);
				break;
			}
			if (errno == EINTR)  /* oops, try again */
				continue;
			/* other errors indicate a dead connection, fine. */
			goto shut;
		}
		/* OK we read some data, let's try again once */
		turns--;
	}

	/* some data are still present, give up */
	return 0;

 shut:
	/* we're certain the connection was shut down */
	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
	return 1;
}

/* Checks the connection's FD for readiness of events <event_type>, which may
 * only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND. Those which are
 * ready are returned. The ones that are not ready are enabled. The caller is
 * expected to do what is needed to handle ready events and to deal with
 * subsequent wakeups caused by the requested events' readiness.
 */
int sock_check_events(struct connection *conn, int event_type)
{
	int ret = 0;

	BUG_ON(conn->flags & CO_FL_FDLESS);

	if (event_type & SUB_RETRY_RECV) {
		if (fd_recv_ready(conn->handle.fd))
			ret |= SUB_RETRY_RECV;
		else
			fd_want_recv(conn->handle.fd);
	}

	if (event_type & SUB_RETRY_SEND) {
		if (fd_send_ready(conn->handle.fd))
			ret |= SUB_RETRY_SEND;
		else
			fd_want_send(conn->handle.fd);
	}

	return ret;
}

/* Ignore readiness events from connection's FD for events of types <event_type>
 * which may only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND.
 */
void sock_ignore_events(struct connection *conn, int event_type)
{
	BUG_ON(conn->flags & CO_FL_FDLESS);

	if (event_type & SUB_RETRY_RECV)
		fd_stop_recv(conn->handle.fd);

	if (event_type & SUB_RETRY_SEND)
		fd_stop_send(conn->handle.fd);
}

/* Live check to see if a socket type supports SO_REUSEPORT for the specified
 * family and socket() settings. Returns non-zero on success, 0 on failure. Use
 * protocol_supports_flag() instead, which checks cached flags.
 */
int _sock_supports_reuseport(const struct proto_fam *fam, int type, int protocol)
{
	int ret = 0;
#ifdef SO_REUSEPORT
	struct sockaddr_storage ss;
	socklen_t sl = sizeof(ss);
	int fd1, fd2;

	/* for the check, we'll need two sockets */
	fd1 = fd2 = -1;

	/* ignore custom sockets */
	if (!fam || fam->sock_domain >= AF_MAX)
		goto leave;

	fd1 = socket(fam->sock_domain, type, protocol);
	if (fd1 < 0)
		goto leave;

	if (setsockopt(fd1, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
		goto leave;

	/* bind to any address assigned by the kernel, we'll then try to do it twice */
	memset(&ss, 0, sizeof(ss));
	ss.ss_family = fam->sock_family;
	if (bind(fd1, (struct sockaddr *)&ss, fam->sock_addrlen) < 0)
		goto leave;

	if (getsockname(fd1, (struct sockaddr *)&ss, &sl) < 0)
		goto leave;

	fd2 = socket(fam->sock_domain, type, protocol);
	if (fd2 < 0)
		goto leave;

	if (setsockopt(fd2, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
		goto leave;

	if (bind(fd2, (struct sockaddr *)&ss, sl) < 0)
		goto leave;

	/* OK we could bind twice to the same address:port, REUSEPORT
	 * is supported for this protocol.
	 */
	ret = 1;

 leave:
	if (fd2 >= 0)
		close(fd2);
	if (fd1 >= 0)
		close(fd1);
#endif
	return ret;
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */