/*
 * File descriptors management functions.
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * There is no direct link between the FD and the updates list. There is only a
 * bit in the fdtab[] to indicate that a file descriptor is already present in
 * the updates list. Once an fd is present in the updates list, it will have to
 * be considered even if its changes are reverted in the middle or if the fd is
 * replaced.
 *
 * The event state for an FD, as found in fdtab[].state, is maintained for each
 * direction. The state field is built this way, with R bits in the low nibble
 * and W bits in the high nibble for ease of access and debugging :
 *
 *               7    6    5    4   3    2    1    0
 *             [ 0 |  0 | RW | AW | 0 |  0 | RR | AR ]
 *
 *                   A* = active     *R = read
 *                   R* = ready      *W = write
 *
 * An FD is marked "active" when there is a desire to use it.
 * An FD is marked "ready" when it has not faced a new EAGAIN since last wake-up
 * (it is a cache of the last EAGAIN regardless of polling changes). Each poller
 * has its own "polled" state for the same fd, as stored in the polled_mask.
 *
 * We have 4 possible states for each direction based on these 2 flags :
 *
 *   +---+---+----------+---------------------------------------------+
 *   | R | A | State    | Description                                 |
 *   +---+---+----------+---------------------------------------------+
 *   | 0 | 0 | DISABLED | No activity desired, not ready.             |
 *   | 0 | 1 | ACTIVE   | Activity desired.                           |
 *   | 1 | 0 | STOPPED  | End of activity.                            |
 *   | 1 | 1 | READY    | Activity desired and reported.              |
 *   +---+---+----------+---------------------------------------------+
 *
 * The transitions are pretty simple :
 *   - fd_want_*() : set flag A
 *   - fd_stop_*() : clear flag A
 *   - fd_cant_*() : clear flag R (when facing EAGAIN)
 *   - fd_may_*()  : set flag R (upon return from poll())
 *
 * Each poller then computes its own polled state :
 *     if (A) { if (!R) P := 1 } else { P := 0 }
 *
 * The state transitions look like the diagram below.
 *
 *     may  +----------+
 *     ,----| DISABLED |  (READY=0, ACTIVE=0)
 *     |    +----------+
 *     | want |  ^
 *     |      |  |
 *     |      v  | stop
 *     |    +----------+
 *     |    |  ACTIVE  |  (READY=0, ACTIVE=1)
 *     |    +----------+
 *     |      |  ^
 *     |  may |  |
 *     |      v  | EAGAIN (can't)
 *     |     +--------+
 *     |     | READY  |   (READY=1, ACTIVE=1)
 *     |     +--------+
 *     |  stop |  ^
 *     |       |  |
 *     |       v  | want
 *     |    +---------+
 *     `--->| STOPPED |  (READY=1, ACTIVE=0)
 *          +---------+
 */
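
/* Illustrative sketch (not part of this file): how an I/O callback typically
 * drives the transitions above. The fd_*() helpers are the real ones from
 * haproxy/fd.h, but the callback itself and its use of recv() are a
 * hypothetical example.
 *
 *     static void my_iocb(int fd)
 *     {
 *         char buf[1024];
 *         ssize_t ret = recv(fd, buf, sizeof(buf), 0);
 *
 *         if (ret < 0 && errno == EAGAIN)
 *             fd_cant_recv(fd);   // clear R: polling may be re-armed
 *         else if (ret == 0)
 *             fd_stop_recv(fd);   // clear A: no more desire to read
 *         // otherwise the FD remains READY and may be processed again
 *     }
 *
 * fd_want_recv(fd) is what initially sets the A bit when a reader registers
 * interest; the R bit is set when the poller reports readiness.
 */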

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/uio.h>

#if defined(USE_POLL)
#include <poll.h>
#include <errno.h>
#endif

#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/log.h>
#include <haproxy/port_range.h>
#include <haproxy/tools.h>


struct fdtab *fdtab             __read_mostly = NULL;  /* array of all the file descriptors */
struct polled_mask *polled_mask __read_mostly = NULL;  /* Array for the polled_mask of each fd */
struct fdinfo *fdinfo           __read_mostly = NULL;  /* less-often used infos for file descriptors */
int totalconn;                  /* total # of terminated sessions */
int actconn;                    /* # of active sessions */

struct poller pollers[MAX_POLLERS] __read_mostly;
struct poller cur_poller           __read_mostly;
int nbpollers = 0;

volatile struct fdlist update_list; // Global update list

THREAD_LOCAL int *fd_updt = NULL;              // FD updates list
THREAD_LOCAL int fd_nbupdt = 0;                // number of updates in the list
THREAD_LOCAL int poller_rd_pipe = -1;          // Pipe to wake the thread
int poller_wr_pipe[MAX_THREADS] __read_mostly; // Pipe to wake the threads

volatile int ha_used_fds = 0; // Number of FD we're currently using

#define _GET_NEXT(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->next
#define _GET_PREV(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->prev
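
/* Note on the sentinel values used by these lists (derived from the code
 * below): next == -1 marks the last element, next == -2 marks an entry that
 * is locked by a concurrent operation, and next <= -3 means the FD is not in
 * the list at all (removal stores -(next + 4) so the old successor remains
 * recoverable).
 */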
/* adds fd <fd> to fd list <list> if it was not yet in it */
void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
{
	int next;
	int new;
	int old;
	int last;

redo_next:
	next = _GET_NEXT(fd, off);
	/* Check that we're not already in the cache, and if not, lock us. */
	if (next > -2)
		goto done;
	if (next == -2)
		goto redo_next;
	if (!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
		goto redo_next;
	__ha_barrier_atomic_store();

	new = fd;
redo_last:
	/* First, insert in the linked list */
	last = list->last;
	old = -1;

	_GET_PREV(fd, off) = -2;
	/* Make sure the "prev" store is visible before we update the last entry */
	__ha_barrier_store();

	if (unlikely(last == -1)) {
		/* list is empty, try to add ourselves alone so that list->last=fd */
		if (unlikely(!_HA_ATOMIC_CAS(&list->last, &old, new)))
			goto redo_last;

		/* list->first was necessarily -1, we're guaranteed to be alone here */
		list->first = fd;
	} else {
		/* adding ourselves past the last element
		 * The CAS will only succeed if its next is -1,
		 * which means it's in the cache, and the last element.
		 */
		if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(last, off), &old, new)))
			goto redo_last;

		/* Then, update the last entry */
		list->last = fd;
	}
	__ha_barrier_store();
	/* since we're alone at the end of the list and still locked(-2),
	 * we know no one tried to add past us. Mark the end of list.
	 */
	_GET_PREV(fd, off) = last;
	_GET_NEXT(fd, off) = -1;
	__ha_barrier_store();
done:
	return;
}

/* removes fd <fd> from fd list <list> */
void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
{
#if defined(HA_HAVE_CAS_DW) || defined(HA_CAS_IS_8B)
	volatile union {
		struct fdlist_entry ent;
		uint64_t u64;
		uint32_t u32[2];
	} cur_list, next_list;
#endif
	int old;
	int new = -2;
	int prev;
	int next;
	int last;
lock_self:
#if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
	next_list.ent.next = next_list.ent.prev = -2;
	cur_list.ent = *(volatile struct fdlist_entry *)(((char *)&fdtab[fd]) + off);
	/* First, attempt to lock our own entries */
	do {
		/* The FD is not in the FD cache, give up */
		if (unlikely(cur_list.ent.next <= -3))
			return;
		if (unlikely(cur_list.ent.prev == -2 || cur_list.ent.next == -2))
			goto lock_self;
	} while (
#ifdef HA_CAS_IS_8B
		unlikely(!_HA_ATOMIC_CAS(((uint64_t *)&_GET_NEXT(fd, off)), (uint64_t *)&cur_list.u64, next_list.u64))
#else
		unlikely(!_HA_ATOMIC_DWCAS(((long *)&_GET_NEXT(fd, off)), (uint32_t *)&cur_list.u32, &next_list.u32))
#endif
	    );
	next = cur_list.ent.next;
	prev = cur_list.ent.prev;

#else
lock_self_next:
	next = _GET_NEXT(fd, off);
	if (next == -2)
		goto lock_self_next;
	if (next <= -3)
		goto done;
	if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
		goto lock_self_next;
lock_self_prev:
	prev = _GET_PREV(fd, off);
	if (prev == -2)
		goto lock_self_prev;
	if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
		goto lock_self_prev;
#endif
	__ha_barrier_atomic_store();

	/* Now, lock the entries of our neighbours */
	if (likely(prev != -1)) {
redo_prev:
		old = fd;

		if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(prev, off), &old, new))) {
			if (unlikely(old == -2)) {
				/* Neighbour already locked, give up and
				 * retry once it's done
				 */
				_GET_PREV(fd, off) = prev;
				__ha_barrier_store();
				_GET_NEXT(fd, off) = next;
				__ha_barrier_store();
				goto lock_self;
			}
			goto redo_prev;
		}
	}
	if (likely(next != -1)) {
redo_next:
		old = fd;
		if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(next, off), &old, new))) {
			if (unlikely(old == -2)) {
				/* Neighbour already locked, give up and
				 * retry once it's done
				 */
				if (prev != -1) {
					_GET_NEXT(prev, off) = fd;
					__ha_barrier_store();
				}
				_GET_PREV(fd, off) = prev;
				__ha_barrier_store();
				_GET_NEXT(fd, off) = next;
				__ha_barrier_store();
				goto lock_self;
			}
			goto redo_next;
		}
	}
	if (list->first == fd)
		list->first = next;
	__ha_barrier_store();
	last = list->last;
	while (unlikely(last == fd && (!_HA_ATOMIC_CAS(&list->last, &last, prev))))
		__ha_compiler_barrier();
	/* Make sure we let other threads know we're no longer in cache,
	 * before releasing our neighbours.
	 */
	__ha_barrier_store();
	if (likely(prev != -1))
		_GET_NEXT(prev, off) = next;
	__ha_barrier_store();
	if (likely(next != -1))
		_GET_PREV(next, off) = prev;
	__ha_barrier_store();
	/* Ok, now we're out of the fd cache */
	_GET_NEXT(fd, off) = -(next + 4);
	__ha_barrier_store();
done:
	return;
}

#undef _GET_NEXT
#undef _GET_PREV

/* deletes the FD once nobody uses it anymore, as detected by the caller by its
 * thread_mask being zero and its running mask turning to zero. There is no
 * protection against concurrent accesses, it's up to the caller to make sure
 * only the last thread will call it. This is only for internal use, please use
 * fd_delete() instead.
 */
void _fd_delete_orphan(int fd)
{
	if (fdtab[fd].state & FD_LINGER_RISK) {
		/* this is generally set when connecting to servers */
		DISGUISE(setsockopt(fd, SOL_SOCKET, SO_LINGER,
		                    (struct linger *) &nolinger, sizeof(struct linger)));
	}
	if (cur_poller.clo)
		cur_poller.clo(fd);

	port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
	polled_mask[fd].poll_recv = polled_mask[fd].poll_send = 0;

	fdtab[fd].state = 0;

#ifdef DEBUG_FD
	fdtab[fd].event_count = 0;
#endif
	fdinfo[fd].port_range = NULL;
	fdtab[fd].owner = NULL;
	/* perform the close() call last as it's what unlocks the instant reuse
	 * of this FD by any other thread.
	 */
	close(fd);
	_HA_ATOMIC_DEC(&ha_used_fds);
}

/* Deletes an FD from the fdsets. The file descriptor is also closed, possibly
 * asynchronously. Only the owning thread may do this.
 */
void fd_delete(int fd)
{
	/* we must postpone removal of an FD that may currently be in use
	 * by another thread. This can happen in the following two situations:
	 *   - after a takeover, the owning thread closes the connection but
	 *     the previous one just woke up from the poller and entered
	 *     the FD handler iocb. That thread holds an entry in running_mask
	 *     and requires removal protection.
	 *   - multiple threads are accepting connections on a listener, and
	 *     one of them (or even a separate one) decides to unbind the
	 *     listener under the listener's lock while other ones still hold
	 *     the running bit.
	 * In both situations the FD is marked as unused (thread_mask = 0) and
	 * will not take new bits in its running_mask so we have the guarantee
	 * that the last thread eliminating running_mask is the one allowed to
	 * safely delete the FD. Most of the time it will be the current thread.
	 */

	HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit);
	HA_ATOMIC_STORE(&fdtab[fd].thread_mask, 0);
	if (fd_clr_running(fd) == 0)
		_fd_delete_orphan(fd);
}

/*
 * Take over a FD belonging to another thread.
 * <expected_owner> is the expected owner of the fd.
 * Returns 0 on success, and -1 on failure.
 */
int fd_takeover(int fd, void *expected_owner)
{
	unsigned long old;

	/* protect ourself against a delete then an insert for the same fd,
	 * if it happens, then the owner will no longer be the expected
	 * connection.
	 */
	if (fdtab[fd].owner != expected_owner)
		return -1;

	/* we must be alone to work on this idle FD. If not, it means that its
	 * poller is currently waking up and is about to use it, likely to
	 * close it on shut/error, but maybe also to process any unexpectedly
	 * pending data.
	 */
	old = 0;
	if (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &old, tid_bit))
		return -1;

	/* success, from now on it's ours */
	HA_ATOMIC_STORE(&fdtab[fd].thread_mask, tid_bit);

	/* Make sure the FD doesn't have the active bit. It is possible that
	 * the fd is polled by the thread that used to own it, the new thread
	 * is supposed to call subscribe() later, to activate polling.
	 */
	fd_stop_recv(fd);

	/* we're done with it */
	HA_ATOMIC_AND(&fdtab[fd].running_mask, ~tid_bit);
	return 0;
}

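/* Hypothetical caller-side sketch (not from this file): stealing an idle
 * connection before reuse; in HAProxy this dance is typically performed by
 * the muxes' takeover() methods.
 *
 *     if (fd_takeover(conn->handle.fd, conn) != 0)
 *         return -1; // still referenced by its previous owner, leave it alone
 *     // from here on the FD's thread_mask is ours and polling is disabled
 */

/* Adds fd <fd> to the list of FDs whose polling state must be updated. An FD
 * used only by the current thread goes to the local per-thread fd_updt list,
 * others go to the shared update_list, possibly waking up one sleeping owner
 * thread so the change is applied without delay.
 */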
void updt_fd_polling(const int fd)
{
	if (all_threads_mask == 1UL || (fdtab[fd].thread_mask & all_threads_mask) == tid_bit) {
		if (HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
			return;

		fd_updt[fd_nbupdt++] = fd;
	} else {
		unsigned long update_mask = fdtab[fd].update_mask;
		do {
			if (update_mask == fdtab[fd].thread_mask)
				return;
		} while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask, fdtab[fd].thread_mask));

		fd_add_to_fd_list(&update_list, fd, offsetof(struct fdtab, update));

		if (fd_active(fd) &&
		    !(fdtab[fd].thread_mask & tid_bit) &&
		    (fdtab[fd].thread_mask & ~tid_bit & all_threads_mask & ~sleeping_thread_mask) == 0) {
			/* we need to wake up one thread to handle it immediately */
			int thr = my_ffsl(fdtab[fd].thread_mask & ~tid_bit & all_threads_mask) - 1;

			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
			wake_thread(thr);
		}
	}
}

/* Update events seen for FD <fd> and its state if needed. This should be
 * called by the poller, passing FD_EV_*_{R,W,RW} in <evts>. FD_EV_ERR_*
 * doesn't need to also pass FD_EV_SHUT_*, it's implied. ERR and SHUT are
 * allowed to be reported regardless of R/W readiness. Returns one of
 * FD_UPDT_*.
 */
int fd_update_events(int fd, uint evts)
{
	unsigned long locked;
	uint old, new;
	uint new_flags, must_stop;
	ulong rmask, tmask;

	ti->flags &= ~TI_FL_STUCK; // this thread is still running

	/* do nothing if the FD was taken over under us */
	do {
		/* make sure we read a synchronous copy of rmask and tmask
		 * (tmask is only up to date if it reflects all of rmask's
		 * bits).
		 */
		do {
			rmask = _HA_ATOMIC_LOAD(&fdtab[fd].running_mask);
			tmask = _HA_ATOMIC_LOAD(&fdtab[fd].thread_mask);
		} while (rmask & ~tmask);

		if (!(tmask & tid_bit)) {
			/* a takeover has started */
			activity[tid].poll_skip_fd++;
			return FD_UPDT_MIGRATED;
		}
	} while (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &rmask, rmask | tid_bit));

	locked = (tmask != tid_bit);

	/* OK now we are guaranteed that our thread_mask was present and
	 * that we're allowed to update the FD.
	 */

	new_flags =
	      ((evts & FD_EV_READY_R) ? FD_POLL_IN  : 0) |
	      ((evts & FD_EV_READY_W) ? FD_POLL_OUT : 0) |
	      ((evts & FD_EV_SHUT_R)  ? FD_POLL_HUP : 0) |
	      ((evts & FD_EV_ERR_RW)  ? FD_POLL_ERR : 0);

	/* SHUTW reported while FD was active for writes is an error */
	if ((fdtab[fd].state & FD_EV_ACTIVE_W) && (evts & FD_EV_SHUT_W))
		new_flags |= FD_POLL_ERR;

	/* compute the inactive events reported late that must be stopped */
	must_stop = 0;
	if (unlikely(!fd_active(fd))) {
		/* both sides stopped */
		must_stop = FD_POLL_IN | FD_POLL_OUT;
	}
	else if (unlikely(!fd_recv_active(fd) && (evts & (FD_EV_READY_R | FD_EV_SHUT_R | FD_EV_ERR_RW)))) {
		/* only send remains */
		must_stop = FD_POLL_IN;
	}
	else if (unlikely(!fd_send_active(fd) && (evts & (FD_EV_READY_W | FD_EV_SHUT_W | FD_EV_ERR_RW)))) {
		/* only recv remains */
		must_stop = FD_POLL_OUT;
	}

	if (new_flags & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
		new_flags |= FD_EV_READY_R;

	if (new_flags & (FD_POLL_OUT | FD_POLL_ERR))
		new_flags |= FD_EV_READY_W;

	old = fdtab[fd].state;
	new = (old & ~FD_POLL_UPDT_MASK) | new_flags;

	if (unlikely(locked)) {
		/* Locked FDs (those shared by more than one thread) are atomically updated */
		while (unlikely(new != old && !_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)))
			new = (old & ~FD_POLL_UPDT_MASK) | new_flags;
	} else {
		if (new != old)
			fdtab[fd].state = new;
	}

	if (fdtab[fd].iocb && fd_active(fd)) {
		fdtab[fd].iocb(fd);
	}

	/* another thread might have attempted to close this FD in the mean
	 * time (e.g. timeout task) striking on a previous thread and closing.
	 * This is detected by both thread_mask and running_mask being 0 after
	 * we remove ourselves last.
	 */
	if ((fdtab[fd].running_mask & tid_bit) &&
	    fd_clr_running(fd) == 0 && !fdtab[fd].thread_mask) {
		_fd_delete_orphan(fd);
		return FD_UPDT_CLOSED;
	}

	/* we had to stop this FD and it still must be stopped after the I/O
	 * cb's changes, so let's program an update for this.
	 */
	if (must_stop && !(fdtab[fd].update_mask & tid_bit)) {
		if (((must_stop & FD_POLL_IN)  && !fd_recv_active(fd)) ||
		    ((must_stop & FD_POLL_OUT) && !fd_send_active(fd)))
			if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
				fd_updt[fd_nbupdt++] = fd;
	}

	return FD_UPDT_DONE;
}
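
/* Sketch of the expected caller side (simplified; a real poller such as
 * ev_epoll.c also translates HUP/ERR combinations more carefully than this
 * abridged conversion does):
 *
 *     for (count = 0; count < status; count++) {
 *         unsigned int e = epoll_events[count].events;
 *         int fd = epoll_events[count].data.fd;
 *         uint evts = ((e & EPOLLIN)    ? FD_EV_READY_R : 0) |
 *                     ((e & EPOLLOUT)   ? FD_EV_READY_W : 0) |
 *                     ((e & EPOLLRDHUP) ? FD_EV_SHUT_R  : 0) |
 *                     ((e & EPOLLERR)   ? FD_EV_ERR_RW  : 0);
 *
 *         if (fd_update_events(fd, evts) == FD_UPDT_MIGRATED)
 *             continue; // FD was taken over by another thread, skip it
 *         // FD_UPDT_CLOSED: the FD was deleted here; FD_UPDT_DONE: the
 *         // iocb ran and the FD is still usable.
 *     }
 */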

/* Tries to send <npfx> parts from <prefix> followed by <nmsg> parts from <msg>
 * optionally followed by a newline if <nl> is non-null, to file descriptor
 * <fd>. The message is sent atomically using writev(). It may be truncated to
 * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
 * two lists, it's just a convenience to help the caller prepend some prefixes
 * when necessary. It takes the fd's lock to make sure no other thread will
 * write to the same fd in parallel. Returns the number of bytes sent, or <=0
 * on failure. A limit to 31 total non-empty segments is enforced. The caller
 * is responsible for taking care of making the fd non-blocking.
 */
ssize_t fd_write_frag_line(int fd, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg, int nl)
{
	struct iovec iovec[32];
	size_t totlen = 0;
	size_t sent = 0;
	int vec = 0;
	int attempts = 0;

	if (!maxlen)
		maxlen = ~0;

	/* keep one char for a possible trailing '\n' in any case */
	maxlen--;

	/* make an iovec from the concatenation of all parts of the original
	 * message. Skip empty fields and truncate the whole message to maxlen,
	 * leaving one spare iovec for the '\n'.
	 */
	while (vec < (sizeof(iovec) / sizeof(iovec[0]) - 1)) {
		if (!npfx) {
			pfx = msg;
			npfx = nmsg;
			nmsg = 0;
			if (!npfx)
				break;
		}

		iovec[vec].iov_base = pfx->ptr;
		iovec[vec].iov_len  = MIN(maxlen, pfx->len);
		maxlen -= iovec[vec].iov_len;
		totlen += iovec[vec].iov_len;
		if (iovec[vec].iov_len)
			vec++;
		pfx++; npfx--;
	};

	if (nl) {
		iovec[vec].iov_base = "\n";
		iovec[vec].iov_len  = 1;
		vec++;
	}

	/* make sure we never interleave writes and we never block. This means
	 * we prefer to fail on collision than to block. But we don't want to
	 * lose too many logs so we just perform a few lock attempts then give
	 * up.
	 */

	while (HA_ATOMIC_BTS(&fdtab[fd].state, FD_EXCL_SYSCALL_BIT)) {
		if (++attempts >= 200) {
			/* so that the caller knows the message couldn't be delivered */
			sent = -1;
			errno = EAGAIN;
			goto leave;
		}
		ha_thread_relax();
	}

	if (unlikely(!(fdtab[fd].state & FD_INITIALIZED))) {
		HA_ATOMIC_OR(&fdtab[fd].state, FD_INITIALIZED);
		if (!isatty(fd))
			fcntl(fd, F_SETFL, O_NONBLOCK);
	}
	sent = writev(fd, iovec, vec);
	HA_ATOMIC_BTR(&fdtab[fd].state, FD_EXCL_SYSCALL_BIT);

leave:
	/* sent > 0 if the message was delivered */
	return sent;
}
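
/* Minimal usage sketch (hypothetical): emitting a syslog-style line through
 * this function. IST() is the constant initializer from haproxy/ist.h.
 *
 *     struct ist pfx[1] = { IST("<134>haproxy: ") };
 *     struct ist msg[1] = { IST("backend b1 has no server available!") };
 *
 *     if (fd_write_frag_line(fd, 1024, pfx, 1, msg, 1, 1) <= 0)
 *         ; // dropped (e.g. too many lock collisions), errno says why
 */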

#if defined(USE_CLOSEFROM)
void my_closefrom(int start)
{
	closefrom(start);
}

#elif defined(USE_POLL)
/* This is a portable implementation of closefrom(). It closes all open file
 * descriptors starting at <start> and above. It relies on the fact that poll()
 * will return POLLNVAL for each invalid (hence closed) file descriptor passed
 * in argument in order to skip them. It acts with batches of FDs and will
 * typically perform one poll() call per 1024 FDs so the overhead is low in
 * case all FDs have to be closed.
 */
void my_closefrom(int start)
{
	struct pollfd poll_events[1024];
	struct rlimit limit;
	int nbfds, fd, ret, idx;
	int step, next;

	if (getrlimit(RLIMIT_NOFILE, &limit) == 0)
		step = nbfds = limit.rlim_cur;
	else
		step = nbfds = 0;

	if (nbfds <= 0) {
		/* set safe limit */
		nbfds = 1024;
		step = 256;
	}

	if (step > sizeof(poll_events) / sizeof(poll_events[0]))
		step = sizeof(poll_events) / sizeof(poll_events[0]);

	while (start < nbfds) {
		next = (start / step + 1) * step;

		for (fd = start; fd < next && fd < nbfds; fd++) {
			poll_events[fd - start].fd = fd;
			poll_events[fd - start].events = 0;
		}

		do {
			ret = poll(poll_events, fd - start, 0);
			if (ret >= 0)
				break;
		} while (errno == EAGAIN || errno == EINTR || errno == ENOMEM);

		if (ret)
			ret = fd - start;

		for (idx = 0; idx < ret; idx++) {
			if (poll_events[idx].revents & POLLNVAL)
				continue; /* already closed */

			fd = poll_events[idx].fd;
			close(fd);
		}
		start = next;
	}
}

#else // defined(USE_POLL)

/* This is a portable implementation of closefrom(). It closes all open file
 * descriptors starting at <start> and above. This is a naive version for use
 * when the operating system provides no alternative.
 */
void my_closefrom(int start)
{
	struct rlimit limit;
	int nbfds;

	if (getrlimit(RLIMIT_NOFILE, &limit) == 0)
		nbfds = limit.rlim_cur;
	else
		nbfds = 0;

	if (nbfds <= 0)
		nbfds = 1024; /* safe limit */

	while (start < nbfds)
		close(start++);
}
#endif // defined(USE_POLL)

/* disable the specified poller */
void disable_poller(const char *poller_name)
{
	int p;

	for (p = 0; p < nbpollers; p++)
		if (strcmp(pollers[p].name, poller_name) == 0)
			pollers[p].pref = 0;
}

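/* I/O handler for the wake-up pipe: drains whatever was written to it and
 * marks it not-ready again so that a subsequent write can wake the thread
 * anew.
 */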
void poller_pipe_io_handler(int fd)
{
	char buf[1024];
	/* Flush the pipe */
	while (read(fd, buf, sizeof(buf)) > 0);
	fd_cant_recv(fd);
}

/* allocates the per-thread fd_updt, thus it needs to be called early after
 * thread creation.
 */
static int alloc_pollers_per_thread()
{
	fd_updt = calloc(global.maxsock, sizeof(*fd_updt));
	return fd_updt != NULL;
}

/* Initialize the pollers per thread. */
static int init_pollers_per_thread()
{
	int mypipe[2];

	if (pipe(mypipe) < 0)
		return 0;

	poller_rd_pipe = mypipe[0];
	poller_wr_pipe[tid] = mypipe[1];
	fcntl(poller_rd_pipe, F_SETFL, O_NONBLOCK);
	fd_insert(poller_rd_pipe, poller_pipe_io_handler, poller_pipe_io_handler,
	          tid_bit);
	fd_want_recv(poller_rd_pipe);
	return 1;
}

/* Deinitialize the pollers per thread */
static void deinit_pollers_per_thread()
{
	/* rd and wr are initialized at the same place, but only rd is
	 * initialized to -1, so we rely on rd to know whether to close.
	 */
	if (poller_rd_pipe > -1) {
		close(poller_rd_pipe);
		poller_rd_pipe = -1;
		close(poller_wr_pipe[tid]);
		poller_wr_pipe[tid] = -1;
	}
}

/* Release the pollers per thread, to be called late */
static void free_pollers_per_thread()
{
	ha_free(&fd_updt);
}

/*
 * Initialize the pollers till the best one is found.
 * If none works, returns 0, otherwise 1.
 */
int init_pollers()
{
	int p;
	struct poller *bp;

	if ((fdtab = calloc(global.maxsock, sizeof(*fdtab))) == NULL) {
		ha_alert("Not enough memory to allocate %d entries for fdtab!\n", global.maxsock);
		goto fail_tab;
	}

	if ((polled_mask = calloc(global.maxsock, sizeof(*polled_mask))) == NULL) {
		ha_alert("Not enough memory to allocate %d entries for polled_mask!\n", global.maxsock);
		goto fail_polledmask;
	}

	if ((fdinfo = calloc(global.maxsock, sizeof(*fdinfo))) == NULL) {
		ha_alert("Not enough memory to allocate %d entries for fdinfo!\n", global.maxsock);
		goto fail_info;
	}

	update_list.first = update_list.last = -1;

	for (p = 0; p < global.maxsock; p++) {
		/* Mark the fd as out of the fd cache */
		fdtab[p].update.next = -3;
	}

	do {
		bp = NULL;
		for (p = 0; p < nbpollers; p++)
			if (!bp || (pollers[p].pref > bp->pref))
				bp = &pollers[p];

		if (!bp || bp->pref == 0)
			break;

		if (bp->init(bp)) {
			memcpy(&cur_poller, bp, sizeof(*bp));
			return 1;
		}
	} while (!bp || bp->pref == 0);

	free(fdinfo);
fail_info:
	free(polled_mask);
fail_polledmask:
	free(fdtab);
fail_tab:
	return 0;
}

/*
 * Deinitialize the pollers.
 */
void deinit_pollers() {

	struct poller *bp;
	int p;

	for (p = 0; p < nbpollers; p++) {
		bp = &pollers[p];

		if (bp && bp->pref)
			bp->term(bp);
	}

	ha_free(&fdinfo);
	ha_free(&fdtab);
	ha_free(&polled_mask);
}

/*
 * Lists the known pollers on <out>.
 * Should be performed only before initialization.
 */
int list_pollers(FILE *out)
{
	int p;
	int last, next;
	int usable;
	struct poller *bp;

	fprintf(out, "Available polling systems :\n");

	usable = 0;
	bp = NULL;
	last = next = -1;
	while (1) {
		for (p = 0; p < nbpollers; p++) {
			if ((next < 0 || pollers[p].pref > next)
			    && (last < 0 || pollers[p].pref < last)) {
				next = pollers[p].pref;
				if (!bp || (pollers[p].pref > bp->pref))
					bp = &pollers[p];
			}
		}

		if (next == -1)
			break;

		for (p = 0; p < nbpollers; p++) {
			if (pollers[p].pref == next) {
				fprintf(out, " %10s : ", pollers[p].name);
				if (pollers[p].pref == 0)
					fprintf(out, "disabled, ");
				else
					fprintf(out, "pref=%3d, ", pollers[p].pref);
				if (pollers[p].test(&pollers[p])) {
					fprintf(out, " test result OK");
					if (next > 0)
						usable++;
				} else {
					fprintf(out, " test result FAILED");
					if (bp == &pollers[p])
						bp = NULL;
				}
				fprintf(out, "\n");
			}
		}
		last = next;
		next = -1;
	};
	fprintf(out, "Total: %d (%d usable), will use %s.\n", nbpollers, usable, bp ? bp->name : "none");
	return 0;
}

/*
 * Some pollers may lose their connection after a fork(). It may be necessary
 * to re-initialize part of them again. Returns 0 in case of failure,
 * otherwise 1. The fork() function may be NULL if unused. In case of error,
 * the current poller is destroyed and the caller is responsible for trying
 * another one by calling init_pollers() again.
 */
int fork_poller()
{
	int fd;
	for (fd = 0; fd < global.maxsock; fd++) {
		if (fdtab[fd].owner) {
			HA_ATOMIC_OR(&fdtab[fd].state, FD_CLONED);
		}
	}

	if (cur_poller.fork) {
		if (cur_poller.fork(&cur_poller))
			return 1;
		cur_poller.term(&cur_poller);
		return 0;
	}
	return 1;
}

/* config parser for global "tune.fd.edge-triggered", accepts "on" or "off" */
static int cfg_parse_tune_fd_edge_triggered(char **args, int section_type, struct proxy *curpx,
                                            const struct proxy *defpx, const char *file, int line,
                                            char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;

	if (strcmp(args[1], "on") == 0)
		global.tune.options |= GTUNE_FD_ET;
	else if (strcmp(args[1], "off") == 0)
		global.tune.options &= ~GTUNE_FD_ET;
	else {
		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
		return -1;
	}
	return 0;
}
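
/* Example of the resulting configuration usage (sketch); since the keyword is
 * registered below with KWF_EXPERIMENTAL, recent versions also require
 * "expose-experimental-directives" to accept it:
 *
 *     global
 *         expose-experimental-directives
 *         tune.fd.edge-triggered on
 */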

/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.fd.edge-triggered", cfg_parse_tune_fd_edge_triggered, KWF_EXPERIMENTAL },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

REGISTER_PER_THREAD_ALLOC(alloc_pollers_per_thread);
REGISTER_PER_THREAD_INIT(init_pollers_per_thread);
REGISTER_PER_THREAD_DEINIT(deinit_pollers_per_thread);
REGISTER_PER_THREAD_FREE(free_pollers_per_thread);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */