/*
 * include/proto/fd.h
 * File descriptor states.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_FD_H
#define _PROTO_FD_H

#include <stdio.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include <common/config.h>
#include <common/ticks.h>
#include <common/time.h>
#include <types/fd.h>
#include <proto/activity.h>

/* public variables */

extern volatile struct fdlist update_list;

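/* Per-FD masks of the threads whose poller currently has this FD programmed
 * for receiving and for sending (one bit per thread).
 */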
extern struct polled_mask {
	unsigned long poll_recv;
	unsigned long poll_send;
} *polled_mask;

extern THREAD_LOCAL int *fd_updt;  // FD updates list
extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list

extern int poller_wr_pipe[MAX_THREADS];

extern volatile int ha_used_fds; // Number of FDs we're currently using

/* Deletes an FD from the fdsets.
 * The file descriptor is also closed.
 */
void fd_delete(int fd);

/* Deletes an FD from the fdsets.
 * The file descriptor is kept open.
 */
void fd_remove(int fd);

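/* Writes a line built from the <npfx> prefix fragments in <pfx> followed by
 * the <nmsg> message fragments in <msg> to fd <fd>, limited to <maxlen>
 * bytes, with a trailing LF appended when <nl> is non-zero. See fd.c for the
 * exact return value semantics.
 */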
ssize_t fd_write_frag_line(int fd, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg, int nl);

/* close all FDs starting from <start> */
void my_closefrom(int start);

/* disable the specified poller */
void disable_poller(const char *poller_name);

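/* I/O handler for the poller wake-up pipe: drains the bytes written by
 * wake_thread() so that the pipe does not remain permanently readable.
 */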
void poller_pipe_io_handler(int fd);

/*
 * Initialize the pollers till the best one is found.
 * If none works, returns 0, otherwise 1.
 * The pollers register themselves just before main() is called.
 */
int init_pollers();

/*
 * Deinitialize the pollers.
 */
void deinit_pollers();

/*
 * Some pollers may lose their connection after a fork(). It may be necessary
 * to re-initialize part of them again. Returns 0 in case of failure,
 * otherwise 1. The fork() function may be NULL if unused. In case of error,
 * the current poller is destroyed and the caller is responsible for trying
 * another one by calling init_pollers() again.
 */
int fork_poller();

/*
 * Lists the known pollers on <out>.
 * Should be performed only before initialization.
 */
int list_pollers(FILE *out);

/*
 * Runs the polling loop
 */
void run_poller();

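/* Lock-free addition/removal of fd <fd> to/from fd list <list>, using the
 * list element located at offset <off> inside fdtab[fd]. updt_fd_polling()
 * queues fd <fd> for a polling state update on the next polling round.
 */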
void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off);
void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off);
void updt_fd_polling(const int fd);

/* Called from the poller to acknowledge we read an entry from the global
 * update list, to remove our bit from the update_mask, and remove it from
 * the list if we were the last one.
 */
static inline void done_update_polling(int fd)
{
	unsigned long update_mask;

	update_mask = _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
	while ((update_mask & all_threads_mask) == 0) {
		/* If we were the last one that had to update that entry, remove it from the list */
		fd_rm_from_fd_list(&update_list, fd, offsetof(struct fdtab, update));
		update_mask = (volatile unsigned long)fdtab[fd].update_mask;
		if ((update_mask & all_threads_mask) != 0) {
			/* Maybe it's been re-updated in the meanwhile, and we
			 * wrongly removed it from the list, if so, re-add it
			 */
			fd_add_to_fd_list(&update_list, fd, offsetof(struct fdtab, update));
			update_mask = (volatile unsigned long)(fdtab[fd].update_mask);
			/* And then check again, just in case after all it
			 * should be removed, even if it's very unlikely, given
			 * the current thread wouldn't have been able to take
			 * care of it yet */
		} else
			break;

	}
}

/*
 * returns the FD's recv state (FD_EV_*)
 */
static inline int fd_recv_state(const int fd)
{
	return ((unsigned)fdtab[fd].state >> (4 * DIR_RD)) & FD_EV_STATUS;
}

/*
 * returns true if the FD is active for recv
 */
static inline int fd_recv_active(const int fd)
{
	return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_R;
}

/*
 * returns true if the FD is ready for recv
 */
static inline int fd_recv_ready(const int fd)
{
	return (unsigned)fdtab[fd].state & FD_EV_READY_R;
}

/*
 * returns the FD's send state (FD_EV_*)
 */
static inline int fd_send_state(const int fd)
{
	return ((unsigned)fdtab[fd].state >> (4 * DIR_WR)) & FD_EV_STATUS;
}

/*
 * returns true if the FD is active for send
 */
static inline int fd_send_active(const int fd)
{
	return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_W;
}

/*
 * returns true if the FD is ready for send
 */
static inline int fd_send_ready(const int fd)
{
	return (unsigned)fdtab[fd].state & FD_EV_READY_W;
}

/*
 * returns true if the FD is active for recv or send
 */
static inline int fd_active(const int fd)
{
	return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_RW;
}

/* Disable processing recv events on fd <fd> */
static inline void fd_stop_recv(int fd)
{
	if (!(fdtab[fd].state & FD_EV_ACTIVE_R) ||
	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_ACTIVE_R_BIT))
		return;
	updt_fd_polling(fd);
}

/* Disable processing send events on fd <fd> */
static inline void fd_stop_send(int fd)
{
	if (!(fdtab[fd].state & FD_EV_ACTIVE_W) ||
	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_ACTIVE_W_BIT))
		return;
	updt_fd_polling(fd);
}

/* Disable processing of events on fd <fd> for both directions. */
static inline void fd_stop_both(int fd)
{
	unsigned char old, new;

	old = fdtab[fd].state;
	do {
		if (!(old & FD_EV_ACTIVE_RW))
			return;
		new = old & ~FD_EV_ACTIVE_RW;
	} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
	updt_fd_polling(fd);
}

/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
static inline void fd_cant_recv(const int fd)
{
	/* removing ready never changes polled status */
	if (!(fdtab[fd].state & FD_EV_READY_R) ||
	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_READY_R_BIT))
		return;
}

/* Report that FD <fd> may receive again without polling. */
static inline void fd_may_recv(const int fd)
{
	/* marking ready never changes polled status */
	if ((fdtab[fd].state & FD_EV_READY_R) ||
	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_R_BIT))
		return;
}

/* Report that FD <fd> may receive again without polling but only if it's not
 * active yet. This is in order to speculatively try to enable I/Os when it's
 * highly likely that these will succeed, but without interfering with polling.
 */
static inline void fd_cond_recv(const int fd)
{
	if ((fdtab[fd].state & (FD_EV_ACTIVE_R|FD_EV_READY_R)) == 0)
		HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_R_BIT);
}

/* Report that FD <fd> may send again without polling but only if it's not
 * active yet. This is in order to speculatively try to enable I/Os when it's
 * highly likely that these will succeed, but without interfering with polling.
 */
static inline void fd_cond_send(const int fd)
{
	if ((fdtab[fd].state & (FD_EV_ACTIVE_W|FD_EV_READY_W)) == 0)
		HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_W_BIT);
}

/* Report that FD <fd> may receive and send without polling. Used at FD
 * initialization.
 */
static inline void fd_may_both(const int fd)
{
	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_RW);
}

/* Disable readiness when active. This is useful to interrupt reading when it
 * is suspected that the end of data might have been reached (eg: short read).
 * This can only be done using level-triggered pollers, so if any edge-triggered
 * is ever implemented, a test will have to be added here.
 */
static inline void fd_done_recv(const int fd)
{
	/* removing ready never changes polled status */
	if ((fdtab[fd].state & (FD_EV_ACTIVE_R|FD_EV_READY_R)) != (FD_EV_ACTIVE_R|FD_EV_READY_R) ||
	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_READY_R_BIT))
		return;
}

/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
static inline void fd_cant_send(const int fd)
{
	/* removing ready never changes polled status */
	if (!(fdtab[fd].state & FD_EV_READY_W) ||
	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_READY_W_BIT))
		return;
}

/* Report that FD <fd> may send again without polling (EAGAIN not detected). */
static inline void fd_may_send(const int fd)
{
	/* marking ready never changes polled status */
	if ((fdtab[fd].state & FD_EV_READY_W) ||
	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_W_BIT))
		return;
}

/* Prepare FD <fd> to try to receive */
static inline void fd_want_recv(int fd)
{
	if ((fdtab[fd].state & FD_EV_ACTIVE_R) ||
	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_ACTIVE_R_BIT))
		return;
	updt_fd_polling(fd);
}

/* Prepare FD <fd> to try to send */
static inline void fd_want_send(int fd)
{
	if ((fdtab[fd].state & FD_EV_ACTIVE_W) ||
	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_ACTIVE_W_BIT))
		return;
	updt_fd_polling(fd);
}

/* Update the events seen for FD <fd> and its state if needed. This should be
 * called by the poller, passing FD_EV_*_{R,W,RW} in <evts>. A caller reporting
 * FD_EV_ERR_* does not need to also pass FD_EV_SHUT_*, it is implied. ERR and
 * SHUT are allowed to be reported regardless of R/W readiness.
 */
static inline void fd_update_events(int fd, unsigned char evts)
{
	unsigned long locked = atleast2(fdtab[fd].thread_mask);
	unsigned char old, new;
	int new_flags;

	new_flags =
	      ((evts & FD_EV_READY_R) ? FD_POLL_IN  : 0) |
	      ((evts & FD_EV_READY_W) ? FD_POLL_OUT : 0) |
	      ((evts & FD_EV_SHUT_R)  ? FD_POLL_HUP : 0) |
	      ((evts & FD_EV_ERR_R)   ? FD_POLL_ERR : 0) |
	      ((evts & FD_EV_ERR_W)   ? FD_POLL_ERR : 0);

	/* SHUTW reported while FD was active for writes is an error */
	if ((fdtab[fd].ev & FD_EV_ACTIVE_W) && (evts & FD_EV_SHUT_W))
		new_flags |= FD_POLL_ERR;

	old = fdtab[fd].ev;
	new = (old & FD_POLL_STICKY) | new_flags;

	if (unlikely(locked)) {
		/* Locked FDs (those with two or more threads) are atomically updated */
		while (unlikely(new != old && !_HA_ATOMIC_CAS(&fdtab[fd].ev, &old, new)))
			new = (old & FD_POLL_STICKY) | new_flags;
	} else {
		if (new != old)
			fdtab[fd].ev = new;
	}

	if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
		fd_may_recv(fd);

	if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
		fd_may_send(fd);

	if (fdtab[fd].iocb)
		fdtab[fd].iocb(fd);

	ti->flags &= ~TI_FL_STUCK; // this thread is still running
}

/* Prepares <fd> for being polled */
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
{
	unsigned long locked = atleast2(thread_mask);

	if (locked)
		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
	fdtab[fd].owner = owner;
	fdtab[fd].iocb = iocb;
	fdtab[fd].ev = 0;
	fdtab[fd].linger_risk = 0;
	fdtab[fd].cloned = 0;
	fdtab[fd].thread_mask = thread_mask;
	/* note: do not reset polled_mask here as it indicates which poller
	 * still knows this FD from a possible previous round.
	 */
	if (locked)
		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
	/* the two directions are ready until proven otherwise */
	fd_may_both(fd);
	_HA_ATOMIC_ADD(&ha_used_fds, 1);
}
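
/* Illustrative usage sketch (not part of the API): a caller typically
 * registers its FD then subscribes to the events it cares about, e.g. with a
 * hypothetical handler my_iocb:
 *
 *     fd_insert(fd, owner, my_iocb, tid_bit);  // register for this thread only
 *     fd_want_recv(fd);                        // then ask to be notified of reads
 */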

/* Computes the bounded poll() timeout based on the next expiration timer <next>
 * by bounding it to MAX_DELAY_MS. <next> may equal TICK_ETERNITY. The pollers
 * just need to call this function right before polling to get their timeout
 * value. Timeouts that are already expired (possibly due to a pending event)
 * are accounted for in activity.poll_exp.
 */
static inline int compute_poll_timeout(int next)
{
	int wait_time;

	if (!tick_isset(next))
		wait_time = MAX_DELAY_MS;
	else if (tick_is_expired(next, now_ms)) {
		activity[tid].poll_exp++;
		wait_time = 0;
	}
	else {
		wait_time = TICKS_TO_MS(tick_remain(now_ms, next)) + 1;
		if (wait_time > MAX_DELAY_MS)
			wait_time = MAX_DELAY_MS;
	}
	return wait_time;
}

/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
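/* For example, with 32-bit uints, fd 70 maps to bit 6 (70 & 31) of evts[2]. */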
static inline void hap_fd_set(int fd, unsigned int *evts)
{
	_HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
}

static inline void hap_fd_clr(int fd, unsigned int *evts)
{
	_HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
}

static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
{
	return evts[fd / (8*sizeof(*evts))] & (1U << (fd & (8*sizeof(*evts) - 1)));
}

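/* Wakes up thread <tid> by writing one byte to its end of the poller pipe,
 * interrupting a poller that may be sleeping in its polling syscall.
 */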
static inline void wake_thread(int tid)
{
	char c = 'c';

	shut_your_big_mouth_gcc(write(poller_wr_pipe[tid], &c, 1));
}


#endif /* _PROTO_FD_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */