/*
 * include/proto/fd.h
 * File descriptor states.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_FD_H
#define _PROTO_FD_H

#include <stdio.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include <common/config.h>

#include <types/fd.h>

/* public variables */

extern volatile struct fdlist fd_cache;
extern volatile struct fdlist fd_cache_local[MAX_THREADS];

extern unsigned long fd_cache_mask; // Mask of threads with events in the cache

extern THREAD_LOCAL int *fd_updt;  // FD updates list
extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list

__decl_hathreads(extern HA_RWLOCK_T __attribute__((aligned(64))) fdcache_lock); /* global lock to protect fd_cache array */

/* Deletes an FD from the fdsets.
 * The file descriptor is also closed.
 */
void fd_delete(int fd);

/* Deletes an FD from the fdsets.
 * The file descriptor is kept open.
 */
void fd_remove(int fd);

/* disable the specified poller */
void disable_poller(const char *poller_name);

/*
 * Initialize the pollers until the best one is found.
 * If none works, returns 0, otherwise 1.
 * The pollers register themselves just before main() is called.
 */
int init_pollers();

/*
 * Deinitialize the pollers.
 */
void deinit_pollers();

/*
 * Some pollers may lose their connection after a fork(). It may be necessary
 * to re-initialize part of them again. Returns 0 in case of failure,
 * otherwise 1. The fork() function may be NULL if unused. In case of error,
 * the current poller is destroyed and the caller is responsible for trying
 * another one by calling init_pollers() again.
 */
int fork_poller();

/*
 * Lists the known pollers on <out>.
 * Should be performed only before initialization.
 */
int list_pollers(FILE *out);

/*
 * Runs the polling loop.
 */
void run_poller();

/* Scan and process the cached events. This should be called right after
 * the poller.
 */
void fd_process_cached_events();
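
/* Illustrative sketch only (the loop itself lives in the callers, and the
 * names used here are assumptions, not part of this file's API): a polling
 * iteration typically waits in the platform poller first, then handles the
 * FDs already known to be ready without polling, e.g.:
 *
 *     cur_poller.poll(&cur_poller, next_timeout);  // wait for new events
 *     fd_process_cached_events();                  // run cached/ready FDs
 */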

/* Mark fd <fd> as updated for polling and allocate an entry in the update list
 * for it if it was not already there. This can be done at any time.
 */
static inline void updt_fd_polling(const int fd)
{
        if (fdtab[fd].update_mask & tid_bit)
                /* already scheduled for update */
                return;
        fdtab[fd].update_mask |= tid_bit;
        fd_updt[fd_nbupdt++] = fd;
}

static inline void fd_add_to_fd_list(volatile struct fdlist *list, int fd)
{
        int next;
        int new;
        int old;
        int last;

redo_next:
        next = fdtab[fd].cache.next;
        /* Check that we're not already in the cache, and if not, lock us. */
        if (next >= -2)
                goto done;
        if (!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2))
                goto redo_next;
        __ha_barrier_store();
redo_last:
        /* First, insert in the linked list */
        last = list->last;
        old = -1;
        new = fd;
        if (unlikely(last == -1)) {
                /* list is empty, try to add ourselves alone so that list->last=fd */

                fdtab[fd].cache.prev = last;

                /* Make sure the "prev" store is visible before we update the last entry */
                __ha_barrier_store();
                if (unlikely(!HA_ATOMIC_CAS(&list->last, &old, new)))
                        goto redo_last;

                /* list->first was necessarily -1, we're guaranteed to be alone here */
                list->first = fd;

                /* since we're alone at the end of the list and still locked (-2),
                 * we know no one tried to add past us. Mark the end of list.
                 */
                fdtab[fd].cache.next = -1;
                goto done; /* We're done! */
        } else {
                /* non-empty list, add past the tail */
                do {
                        new = fd;
                        old = -1;
                        fdtab[fd].cache.prev = last;

                        __ha_barrier_store();

                        /* Add ourselves past the last element.
                         * The CAS will only succeed if its next is -1,
                         * which means it's in the cache and is the last element.
                         */
                        if (likely(HA_ATOMIC_CAS(&fdtab[last].cache.next, &old, new)))
                                break;
                        goto redo_last;
                } while (1);
        }
        /* Then, update the last entry */
redo_fd_cache:
        last = list->last;
        __ha_barrier_load();

        if (unlikely(!HA_ATOMIC_CAS(&list->last, &last, fd)))
                goto redo_fd_cache;
        __ha_barrier_store();
        fdtab[fd].cache.next = -1;
        __ha_barrier_store();
done:
        return;
}
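
/* Reading aid summarizing the cache.next encoding used by fd_add_to_fd_list()
 * above and fd_rm_from_fd_list() below (derived from the code itself):
 *   >= 0 : index of the next FD in the list
 *   -1   : in the list, and currently the last element
 *   -2   : entry temporarily locked by a writer
 *   <= -3: not in the list; the previous "next" value is kept encoded
 *          as -(next + 4)
 */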

/* Allocates a cache entry for a file descriptor if it does not yet have one.
 * This can be done at any time.
 */
static inline void fd_alloc_cache_entry(const int fd)
{
        if (!(fdtab[fd].thread_mask & (fdtab[fd].thread_mask - 1)))
                fd_add_to_fd_list(&fd_cache_local[my_ffsl(fdtab[fd].thread_mask) - 1], fd);
        else
                fd_add_to_fd_list(&fd_cache, fd);
}
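
/* Reading aid: the test above uses the classic x & (x - 1) trick, which is
 * zero when at most one bit is set. An FD bound to a single thread thus goes
 * to that thread's local cache (fd_cache_local[my_ffsl(mask) - 1]), while an
 * FD shared by several threads goes to the global fd_cache.
 */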

static inline void fd_rm_from_fd_list(volatile struct fdlist *list, int fd)
{
#if defined(HA_HAVE_CAS_DW) || defined(HA_CAS_IS_8B)
        volatile struct fdlist_entry cur_list, next_list;
#endif
        int old;
        int new = -2;
        int prev;
        int next;
        int last;

lock_self:
#if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
        next_list.next = next_list.prev = -2;
        cur_list = fdtab[fd].cache;
        /* First, attempt to lock our own entries */
        do {
                /* The FD is not in the FD cache, give up */
                if (unlikely(cur_list.next <= -3))
                        return;
                if (unlikely(cur_list.prev == -2 || cur_list.next == -2))
                        goto lock_self;
        } while (
#ifdef HA_CAS_IS_8B
            unlikely(!HA_ATOMIC_CAS(((void **)(void *)&fdtab[fd].cache.next), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
#else
            unlikely(!__ha_cas_dw((void *)&fdtab[fd].cache.next, (void *)&cur_list, (void *)&next_list)))
#endif
            ;
        next = cur_list.next;
        prev = cur_list.prev;

#else
lock_self_next:
        next = fdtab[fd].cache.next;
        if (next == -2)
                goto lock_self_next;
        if (next <= -3)
                goto done;
        if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2)))
                goto lock_self_next;
lock_self_prev:
        prev = fdtab[fd].cache.prev;
        if (prev == -2)
                goto lock_self_prev;
        if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.prev, &prev, -2)))
                goto lock_self_prev;
#endif
        __ha_barrier_store();

        /* Now, lock the entries of our neighbours */
        if (likely(prev != -1)) {
redo_prev:
                old = fd;

                if (unlikely(!HA_ATOMIC_CAS(&fdtab[prev].cache.next, &old, new))) {
                        if (unlikely(old == -2)) {
                                /* Neighbour already locked, give up and
                                 * retry once it's done
                                 */
                                fdtab[fd].cache.prev = prev;
                                __ha_barrier_store();
                                fdtab[fd].cache.next = next;
                                __ha_barrier_store();
                                goto lock_self;
                        }
                        goto redo_prev;
                }
        }
        if (likely(next != -1)) {
redo_next:
                old = fd;
                if (unlikely(!HA_ATOMIC_CAS(&fdtab[next].cache.prev, &old, new))) {
                        if (unlikely(old == -2)) {
                                /* Neighbour already locked, give up and
                                 * retry once it's done
                                 */
                                if (prev != -1) {
                                        fdtab[prev].cache.next = fd;
                                        __ha_barrier_store();
                                }
                                fdtab[fd].cache.prev = prev;
                                __ha_barrier_store();
                                fdtab[fd].cache.next = next;
                                __ha_barrier_store();
                                goto lock_self;
                        }
                        goto redo_next;
                }
        }
        if (list->first == fd)
                list->first = next;
        __ha_barrier_store();
        last = list->last;
        while (unlikely(last == fd && (!HA_ATOMIC_CAS(&list->last, &last, prev))))
                __ha_compiler_barrier();
        /* Make sure we let other threads know we're no longer in cache,
         * before releasing our neighbours.
         */
        __ha_barrier_store();
        if (likely(prev != -1))
                fdtab[prev].cache.next = next;
        __ha_barrier_store();
        if (likely(next != -1))
                fdtab[next].cache.prev = prev;
        __ha_barrier_store();
        /* Ok, now we're out of the fd cache */
        fdtab[fd].cache.next = -(next + 4);
        __ha_barrier_store();
done:
        return;
}

/* Removes the entry used by fd <fd> from the FD cache.
 * If the fd has no entry assigned, return immediately.
 */
static inline void fd_release_cache_entry(int fd)
{
        if (!(fdtab[fd].thread_mask & (fdtab[fd].thread_mask - 1)))
                fd_rm_from_fd_list(&fd_cache_local[my_ffsl(fdtab[fd].thread_mask) - 1], fd);
        else
                fd_rm_from_fd_list(&fd_cache, fd);
}

/* Computes the new polled status based on the active and ready statuses, for
 * each direction. This is meant to be used by pollers while processing updates.
 */
static inline int fd_compute_new_polled_status(int state)
{
        if (state & FD_EV_ACTIVE_R) {
                if (!(state & FD_EV_READY_R))
                        state |= FD_EV_POLLED_R;
        }
        else
                state &= ~FD_EV_POLLED_R;

        if (state & FD_EV_ACTIVE_W) {
                if (!(state & FD_EV_READY_W))
                        state |= FD_EV_POLLED_W;
        }
        else
                state &= ~FD_EV_POLLED_W;

        return state;
}
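
/* The resulting POLLED flag thus follows this logic for each direction
 * (shown for the read side; the write side is symmetric):
 *   - ACTIVE_R set, READY_R clear : POLLED_R is set (must ask the poller)
 *   - ACTIVE_R set, READY_R set   : POLLED_R is left unchanged
 *   - ACTIVE_R clear              : POLLED_R is cleared
 */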

/* This function automatically enables/disables caching for an entry depending
 * on its state, and also possibly creates an update entry so that the poller
 * does its job as well. It is only called on state changes.
 */
static inline void fd_update_cache(int fd)
{
        /* 3 states for each direction require a polling update */
        if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R ||
            (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R ||
            (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W ||
            (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W)
                updt_fd_polling(fd);

        /* only READY and ACTIVE states (the two with both flags set) require a cache entry */
        if (((fdtab[fd].state & (FD_EV_READY_R | FD_EV_ACTIVE_R)) == (FD_EV_READY_R | FD_EV_ACTIVE_R)) ||
            ((fdtab[fd].state & (FD_EV_READY_W | FD_EV_ACTIVE_W)) == (FD_EV_READY_W | FD_EV_ACTIVE_W))) {
                fd_alloc_cache_entry(fd);
        }
        else {
                fd_release_cache_entry(fd);
        }
}
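
/* Example of the intended effect (a sketch based on the conditions above):
 * when fd_want_recv() marks an FD active for reading while it is not ready,
 * the ACTIVE_R-without-READY_R condition matches, so an update entry is
 * queued and the poller will start watching the FD; once fd_may_recv()
 * marks it ready while still active, the second test gives it a cache entry
 * so it is processed without waiting for the poller again.
 */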

/*
 * returns the FD's recv state (FD_EV_*)
 */
static inline int fd_recv_state(const int fd)
{
        return ((unsigned)fdtab[fd].state >> (4 * DIR_RD)) & FD_EV_STATUS;
}

/*
 * returns true if the FD is active for recv
 */
static inline int fd_recv_active(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_R;
}

/*
 * returns true if the FD is ready for recv
 */
static inline int fd_recv_ready(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_READY_R;
}

/*
 * returns true if the FD is polled for recv
 */
static inline int fd_recv_polled(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_POLLED_R;
}

/*
 * returns the FD's send state (FD_EV_*)
 */
static inline int fd_send_state(const int fd)
{
        return ((unsigned)fdtab[fd].state >> (4 * DIR_WR)) & FD_EV_STATUS;
}
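
/* As fd_recv_state() and fd_send_state() show, each direction's status bits
 * live in their own 4-bit group of fdtab[fd].state: the read group starts at
 * bit 4*DIR_RD and the write group at bit 4*DIR_WR, and FD_EV_STATUS masks
 * one group once shifted into place. The helpers below simply test individual
 * bits of that same word.
 */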

/*
 * returns true if the FD is active for send
 */
static inline int fd_send_active(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_W;
}

/*
 * returns true if the FD is ready for send
 */
static inline int fd_send_ready(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_READY_W;
}

/*
 * returns true if the FD is polled for send
 */
static inline int fd_send_polled(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_POLLED_W;
}

/*
 * returns true if the FD is active for recv or send
 */
static inline int fd_active(const int fd)
{
        return (unsigned)fdtab[fd].state & FD_EV_ACTIVE_RW;
}

/* Disable processing recv events on fd <fd> */
static inline void fd_stop_recv(int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_recv_active(fd)) {
                fdtab[fd].state &= ~FD_EV_ACTIVE_R;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Disable processing send events on fd <fd> */
static inline void fd_stop_send(int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_send_active(fd)) {
                fdtab[fd].state &= ~FD_EV_ACTIVE_W;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Disable processing of events on fd <fd> for both directions. */
static inline void fd_stop_both(int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_active(fd)) {
                fdtab[fd].state &= ~FD_EV_ACTIVE_RW;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
static inline void fd_cant_recv(const int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_recv_ready(fd)) {
                fdtab[fd].state &= ~FD_EV_READY_R;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Report that FD <fd> can receive again without polling. */
static inline void fd_may_recv(const int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (!fd_recv_ready(fd)) {
                fdtab[fd].state |= FD_EV_READY_R;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Disable readiness when polled. This is useful to interrupt reading when it
 * is suspected that the end of data might have been reached (eg: short read).
 * This can only be done using level-triggered pollers, so if any edge-triggered
 * is ever implemented, a test will have to be added here.
 */
static inline void fd_done_recv(const int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_recv_polled(fd) && fd_recv_ready(fd)) {
                fdtab[fd].state &= ~FD_EV_READY_R;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
static inline void fd_cant_send(const int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (fd_send_ready(fd)) {
                fdtab[fd].state &= ~FD_EV_READY_W;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Report that FD <fd> can send again without polling. */
static inline void fd_may_send(const int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (!fd_send_ready(fd)) {
                fdtab[fd].state |= FD_EV_READY_W;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
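
/* Typical readiness flow (illustrative sketch, not a contract): an I/O
 * handler reads until it hits EAGAIN and then calls fd_cant_recv(), which
 * clears READY_R so the FD falls back to polling; when the poller later
 * reports the FD readable, fd_may_recv() restores READY_R so the handler
 * can run again from the cache. The send side mirrors this with
 * fd_cant_send()/fd_may_send().
 */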

/* Prepare FD <fd> to try to receive */
static inline void fd_want_recv(int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (!fd_recv_active(fd)) {
                fdtab[fd].state |= FD_EV_ACTIVE_R;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

/* Prepare FD <fd> to try to send */
static inline void fd_want_send(int fd)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        if (!fd_send_active(fd)) {
                fdtab[fd].state |= FD_EV_ACTIVE_W;
                fd_update_cache(fd); /* need an update entry to change the state */
        }
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
Willy Tarreau2a429502006-10-15 14:52:29 +0200544
Christopher Faulet21e92672017-08-30 10:30:04 +0200545/* Update events seen for FD <fd> and its state if needed. This should be called
546 * by the poller to set FD_POLL_* flags. */
547static inline void fd_update_events(int fd, int evts)
548{
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100549 HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
Christopher Faulet21e92672017-08-30 10:30:04 +0200550 fdtab[fd].ev &= FD_POLL_STICKY;
551 fdtab[fd].ev |= evts;
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100552 HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
Christopher Faulet21e92672017-08-30 10:30:04 +0200553
554 if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
555 fd_may_recv(fd);
556
557 if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
558 fd_may_send(fd);
559}
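
/* Poller-side usage sketch (hypothetical epoll-based example; only the
 * FD_POLL_* flags and fd_update_events() come from this file, the rest is
 * an assumption for illustration):
 *
 *     int evts = 0;
 *     if (e & EPOLLIN)  evts |= FD_POLL_IN;
 *     if (e & EPOLLOUT) evts |= FD_POLL_OUT;
 *     if (e & EPOLLERR) evts |= FD_POLL_ERR;
 *     if (e & EPOLLHUP) evts |= FD_POLL_HUP;
 *     fd_update_events(fd, evts);
 */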

/* Prepares <fd> for being polled */
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
{
        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
        fdtab[fd].owner = owner;
        fdtab[fd].iocb = iocb;
        fdtab[fd].ev = 0;
        fdtab[fd].update_mask &= ~tid_bit;
        fdtab[fd].linger_risk = 0;
        fdtab[fd].cloned = 0;
        fdtab[fd].thread_mask = thread_mask;
        /* note: do not reset polled_mask here as it indicates which poller
         * still knows this FD from a possible previous round.
         */
        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
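
/* Registration sketch (illustrative; my_iocb and the single-thread mask are
 * assumptions, not requirements of this API): once a socket is obtained, a
 * caller typically registers it and then subscribes for reads:
 *
 *     fd_insert(fd, owner, my_iocb, tid_bit);  // bind the FD to this thread
 *     fd_want_recv(fd);                        // ask to be told when readable
 */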

/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
static inline void hap_fd_set(int fd, unsigned int *evts)
{
        HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
}

static inline void hap_fd_clr(int fd, unsigned int *evts)
{
        HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
}

static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
{
        return evts[fd / (8*sizeof(*evts))] & (1U << (fd & (8*sizeof(*evts) - 1)));
}
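
/* Worked example of the bit arithmetic above: with 32-bit unsigned ints,
 * fd 70 lands in evts[70 / 32] = evts[2], bit 70 & 31 = 6, so
 * hap_fd_set(70, evts) atomically ORs evts[2] with (1U << 6).
 */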


#endif /* _PROTO_FD_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */