/*
 * FD polling functions for generic select()
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>

#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>


/* private data */
static int maxfd;   /* # of the highest fd + 1 */
static unsigned int *fd_evts[2];
static THREAD_LOCAL fd_set *tmp_evts[2];

/* Immediately remove the entry upon close() */
static void __fd_clo(int fd)
{
	hap_fd_clr(fd, fd_evts[DIR_RD]);
	hap_fd_clr(fd, fd_evts[DIR_WR]);
}

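/* Reflect an fd's new state in the shared fd_evts[] bitmaps: the fd is either
 * dropped from both directions, or enabled/disabled per direction according
 * to its ACTIVE flags. polled_mask is updated accordingly, and *max_add_fd is
 * raised when a newly watched fd exceeds it.
 */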
static void _update_fd(int fd, int *max_add_fd)
{
	int en;

	en = fdtab[fd].state;

	/* we have a single state for all threads, which is why we
	 * don't check the tid_bit. First thread to see the update
	 * takes it for every other one.
	 */
	if (!(en & FD_EV_ACTIVE_RW)) {
		if (!(polled_mask[fd].poll_recv | polled_mask[fd].poll_send)) {
			/* fd was not watched, it's still not */
			return;
		}
		/* fd totally removed from poll list */
		hap_fd_clr(fd, fd_evts[DIR_RD]);
		hap_fd_clr(fd, fd_evts[DIR_WR]);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */
		if (!(en & FD_EV_ACTIVE_R)) {
			hap_fd_clr(fd, fd_evts[DIR_RD]);
			if (polled_mask[fd].poll_recv & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_RD]);
			if (!(polled_mask[fd].poll_recv & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, tid_bit);
		}

		if (!(en & FD_EV_ACTIVE_W)) {
			hap_fd_clr(fd, fd_evts[DIR_WR]);
			if (polled_mask[fd].poll_send & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_WR]);
			if (!(polled_mask[fd].poll_send & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, tid_bit);
		}

		if (fd > *max_add_fd)
			*max_add_fd = fd;
	}
}

/*
 * Select() poller
 */
static void _do_poll(struct poller *p, int exp, int wake)
{
	int status;
	int fd, i;
	struct timeval delta;
	int delta_ms;
	int fds;
	int updt_idx;
	char count;
	int readnotnull, writenotnull;
	int old_maxfd, new_maxfd, max_add_fd;
	int old_fd;

	max_add_fd = -1;

	/* first, scan the update list to find changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop_fd++;
			continue;
		}
		_update_fd(fd, &max_add_fd);
	}
	/* Now scan the global update list */
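	/* In this list, -1 terminates the chain, -2 marks an entry that is
	 * being modified concurrently (so the walk resumes from old_fd), and
	 * values <= -3 encode the next fd as -fd - 4.
	 */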
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit) {
			/* Cheat a bit: as the state is global to all pollers,
			 * we don't need every thread to take care of the
			 * update.
			 */
			_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
			done_update_polling(fd);
		} else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd, &max_add_fd);
	}


	/* maybe we added at least one fd larger than maxfd */
	for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
		if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
			break;
	}

	/* maxfd doesn't need to be precise but it needs to cover *all* active
	 * FDs, so we only shrink it when we have the opportunity. The algorithm
	 * is simple: look for the previously used place, try to update maxfd to
	 * point to it, and abort if maxfd changed in the meantime.
	 */
	old_maxfd = maxfd;
	do {
		new_maxfd = old_maxfd;
		while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
			new_maxfd--;
		if (new_maxfd >= old_maxfd)
			break;
	} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));

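	/* From here on this thread may sleep in select(), so declare it
	 * harmless to let operations waiting for all threads make progress.
	 */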
	thread_harmless_now();

	fd_nbupdt = 0;

	/* Restore the fdset state: select() modifies its fd_set arguments in
	 * place, so copy the shared enabled-fd bitmaps into the thread-local
	 * scratch sets before each call.
	 */
	readnotnull = 0; writenotnull = 0;
	for (i = 0; i < (maxfd + FD_SETSIZE - 1)/(8*sizeof(int)); i++) {
		readnotnull |= (*(((int*)tmp_evts[DIR_RD])+i) = *(((int*)fd_evts[DIR_RD])+i)) != 0;
		writenotnull |= (*(((int*)tmp_evts[DIR_WR])+i) = *(((int*)fd_evts[DIR_WR])+i)) != 0;
	}

	/* now let's wait for events */
	delta_ms = wake ? 0 : compute_poll_timeout(exp);
	delta.tv_sec  = (delta_ms / 1000);
	delta.tv_usec = (delta_ms % 1000) * 1000;
	tv_entering_poll();
	activity_count_runtime();
	status = select(maxfd,
			readnotnull ? tmp_evts[DIR_RD] : NULL,
			writenotnull ? tmp_evts[DIR_WR] : NULL,
			NULL,
			&delta);
	tv_update_date(delta_ms, status);
	tv_leaving_poll(delta_ms, status);

	thread_harmless_end();
	if (sleeping_thread_mask & tid_bit)
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);

	if (status <= 0)
		return;

	activity[tid].poll_io++;

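	/* Scan the result sets one int-sized word at a time and skip words in
	 * which no fd is ready in either direction.
	 */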
	for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
		if ((((int *)(tmp_evts[DIR_RD]))[fds] | ((int *)(tmp_evts[DIR_WR]))[fds]) == 0)
			continue;

		for (count = BITS_PER_INT, fd = fds * BITS_PER_INT; count && fd < maxfd; count--, fd++) {
			unsigned int n = 0;

			if (FD_ISSET(fd, tmp_evts[DIR_RD]))
				n |= FD_EV_READY_R;

			if (FD_ISSET(fd, tmp_evts[DIR_WR]))
				n |= FD_EV_READY_W;

			if (!n)
				continue;

#ifdef DEBUG_FD
			_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
#endif
			if (!fdtab[fd].owner) {
				activity[tid].poll_dead_fd++;
				continue;
			}

			if (!(fdtab[fd].thread_mask & tid_bit)) {
				activity[tid].poll_skip_fd++;
				continue;
			}

			fd_update_events(fd, n);
		}
	}
}

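/* Allocate the per-thread scratch fd_sets handed to select(). They are sized
 * to cover global.maxsock fds, rounded up to a multiple of FD_SETSIZE.
 * Returns 1 on success, 0 on allocation failure.
 */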
static int init_select_per_thread()
{
	int fd_set_bytes;

	fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
	tmp_evts[DIR_RD] = (fd_set *)calloc(1, fd_set_bytes);
	if (tmp_evts[DIR_RD] == NULL)
		goto fail;
	tmp_evts[DIR_WR] = (fd_set *)calloc(1, fd_set_bytes);
	if (tmp_evts[DIR_WR] == NULL)
		goto fail;
	return 1;
 fail:
	free(tmp_evts[DIR_RD]);
	free(tmp_evts[DIR_WR]);
	return 0;
}

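/* Release the per-thread scratch fd_sets when a thread exits. */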
static void deinit_select_per_thread()
{
	free(tmp_evts[DIR_WR]);
	tmp_evts[DIR_WR] = NULL;
	free(tmp_evts[DIR_RD]);
	tmp_evts[DIR_RD] = NULL;
}

/*
 * Initialization of the select() poller.
 * Returns 0 in case of failure, non-zero in case of success. If it fails, it
 * disables the poller by setting its pref to 0.
 */
static int _do_init(struct poller *p)
{
	int fd_set_bytes;

	p->private = NULL;

	if (global.maxsock > FD_SETSIZE)
		goto fail_srevt;

	fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;

	if ((fd_evts[DIR_RD] = calloc(1, fd_set_bytes)) == NULL)
		goto fail_srevt;
	if ((fd_evts[DIR_WR] = calloc(1, fd_set_bytes)) == NULL)
		goto fail_swevt;

	hap_register_per_thread_init(init_select_per_thread);
	hap_register_per_thread_deinit(deinit_select_per_thread);

	return 1;

 fail_swevt:
	free(fd_evts[DIR_RD]);
 fail_srevt:
	p->pref = 0;
	return 0;
}

/*
 * Termination of the select() poller.
 * Memory is released and the poller is marked as unselectable.
 */
static void _do_term(struct poller *p)
{
	free(fd_evts[DIR_WR]);
	free(fd_evts[DIR_RD]);
	p->private = NULL;
	p->pref = 0;
}

/*
 * Check that the poller works.
 * Returns 1 if OK, otherwise 0.
 */
static int _do_test(struct poller *p)
{
	if (global.maxsock > FD_SETSIZE)
		return 0;

	return 1;
}

/*
 * It is a constructor, which means that it will automatically be called before
 * main(). This is GCC-specific but it works at least since 2.95.
 * Special care must be taken so that it does not need any uninitialized data.
 */
__attribute__((constructor))
static void _do_register(void)
{
	struct poller *p;

	if (nbpollers >= MAX_POLLERS)
		return;
	p = &pollers[nbpollers++];

	p->name = "select";
	p->pref = 150;
	p->flags = 0;
	p->private = NULL;

	p->clo  = __fd_clo;
	p->test = _do_test;
	p->init = _do_init;
	p->term = _do_term;
	p->poll = _do_poll;
}


/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */