/*
 * FD polling functions for generic select()
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>

#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>


/* private data */
static int maxfd;   /* # of the highest fd + 1 */
static unsigned int *fd_evts[2];
static THREAD_LOCAL fd_set *tmp_evts[2];
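/* Note: fd_evts[] is shared by all threads (one bit per FD and direction),
 * while tmp_evts[] is each thread's private working copy handed to select(),
 * since select() overwrites its fd_sets in place.
 */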

/* Immediately remove the entry upon close() */
static void __fd_clo(int fd)
{
	hap_fd_clr(fd, fd_evts[DIR_RD]);
	hap_fd_clr(fd, fd_evts[DIR_WR]);
}

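/* Applies the state found in fdtab[fd].state to the shared select() bitmaps
 * and to polled_mask[], and raises <max_add_fd> if the FD was newly added
 * above the current maximum. The state is common to all threads, so the
 * first thread seeing an update applies it for everyone.
 */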
static void _update_fd(int fd, int *max_add_fd)
{
	int en;

	en = fdtab[fd].state;

	/* we have a single state for all threads, which is why we
	 * don't check the tid_bit. First thread to see the update
	 * takes it for every other one.
	 */
	if (!(en & FD_EV_ACTIVE_RW)) {
		if (!(polled_mask[fd].poll_recv | polled_mask[fd].poll_send)) {
			/* fd was not watched, it's still not */
			return;
		}
		/* fd totally removed from poll list */
		hap_fd_clr(fd, fd_evts[DIR_RD]);
		hap_fd_clr(fd, fd_evts[DIR_WR]);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */
		if (!(en & FD_EV_ACTIVE_R)) {
			hap_fd_clr(fd, fd_evts[DIR_RD]);
			if (polled_mask[fd].poll_recv & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_RD]);
			if (!(polled_mask[fd].poll_recv & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, tid_bit);
		}

		if (!(en & FD_EV_ACTIVE_W)) {
			hap_fd_clr(fd, fd_evts[DIR_WR]);
			if (polled_mask[fd].poll_send & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_WR]);
			if (!(polled_mask[fd].poll_send & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, tid_bit);
		}

		if (fd > *max_add_fd)
			*max_add_fd = fd;
	}
}

/*
 * Select() poller
 */
static void _do_poll(struct poller *p, int exp, int wake)
{
	int status;
	int fd, i;
	struct timeval delta;
	int delta_ms;
	int fds;
	int updt_idx;
	char count;
	int readnotnull, writenotnull;
	int old_maxfd, new_maxfd, max_add_fd;
	int old_fd;

	max_add_fd = -1;

	/* first, scan the update list to find changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop_fd++;
			continue;
		}
		_update_fd(fd, &max_add_fd);
	}
	/* Now scan the global update list */
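	/* Entries are linked through fdtab[].update. The negative values
	 * below appear to be sentinels from the lock-free list code in fd.c:
	 * -2 marks an entry being modified (restart from the previous FD),
	 * and values <= -3 encode the next FD of a removed entry as -fd-4
	 * (interpretation based on the checks below).
	 */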
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit) {
			/* Cheat a bit: since the state is global to all
			 * pollers, we don't need every thread to take care
			 * of the update.
			 */
			_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
			done_update_polling(fd);
		} else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd, &max_add_fd);
	}


	/* maybe we added at least one fd larger than maxfd */
	for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
		if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
			break;
	}

	/* maxfd doesn't need to be precise but it needs to cover *all* active
	 * FDs. Thus we only shrink it if we have such an opportunity. The algo
	 * is simple : look for the previous used place, try to update maxfd to
	 * point to it, abort if maxfd changed in the mean time.
	 */
	old_maxfd = maxfd;
	do {
		new_maxfd = old_maxfd;
		while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
			new_maxfd--;
		if (new_maxfd >= old_maxfd)
			break;
	} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));

	thread_harmless_now();

	fd_nbupdt = 0;

	/* let's restore fdset state */
	readnotnull = 0; writenotnull = 0;
	for (i = 0; i < (maxfd + FD_SETSIZE - 1)/(8*sizeof(int)); i++) {
		readnotnull |= (*(((int*)tmp_evts[DIR_RD])+i) = *(((int*)fd_evts[DIR_RD])+i)) != 0;
		writenotnull |= (*(((int*)tmp_evts[DIR_WR])+i) = *(((int*)fd_evts[DIR_WR])+i)) != 0;
	}
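	/* readnotnull/writenotnull record whether any bit is set at all, so
	 * that a fully empty direction can be passed to select() as NULL.
	 */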

	/* now let's wait for events */
	delta_ms = wake ? 0 : compute_poll_timeout(exp);
	delta.tv_sec = (delta_ms / 1000);
	delta.tv_usec = (delta_ms % 1000) * 1000;
	tv_entering_poll();
	activity_count_runtime();
	status = select(maxfd,
			readnotnull ? tmp_evts[DIR_RD] : NULL,
			writenotnull ? tmp_evts[DIR_WR] : NULL,
			NULL,
			&delta);
	tv_update_date(delta_ms, status);
	tv_leaving_poll(delta_ms, status);

	thread_harmless_end();
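	/* mark this thread as no longer sleeping in the poller */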
	if (sleeping_thread_mask & tid_bit)
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);

	if (status <= 0)
		return;

	activity[tid].poll_io++;

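	/* scan the returned sets: whole ints with no bit set are skipped in
	 * one go, then each ready FD is reported to fd_update_events().
	 */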
	for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
		if ((((int *)(tmp_evts[DIR_RD]))[fds] | ((int *)(tmp_evts[DIR_WR]))[fds]) == 0)
			continue;

		for (count = BITS_PER_INT, fd = fds * BITS_PER_INT; count && fd < maxfd; count--, fd++) {
			unsigned int n = 0;

			if (!fdtab[fd].owner) {
				activity[tid].poll_dead_fd++;
				continue;
			}

			if (!(fdtab[fd].thread_mask & tid_bit)) {
				activity[tid].poll_skip_fd++;
				continue;
			}

			if (FD_ISSET(fd, tmp_evts[DIR_RD]))
				n |= FD_EV_READY_R;

			if (FD_ISSET(fd, tmp_evts[DIR_WR]))
				n |= FD_EV_READY_W;

			fd_update_events(fd, n);
		}
	}
}

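/* Allocates this thread's private fd_sets, sized to cover global.maxsock
 * FDs. Returns 1 on success, 0 on allocation failure.
 */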
static int init_select_per_thread()
{
	int fd_set_bytes;

	fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
	tmp_evts[DIR_RD] = (fd_set *)calloc(1, fd_set_bytes);
	if (tmp_evts[DIR_RD] == NULL)
		goto fail;
	tmp_evts[DIR_WR] = (fd_set *)calloc(1, fd_set_bytes);
	if (tmp_evts[DIR_WR] == NULL)
		goto fail;
	return 1;
 fail:
	free(tmp_evts[DIR_RD]);
	free(tmp_evts[DIR_WR]);
	return 0;
}

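/* Releases this thread's private fd_sets. */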
static void deinit_select_per_thread()
{
	free(tmp_evts[DIR_WR]);
	tmp_evts[DIR_WR] = NULL;
	free(tmp_evts[DIR_RD]);
	tmp_evts[DIR_RD] = NULL;
}

/*
 * Initialization of the select() poller.
 * Returns 0 in case of failure, non-zero in case of success. If it fails, it
 * disables the poller by setting its pref to 0.
 */
static int _do_init(struct poller *p)
{
	int fd_set_bytes;

	p->private = NULL;

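	/* select() cannot handle FDs >= FD_SETSIZE, so refuse to be used if
	 * the configuration allows more sockets than that.
	 */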
	if (global.maxsock > FD_SETSIZE)
		goto fail_srevt;

	fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;

	if ((fd_evts[DIR_RD] = calloc(1, fd_set_bytes)) == NULL)
		goto fail_srevt;
	if ((fd_evts[DIR_WR] = calloc(1, fd_set_bytes)) == NULL)
		goto fail_swevt;

	hap_register_per_thread_init(init_select_per_thread);
	hap_register_per_thread_deinit(deinit_select_per_thread);

	return 1;

 fail_swevt:
	free(fd_evts[DIR_RD]);
 fail_srevt:
	p->pref = 0;
	return 0;
}

/*
 * Termination of the select() poller.
 * Memory is released and the poller is marked as unselectable.
 */
static void _do_term(struct poller *p)
{
	free(fd_evts[DIR_WR]);
	free(fd_evts[DIR_RD]);
	p->private = NULL;
	p->pref = 0;
}

/*
 * Check that the poller works.
 * Returns 1 if OK, otherwise 0.
 */
static int _do_test(struct poller *p)
{
	if (global.maxsock > FD_SETSIZE)
		return 0;

	return 1;
}

/*
 * It is a constructor, which means that it will automatically be called before
 * main(). This is GCC-specific but it works at least since 2.95.
 * Special care must be taken so that it does not need any uninitialized data.
 */
__attribute__((constructor))
static void _do_register(void)
{
	struct poller *p;

	if (nbpollers >= MAX_POLLERS)
		return;
	p = &pollers[nbpollers++];

	p->name = "select";
	p->pref = 150;
	p->flags = 0;
	p->private = NULL;

	p->clo = __fd_clo;
	p->test = _do_test;
	p->init = _do_init;
	p->term = _do_term;
	p->poll = _do_poll;
}


/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */