/*
 * FD polling functions for FreeBSD kqueue()
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
Willy Tarreau1e63130a2007-04-09 12:03:06 +020013#include <unistd.h>
14#include <sys/time.h>
15#include <sys/types.h>
16
17#include <sys/event.h>
18#include <sys/time.h>
19
Willy Tarreaub2551052020-06-09 09:07:15 +020020#include <haproxy/activity.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020021#include <haproxy/api.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020022#include <haproxy/fd.h>
23#include <haproxy/global.h>
Willy Tarreau3727a8a2020-06-04 17:37:26 +020024#include <haproxy/signal.h>
Willy Tarreauc2f7c582020-06-02 18:15:32 +020025#include <haproxy/ticks.h>
Willy Tarreau92b4f132020-06-01 11:05:15 +020026#include <haproxy/time.h>
Willy Tarreau1e63130a2007-04-09 12:03:06 +020027
Willy Tarreau1e63130a2007-04-09 12:03:06 +020028
/* private data */
static int kqueue_fd[MAX_THREADS] __read_mostly; // one kqueue FD per thread
static THREAD_LOCAL struct kevent *kev = NULL;   // per-thread kevent change/event buffer
static struct kevent *kev_out = NULL; // Trash buffer for kevent() to write the eventlist in
/* Appends the kqueue registration changes needed for <fd> to the thread-local
 * <kev> change list, starting at index <start>, and returns the new number of
 * pending changes. The desired state is read from fdtab[fd].state and compared
 * against polled_mask to emit the minimal set of EVFILT_READ/EVFILT_WRITE
 * EV_ADD/EV_DELETE operations for this thread.
 */
static int _update_fd(int fd, int start)
{
	int en;
	int changes = start;

	en = fdtab[fd].state;

	if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_ACTIVE_RW)) {
		if (!(polled_mask[fd].poll_recv & tid_bit) &&
		    !(polled_mask[fd].poll_send & tid_bit)) {
			/* fd was not watched, it's still not */
			return changes;
		}
		/* fd totally removed from poll list */
		EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
		EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
		if (polled_mask[fd].poll_recv & tid_bit)
			_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		if (polled_mask[fd].poll_send & tid_bit)
			_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */

		if (en & FD_EV_ACTIVE_R) {
			if (!(polled_mask[fd].poll_recv & tid_bit)) {
				EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, tid_bit);
			}
		}
		else if (polled_mask[fd].poll_recv & tid_bit) {
			EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
			/* use the barrier-less form like every other mask clear
			 * in this function (was HA_ATOMIC_AND, inconsistent).
			 */
			_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		}

		if (en & FD_EV_ACTIVE_W) {
			if (!(polled_mask[fd].poll_send & tid_bit)) {
				EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, tid_bit);
			}
		}
		else if (polled_mask[fd].poll_send & tid_bit) {
			EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
			_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
		}

	}
	return changes;
}
83
/*
 * kqueue() poller: flushes pending FD updates to the kernel, waits for
 * events (honoring busy-polling mode), then reports them to the FD layer.
 */
static void _do_poll(struct poller *p, int exp, int wake)
{
	struct timespec kq_timeout;
	int nbev, wait_time;
	int i, fd, prev_fd, upd;
	int nchanges = 0;
	int nevlist;

	kq_timeout.tv_sec = 0;
	kq_timeout.tv_nsec = 0;

	/* first, scan the local update list to find changes */
	for (upd = 0; upd < fd_nbupdt; upd++) {
		fd = fd_updt[upd];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop_fd++;
			continue;
		}
		nchanges = _update_fd(fd, nchanges);
	}

	/* then scan the global update list */
	for (prev_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = prev_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit)
			done_update_polling(fd);
		else
			continue;
		if (!fdtab[fd].owner)
			continue;
		nchanges = _update_fd(fd, nchanges);
	}

	thread_harmless_now();

	if (nchanges) {
#ifdef EV_RECEIPT
		kev[0].flags |= EV_RECEIPT;
#else
		/* Without EV_RECEIPT, append an invalid entry so that kevent()
		 * reports an error and stops before scanning the kqueue.
		 */
		EV_SET(&kev[nchanges++], -1, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
#endif
		kevent(kqueue_fd[tid], kev, nchanges, kev_out, nchanges, &kq_timeout);
	}
	fd_nbupdt = 0;

	/* now let's wait for events */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	nevlist = global.tune.maxpollevents;
	tv_entering_poll();
	activity_count_runtime();

	while (1) {
		/* busy polling spins with a zero timeout */
		int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;

		kq_timeout.tv_sec  = (timeout / 1000);
		kq_timeout.tv_nsec = (timeout % 1000) * 1000000;

		nbev = kevent(kqueue_fd[tid], // int kq
		              NULL,           // const struct kevent *changelist
		              0,              // int nchanges
		              kev,            // struct kevent *eventlist
		              nevlist,        // int nevents
		              &kq_timeout);   // const struct timespec *timeout
		tv_update_date(timeout, nbev);

		if (nbev) {
			activity[tid].poll_io++;
			break;
		}
		if (timeout || !wait_time)
			break;
		if (signal_queue_len || wake)
			break;
		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
			break;
	}

	tv_leaving_poll(wait_time, nbev);

	thread_harmless_end();
	if (sleeping_thread_mask & tid_bit)
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);

	/* report received events to the FD layer */
	for (i = 0; i < nbev; i++) {
		unsigned int n = 0;

		fd = kev[i].ident;

#ifdef DEBUG_FD
		_HA_ATOMIC_INC(&fdtab[fd].event_count);
#endif
		if (kev[i].filter == EVFILT_READ) {
			if (kev[i].data || !(kev[i].flags & EV_EOF))
				n |= FD_EV_READY_R;
			if (kev[i].flags & EV_EOF)
				n |= FD_EV_SHUT_R;
		}
		else if (kev[i].filter == EVFILT_WRITE) {
			n |= FD_EV_READY_W;
			if (kev[i].flags & EV_EOF)
				n |= FD_EV_ERR_RW;
		}

		if (fd_update_events(fd, n) == FD_UPDT_MIGRATED) {
			/* FD was migrated, let's stop polling it */
			if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
				fd_updt[fd_nbupdt++] = fd;
		}
	}
}
212
Christopher Fauletd4604ad2017-05-29 10:40:41 +0200213
/* Per-thread initialization: allocates this thread's <kev> event buffer and,
 * for threads other than the first one, creates the thread's own kqueue FD.
 * All known FDs are then marked for update so their polling state gets
 * rebuilt on the new FD by the poller. Returns 1 on success, 0 on failure.
 */
static int init_kqueue_per_thread()
{
	int fd;

	/* we can have up to two events per fd, so allocate enough to store
	 * 2*fd event, and an extra one, in case EV_RECEIPT isn't defined,
	 * so that we can add an invalid entry and get an error, to avoid
	 * scanning the kqueue uselessly. Pass the count as calloc()'s first
	 * argument so the nmemb*size multiplication is overflow-checked.
	 */
	kev = calloc(2 * global.maxsock + 1, sizeof(struct kevent));
	if (kev == NULL)
		goto fail_alloc;

	if (MAX_THREADS > 1 && tid) {
		kqueue_fd[tid] = kqueue();
		if (kqueue_fd[tid] < 0)
			goto fail_fd;
	}

	/* we may have to unregister some events initially registered on the
	 * original fd when it was alone, and/or to register events on the new
	 * fd for this thread. Let's just mark them as updated, the poller will
	 * do the rest.
	 */
	for (fd = 0; fd < global.maxsock; fd++)
		updt_fd_polling(fd);

	return 1;
 fail_fd:
	free(kev);
 fail_alloc:
	return 0;
}
247
/* Per-thread cleanup: closes this thread's kqueue FD (only non-first threads
 * own a private one) and releases the thread-local event buffer.
 */
static void deinit_kqueue_per_thread()
{
	if (MAX_THREADS > 1 && tid)
		close(kqueue_fd[tid]);

	ha_free(&kev);
}
255
/*
 * Initialization of the kqueue() poller.
 * Returns 0 in case of failure, non-zero in case of success. If it fails, it
 * disables the poller by setting its pref to 0.
 */
static int _do_init(struct poller *p)
{
	p->private = NULL;

	/* we can have up to two events per fd, so allocate enough to store
	 * 2*fd event, and an extra one, in case EV_RECEIPT isn't defined,
	 * so that we can add an invalid entry and get an error, to avoid
	 * scanning the kqueue uselessly. Pass the count as calloc()'s first
	 * argument so the nmemb*size multiplication is overflow-checked.
	 */
	kev_out = calloc(2 * global.maxsock + 1, sizeof(struct kevent));
	if (!kev_out)
		goto fail_alloc;

	kqueue_fd[tid] = kqueue();
	if (kqueue_fd[tid] < 0)
		goto fail_fd;

	hap_register_per_thread_init(init_kqueue_per_thread);
	hap_register_per_thread_deinit(deinit_kqueue_per_thread);
	return 1;

 fail_fd:
	ha_free(&kev_out);
 fail_alloc:
	p->pref = 0;
	return 0;
}
288
/*
 * Termination of the kqueue() poller.
 * Memory is released and the poller is marked as unselectable.
 */
static void _do_term(struct poller *p)
{
	if (kqueue_fd[tid] >= 0) {
		close(kqueue_fd[tid]);
		kqueue_fd[tid] = -1;
	}

	p->private = NULL;
	p->pref = 0;

	/* ha_free() is NULL-safe and resets the pointer; the previous
	 * "if (kev_out)" guard around it was redundant.
	 */
	ha_free(&kev_out);
}
306
/*
 * Check that the poller works.
 * Returns 1 if OK, otherwise 0.
 */
static int _do_test(struct poller *p)
{
	int kq = kqueue();

	if (kq < 0)
		return 0;
	close(kq);
	return 1;
}
321
/*
 * Recreate the kqueue file descriptor after a fork(). Returns 1 if OK,
 * otherwise 0. Note that some pollers need to be reopened after a fork()
 * (such as kqueue), and some others may fail to do so in a chroot.
 */
static int _do_fork(struct poller *p)
{
	kqueue_fd[tid] = kqueue();
	return kqueue_fd[tid] >= 0;
}
334
/*
 * Registers the kqueue poller. Being a constructor, this runs automatically
 * before main(). This is GCC-specific but works at least since 2.95.
 * Special care must be taken so that it does not need any uninitialized data.
 */
__attribute__((constructor))
static void _do_register(void)
{
	struct poller *p;
	int thr;

	if (nbpollers >= MAX_POLLERS)
		return;

	/* mark all per-thread kqueue FDs as not yet created */
	for (thr = 0; thr < MAX_THREADS; thr++)
		kqueue_fd[thr] = -1;

	p = &pollers[nbpollers++];

	p->name    = "kqueue";
	p->pref    = 300;
	p->flags   = HAP_POLL_F_RDHUP | HAP_POLL_F_ERRHUP;
	p->private = NULL;

	p->clo  = NULL;
	p->test = _do_test;
	p->init = _do_init;
	p->term = _do_term;
	p->poll = _do_poll;
	p->fork = _do_fork;
}
366
367
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */