/*
 * FD polling functions for generic poll()
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE  // for POLLRDHUP on Linux

#include <unistd.h>
#include <poll.h>
#include <sys/time.h>
#include <sys/types.h>

#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/clock.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>


#ifndef POLLRDHUP
/* POLLRDHUP was defined late in libc, and it appeared in kernel 2.6.17 */
#define POLLRDHUP 0
#endif

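/* The poll() poller keeps two process-wide bitmaps in fd_evts[]: one bit per
 * file descriptor in fd_evts[DIR_RD] and fd_evts[DIR_WR], telling whether the
 * fd must be watched for reading and/or writing. maxfd tracks the highest
 * known fd + 1 so that the scan can stop early. Each thread then rebuilds its
 * own pollfd array (poll_events/nbfd) from these bitmaps before every call to
 * poll().
 */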
static int maxfd;   /* # of the highest fd + 1 */
static unsigned int *fd_evts[2];

/* private data */
static THREAD_LOCAL int nbfd = 0;
static THREAD_LOCAL struct pollfd *poll_events = NULL;

static void __fd_clo(int fd)
{
	hap_fd_clr(fd, fd_evts[DIR_RD]);
	hap_fd_clr(fd, fd_evts[DIR_WR]);
}

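/* Propagates the latest state of <fd> (fdtab[fd].state) into the shared
 * read/write bitmaps and into polled_mask, and raises *max_add_fd whenever an
 * fd larger than the current maximum is added, so the caller can then grow
 * maxfd accordingly.
 */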
static void _update_fd(int fd, int *max_add_fd)
{
	int en;

	en = fdtab[fd].state;

	/* we have a single state for all threads, which is why we don't
	 * check the tid_bit. The first thread to see the update takes it
	 * into account for all the other ones.
	 */
	if (!(en & FD_EV_ACTIVE_RW)) {
		if (!(polled_mask[fd].poll_recv | polled_mask[fd].poll_send)) {
			/* fd was not watched, it's still not */
			return;
		}
		/* fd totally removed from poll list */
		hap_fd_clr(fd, fd_evts[DIR_RD]);
		hap_fd_clr(fd, fd_evts[DIR_WR]);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */
		if (!(en & FD_EV_ACTIVE_R)) {
			hap_fd_clr(fd, fd_evts[DIR_RD]);
			if (polled_mask[fd].poll_recv & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_RD]);
			if (!(polled_mask[fd].poll_recv & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, tid_bit);
		}

		if (!(en & FD_EV_ACTIVE_W)) {
			hap_fd_clr(fd, fd_evts[DIR_WR]);
			if (polled_mask[fd].poll_send & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_WR]);
			if (!(polled_mask[fd].poll_send & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, tid_bit);
		}

		if (fd > *max_add_fd)
			*max_add_fd = fd;
	}
}

/*
 * Poll() poller
 */
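/* Each call proceeds in four steps: apply the per-thread and global fd update
 * lists to the shared bitmaps, adjust maxfd, convert the bitmaps into a
 * pollfd array restricted to the fds owned by this thread, then poll() and
 * translate the returned revents into FD_EV_* flags for fd_update_events().
 */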
static void _do_poll(struct poller *p, int exp, int wake)
{
	int status;
	int fd;
	int wait_time;
	int updt_idx;
	int fds, count;
	int sr, sw;
	int old_maxfd, new_maxfd, max_add_fd;
	unsigned rn, wn; /* read new, write new */
	int old_fd;

	max_add_fd = -1;

	/* first, scan the update list to find changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop_fd++;
			continue;
		}
		_update_fd(fd, &max_add_fd);
	}

	/* Now scan the global update list */
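	/* Note on the negative sentinel values used by the shared update list
	 * below: -1 terminates the list, -2 makes us retry from the previous
	 * fd (presumably the entry was being modified concurrently), and
	 * values <= -3 encode the real fd as -fd-4.
	 */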
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit) {
			/* Cheat a bit: since the state is global to all pollers,
			 * we don't need every thread to take care of the update.
			 */
			_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
			done_update_polling(fd);
		} else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd, &max_add_fd);
	}

	/* maybe we added at least one fd larger than maxfd */
	for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
		if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
			break;
	}

	/* maxfd doesn't need to be precise but it needs to cover *all* active
	 * FDs. Thus we only shrink it if we have such an opportunity. The algo
	 * is simple: look for the previous used place, try to update maxfd to
	 * point to it, abort if maxfd changed in the mean time.
	 */
	old_maxfd = maxfd;
	do {
		new_maxfd = old_maxfd;
		while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
			new_maxfd--;
		if (new_maxfd >= old_maxfd)
			break;
	} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));

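	/* Mark the thread idle and harmless before possibly sleeping in
	 * poll(): this lets other threads that need every thread to be out of
	 * the sensitive sections (e.g. via thread_isolate()) proceed while we
	 * wait. These flags are presumably released again by fd_leaving_poll()
	 * once poll() returns.
	 */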
	thread_idle_now();
	thread_harmless_now();

	fd_nbupdt = 0;

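	/* Rebuild the per-thread pollfd array from the shared bitmaps: walk
	 * them one word at a time (8*sizeof(**fd_evts) fds per word), skip
	 * empty words, and for every set bit whose fd belongs to this thread
	 * add a pollfd entry with POLLIN|POLLRDHUP and/or POLLOUT.
	 */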
	nbfd = 0;
	for (fds = 0; (fds * 8*sizeof(**fd_evts)) < maxfd; fds++) {
		rn = fd_evts[DIR_RD][fds];
		wn = fd_evts[DIR_WR][fds];

		if (!(rn|wn))
			continue;

		for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {
			sr = (rn >> count) & 1;
			sw = (wn >> count) & 1;
			if ((sr|sw)) {
				if (!fdtab[fd].owner) {
					/* should normally not happen here except
					 * due to rare thread concurrency
					 */
					continue;
				}

				if (!(fdtab[fd].thread_mask & tid_bit)) {
					continue;
				}

				poll_events[nbfd].fd = fd;
				poll_events[nbfd].events = (sr ? (POLLIN | POLLRDHUP) : 0) | (sw ? POLLOUT : 0);
				nbfd++;
			}
		}
	}

	/* now let's wait for events */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	clock_entering_poll();
	status = poll(poll_events, nbfd, wait_time);
	clock_update_date(wait_time, status);

	fd_leaving_poll(wait_time, status);

	if (status > 0)
		activity[tid].poll_io++;

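	/* Dispatch the reported events: for each pollfd with a non-zero
	 * revents, translate POLLIN/POLLOUT/POLLRDHUP/POLLHUP/POLLERR into the
	 * internal FD_EV_READY, FD_EV_SHUT and FD_EV_ERR flags and hand them
	 * to fd_update_events(). Seeing POLLRDHUP at least once also records
	 * that the OS supports it (HAP_POLL_F_RDHUP).
	 */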
	for (count = 0; status > 0 && count < nbfd; count++) {
		unsigned int n;
		int e = poll_events[count].revents;

		fd = poll_events[count].fd;

		if ((e & POLLRDHUP) && !(cur_poller.flags & HAP_POLL_F_RDHUP))
			_HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);

#ifdef DEBUG_FD
		_HA_ATOMIC_INC(&fdtab[fd].event_count);
#endif
		if (!(e & ( POLLOUT | POLLIN | POLLERR | POLLHUP | POLLRDHUP )))
			continue;

		/* ok, we found one active fd */
		status--;

		n = ((e & POLLIN)    ? FD_EV_READY_R : 0) |
		    ((e & POLLOUT)   ? FD_EV_READY_W : 0) |
		    ((e & POLLRDHUP) ? FD_EV_SHUT_R  : 0) |
		    ((e & POLLHUP)   ? FD_EV_SHUT_RW : 0) |
		    ((e & POLLERR)   ? FD_EV_ERR_RW  : 0);

		fd_update_events(fd, n);
	}
}


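/* Per-thread init/deinit: each thread gets its own pollfd array, sized for
 * global.maxsock entries, allocated when the thread starts and released when
 * it stops. These hooks are registered from _do_init() below.
 */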
static int init_poll_per_thread()
{
	poll_events = calloc(1, sizeof(struct pollfd) * global.maxsock);
	if (poll_events == NULL)
		return 0;
	return 1;
}

static void deinit_poll_per_thread()
{
	ha_free(&poll_events);
}

/*
 * Initialization of the poll() poller.
 * Returns 0 in case of failure, non-zero in case of success. If it fails, it
 * disables the poller by setting its pref to 0.
 */
static int _do_init(struct poller *p)
{
	__label__ fail_swevt, fail_srevt;
	int fd_evts_bytes;

	p->private = NULL;

	/* this old poller uses a process-wide FD list that cannot work with
	 * groups.
	 */
	if (global.nbtgroups > 1)
		goto fail_srevt;

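	/* Size of each bitmap: one bit per possible fd (global.maxsock),
	 * rounded up to a whole number of words of sizeof(**fd_evts) bytes.
	 */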
	fd_evts_bytes = (global.maxsock + sizeof(**fd_evts) * 8 - 1) / (sizeof(**fd_evts) * 8) * sizeof(**fd_evts);

	if ((fd_evts[DIR_RD] = calloc(1, fd_evts_bytes)) == NULL)
		goto fail_srevt;
	if ((fd_evts[DIR_WR] = calloc(1, fd_evts_bytes)) == NULL)
		goto fail_swevt;

	hap_register_per_thread_init(init_poll_per_thread);
	hap_register_per_thread_deinit(deinit_poll_per_thread);

	return 1;

 fail_swevt:
	free(fd_evts[DIR_RD]);
 fail_srevt:
	p->pref = 0;
	return 0;
}

/*
 * Termination of the poll() poller.
 * Memory is released and the poller is marked as unselectable.
 */
static void _do_term(struct poller *p)
{
	free(fd_evts[DIR_WR]);
	free(fd_evts[DIR_RD]);
	p->private = NULL;
	p->pref = 0;
}

/*
 * Check that the poller works.
 * Returns 1 if OK, otherwise 0.
 */
static int _do_test(struct poller *p)
{
	return 1;
}

/*
 * Registers the poller.
 */
static void _do_register(void)
{
	struct poller *p;

	if (nbpollers >= MAX_POLLERS)
		return;
	p = &pollers[nbpollers++];

	p->name = "poll";
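	/* pref ranks the candidate pollers: at start-up the working poller
	 * with the highest pref is selected. A value of 200 typically places
	 * poll() below OS-specific pollers such as epoll or kqueue but above
	 * plain select().
	 */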
	p->pref = 200;
	p->flags = HAP_POLL_F_ERRHUP;
	p->private = NULL;

	p->clo  = __fd_clo;
	p->test = _do_test;
	p->init = _do_init;
	p->term = _do_term;
	p->poll = _do_poll;
}

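/* Register this poller at the STG_REGISTER init stage, so that it is added to
 * the pollers[] array before the best available poller is selected at boot.
 */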
INITCALL0(STG_REGISTER, _do_register);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */