/*
 * FD polling functions for generic poll()
 *
 * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE  // for POLLRDHUP on Linux

#include <unistd.h>
#include <poll.h>
#include <sys/time.h>
#include <sys/types.h>

#include <common/compat.h>
#include <common/config.h>
#include <common/hathreads.h>
#include <common/ticks.h>
#include <common/time.h>

#include <types/global.h>

#include <proto/activity.h>
#include <proto/fd.h>


#ifndef POLLRDHUP
/* POLLRDHUP was defined late in libc, and it appeared in kernel 2.6.17 */
#define POLLRDHUP 0
#endif

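/* Note: when POLLRDHUP is forced to 0 above, the (e & POLLRDHUP) tests below
 * can never match, so POLLIN|POLLRDHUP degrades to a plain POLLIN and the
 * HAP_POLL_F_RDHUP flag is never set on cur_poller.
 */

/* fd_evts[DIR_RD] and fd_evts[DIR_WR] are shared bitmaps with one bit per fd
 * and per direction, presumably maintained by hap_fd_set()/hap_fd_clr() along
 * these lines (sketch only, assuming 32-bit words) :
 *
 *   word = fd / (8 * sizeof(**fd_evts));
 *   bit  = fd % (8 * sizeof(**fd_evts));
 *   fd_evts[dir][word] |=  (1U << bit);   // watch <fd> for direction <dir>
 *   fd_evts[dir][word] &= ~(1U << bit);   // stop watching it
 */
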
static int maxfd;   /* # of the highest fd + 1 */
static unsigned int *fd_evts[2];

/* private data */
static THREAD_LOCAL int nbfd = 0;
static THREAD_LOCAL struct pollfd *poll_events = NULL;

REGPRM1 static void __fd_clo(int fd)
{
	hap_fd_clr(fd, fd_evts[DIR_RD]);
	hap_fd_clr(fd, fd_evts[DIR_WR]);
}

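/* Propagates the state of fd <fd> (fdtab[fd].state) to the shared read/write
 * bitmaps and to polled_mask[fd], and raises *max_add_fd when the fd is now
 * polled and larger than the previous maximum. Roughly, per direction :
 *
 *   FD_EV_ACTIVE_* set   -> hap_fd_set() + add tid_bit to polled_mask
 *   FD_EV_ACTIVE_* clear -> hap_fd_clr() + remove tid_bit from polled_mask
 *
 * The state is shared between threads, so the first thread that sees an
 * update applies it for all of them.
 */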
static void _update_fd(int fd, int *max_add_fd)
{
	int en;

	en = fdtab[fd].state;

	/* we have a single state for all threads, which is why we
	 * don't check the tid_bit. First thread to see the update
	 * takes it for every other one.
	 */
	if (!(en & FD_EV_ACTIVE_RW)) {
		if (!(polled_mask[fd].poll_recv | polled_mask[fd].poll_send)) {
			/* fd was not watched, it's still not */
			return;
		}
		/* fd totally removed from poll list */
		hap_fd_clr(fd, fd_evts[DIR_RD]);
		hap_fd_clr(fd, fd_evts[DIR_WR]);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
		_HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */
		if (!(en & FD_EV_ACTIVE_R)) {
			hap_fd_clr(fd, fd_evts[DIR_RD]);
			if (polled_mask[fd].poll_recv & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_RD]);
			if (!(polled_mask[fd].poll_recv & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, tid_bit);
		}

		if (!(en & FD_EV_ACTIVE_W)) {
			hap_fd_clr(fd, fd_evts[DIR_WR]);
			if (polled_mask[fd].poll_send & tid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~tid_bit);
		} else {
			hap_fd_set(fd, fd_evts[DIR_WR]);
			if (!(polled_mask[fd].poll_send & tid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, tid_bit);
		}

		if (fd > *max_add_fd)
			*max_add_fd = fd;
	}
}

/*
 * Poll() poller
 */
REGPRM3 static void _do_poll(struct poller *p, int exp, int wake)
{
	int status;
	int fd;
	int wait_time;
	int updt_idx;
	int fds, count;
	int sr, sw;
	int old_maxfd, new_maxfd, max_add_fd;
	unsigned rn, wn; /* read new, write new */
	int old_fd;

	max_add_fd = -1;

	/* first, scan the update list to find changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		_update_fd(fd, &max_add_fd);
	}

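	/* The global update list is a singly-linked list of fd numbers chained
	 * through fdtab[].update.next, using a few special values handled
	 * below : -1 terminates the list, -2 apparently marks a link being
	 * modified by another thread (so we go back to <old_fd> and re-read
	 * it), and values <= -3 look like an encoded link whose real fd is
	 * recovered as -fd - 4.
	 */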
	/* Now scan the global update list */
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd - 4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit) {
			/* Cheat a bit, as the state is global to all pollers,
			 * we don't need every thread to take care of the
			 * update.
			 */
			_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
			done_update_polling(fd);
		} else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd, &max_add_fd);
	}

	/* maybe we added at least one fd larger than maxfd */
	for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
		if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
			break;
	}

	/* maxfd doesn't need to be precise but it needs to cover *all* active
	 * FDs. Thus we only shrink it if we have such an opportunity. The algo
	 * is simple: look for the previous used place, try to update maxfd to
	 * point to it, abort if maxfd changed in the mean time.
	 */
	old_maxfd = maxfd;
	do {
		new_maxfd = old_maxfd;
		while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
			new_maxfd--;
		if (new_maxfd >= old_maxfd)
			break;
	} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
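
	/* Note on the loop above : if another thread changes maxfd between our
	 * read and the CAS, the CAS fails, <old_maxfd> is refreshed (assuming
	 * _HA_ATOMIC_CAS stores the observed value back) and the shrink is
	 * recomputed, so maxfd can never be shrunk below a value another
	 * thread just raised for a newly active fd.
	 */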

	thread_harmless_now();
	if (sleeping_thread_mask & tid_bit)
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);

	fd_nbupdt = 0;

	nbfd = 0;
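	/* Build the pollfd array by scanning the two bitmaps word by word :
	 * <fds> is the word index and <count> the bit inside it, so the fd
	 * being examined is fds * 8 * sizeof(**fd_evts) + count (e.g. with
	 * 32-bit words, bit 5 of word 2 is fd 69). Words with neither read
	 * nor write bits set are skipped in a single test.
	 */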
	for (fds = 0; (fds * 8*sizeof(**fd_evts)) < maxfd; fds++) {
		rn = fd_evts[DIR_RD][fds];
		wn = fd_evts[DIR_WR][fds];

		if (!(rn|wn))
			continue;

		for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {
			sr = (rn >> count) & 1;
			sw = (wn >> count) & 1;
			if ((sr|sw)) {
				if (!fdtab[fd].owner) {
					/* should normally not happen here except
					 * due to rare thread concurrency
					 */
					continue;
				}

				if (!(fdtab[fd].thread_mask & tid_bit)) {
					activity[tid].poll_skip++;
					continue;
				}

				poll_events[nbfd].fd = fd;
				poll_events[nbfd].events = (sr ? (POLLIN | POLLRDHUP) : 0) | (sw ? POLLOUT : 0);
				nbfd++;
			}
		}
	}

	/* now let's wait for events */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	tv_entering_poll();
	activity_count_runtime();
	status = poll(poll_events, nbfd, wait_time);
	tv_update_date(wait_time, status);
	tv_leaving_poll(wait_time, status);

	thread_harmless_end();

	for (count = 0; status > 0 && count < nbfd; count++) {
		unsigned int n;
		int e = poll_events[count].revents;
		fd = poll_events[count].fd;

		if (!(e & ( POLLOUT | POLLIN | POLLERR | POLLHUP | POLLRDHUP )))
			continue;

		/* ok, we found one active fd */
		status--;

		if (!fdtab[fd].owner) {
			activity[tid].poll_dead++;
			continue;
		}

		n = ((e & POLLIN) ? FD_EV_READY_R : 0) |
		    ((e & POLLOUT) ? FD_EV_READY_W : 0) |
		    ((e & POLLRDHUP) ? FD_EV_SHUT_R : 0) |
		    ((e & POLLHUP) ? FD_EV_SHUT_RW : 0) |
		    ((e & POLLERR) ? FD_EV_ERR_RW : 0);
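
		/* e.g. a socket reporting POLLIN|POLLHUP (pending data then a
		 * remote close) yields n = FD_EV_READY_R | FD_EV_SHUT_RW, and
		 * a lone POLLERR yields n = FD_EV_ERR_RW.
		 */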

		if ((e & POLLRDHUP) && !(cur_poller.flags & HAP_POLL_F_RDHUP))
			_HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);

		fd_update_events(fd, n);
	}

}


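/* Each thread allocates its own poll_events[] array, sized for the worst case
 * where all global.maxsock file descriptors are polled at once. Assuming an
 * 8-byte struct pollfd (an int plus two shorts), maxsock = 1024 costs about
 * 8 kB per thread.
 */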
static int init_poll_per_thread()
{
	poll_events = calloc(1, sizeof(struct pollfd) * global.maxsock);
	if (poll_events == NULL)
		return 0;
	return 1;
}

static void deinit_poll_per_thread()
{
	free(poll_events);
	poll_events = NULL;
}

/*
 * Initialization of the poll() poller.
 * Returns 0 in case of failure, non-zero in case of success. If it fails, it
 * disables the poller by setting its pref to 0.
 */
REGPRM1 static int _do_init(struct poller *p)
{
	__label__ fail_swevt, fail_srevt;
	int fd_evts_bytes;

	p->private = NULL;
	fd_evts_bytes = (global.maxsock + sizeof(**fd_evts) * 8 - 1) / (sizeof(**fd_evts) * 8) * sizeof(**fd_evts);
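
	/* e.g. with global.maxsock = 1000 and 32-bit words, this rounds up to
	 * 32 words, i.e. 128 bytes per direction (one bit per possible fd).
	 */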

	if ((fd_evts[DIR_RD] = calloc(1, fd_evts_bytes)) == NULL)
		goto fail_srevt;
	if ((fd_evts[DIR_WR] = calloc(1, fd_evts_bytes)) == NULL)
		goto fail_swevt;

	hap_register_per_thread_init(init_poll_per_thread);
	hap_register_per_thread_deinit(deinit_poll_per_thread);

	return 1;

 fail_swevt:
	free(fd_evts[DIR_RD]);
 fail_srevt:
	free(poll_events);
	p->pref = 0;
	return 0;
}

/*
 * Termination of the poll() poller.
 * Memory is released and the poller is marked as unselectable.
 */
REGPRM1 static void _do_term(struct poller *p)
{
	free(fd_evts[DIR_WR]);
	free(fd_evts[DIR_RD]);
	p->private = NULL;
	p->pref = 0;
}

/*
 * Check that the poller works.
 * Returns 1 if OK, otherwise 0.
 */
REGPRM1 static int _do_test(struct poller *p)
{
	return 1;
}

/*
 * Registers the poller. This is a constructor, which means that it will
 * automatically be called before main(). This is GCC-specific but it works at
 * least since 2.95. Special care must be taken so that it does not need any
 * uninitialized data.
 */
__attribute__((constructor))
static void _do_register(void)
{
	struct poller *p;

	if (nbpollers >= MAX_POLLERS)
		return;
	p = &pollers[nbpollers++];

	p->name = "poll";
	p->pref = 200;
	p->flags = HAP_POLL_F_ERRHUP;
	p->private = NULL;

	p->clo = __fd_clo;
	p->test = _do_test;
	p->init = _do_init;
	p->term = _do_term;
	p->poll = _do_poll;
}
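
/* Note : pref 200 ranks poll() above select() but below the OS-specific
 * pollers (epoll, kqueue), which presumably register with a higher pref, so
 * poll() is normally only chosen when no better mechanism is available or
 * when the others are disabled (e.g. "noepoll" in the global section).
 */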


/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */