MINOR: polling: Use fd_update_events to update events seen for a fd
Now the same function, fd_update_events(), is used by all pollers to update
the events seen on an fd. This will ease the integration of threads support.
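
The open-coded update logic removed from each poller below is assumed to be
centralized in fd_update_events(), presumably as an inline helper next to
fd_may_recv()/fd_may_send(). A minimal sketch of what that helper does,
reconstructed from the lines removed in the pollers (not the authoritative
implementation):

static inline void fd_update_events(int fd, int evts)
{
	/* keep only the sticky events, then merge the freshly reported ones */
	fdtab[fd].ev &= FD_POLL_STICKY;
	fdtab[fd].ev |= evts;

	/* the fd may receive if input, hangup or error was reported */
	if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
		fd_may_recv(fd);

	/* and may send if output or error was reported */
	if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
		fd_may_send(fd);
}

With this, each poller only computes the freshly reported FD_POLL_* flags
into a local mask and passes it to a single entry point, so any later
locking needed for threads can be added in one place.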
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index a2e5e0a..9e72802 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -139,7 +139,6 @@
/* it looks complicated but gcc can optimize it away when constants
* have same values... In fact it depends on gcc :-(
*/
- fdtab[fd].ev &= FD_POLL_STICKY;
if (EPOLLIN == FD_POLL_IN && EPOLLOUT == FD_POLL_OUT &&
EPOLLPRI == FD_POLL_PRI && EPOLLERR == FD_POLL_ERR &&
EPOLLHUP == FD_POLL_HUP) {
@@ -158,13 +157,7 @@
cur_poller.flags |= HAP_POLL_F_RDHUP;
n |= FD_POLL_HUP;
}
-
- fdtab[fd].ev |= n;
- if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
- fd_may_recv(fd);
-
- if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
- fd_may_send(fd);
+ fd_update_events(fd, n);
}
/* the caller will take care of cached events */
}
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
index f1c0b8d..02723cc 100644
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -117,30 +117,25 @@
measure_idle();
for (count = 0; count < status; count++) {
+ unsigned int n = 0;
fd = kev[count].ident;
if (!fdtab[fd].owner)
continue;
- fdtab[fd].ev &= FD_POLL_STICKY;
-
if (kev[count].filter == EVFILT_READ) {
if (kev[count].data)
- fdtab[fd].ev |= FD_POLL_IN;
+ n |= FD_POLL_IN;
if (kev[count].flags & EV_EOF)
- fdtab[fd].ev |= FD_POLL_HUP;
+ n |= FD_POLL_HUP;
}
else if (kev[count].filter == EVFILT_WRITE) {
- fdtab[fd].ev |= FD_POLL_OUT;
+ n |= FD_POLL_OUT;
if (kev[count].flags & EV_EOF)
- fdtab[fd].ev |= FD_POLL_ERR;
+ n |= FD_POLL_ERR;
}
- if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
- fd_may_recv(fd);
-
- if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
- fd_may_send(fd);
+ fd_update_events(fd, n);
}
}
diff --git a/src/ev_poll.c b/src/ev_poll.c
index 90ac9e5..1cb8d2d 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -132,6 +132,7 @@
measure_idle();
for (count = 0; status > 0 && count < nbfd; count++) {
+ unsigned int n;
int e = poll_events[count].revents;
fd = poll_events[count].fd;
@@ -147,14 +148,12 @@
/* it looks complicated but gcc can optimize it away when constants
* have same values... In fact it depends on gcc :-(
*/
- fdtab[fd].ev &= FD_POLL_STICKY;
if (POLLIN == FD_POLL_IN && POLLOUT == FD_POLL_OUT &&
POLLERR == FD_POLL_ERR && POLLHUP == FD_POLL_HUP) {
- fdtab[fd].ev |= e & (POLLIN|POLLOUT|POLLERR|POLLHUP);
+ n = e & (POLLIN|POLLOUT|POLLERR|POLLHUP);
}
else {
- fdtab[fd].ev |=
- ((e & POLLIN ) ? FD_POLL_IN : 0) |
+ n = ((e & POLLIN ) ? FD_POLL_IN : 0) |
((e & POLLOUT) ? FD_POLL_OUT : 0) |
((e & POLLERR) ? FD_POLL_ERR : 0) |
((e & POLLHUP) ? FD_POLL_HUP : 0);
@@ -163,14 +162,10 @@
/* always remap RDHUP to HUP as they're used similarly */
if (e & POLLRDHUP) {
cur_poller.flags |= HAP_POLL_F_RDHUP;
- fdtab[fd].ev |= FD_POLL_HUP;
+ n |= FD_POLL_HUP;
}
- if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
- fd_may_recv(fd);
-
- if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
- fd_may_send(fd);
+ fd_update_events(fd, n);
}
}
diff --git a/src/ev_select.c b/src/ev_select.c
index 1b40ea1..cf80ac8 100644
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -129,24 +129,21 @@
continue;
for (count = BITS_PER_INT, fd = fds * BITS_PER_INT; count && fd < maxfd; count--, fd++) {
+ unsigned int n = 0;
+
/* if we specify read first, the accepts and zero reads will be
* seen first. Moreover, system buffers will be flushed faster.
*/
if (!fdtab[fd].owner)
continue;
- fdtab[fd].ev &= FD_POLL_STICKY;
if (FD_ISSET(fd, tmp_evts[DIR_RD]))
- fdtab[fd].ev |= FD_POLL_IN;
+ n |= FD_POLL_IN;
if (FD_ISSET(fd, tmp_evts[DIR_WR]))
- fdtab[fd].ev |= FD_POLL_OUT;
+ n |= FD_POLL_OUT;
-
- if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
- fd_may_recv(fd);
- if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
- fd_may_send(fd);
+ fd_update_events(fd, n);
}
}
}