author     Eric Wong <normalperson@yhbt.net>  2012-12-30 11:43:10 +0000
committer  Eric Wong <normalperson@yhbt.net>  2012-12-30 11:43:10 +0000
commit     c751f42f5f6a5e54a399df472015ab6d2ffc3f7a (patch)
tree       91335335f49899437bb10c17a106ec5cb8fdf323
parent     c63ad2b2e0e25f0765605e8ba2d7038b5e28d878 (diff)
download   kgio-c751f42f5f6a5e54a399df472015ab6d2ffc3f7a.tar.gz
Periodically switching the shared listen socket back to blocking mode
is prone to race conditions in multiprocess situations where one
process relies on non-blocking operation while another (likely newer)
process relies on blocking operation.

Since the blocking process can always fall back to calling
rb_io_wait_readable(), use that instead and give up some
scalability for higher reliability.

Those interested in avoiding thundering herds will have to
stop/start their processes using blocking sockets (and tolerate
some downtime).
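
The race exists because O_NONBLOCK is a file status flag stored on the
open file description, which forked workers share when they inherit the
same listen socket from their parent.  The following is only an
illustrative sketch (not part of this change; demonstrate_shared_flag
is a made-up helper) of how one process toggling the flag changes
accept() behavior for a sibling:

#include <sys/types.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>

/* hypothetical demo, not kgio code */
static void demonstrate_shared_flag(int listen_fd)
{
        pid_t pid = fork();

        if (pid == 0) {
                /* child wants non-blocking accept() and sets O_NONBLOCK */
                int flags = fcntl(listen_fd, F_GETFL);

                (void)fcntl(listen_fd, F_SETFL, flags | O_NONBLOCK);
                _exit(0);
        }

        /*
         * parent expected a blocking accept(), but the flag change above
         * is visible here, too, so this call may return -1 with EAGAIN
         * instead of sleeping until a client connects
         */
        (void)accept(listen_fd, NULL, NULL);
}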
-rw-r--r--  ext/kgio/accept.c  36
1 file changed, 3 insertions(+), 33 deletions(-)
diff --git a/ext/kgio/accept.c b/ext/kgio/accept.c
index f8a073f..5f07532 100644
--- a/ext/kgio/accept.c
+++ b/ext/kgio/accept.c
@@ -79,15 +79,6 @@ static VALUE xaccept(void *ptr)
 #ifdef HAVE_RB_THREAD_BLOCKING_REGION
 #  include <time.h>
 #  include "blocking_io_region.h"
-/*
- * Try to use a (real) blocking accept() since that can prevent
- * thundering herds under Linux:
- * http://www.citi.umich.edu/projects/linux-scalability/reports/accept.html
- *
- * So we periodically disable non-blocking, but not too frequently
- * because other processes may set non-blocking (especially during
- * a process upgrade) with Rainbows! concurrency model changes.
- */
 static int thread_accept(struct accept_args *a, int force_nonblock)
 {
         if (force_nonblock)
@@ -95,28 +86,6 @@ static int thread_accept(struct accept_args *a, int force_nonblock)
         return (int)rb_thread_io_blocking_region(xaccept, a, a->fd);
 }
 
-static void set_blocking_or_block(int fd)
-{
-        static time_t last_set_blocking;
-        time_t now = time(NULL);
-
-        if (last_set_blocking == 0) {
-                last_set_blocking = now;
-                (void)rb_io_wait_readable(fd);
-        } else if ((now - last_set_blocking) <= 5) {
-                (void)rb_io_wait_readable(fd);
-        } else {
-                int flags = fcntl(fd, F_GETFL);
-                if (flags == -1)
-                        rb_sys_fail("fcntl(F_GETFL)");
-                if (flags & O_NONBLOCK) {
-                        flags = fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
-                        if (flags == -1)
-                                rb_sys_fail("fcntl(F_SETFL)");
-                }
-                last_set_blocking = now;
-        }
-}
 #else /* ! HAVE_RB_THREAD_BLOCKING_REGION */
 #  include <rubysig.h>
 static int thread_accept(struct accept_args *a, int force_nonblock)
@@ -134,7 +103,6 @@ static int thread_accept(struct accept_args *a, int force_nonblock)
         TRAP_END;
         return rv;
 }
-#define set_blocking_or_block(fd) (void)rb_io_wait_readable(fd)
 #endif /* ! HAVE_RB_THREAD_BLOCKING_REGION */
 
 static void
@@ -209,7 +177,8 @@ retry:
                                 return Qnil;
                         a->fd = my_fileno(a->accept_io);
                         errno = EAGAIN;
-                        set_blocking_or_block(a->fd);
+                        (void)rb_io_wait_readable(a->fd);
+                        /* fall-through to EINTR case */
 #ifdef ECONNABORTED
                 case ECONNABORTED:
 #endif /* ECONNABORTED */
@@ -217,6 +186,7 @@ retry:
                 case EPROTO:
 #endif /* EPROTO */
                 case EINTR:
+                        /* raise IOError if closed during sleep */
                         a->fd = my_fileno(a->accept_io);
                         goto retry;
                 case ENOMEM:
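
After this change the accept loop above never switches the socket back
to blocking mode: accept() stays non-blocking, and an EAGAIN result
simply sleeps in rb_io_wait_readable() before retrying.  A condensed,
hypothetical sketch of that flow (do_accept and its error handling are
simplified here and are not the actual kgio API):

#include <errno.h>
#include <sys/socket.h>
#include <ruby.h>        /* rb_io_wait_readable() */

/* condensed illustration of the retry flow, not the real kgio function */
static int do_accept(int fd)
{
        int client;

retry:
        client = accept(fd, NULL, NULL);
        if (client < 0) {
                switch (errno) {
                case EAGAIN:
                        /* sleep until the socket is readable again */
                        (void)rb_io_wait_readable(fd);
                        /* then fall through and retry, like EINTR */
                case ECONNABORTED:
                case EINTR:
                        goto retry;
                }
        }
        return client;
}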