about summary refs log tree commit homepage
path: root/ext/unicorn_http/epollexclusive.h
diff options
context:
space:
mode:
authorEric Wong <bofh@yhbt.net>2021-10-01 03:09:23 +0000
committerEric Wong <bofh@yhbt.net>2021-10-04 17:39:56 -0900
commit158e9aad11ee2ed7dc01182da150e803f7cdbfef (patch)
tree60a074b601b38cac35fb534e141e51754a97faf7 /ext/unicorn_http/epollexclusive.h
parent8732038d9296f668827190b74f887c4821592476 (diff)
downloadunicorn-158e9aad11ee2ed7dc01182da150e803f7cdbfef.tar.gz
While the capabilities of epoll cannot be fully exploited given
our primitive design; avoiding thundering herd wakeups on larger
SMP machines while below 100% utilization is possible with
Linux 4.5+.

With this change, only one worker wakes up per-connect(2)
(instead of all of them via select(2)), avoiding the thundering
herd effect when the system is mostly idle.

Saturated instances should not notice the difference if they
rarely had multiple workers sleeping in select(2).  This change
benefits non-saturated instances.

With 2 parallel clients and 8 workers on a nominally (:P)
8-core CPU (AMD FX-8320), the uconnect.perl test script
invocation showed a reduction from ~3.4s to ~2.5s when
reading an 11-byte response body:

  echo worker_processes 8 >u.conf.rb
  bs=11 ruby -I lib -I test/ruby-2.5.5/ext/unicorn_http/ bin/unicorn \
    test/benchmark/dd.ru -E none -l /tmp/u.sock -c u.conf.rb
  time perl -I lib -w test/benchmark/uconnect.perl \
    -n 100000 -c 2 /tmp/u.sock

Times improve less as "-c" increases for uconnect.perl (system
noise and timings are inconsistent).  The benefit of this change
should be more noticeable on systems with more workers (and
more cores).

I wanted to use EPOLLET (Edge-Triggered) to further reduce
syscalls, here, (similar to the old select()-avoidance bet) but
that would've either added too much complexity to deduplicate
wakeup sources, or run into the same starvation problem we
solved in April 2020[1].

Since the kernel already has the complexity and deduplication
built-in for Level-Triggered epoll support, we'll just let the
kernel deal with it.

Note: do NOT take this as an example of how epoll should be used
in a sophisticated server.  unicorn is primitive by design and
cannot use threads nor handle multiple clients at once, thus it
only uses epoll in this extremely limited manner.

Linux 4.5+ users will notice a regression of one extra epoll FD
per-worker and at least two epoll watches, so
/proc/sys/fs/epoll/max_user_watches may need to be changed along
with RLIMIT_NOFILE.

This change has also been tested on Linux 3.10.x (CentOS 7.x)
and FreeBSD 11.x to ensure compatibility with systems without
EPOLLEXCLUSIVE.

Various EPOLLEXCLUSIVE discussions over the years:
  https://yhbt.net/lore/lkml/?q=s:EPOLLEXCLUSIVE+d:..20211001&x=t&o=-1

[1] https://yhbt.net/unicorn-public/CAMBWrQ=Yh42MPtzJCEO7XryVknDNetRMuA87irWfqVuLdJmiBQ@mail.gmail.com/
Diffstat (limited to 'ext/unicorn_http/epollexclusive.h')
-rw-r--r--ext/unicorn_http/epollexclusive.h125
1 files changed, 125 insertions, 0 deletions
diff --git a/ext/unicorn_http/epollexclusive.h b/ext/unicorn_http/epollexclusive.h
new file mode 100644
index 0000000..2d2a589
--- /dev/null
+++ b/ext/unicorn_http/epollexclusive.h
@@ -0,0 +1,125 @@
+/*
+ * This is only intended for use inside a unicorn worker, nowhere else.
+ * EPOLLEXCLUSIVE somewhat mitigates the thundering herd problem for
+ * mostly idle processes since we can't use blocking accept4.
+ * This is NOT intended for use with multi-threaded servers, nor
+ * single-threaded multi-client ("C10K") servers or anything advanced
+ * like that.  This use of epoll is only appropriate for primitive,
+ * single-client, single-threaded servers like unicorn that need to
+ * support SIGKILL timeouts and parent death detection.
+ */
+#if defined(HAVE_EPOLL_CREATE1)
+#  include <sys/epoll.h>
+#  include <errno.h>
+#  include <ruby/io.h>
+#  include <ruby/thread.h>
+#endif /* HAVE_EPOLL_CREATE1 */
+
+#if defined(EPOLLEXCLUSIVE) && defined(HAVE_EPOLL_CREATE1)
+#  define USE_EPOLL (1)
+#else
+#  define USE_EPOLL (0)
+#endif
+
+#if USE_EPOLL
+/*
+ * :nodoc:
+ * returns IO object if EPOLLEXCLUSIVE works and arms readers
+ */
+static VALUE prep_readers(VALUE cls, VALUE readers)
+{
+        long i;
+        /* EPOLL_CLOEXEC: the epoll FD must not leak across exec(2) */
+        int epfd = epoll_create1(EPOLL_CLOEXEC);
+        VALUE epio;
+
+        if (epfd < 0) rb_sys_fail("epoll_create1");
+
+        /*
+         * wrap epfd in a Ruby IO object (cls, normally Unicorn::Waiter)
+         * so the Ruby object owns the FD and closes it when it goes away
+         */
+        epio = rb_funcall(cls, rb_intern("for_fd"), 1, INT2NUM(epfd));
+
+        Check_Type(readers, T_ARRAY);
+        for (i = 0; i < RARRAY_LEN(readers); i++) {
+                int rc;
+                struct epoll_event e;
+                rb_io_t *fptr;
+                VALUE io = rb_ary_entry(readers, i);
+
+                /*
+                 * stash the array index in the event payload so
+                 * get_readers can map an event back to its IO object;
+                 * this is the reason readers shouldn't change afterwards
+                 */
+                e.data.u64 = i;
+
+                /*
+                 * I wanted to use EPOLLET here, but maintaining our own
+                 * equivalent of ep->rdllist in Ruby-space doesn't fit
+                 * our design at all (and the kernel already has its own
+                 * code path for doing it).  So let the kernel spend
+                 * cycles on maintaining level-triggering.
+                 */
+                e.events = EPOLLEXCLUSIVE | EPOLLIN;
+                io = rb_io_get_io(io);
+                GetOpenFile(io, fptr);
+                rc = epoll_ctl(epfd, EPOLL_CTL_ADD, fptr->fd, &e);
+                if (rc < 0) rb_sys_fail("epoll_ctl");
+        }
+        return epio;
+}
+#endif /* USE_EPOLL */
+
+#if USE_EPOLL
+/* argument bundle passed to do_wait() across the GVL boundary */
+struct ep_wait {
+        struct epoll_event *events; /* out: buffer filled by epoll_wait(2) */
+        rb_io_t *fptr; /* holds the epoll FD (from the Waiter IO object) */
+        int maxevents; /* capacity of ->events */
+        int timeout_msec; /* passed straight through to epoll_wait(2) */
+};
+
+/* thin epoll_wait(2) wrapper; runs w/o GVL so other Ruby threads and
+ * interrupt handling are not blocked while this worker sleeps */
+static void *do_wait(void *ptr) /* runs w/o GVL */
+{
+        struct ep_wait *epw = ptr;
+
+        /* the int result is smuggled back to the caller via the void* */
+        return (void *)(long)epoll_wait(epw->fptr->fd, epw->events,
+                                epw->maxevents, epw->timeout_msec);
+}
+
+/* :nodoc: */
+/*
+ * Sleeps in epoll_wait(2) until a listener is ready, a signal arrives
+ * (EINTR), or timeout_msec expires; pushes each ready IO object onto
+ * the +ready+ array and returns Qfalse.
+ * readers must not change between prepare_readers and get_readers:
+ * event payloads are indices into +readers+ (see prep_readers).
+ */
+static VALUE
+get_readers(VALUE epio, VALUE ready, VALUE readers, VALUE timeout_msec)
+{
+        struct ep_wait epw;
+        long i, n;
+        VALUE buf;
+
+        Check_Type(ready, T_ARRAY);
+        Check_Type(readers, T_ARRAY);
+        epw.maxevents = RARRAY_LENINT(readers);
+        /* use a hidden Ruby string as a GC-managed heap buffer */
+        buf = rb_str_buf_new(sizeof(struct epoll_event) * epw.maxevents);
+        epw.events = (struct epoll_event *)RSTRING_PTR(buf);
+        epio = rb_io_get_io(epio);
+        GetOpenFile(epio, epw.fptr);
+
+        epw.timeout_msec = NUM2INT(timeout_msec);
+        /* release the GVL for the (potentially long) sleep */
+        n = (long)rb_thread_call_without_gvl(do_wait, &epw, RUBY_UBF_IO, NULL);
+        if (n < 0) {
+                /* EINTR is normal (signal wakeups); treat as "nothing ready" */
+                if (errno != EINTR) rb_sys_fail("epoll_wait");
+                n = 0;
+        }
+        /* Linux delivers events in order received */
+        for (i = 0; i < n; i++) {
+                struct epoll_event *ev = &epw.events[i];
+                VALUE obj = rb_ary_entry(readers, ev->data.u64);
+
+                if (RTEST(obj))
+                        rb_ary_push(ready, obj);
+        }
+        rb_str_resize(buf, 0);
+        /* NOTE(review): rb_gc_force_recycle is deprecated and a no-op
+         * since Ruby 3.1; harmless here, but can be dropped for newer
+         * Rubies -- confirm against supported Ruby versions */
+        rb_gc_force_recycle(buf);
+        return Qfalse;
+}
+#endif /* USE_EPOLL */
+
+/*
+ * Defines Unicorn::Waiter (an IO subclass) with prep_readers and
+ * get_readers only when EPOLLEXCLUSIVE + epoll_create1 are available
+ * at build time; otherwise this is a no-op.
+ */
+static void init_epollexclusive(VALUE mUnicorn)
+{
+#if USE_EPOLL
+        VALUE cWaiter = rb_define_class_under(mUnicorn, "Waiter", rb_cIO);
+        rb_define_singleton_method(cWaiter, "prep_readers", prep_readers, 1);
+        rb_define_method(cWaiter, "get_readers", get_readers, 3);
+#endif
+}