about summary refs log tree commit homepage
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/benchmark/README18
-rw-r--r--test/benchmark/ddstream.ru50
-rw-r--r--test/benchmark/readinput.ru40
-rwxr-xr-xtest/benchmark/uconnect.perl66
4 files changed, 170 insertions, 4 deletions
diff --git a/test/benchmark/README b/test/benchmark/README
index 1d3cdd0..cd929f3 100644
--- a/test/benchmark/README
+++ b/test/benchmark/README
@@ -42,9 +42,19 @@ The benchmark client is usually httperf.
 Another gentle reminder: performance with slow networks/clients
 is NOT our problem.  That is the job of nginx (or similar).
 
+== ddstream.ru
+
+Standalone Rack app intended to show how BAD we are at slow clients.
+See usage in comments.
+
+== readinput.ru
+
+Standalone Rack app intended to show how bad we are with slow uploaders.
+See usage in comments.
+
 == Contributors
 
-This directory is maintained independently in the "benchmark" branch
-based against v0.1.0.  Only changes to this directory (test/benchmarks)
-are committed to this branch although the master branch may merge this
-branch occassionaly.
+This directory is intended to remain stable.  Do not make changes
+to benchmarking code which can change performance and invalidate
+results across revisions.  Instead, write new benchmarks and update
+comments/documentation as necessary.
diff --git a/test/benchmark/ddstream.ru b/test/benchmark/ddstream.ru
new file mode 100644
index 0000000..b14c973
--- /dev/null
+++ b/test/benchmark/ddstream.ru
@@ -0,0 +1,50 @@
+# This app is intended to test large HTTP responses with or without
+# a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+# reverse proxy, unicorn will be unresponsive when client count exceeds
+# worker_processes.
+#
+# To demonstrate how bad unicorn is at slowly reading clients:
+#
+#   # in one terminal, start unicorn with one worker:
+#   unicorn -E none -l 127.0.0.1:8080 test/benchmark/ddstream.ru
+#
+#   # in a different terminal, start more slow curl processes than
+#   # unicorn workers and watch time outputs
+#   curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+#   curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+#   wait
+#
+# The last client won't see a response until the first one is done reading
+#
+# nginx note: do not change the default "proxy_buffering" behavior.
+# Setting "proxy_buffering off" prevents nginx from protecting unicorn.
+
+# totally standalone rack app to stream a giant response
+class BigResponse
+  def initialize(bs, count)
+    @buf = "#{bs.to_s(16)}\r\n#{' ' * bs}\r\n"
+    @count = count
+    @res = [ 200,
+      { 'Transfer-Encoding' => -'chunked', 'Content-Type' => 'text/plain' },
+      self
+    ]
+  end
+
+  # rack response body iterator
+  def each
+    (1..@count).each { yield @buf }
+    yield -"0\r\n\r\n"
+  end
+
+  # rack app entry endpoint
+  def call(_env)
+    @res
+  end
+end
+
+# default to a giant (128M) response because kernel socket buffers
+# can be ridiculously large on some systems
+bs = ENV['bs'] ? ENV['bs'].to_i : 65536
+count = ENV['count'] ? ENV['count'].to_i : 2048
+warn "serving response with bs=#{bs} count=#{count} (#{bs*count} bytes)"
+run BigResponse.new(bs, count)
diff --git a/test/benchmark/readinput.ru b/test/benchmark/readinput.ru
new file mode 100644
index 0000000..c91bec3
--- /dev/null
+++ b/test/benchmark/readinput.ru
@@ -0,0 +1,40 @@
+# This app is intended to test large HTTP requests with or without
+# a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+# reverse proxy, unicorn will be unresponsive when client count exceeds
+# worker_processes.
+
+DOC = <<DOC
+To demonstrate how bad unicorn is at slowly uploading clients:
+
+  # in one terminal, start unicorn with one worker:
+  unicorn -E none -l 127.0.0.1:8080 test/benchmark/readinput.ru
+
+  # in a different terminal, upload 45M from multiple curl processes:
+  dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+     --trace-time -v http://127.0.0.1:8080/ &
+  dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+     --trace-time -v http://127.0.0.1:8080/ &
+  wait
+
+# The last client won't see a response until the first one is done uploading
+# You also won't be able to make GET requests to view this documentation
+# while clients are uploading.  You can also view the stderr debug output
+# of unicorn (see logging code in #{__FILE__}).
+DOC
+
+run(lambda do |env|
+  input = env['rack.input']
+  buf = ''.b
+
+  # default logger contains timestamps, rely on that so users can
+  # see what the server is doing
+  l = env['rack.logger']
+
+  l.debug('BEGIN reading input ...') if l
+  :nop while input.read(16384, buf)
+  l.debug('DONE reading input ...') if l
+
+  buf.clear
+  [ 200, [ %W(Content-Length #{DOC.size}), %w(Content-Type text/plain) ],
+    [ DOC ] ]
+end)
diff --git a/test/benchmark/uconnect.perl b/test/benchmark/uconnect.perl
new file mode 100755
index 0000000..230445e
--- /dev/null
+++ b/test/benchmark/uconnect.perl
@@ -0,0 +1,66 @@
+#!/usr/bin/perl -w
+# Benchmark script to spawn some processes and hammer a local unicorn
+# to test accept loop performance.  This only does Unix sockets.
+# There's plenty of TCP benchmarking tools out there, and TCP port reuse
+# has predictability problems since unicorn can't do persistent connections.
+# Written in Perl for the same reason: predictability.
+# Ruby GC is not as predictable as Perl refcounting.
+use strict;
+use Socket qw(AF_UNIX SOCK_STREAM sockaddr_un);
+use POSIX qw(:sys_wait_h);
+use Getopt::Std;
+# -c / -n switches stolen from ab(1)
+my $usage = "$0 [-c CONCURRENCY] [-n NUM_REQUESTS] SOCKET_PATH\n";
+our $opt_c = 2;
+our $opt_n = 1000;
+getopts('c:n:') or die $usage;
+my $unix_path = shift or die $usage;
+use constant REQ => "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
+use constant REQ_LEN => length(REQ);
+use constant BUFSIZ => 8192;
+$^F = 99; # don't waste syscall time with FD_CLOEXEC
+
+my %workers; # pid => worker num
+die "-n $opt_n not evenly divisible by -c $opt_c\n" if $opt_n % $opt_c;
+my $n_per_worker = $opt_n / $opt_c;
+my $addr = sockaddr_un($unix_path);
+
+for my $num (1..$opt_c) {
+        defined(my $pid = fork) or die "fork failed: $!\n";
+        if ($pid) {
+                $workers{$pid} = $num;
+        } else {
+                work($n_per_worker);
+        }
+}
+
+reap_worker(0) while scalar keys %workers;
+exit;
+
+sub work {
+        my ($n) = @_;
+        my ($buf, $x);
+        for (1..$n) {
+                socket(S, AF_UNIX, SOCK_STREAM, 0) or die "socket: $!";
+                connect(S, $addr) or die "connect: $!";
+                defined($x = syswrite(S, REQ)) or die "write: $!";
+                $x == REQ_LEN or die "short write: $x != ".REQ_LEN."\n";
+                do {
+                        $x = sysread(S, $buf, BUFSIZ);
+                        unless (defined $x) {
+                                next if $!{EINTR};
+                                die "sysread: $!\n";
+                        }
+                } until ($x == 0);
+        }
+        exit 0;
+}
+
+sub reap_worker {
+        my ($flags) = @_;
+        my $pid = waitpid(-1, $flags);
+        return if !defined $pid || $pid <= 0;
+        my $p = delete $workers{$pid} || '(unknown)';
+        warn("$pid [$p] exited with $?\n") if $?;
+        $p;
+}