-rwxr-xr-x  GIT-VERSION-GEN                                    |    2
-rw-r--r--  GNUmakefile                                        |   14
-rw-r--r--  Rakefile                                           |   29
-rw-r--r--  TODO                                               |    7
-rwxr-xr-x  bin/unicorn                                        |   26
-rwxr-xr-x  bin/unicorn_rails                                  |   26
-rw-r--r--  examples/unicorn.conf.rb                           |    5
-rw-r--r--  ext/unicorn_http/ext_help.h                        |    4
-rw-r--r--  ext/unicorn_http/global_variables.h                |    4
-rw-r--r--  ext/unicorn_http/unicorn_http.rl                   |  184
-rw-r--r--  lib/unicorn.rb                                     |  886
-rw-r--r--  lib/unicorn/app/exec_cgi.rb                        |    4
-rw-r--r--  lib/unicorn/configurator.rb                        |   31
-rw-r--r--  lib/unicorn/const.rb                               |   51
-rw-r--r--  lib/unicorn/http_request.rb                        |  112
-rw-r--r--  lib/unicorn/http_response.rb                       |   50
-rw-r--r--  lib/unicorn/http_server.rb                         |  695
-rw-r--r--  lib/unicorn/launcher.rb                            |    7
-rw-r--r--  lib/unicorn/preread_input.rb                       |   30
-rw-r--r--  lib/unicorn/socket_helper.rb                       |    8
-rw-r--r--  lib/unicorn/tee_input.rb                           |  126
-rw-r--r--  lib/unicorn/tmpio.rb                               |   29
-rw-r--r--  lib/unicorn/util.rb                                |  136
-rw-r--r--  lib/unicorn/worker.rb                              |   40
-rw-r--r--  local.mk.sample                                    |    9
-rwxr-xr-x  script/isolate_for_tests                           |   45
-rw-r--r--  t/GNUmakefile                                      |    8
-rw-r--r--  t/preread_input.ru                                 |   17
-rwxr-xr-x  t/t0003-working_directory.sh                       |    5
-rwxr-xr-x  t/t0010-reap-logging.sh                            |   55
-rwxr-xr-x  t/t0303-rails3-alt-working_directory_config.ru.sh  |    5
-rwxr-xr-x  t/t9000-preread-input.sh                           |   48
-rw-r--r--  t/test-rails3.sh                                   |    2
-rw-r--r--  test/exec/test_exec.rb                             |    2
-rw-r--r--  test/unit/test_http_parser_ng.rb                   |   38
-rw-r--r--  test/unit/test_request.rb                          |   12
-rw-r--r--  test/unit/test_response.rb                         |   23
-rw-r--r--  test/unit/test_signals.rb                          |    2
-rw-r--r--  test/unit/test_tee_input.rb                        |   39
-rw-r--r--  unicorn.gemspec                                    |    1
40 files changed, 1519 insertions, 1298 deletions
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 9e60ee9..ae8f539 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v1.1.5.GIT
+DEF_VER=v2.0.0pre3.GIT
 
 LF='
 '
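
The only substantive change above is the default version bump to
v2.0.0pre3.  GIT-VERSION-GEN prefers `git describe` output and only
falls back to DEF_VER outside a git checkout; the same fallback logic,
sketched in Ruby (the GIT-VERSION-FILE output format here is an
assumption, and the real script is sh):

    # prefer `git describe`, fall back to the hardcoded DEF_VER
    default = 'v2.0.0pre3.GIT'
    version = `git describe 2>/dev/null`.strip
    version = default if version.empty?
    File.open('GIT-VERSION-FILE', 'w') do |fp|
      fp.puts "GIT_VERSION = #{version.sub(/\Av/, '')}"
    end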
diff --git a/GNUmakefile b/GNUmakefile
index a3b761a..b28bded 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -24,6 +24,12 @@ endif
 
 RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 
+isolate_libs := tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk
+$(isolate_libs): script/isolate_for_tests
+        @$(RUBY) script/isolate_for_tests
+-include $(isolate_libs)
+MYLIBS = $(RUBYLIB):$(ISOLATE_LIBS)
+
 # dunno how to implement this as concisely in Ruby, and hell, I love awk
 awk_slow := awk '/def test_/{print FILENAME"--"$$2".n"}' 2>/dev/null
 
@@ -117,14 +123,14 @@ run_test = $(quiet_pre) \
 %.n: arg = $(subst .n,,$(subst --, -n ,$@))
 %.n: t = $(subst .n,$(log_suffix),$@)
 %.n: export PATH := $(test_prefix)/bin:$(PATH)
-%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 %.n: $(test_prefix)/.stamp
         $(run_test)
 
 $(T): arg = $@
 $(T): t = $(subst .rb,$(log_suffix),$@)
 $(T): export PATH := $(test_prefix)/bin:$(PATH)
-$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T): $(test_prefix)/.stamp
         $(run_test)
 
@@ -169,7 +175,7 @@ NEWS: GIT-VERSION-FILE .manifest
         $(RAKE) -s news_rdoc > $@+
         mv $@+ $@
 
-SINCE = 1.0.0
+SINCE = 1.1.4
 ChangeLog: LOG_VERSION = \
   $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
           echo $(GIT_VERSION) || git describe)
@@ -251,7 +257,7 @@ $(T_r).%.r: rv = $(subst .r,,$(subst $(T_r).,,$@))
 $(T_r).%.r: extra = ' 'v$(rv)
 $(T_r).%.r: arg = $(T_r)
 $(T_r).%.r: export PATH := $(test_prefix)/bin:$(PATH)
-$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T_r).%.r: export UNICORN_RAILS_TEST_VERSION = $(rv)
 $(T_r).%.r: export RAILS_GIT_REPO = $(CURDIR)/$(rails_git)
 $(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/v2.3.8-stamp
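
The isolate hooks above work by having script/isolate_for_tests write a
make fragment defining ISOLATE_LIBS; GNUmakefile -includes it and folds
it into RUBYLIB via MYLIBS so tests load the isolated gems.  A minimal
sketch of such a fragment generator, assuming gems are isolated under
tmp/isolate/<engine>-<version> (the script's actual layout may differ):

    engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
    path = "tmp/isolate/#{engine}-#{RUBY_VERSION}"
    libs = Dir["#{path}/gems/*/lib"] # lib dirs of the isolated gems
    File.open("#{path}.mk", "w") do |fp|
      fp.write("ISOLATE_LIBS=#{libs.join(':')}\n")
    end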
diff --git a/Rakefile b/Rakefile
index 15a0f61..72f7a85 100644
--- a/Rakefile
+++ b/Rakefile
@@ -163,11 +163,12 @@ task :fm_update do
   req = {
     "auth_code" => api_token,
     "release" => {
-      "tag_list" => "Stable",
+      "tag_list" => "Experimental",
       "version" => version,
       "changelog" => changelog,
     },
   }.to_json
+
   if ! changelog.strip.empty? && version =~ %r{\A[\d\.]+\d+\z}
     Net::HTTP.start(uri.host, uri.port) do |http|
       p http.post(uri.path, req, {'Content-Type'=>'application/json'})
@@ -193,29 +194,3 @@ begin
   end
 rescue LoadError
 end
-
-task :isolate do
-  require 'isolate'
-  ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
-  opts = {
-    :system => false,
-    :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
-    :multiruby => false, # we want "1.8.7" instead of "1.8"
-  }
-  fp = File.open(__FILE__, "rb")
-  fp.flock(File::LOCK_EX)
-
-  # C extensions aren't binary-compatible across Ruby versions
-  pid = fork { Isolate.now!(opts) { gem 'sqlite3-ruby', '1.2.5' } }
-  _, status = Process.waitpid2(pid)
-  status.success? or abort status.inspect
-
-  # pure Ruby gems can be shared across all Rubies
-  %w(3.0.0).each do |rails_ver|
-    opts[:path] = "tmp/isolate/rails-#{rails_ver}"
-    pid = fork { Isolate.now!(opts) { gem 'rails', rails_ver } }
-    _, status = Process.waitpid2(pid)
-    status.success? or abort status.inspect
-  end
-  fp.flock(File::LOCK_UN)
-end
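
The removed :isolate task lives on as script/isolate_for_tests (added
by this commit, per the diffstat).  Its fork-and-wait pattern is the
part worth keeping in mind: C extensions aren't binary-compatible
across Ruby versions, so per-Ruby setup runs in a child process whose
exit status is checked:

    pid = fork do
      # per-Ruby work goes here (e.g. Isolate.now! into a
      # version-specific path, as the removed task did)
      exit!(0)
    end
    _, status = Process.waitpid2(pid)
    status.success? or abort status.inspect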
diff --git a/TODO b/TODO
index 166a2a0..971bd18 100644
--- a/TODO
+++ b/TODO
@@ -3,3 +3,10 @@
 * performance validation (esp. TeeInput)
 
 * improve test suite
+
+* scalability to >= 1024 worker processes for crazy NUMA systems
+
+* Rack 2.x support (when Rack 2.x exists)
+
+* allow disabling "rack.input" rewindability for performance
+  (even though this violates the Rack 1.x SPEC)
diff --git a/bin/unicorn b/bin/unicorn
index 8d984bd..86c938b 100755
--- a/bin/unicorn
+++ b/bin/unicorn
@@ -4,16 +4,13 @@ require 'unicorn/launcher'
 require 'optparse'
 
 ENV["RACK_ENV"] ||= "development"
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
 opts = OptionParser.new("", 24, '  ') do |opts|
   cmd = File.basename($0)
   opts.banner = "Usage: #{cmd} " \
                 "[ruby options] [#{cmd} options] [rackup config file]"
-
   opts.separator "Ruby options:"
 
   lineno = 1
@@ -46,23 +43,23 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-o", "--host HOST",
           "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-p", "--port PORT",
           "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
-    port = p.to_i
-    set_listener = true
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
   end
 
-  opts.on("-E", "--env ENVIRONMENT",
-          "use ENVIRONMENT for defaults (default: development)") do |e|
+  opts.on("-E", "--env RACK_ENV",
+          "use RACK_ENV for defaults (default: development)") do |e|
     ENV["RACK_ENV"] = e
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
@@ -109,16 +106,15 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 end
 
 app = Unicorn.builder(ARGV[0] || 'config.ru', opts)
-options[:listeners] << "#{host}:#{port}" if set_listener
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
   })
 end
 
-Unicorn::Launcher.daemonize!(options) if daemonize
+Unicorn::Launcher.daemonize!(options) if rackup_opts[:daemonize]
 Unicorn.run(app, options)
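
With host/port/set_listener recorded in the shared
Unicorn::Configurator::RACKUP hash, the explicit
options[:listeners] << "#{host}:#{port}" line could go away: the
Configurator itself can append the CLI-provided listener after config
file parsing.  A hedged sketch of that consumption (the actual
Configurator internals may differ):

    rackup_opts = Unicorn::Configurator::RACKUP
    if rackup_opts[:set_listener]
      addr = "#{rackup_opts[:host]}:#{rackup_opts[:port]}"
      rackup_opts[:options][:listeners] << addr
    end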
diff --git a/bin/unicorn_rails b/bin/unicorn_rails
index 0b2d92f..0294b59 100755
--- a/bin/unicorn_rails
+++ b/bin/unicorn_rails
@@ -4,11 +4,9 @@ require 'unicorn/launcher'
 require 'optparse'
 require 'fileutils'
 
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
 ENV['RAILS_ENV'] ||= "development"
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
 opts = OptionParser.new("", 24, '  ') do |opts|
   cmd = File.basename($0)
@@ -46,13 +44,14 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-o", "--host HOST",
           "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
-  opts.on("-p", "--port PORT", "use PORT (default: #{port})") do |p|
-    port = p.to_i
-    set_listener = true
+  opts.on("-p", "--port PORT",
+          "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-E", "--env RAILS_ENV",
@@ -61,7 +60,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   # Unicorn-specific stuff
@@ -186,15 +185,14 @@ def rails_builder(ru, opts, daemonize)
   end
 end
 
-app = rails_builder(ARGV[0], opts, daemonize)
-options[:listeners] << "#{host}:#{port}" if set_listener
+app = rails_builder(ARGV[0], opts, rackup_opts[:daemonize])
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
   })
 end
 
@@ -203,7 +201,7 @@ options[:after_reload] = lambda do
   FileUtils.mkdir_p(%w(cache pids sessions sockets).map! { |d| "tmp/#{d}" })
 end
 
-if daemonize
+if rackup_opts[:daemonize]
   options[:pid] = "tmp/pids/unicorn.pid"
   Unicorn::Launcher.daemonize!(options)
 end
diff --git a/examples/unicorn.conf.rb b/examples/unicorn.conf.rb
index 37c3e81..28a9e65 100644
--- a/examples/unicorn.conf.rb
+++ b/examples/unicorn.conf.rb
@@ -63,7 +63,10 @@ before_fork do |server, worker|
   #   end
   # end
   #
-  # # *optionally* throttle the master from forking too quickly by sleeping
+  # Throttle the master from forking too quickly by sleeping.  Due
+  # to the implementation of standard Unix signal handlers, this
+  # helps (but does not completely) prevent identical, repeated signals
+  # from being lost when the receiving process is busy.
   # sleep 1
 end
 
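The new comment refers to standard Unix signal semantics: while a
process is busy, multiple pending instances of the same signal coalesce
into a single delivery.  A tiny self-contained demonstration of the
effect (not part of unicorn; exact counts depend on timing):

    count = 0
    trap(:USR1) { count += 1 }
    3.times { Process.kill(:USR1, $$) }
    sleep 0.1 # let the interpreter run the deferred handler
    puts count # often less than 3: pending identical signals coalesce
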
diff --git a/ext/unicorn_http/ext_help.h b/ext/unicorn_http/ext_help.h
index 3aa24a8..1f76f54 100644
--- a/ext/unicorn_http/ext_help.h
+++ b/ext/unicorn_http/ext_help.h
@@ -8,10 +8,6 @@
 #define RSTRING_LEN(s) (RSTRING(s)->len)
 #endif /* !defined(RSTRING_LEN) */
 
-#ifndef RUBINIUS
-#  define rb_str_update(x) do {} while (0)
-#endif /* !RUBINIUS */
-
 #ifndef HAVE_RB_STR_SET_LEN
 #  ifdef RUBINIUS
 #    error we should never get here with current Rubinius (1.x)
diff --git a/ext/unicorn_http/global_variables.h b/ext/unicorn_http/global_variables.h
index 7319bcd..8377704 100644
--- a/ext/unicorn_http/global_variables.h
+++ b/ext/unicorn_http/global_variables.h
@@ -35,13 +35,15 @@ static VALUE g_HEAD;
   static const char * const MAX_##N##_LENGTH_ERR = \
     "HTTP element " # N  " is longer than the " # length " allowed length."
 
+NORETURN(static void parser_error(const char *));
+
 /**
  * Validates the max length of given input and throws an HttpParserError
  * exception if over.
  */
 #define VALIDATE_MAX_LENGTH(len, N) do { \
   if (len > MAX_##N##_LENGTH) \
-    rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \
+    parser_error(MAX_##N##_LENGTH_ERR); \
 } while (0)
 
 /** Defines global strings in the init method. */
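
Declaring parser_error() NORETURN and routing VALIDATE_MAX_LENGTH
through it (the function itself is added in unicorn_http.rl below)
means every parser failure raises HttpParserError with an explicitly
empty backtrace, since the application stack is never at fault.  The
Ruby-visible effect, assuming input malformed enough to trip the
parser:

    require 'unicorn_http'

    parser = Unicorn::HttpParser.new
    begin
      parser.headers({}, "not a valid request line\r\n\r\n")
    rescue Unicorn::HttpParserError => e
      p e.backtrace # => [] via set_backtrace(rb_ary_new())
    end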
diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl
index 1ad2a5d..236fbaa 100644
--- a/ext/unicorn_http/unicorn_http.rl
+++ b/ext/unicorn_http/unicorn_http.rl
@@ -39,6 +39,8 @@ struct http_parser {
     size_t field_len; /* only used during header processing */
     size_t dest_offset; /* only used during body processing */
   } s;
+  VALUE buf;
+  VALUE env;
   VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */
   union {
     off_t content;
@@ -46,7 +48,18 @@ struct http_parser {
   } len;
 };
 
-static void finalize_header(struct http_parser *hp, VALUE req);
+static ID id_clear;
+
+static void finalize_header(struct http_parser *hp);
+
+static void parser_error(const char *msg)
+{
+  VALUE exc = rb_exc_new2(eHttpParserError, msg);
+  VALUE bt = rb_ary_new();
+
+  rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
+  rb_exc_raise(exc);
+}
 
 #define REMAINING (unsigned long)(pe - p)
 #define LEN(AT, FPC) (FPC - buffer - hp->AT)
@@ -88,7 +101,7 @@ static void hp_keepalive_connection(struct http_parser *hp, VALUE val)
 }
 
 static void
-request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+request_method(struct http_parser *hp, const char *ptr, size_t len)
 {
   VALUE v;
 
@@ -106,11 +119,11 @@ request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
   } else {
     v = rb_str_new(ptr, len);
   }
-  rb_hash_aset(req, g_request_method, v);
+  rb_hash_aset(hp->env, g_request_method, v);
 }
 
 static void
-http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+http_version(struct http_parser *hp, const char *ptr, size_t len)
 {
   VALUE v;
 
@@ -125,14 +138,14 @@ http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
   } else {
     v = rb_str_new(ptr, len);
   }
-  rb_hash_aset(req, g_server_protocol, v);
-  rb_hash_aset(req, g_http_version, v);
+  rb_hash_aset(hp->env, g_server_protocol, v);
+  rb_hash_aset(hp->env, g_http_version, v);
 }
 
 static inline void hp_invalid_if_trailer(struct http_parser *hp)
 {
   if (HP_FL_TEST(hp, INTRAILER))
-    rb_raise(eHttpParserError, "invalid Trailer");
+    parser_error("invalid Trailer");
 }
 
 static void write_cont_value(struct http_parser *hp,
@@ -141,7 +154,7 @@ static void write_cont_value(struct http_parser *hp,
   char *vptr;
 
   if (hp->cont == Qfalse)
-     rb_raise(eHttpParserError, "invalid continuation line");
+     parser_error("invalid continuation line");
   if (NIL_P(hp->cont))
      return; /* we're ignoring this header (probably Host:) */
 
@@ -163,7 +176,7 @@ static void write_cont_value(struct http_parser *hp,
   rb_str_buf_cat(hp->cont, vptr, LEN(mark, p));
 }
 
-static void write_value(VALUE req, struct http_parser *hp,
+static void write_value(struct http_parser *hp,
                         const char *buffer, const char *p)
 {
   VALUE f = find_common_field(PTR_TO(start.field), hp->s.field_len);
@@ -192,7 +205,7 @@ static void write_value(VALUE req, struct http_parser *hp,
   } else if (f == g_content_length) {
     hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v));
     if (hp->len.content < 0)
-      rb_raise(eHttpParserError, "invalid Content-Length");
+      parser_error("invalid Content-Length");
     HP_FL_SET(hp, HASBODY);
     hp_invalid_if_trailer(hp);
   } else if (f == g_http_transfer_encoding) {
@@ -209,9 +222,9 @@ static void write_value(VALUE req, struct http_parser *hp,
     assert_frozen(f);
   }
 
-  e = rb_hash_aref(req, f);
+  e = rb_hash_aref(hp->env, f);
   if (NIL_P(e)) {
-    hp->cont = rb_hash_aset(req, f, v);
+    hp->cont = rb_hash_aset(hp->env, f, v);
   } else if (f == g_http_host) {
     /*
      * ignored, absolute URLs in REQUEST_URI take precedence over
@@ -236,59 +249,55 @@ static void write_value(VALUE req, struct http_parser *hp,
   action downcase_char { downcase_char(deconst(fpc)); }
   action write_field { hp->s.field_len = LEN(start.field, fpc); }
   action start_value { MARK(mark, fpc); }
-  action write_value { write_value(req, hp, buffer, fpc); }
+  action write_value { write_value(hp, buffer, fpc); }
   action write_cont_value { write_cont_value(hp, buffer, fpc); }
-  action request_method {
-    request_method(hp, req, PTR_TO(mark), LEN(mark, fpc));
-  }
+  action request_method { request_method(hp, PTR_TO(mark), LEN(mark, fpc)); }
   action scheme {
-    rb_hash_aset(req, g_rack_url_scheme, STR_NEW(mark, fpc));
-  }
-  action host {
-    rb_hash_aset(req, g_http_host, STR_NEW(mark, fpc));
+    rb_hash_aset(hp->env, g_rack_url_scheme, STR_NEW(mark, fpc));
   }
+  action host { rb_hash_aset(hp->env, g_http_host, STR_NEW(mark, fpc)); }
   action request_uri {
     VALUE str;
 
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_URI);
-    str = rb_hash_aset(req, g_request_uri, STR_NEW(mark, fpc));
+    str = rb_hash_aset(hp->env, g_request_uri, STR_NEW(mark, fpc));
     /*
      * "OPTIONS * HTTP/1.1\r\n" is a valid request, but we can't have '*'
      * in REQUEST_PATH or PATH_INFO or else Rack::Lint will complain
      */
     if (STR_CSTR_EQ(str, "*")) {
       str = rb_str_new(NULL, 0);
-      rb_hash_aset(req, g_path_info, str);
-      rb_hash_aset(req, g_request_path, str);
+      rb_hash_aset(hp->env, g_path_info, str);
+      rb_hash_aset(hp->env, g_request_path, str);
     }
   }
   action fragment {
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), FRAGMENT);
-    rb_hash_aset(req, g_fragment, STR_NEW(mark, fpc));
+    rb_hash_aset(hp->env, g_fragment, STR_NEW(mark, fpc));
   }
   action start_query {MARK(start.query, fpc); }
   action query_string {
     VALIDATE_MAX_LENGTH(LEN(start.query, fpc), QUERY_STRING);
-    rb_hash_aset(req, g_query_string, STR_NEW(start.query, fpc));
+    rb_hash_aset(hp->env, g_query_string, STR_NEW(start.query, fpc));
   }
-  action http_version { http_version(hp, req, PTR_TO(mark), LEN(mark, fpc)); }
+  action http_version { http_version(hp, PTR_TO(mark), LEN(mark, fpc)); }
   action request_path {
     VALUE val;
 
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_PATH);
-    val = rb_hash_aset(req, g_request_path, STR_NEW(mark, fpc));
+    val = rb_hash_aset(hp->env, g_request_path, STR_NEW(mark, fpc));
 
     /* rack says PATH_INFO must start with "/" or be empty */
     if (!STR_CSTR_EQ(val, "*"))
-      rb_hash_aset(req, g_path_info, val);
+      rb_hash_aset(hp->env, g_path_info, val);
   }
   action add_to_chunk_size {
     hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
     if (hp->len.chunk < 0)
-      rb_raise(eHttpParserError, "invalid chunk size");
+      parser_error("invalid chunk size");
   }
   action header_done {
-    finalize_header(hp, req);
+    finalize_header(hp);
 
     cs = http_parser_first_final;
     if (HP_FL_TEST(hp, HASBODY)) {
@@ -321,7 +330,7 @@ static void write_value(VALUE req, struct http_parser *hp,
   action skip_chunk_data {
   skip_chunk_data_hack: {
     size_t nr = MIN((size_t)hp->len.chunk, REMAINING);
-    memcpy(RSTRING_PTR(req) + hp->s.dest_offset, fpc, nr);
+    memcpy(RSTRING_PTR(hp->cont) + hp->s.dest_offset, fpc, nr);
     hp->s.dest_offset += nr;
     hp->len.chunk -= nr;
     p += nr;
@@ -344,15 +353,20 @@ static void write_value(VALUE req, struct http_parser *hp,
 static void http_parser_init(struct http_parser *hp)
 {
   int cs = 0;
-  memset(hp, 0, sizeof(struct http_parser));
+  hp->flags = 0;
+  hp->mark = 0;
+  hp->offset = 0;
+  hp->start.field = 0;
+  hp->s.field_len = 0;
+  hp->len.content = 0;
   hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */
   %% write init;
   hp->cs = cs;
 }
 
 /** exec **/
-static void http_parser_execute(struct http_parser *hp,
-  VALUE req, char *buffer, size_t len)
+static void
+http_parser_execute(struct http_parser *hp, char *buffer, size_t len)
 {
   const char *p, *pe;
   int cs = hp->cs;
@@ -392,20 +406,20 @@ static struct http_parser *data_get(VALUE self)
   return hp;
 }
 
-static void finalize_header(struct http_parser *hp, VALUE req)
+static void finalize_header(struct http_parser *hp)
 {
-  VALUE temp = rb_hash_aref(req, g_rack_url_scheme);
+  VALUE temp = rb_hash_aref(hp->env, g_rack_url_scheme);
   VALUE server_name = g_localhost;
   VALUE server_port = g_port_80;
 
   /* set rack.url_scheme to "https" or "http", no others are allowed by Rack */
   if (NIL_P(temp)) {
-    temp = rb_hash_aref(req, g_http_x_forwarded_proto);
+    temp = rb_hash_aref(hp->env, g_http_x_forwarded_proto);
     if (!NIL_P(temp) && STR_CSTR_EQ(temp, "https"))
       server_port = g_port_443;
     else
       temp = g_http;
-    rb_hash_aset(req, g_rack_url_scheme, temp);
+    rb_hash_aset(hp->env, g_rack_url_scheme, temp);
   } else if (STR_CSTR_EQ(temp, "https")) {
     server_port = g_port_443;
   } else {
@@ -413,7 +427,7 @@ static void finalize_header(struct http_parser *hp, VALUE req)
   }
 
   /* parse and set the SERVER_NAME and SERVER_PORT variables */
-  temp = rb_hash_aref(req, g_http_host);
+  temp = rb_hash_aref(hp->env, g_http_host);
   if (!NIL_P(temp)) {
     char *colon = memchr(RSTRING_PTR(temp), ':', RSTRING_LEN(temp));
     if (colon) {
@@ -426,20 +440,22 @@ static void finalize_header(struct http_parser *hp, VALUE req)
       server_name = temp;
     }
   }
-  rb_hash_aset(req, g_server_name, server_name);
-  rb_hash_aset(req, g_server_port, server_port);
+  rb_hash_aset(hp->env, g_server_name, server_name);
+  rb_hash_aset(hp->env, g_server_port, server_port);
   if (!HP_FL_TEST(hp, HASHEADER))
-    rb_hash_aset(req, g_server_protocol, g_http_09);
+    rb_hash_aset(hp->env, g_server_protocol, g_http_09);
 
   /* rack requires QUERY_STRING */
-  if (NIL_P(rb_hash_aref(req, g_query_string)))
-    rb_hash_aset(req, g_query_string, rb_str_new(NULL, 0));
+  if (NIL_P(rb_hash_aref(hp->env, g_query_string)))
+    rb_hash_aset(hp->env, g_query_string, rb_str_new(NULL, 0));
 }
 
 static void hp_mark(void *ptr)
 {
   struct http_parser *hp = ptr;
 
+  rb_gc_mark(hp->buf);
+  rb_gc_mark(hp->env);
   rb_gc_mark(hp->cont);
 }
 
@@ -458,7 +474,11 @@ static VALUE HttpParser_alloc(VALUE klass)
  */
 static VALUE HttpParser_init(VALUE self)
 {
-  http_parser_init(data_get(self));
+  struct http_parser *hp = data_get(self);
+
+  http_parser_init(hp);
+  hp->buf = rb_str_new(NULL, 0);
+  hp->env = rb_hash_new();
 
   return self;
 }
@@ -472,7 +492,10 @@ static VALUE HttpParser_init(VALUE self)
  */
 static VALUE HttpParser_reset(VALUE self)
 {
-  http_parser_init(data_get(self));
+  struct http_parser *hp = data_get(self);
+
+  http_parser_init(hp);
+  rb_funcall(hp->env, id_clear, 0);
 
   return Qnil;
 }
@@ -513,32 +536,23 @@ static VALUE HttpParser_content_length(VALUE self)
 }
 
 /**
- * Document-method: trailers
- * call-seq:
- *    parser.trailers(req, data) => req or nil
- *
- * This is an alias for HttpParser#headers
- */
-
-/**
- * Document-method: headers
+ * Document-method: parse
  * call-seq:
- *    parser.headers(req, data) => req or nil
+ *    parser.parse => env or nil
  *
  * Takes a Hash and a String of data, parses the String of data filling
  * in the Hash returning the Hash if parsing is finished, nil otherwise
- * When returning the req Hash, it may modify data to point to where
+ * When returning the env Hash, it may modify data to point to where
  * body processing should begin.
  *
  * Raises HttpParserError if there are parsing errors.
  */
-static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data)
+static VALUE HttpParser_parse(VALUE self)
 {
   struct http_parser *hp = data_get(self);
+  VALUE data = hp->buf;
 
-  rb_str_update(data);
-
-  http_parser_execute(hp, req, RSTRING_PTR(data), RSTRING_LEN(data));
+  http_parser_execute(hp, RSTRING_PTR(data), RSTRING_LEN(data));
   VALIDATE_MAX_LENGTH(hp->offset, HEADER);
 
   if (hp->cs == http_parser_first_final ||
@@ -546,15 +560,36 @@ static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data)
     advance_str(data, hp->offset + 1);
     hp->offset = 0;
 
-    return req;
+    return hp->env;
   }
 
   if (hp->cs == http_parser_error)
-    rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+    parser_error("Invalid HTTP format, parsing fails.");
 
   return Qnil;
 }
 
+/**
+ * Document-method: trailers
+ * call-seq:
+ *    parser.trailers(req, data) => req or nil
+ *
+ * This is an alias for HttpParser#headers
+ */
+
+/**
+ * Document-method: headers
+ */
+static VALUE HttpParser_headers(VALUE self, VALUE env, VALUE buf)
+{
+  struct http_parser *hp = data_get(self);
+
+  hp->env = env;
+  hp->buf = buf;
+
+  return HttpParser_parse(self);
+}
+
 static int chunked_eof(struct http_parser *hp)
 {
   return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER));
@@ -610,6 +645,16 @@ static VALUE HttpParser_has_headers(VALUE self)
   return HP_FL_TEST(hp, HASHEADER) ? Qtrue : Qfalse;
 }
 
+static VALUE HttpParser_buf(VALUE self)
+{
+  return data_get(self)->buf;
+}
+
+static VALUE HttpParser_env(VALUE self)
+{
+  return data_get(self)->env;
+}
+
 /**
  * call-seq:
  *    parser.filter_body(buf, data) => nil/data
@@ -630,7 +675,6 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
   char *dptr;
   long dlen;
 
-  rb_str_update(data);
   dptr = RSTRING_PTR(data);
   dlen = RSTRING_LEN(data);
 
@@ -641,9 +685,11 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
   if (HP_FL_TEST(hp, CHUNKED)) {
     if (!chunked_eof(hp)) {
       hp->s.dest_offset = 0;
-      http_parser_execute(hp, buf, dptr, dlen);
+      hp->cont = buf;
+      hp->buf = data;
+      http_parser_execute(hp, dptr, dlen);
       if (hp->cs == http_parser_error)
-        rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+        parser_error("Invalid HTTP format, parsing fails.");
 
       assert(hp->s.dest_offset <= hp->offset &&
              "destination buffer overflow");
@@ -693,13 +739,16 @@ void Init_unicorn_http(void)
   rb_define_alloc_func(cHttpParser, HttpParser_alloc);
   rb_define_method(cHttpParser, "initialize", HttpParser_init,0);
   rb_define_method(cHttpParser, "reset", HttpParser_reset,0);
+  rb_define_method(cHttpParser, "parse", HttpParser_parse, 0);
   rb_define_method(cHttpParser, "headers", HttpParser_headers, 2);
-  rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
   rb_define_method(cHttpParser, "trailers", HttpParser_headers, 2);
+  rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
   rb_define_method(cHttpParser, "content_length", HttpParser_content_length, 0);
   rb_define_method(cHttpParser, "body_eof?", HttpParser_body_eof, 0);
   rb_define_method(cHttpParser, "keepalive?", HttpParser_keepalive, 0);
   rb_define_method(cHttpParser, "headers?", HttpParser_has_headers, 0);
+  rb_define_method(cHttpParser, "buf", HttpParser_buf, 0);
+  rb_define_method(cHttpParser, "env", HttpParser_env, 0);
 
   /*
    * The maximum size a single chunk when using chunked transfer encoding.
@@ -722,5 +771,6 @@ void Init_unicorn_http(void)
   SET_GLOBAL(g_http_transfer_encoding, "TRANSFER_ENCODING");
   SET_GLOBAL(g_content_length, "CONTENT_LENGTH");
   SET_GLOBAL(g_http_connection, "CONNECTION");
+  id_clear = rb_intern("clear");
 }
 #undef SET_GLOBAL
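
The net effect of the .rl changes: the parser now owns its buffer and
env Hash (both marked in hp_mark for GC safety), so callers no longer
thread req/data through every call, and headers/trailers remain as
two-argument wrappers that assign buf/env and delegate to the new
parse method.  Under those assumptions, a header-reading loop could
look like:

    require 'unicorn_http'

    # read and parse request headers from +socket+ (any readable IO);
    # 16k matches Unicorn::Const::CHUNK_SIZE
    def read_headers(socket)
      parser = Unicorn::HttpParser.new
      until (env = parser.parse)
        parser.buf << socket.readpartial(16 * 1024)
      end
      env # parser.buf retains body bytes that followed the headers
    end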
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index 735354f..622dc6c 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,835 +1,83 @@
 # -*- encoding: binary -*-
-
 require 'fcntl'
 require 'etc'
 require 'stringio'
 require 'rack'
-require 'unicorn/socket_helper'
-require 'unicorn/const'
-require 'unicorn/http_request'
-require 'unicorn/configurator'
-require 'unicorn/util'
-require 'unicorn/tee_input'
-require 'unicorn/http_response'
+require 'kgio'
 
-# Unicorn module containing all of the classes (include C extensions) for running
-# a Unicorn web server.  It contains a minimalist HTTP server with just enough
-# functionality to service web application requests fast as possible.
+# Unicorn module containing all of the classes (including C extensions) for
+# running a Unicorn web server.  It contains a minimalist HTTP server with just
+# enough functionality to service web application requests as fast as possible.
 module Unicorn
-
-  # raised inside TeeInput when a client closes the socket inside the
-  # application dispatch.  This is always raised with an empty backtrace
-  # since there is nothing in the application stack that is responsible
-  # for client shutdowns/disconnects.
-  class ClientShutdown < EOFError
-  end
-
-  class << self
-    def run(app, options = {})
-      HttpServer.new(app, options).start.join
-    end
-
-    # This returns a lambda to pass in as the app, this does not "build" the
-    # app (which we defer based on the outcome of "preload_app" in the
-    # Unicorn config).  The returned lambda will be called when it is
-    # time to build the app.
-    def builder(ru, opts)
-      # allow Configurator to parse cli switches embedded in the ru file
-      Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
-
-      # always called after config file parsing, may be called after forking
-      lambda do ||
-        inner_app = case ru
-        when /\.ru$/
-          raw = File.read(ru)
-          raw.sub!(/^__END__\n.*/, '')
-          eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
-        else
-          require ru
-          Object.const_get(File.basename(ru, '.rb').capitalize)
-        end
-
-        pp({ :inner_app => inner_app }) if $DEBUG
-
-        # return value, matches rackup defaults based on env
-        case ENV["RACK_ENV"]
-        when "development"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            use Rack::ShowExceptions
-            use Rack::Lint
-            run inner_app
-          end.to_app
-        when "deployment"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            run inner_app
-          end.to_app
-        else
-          inner_app
-        end
-      end
-    end
-
-    # returns an array of strings representing TCP listen socket addresses
-    # and Unix domain socket paths.  This is useful for use with
-    # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
-    def listener_names
-      HttpServer::LISTENERS.map { |io| SocketHelper.sock_name(io) }
-    end
+  def self.run(app, options = {})
+    Unicorn::HttpServer.new(app, options).start.join
   end
 
-  # This is the process manager of Unicorn. This manages worker
-  # processes which in turn handle the I/O and application process.
-  # Listener sockets are started in the master process and shared with
-  # forked worker children.
-
-  class HttpServer < Struct.new(:app, :timeout, :worker_processes,
-                                :before_fork, :after_fork, :before_exec,
-                                :logger, :pid, :listener_opts, :preload_app,
-                                :reexec_pid, :orig_app, :init_listeners,
-                                :master_pid, :config, :ready_pipe, :user)
-    include ::Unicorn::SocketHelper
-
-    # prevents IO objects in here from being GC-ed
-    IO_PURGATORY = []
-
-    # all bound listener sockets
-    LISTENERS = []
-
-    # This hash maps PIDs to Workers
-    WORKERS = {}
-
-    # We use SELF_PIPE differently in the master and worker processes:
-    #
-    # * The master process never closes or reinitializes this once
-    # initialized.  Signal handlers in the master process will write to
-    # it to wake up the master from IO.select in exactly the same manner
-    # djb describes in http://cr.yp.to/docs/selfpipe.html
-    #
-    # * The workers immediately close the pipe they inherit from the
-    # master and replace it with a new pipe after forking.  This new
-    # pipe is also used to wakeup from IO.select from inside (worker)
-    # signal handlers.  However, workers *close* the pipe descriptors in
-    # the signal handlers to raise EBADF in IO.select instead of writing
-    # like we do in the master.  We cannot easily use the reader set for
-    # IO.select because LISTENERS is already that set, and it's extra
-    # work (and cycles) to distinguish the pipe FD from the reader set
-    # once IO.select returns.  So we're lazy and just close the pipe when
-    # a (rare) signal arrives in the worker and reinitialize the pipe later.
-    SELF_PIPE = []
-
-    # signal queue used for self-piping
-    SIG_QUEUE = []
-
-    # constant lookups are faster and we're single-threaded/non-reentrant
-    REQUEST = HttpRequest.new
-
-    # We populate this at startup so we can figure out how to reexecute
-    # and upgrade the currently running instance of Unicorn
-    # This Hash is considered a stable interface and changing its contents
-    # will allow you to switch between different installations of Unicorn
-    # or even different installations of the same applications without
-    # downtime.  Keys of this constant Hash are described as follows:
-    #
-    # * 0 - the path to the unicorn/unicorn_rails executable
-    # * :argv - a deep copy of the ARGV array the executable originally saw
-    # * :cwd - the working directory of the application, this is where
-    # you originally started Unicorn.
-    #
-    # To change your unicorn executable to a different path without downtime,
-    # you can set the following in your Unicorn config file, HUP and then
-    # continue with the traditional USR2 + QUIT upgrade steps:
-    #
-    #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
-    START_CTX = {
-      :argv => ARGV.map { |arg| arg.dup },
-      :cwd => lambda {
-          # favor ENV['PWD'] since it is (usually) symlink aware for
-          # Capistrano and like systems
-          begin
-            a = File.stat(pwd = ENV['PWD'])
-            b = File.stat(Dir.pwd)
-            a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
-          rescue
-            Dir.pwd
-          end
-        }.call,
-      0 => $0.dup,
-    }
-
-    # This class and its members can be considered a stable interface
-    # and will not change in a backwards-incompatible fashion between
-    # releases of Unicorn.  You may need to access it in the
-    # before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
-    # for examples.
-    class Worker < Struct.new(:nr, :tmp, :switched)
-
-      # worker objects may be compared to just plain numbers
-      def ==(other_nr)
-        self.nr == other_nr
-      end
-
-      # Changes the worker process to the specified +user+ and +group+
-      # This is only intended to be called from within the worker
-      # process from the +after_fork+ hook.  This should be called in
-      # the +after_fork+ hook after any priviledged functions need to be
-      # run (e.g. to set per-worker CPU affinity, niceness, etc)
-      #
-      # Any and all errors raised within this method will be propagated
-      # directly back to the caller (usually the +after_fork+ hook.
-      # These errors commonly include ArgumentError for specifying an
-      # invalid user/group and Errno::EPERM for insufficient priviledges
-      def user(user, group = nil)
-        # we do not protect the caller, checking Process.euid == 0 is
-        # insufficient because modern systems have fine-grained
-        # capabilities.  Let the caller handle any and all errors.
-        uid = Etc.getpwnam(user).uid
-        gid = Etc.getgrnam(group).gid if group
-        Unicorn::Util.chown_logs(uid, gid)
-        tmp.chown(uid, gid)
-        if gid && Process.egid != gid
-          Process.initgroups(user, gid)
-          Process::GID.change_privilege(gid)
-        end
-        Process.euid != uid and Process::UID.change_privilege(uid)
-        self.switched = true
-      end
-
-    end
-
-    # Creates a working server on host:port (strange things happen if
-    # port isn't a Number).  Use HttpServer::run to start the server and
-    # HttpServer.run.join to join the thread that's processing
-    # incoming requests on the socket.
-    def initialize(app, options = {})
-      self.app = app
-      self.reexec_pid = 0
-      self.ready_pipe = options.delete(:ready_pipe)
-      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
-      self.config = Configurator.new(options.merge(:use_defaults => true))
-      self.listener_opts = {}
-
-      # we try inheriting listeners first, so we bind them later.
-      # we don't write the pid file until we've bound listeners in case
-      # unicorn was started twice by mistake.  Even though our #pid= method
-      # checks for stale/existing pid files, race conditions are still
-      # possible (and difficult/non-portable to avoid) and can be likely
-      # to clobber the pid if the second start was in quick succession
-      # after the first, so we rely on the listener binding to fail in
-      # that case.  Some tests (in and outside of this source tree) and
-      # monitoring tools may also rely on pid files existing before we
-      # attempt to connect to the listener(s)
-      config.commit!(self, :skip => [:listeners, :pid])
-      self.orig_app = app
-    end
-
-    # Runs the thing.  Returns self so you can run join on it
-    def start
-      BasicSocket.do_not_reverse_lookup = true
-
-      # inherit sockets from parents, they need to be plain Socket objects
-      # before they become UNIXServer or TCPServer
-      inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
-        io = Socket.for_fd(fd.to_i)
-        set_server_sockopt(io, listener_opts[sock_name(io)])
-        IO_PURGATORY << io
-        logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
-        server_cast(io)
-      end
-
-      config_listeners = config[:listeners].dup
-      LISTENERS.replace(inherited)
-
-      # we start out with generic Socket objects that get cast to either
-      # TCPServer or UNIXServer objects; but since the Socket objects
-      # share the same OS-level file descriptor as the higher-level *Server
-      # objects; we need to prevent Socket objects from being garbage-collected
-      config_listeners -= listener_names
-      if config_listeners.empty? && LISTENERS.empty?
-        config_listeners << Unicorn::Const::DEFAULT_LISTEN
-        init_listeners << Unicorn::Const::DEFAULT_LISTEN
-        START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
-      end
-      config_listeners.each { |addr| listen(addr) }
-      raise ArgumentError, "no listeners" if LISTENERS.empty?
-
-      # this pipe is used to wake us up from select(2) in #join when signals
-      # are trapped.  See trap_deferred.
-      init_self_pipe!
-
-      # setup signal handlers before writing pid file in case people get
-      # trigger happy and send signals as soon as the pid file exists.
-      # Note that signals don't actually get handled until the #join method
-      QUEUE_SIGS.each { |sig| trap_deferred(sig) }
-      trap(:CHLD) { |_| awaken_master }
-      self.pid = config[:pid]
-
-      self.master_pid = $$
-      build_app! if preload_app
-      maintain_worker_count
-      self
-    end
-
-    # replaces current listener set with +listeners+.  This will
-    # close the socket if it will not exist in the new listener set
-    def listeners=(listeners)
-      cur_names, dead_names = [], []
-      listener_names.each do |name|
-        if ?/ == name[0]
-          # mark unlinked sockets as dead so we can rebind them
-          (File.socket?(name) ? cur_names : dead_names) << name
-        else
-          cur_names << name
-        end
-      end
-      set_names = listener_names(listeners)
-      dead_names.concat(cur_names - set_names).uniq!
-
-      LISTENERS.delete_if do |io|
-        if dead_names.include?(sock_name(io))
-          IO_PURGATORY.delete_if do |pio|
-            pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
-          end
-          (io.close rescue nil).nil? # true
-        else
-          set_server_sockopt(io, listener_opts[sock_name(io)])
-          false
-        end
-      end
-
-      (set_names - cur_names).each { |addr| listen(addr) }
-    end
-
-    def stdout_path=(path); redirect_io($stdout, path); end
-    def stderr_path=(path); redirect_io($stderr, path); end
-
-    def logger=(obj)
-      HttpRequest::DEFAULTS["rack.logger"] = super
-    end
-
-    # sets the path for the PID file of the master process
-    def pid=(path)
-      if path
-        if x = valid_pid?(path)
-          return path if pid && path == pid && x == $$
-          if x == reexec_pid && pid =~ /\.oldbin\z/
-            logger.warn("will not set pid=#{path} while reexec-ed "\
-                        "child is running PID:#{x}")
-            return
-          end
-          raise ArgumentError, "Already running on PID:#{x} " \
-                               "(or pid=#{path} is stale)"
-        end
-      end
-      unlink_pid_safe(pid) if pid
-
-      if path
-        fp = begin
-          tmp = "#{File.dirname(path)}/#{rand}.#$$"
-          File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
-        rescue Errno::EEXIST
-          retry
-        end
-        fp.syswrite("#$$\n")
-        File.rename(fp.path, path)
-        fp.close
-      end
-      super(path)
-    end
-
-    # add a given address to the +listeners+ set, idempotently
-    # Allows workers to add a private, per-process listener via the
-    # after_fork hook.  Very useful for debugging and testing.
-    # +:tries+ may be specified as an option for the number of times
-    # to retry, and +:delay+ may be specified as the time in seconds
-    # to delay between retries.
-    # A negative value for +:tries+ indicates the listen will be
-    # retried indefinitely, this is useful when workers belonging to
-    # different masters are spawned during a transparent upgrade.
-    def listen(address, opt = {}.merge(listener_opts[address] || {}))
-      address = config.expand_addr(address)
-      return if String === address && listener_names.include?(address)
-
-      delay = opt[:delay] || 0.5
-      tries = opt[:tries] || 5
-      begin
-        io = bind_listen(address, opt)
-        unless TCPServer === io || UNIXServer === io
-          IO_PURGATORY << io
-          io = server_cast(io)
-        end
-        logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
-        LISTENERS << io
-        io
-      rescue Errno::EADDRINUSE => err
-        logger.error "adding listener failed addr=#{address} (in use)"
-        raise err if tries == 0
-        tries -= 1
-        logger.error "retrying in #{delay} seconds " \
-                     "(#{tries < 0 ? 'infinite' : tries} tries left)"
-        sleep(delay)
-        retry
-      rescue => err
-        logger.fatal "error adding listener addr=#{address}"
-        raise err
-      end
-    end
-
-    # monitors children and receives signals forever
-    # (or until a termination signal is sent).  This handles signals
-    # one-at-a-time time and we'll happily drop signals in case somebody
-    # is signalling us too often.
-    def join
-      respawn = true
-      last_check = Time.now
-
-      proc_name 'master'
-      logger.info "master process ready" # test_exec.rb relies on this message
-      if ready_pipe
-        ready_pipe.syswrite($$.to_s)
-        ready_pipe.close rescue nil
-        self.ready_pipe = nil
-      end
-      begin
-        loop do
-          reap_all_workers
-          case SIG_QUEUE.shift
-          when nil
-            # avoid murdering workers after our master process (or the
-            # machine) comes out of suspend/hibernation
-            if (last_check + timeout) >= (last_check = Time.now)
-              murder_lazy_workers
-            else
-              # wait for workers to wakeup on suspend
-              master_sleep(timeout/2.0 + 1)
-            end
-            maintain_worker_count if respawn
-            master_sleep(1)
-          when :QUIT # graceful shutdown
-            break
-          when :TERM, :INT # immediate shutdown
-            stop(false)
-            break
-          when :USR1 # rotate logs
-            logger.info "master reopening logs..."
-            Unicorn::Util.reopen_logs
-            logger.info "master done reopening logs"
-            kill_each_worker(:USR1)
-          when :USR2 # exec binary, stay alive in case something went wrong
-            reexec
-          when :WINCH
-            if Process.ppid == 1 || Process.getpgrp != $$
-              respawn = false
-              logger.info "gracefully stopping all workers"
-              kill_each_worker(:QUIT)
-              self.worker_processes = 0
-            else
-              logger.info "SIGWINCH ignored because we're not daemonized"
-            end
-          when :TTIN
-            respawn = true
-            self.worker_processes += 1
-          when :TTOU
-            self.worker_processes -= 1 if self.worker_processes > 0
-          when :HUP
-            respawn = true
-            if config.config_file
-              load_config!
-              redo # immediate reaping since we may have QUIT workers
-            else # exec binary and exit if there's no config file
-              logger.info "config_file not present, reexecuting binary"
-              reexec
-              break
-            end
-          end
-        end
-      rescue Errno::EINTR
-        retry
-      rescue => e
-        logger.error "Unhandled master loop exception #{e.inspect}."
-        logger.error e.backtrace.join("\n")
-        retry
-      end
-      stop # gracefully shutdown all workers on our way out
-      logger.info "master complete"
-      unlink_pid_safe(pid) if pid
-    end
-
-    # Terminates all workers, but does not exit master process
-    def stop(graceful = true)
-      self.listeners = []
-      limit = Time.now + timeout
-      until WORKERS.empty? || Time.now > limit
-        kill_each_worker(graceful ? :QUIT : :TERM)
-        sleep(0.1)
-        reap_all_workers
-      end
-      kill_each_worker(:KILL)
-    end
-
-    private
-
-    # list of signals we care about and trap in master.
-    QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
-                   :TTIN, :TTOU ]
-
-    # defer a signal for later processing in #join (master process)
-    def trap_deferred(signal)
-      trap(signal) do |sig_nr|
-        if SIG_QUEUE.size < 5
-          SIG_QUEUE << signal
-          awaken_master
-        else
-          logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
-        end
-      end
-    end
-
-    # wait for a signal hander to wake us up and then consume the pipe
-    # Wake up every second anyways to run murder_lazy_workers
-    def master_sleep(sec)
-      IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
-      SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
-      rescue Errno::EAGAIN, Errno::EINTR
-    end
-
-    def awaken_master
-      begin
-        SELF_PIPE[1].write_nonblock('.') # wakeup master process from select
-      rescue Errno::EAGAIN, Errno::EINTR
-        # pipe is full, master should wake up anyways
-        retry
-      end
-    end
-
-    # reaps all unreaped workers
-    def reap_all_workers
-      begin
-        loop do
-          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
-          wpid or break
-          if reexec_pid == wpid
-            logger.error "reaped #{status.inspect} exec()-ed"
-            self.reexec_pid = 0
-            self.pid = pid.chomp('.oldbin') if pid
-            proc_name 'master'
-          else
-            worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-            logger.info "reaped #{status.inspect} " \
-                        "worker=#{worker.nr rescue 'unknown'}"
-          end
-        end
-      rescue Errno::ECHILD
-      end
-    end
-
-    # reexecutes the START_CTX with a new binary
-    def reexec
-      if reexec_pid > 0
-        begin
-          Process.kill(0, reexec_pid)
-          logger.error "reexec-ed child already running PID:#{reexec_pid}"
-          return
-        rescue Errno::ESRCH
-          self.reexec_pid = 0
-        end
-      end
-
-      if pid
-        old_pid = "#{pid}.oldbin"
-        prev_pid = pid.dup
-        begin
-          self.pid = old_pid  # clear the path for a new pid file
-        rescue ArgumentError
-          logger.error "old PID:#{valid_pid?(old_pid)} running with " \
-                       "existing pid=#{old_pid}, refusing rexec"
-          return
-        rescue => e
-          logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
-          return
-        end
-      end
-
-      self.reexec_pid = fork do
-        listener_fds = LISTENERS.map { |sock| sock.fileno }
-        ENV['UNICORN_FD'] = listener_fds.join(',')
-        Dir.chdir(START_CTX[:cwd])
-        cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
-
-        # avoid leaking FDs we don't know about, but let before_exec
-        # unset FD_CLOEXEC, if anything else in the app eventually
-        # relies on FD inheritence.
-        (3..1024).each do |io|
-          next if listener_fds.include?(io)
-          io = IO.for_fd(io) rescue nil
-          io or next
-          IO_PURGATORY << io
-          io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-        end
-        logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
-        before_exec.call(self)
-        exec(*cmd)
-      end
-      proc_name 'master (old)'
-    end
-
-    # forcibly terminate all workers that haven't checked in in timeout
-    # seconds.  The timeout is implemented using an unlinked File
-    # shared between the parent process and each worker.  The worker
-    # runs File#chmod to modify the ctime of the File.  If the ctime
-    # is stale for >timeout seconds, then we'll kill the corresponding
-    # worker.
-    def murder_lazy_workers
-      WORKERS.dup.each_pair do |wpid, worker|
-        stat = worker.tmp.stat
-        # skip workers that disable fchmod or have never fchmod-ed
-        stat.mode == 0100600 and next
-        (diff = (Time.now - stat.ctime)) <= timeout and next
-        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
-                     "(#{diff}s > #{timeout}s), killing"
-        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
-      end
-    end
-
-    def spawn_missing_workers
-      (0...worker_processes).each do |worker_nr|
-        WORKERS.values.include?(worker_nr) and next
-        worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
-        before_fork.call(self, worker)
-        WORKERS[fork {
-          ready_pipe.close if ready_pipe
-          self.ready_pipe = nil
-          worker_loop(worker)
-        }] = worker
-      end
-    end
-
-    def maintain_worker_count
-      (off = WORKERS.size - worker_processes) == 0 and return
-      off < 0 and return spawn_missing_workers
-      WORKERS.dup.each_pair { |wpid,w|
-        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
-      }
-    end
-
-    # if we get any error, try to write something back to the client
-    # assuming we haven't closed the socket, but don't get hung up
-    # if the socket is already closed or broken.  We'll always ensure
-    # the socket is closed at the end of this function
-    def handle_error(client, e)
-      msg = case e
-      when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
-        Const::ERROR_500_RESPONSE
-      when HttpParserError # try to tell the client they're bad
-        Const::ERROR_400_RESPONSE
+  # This returns a lambda to pass in as the app; it does not "build" the
+  # app (which we defer based on the outcome of "preload_app" in the
+  # Unicorn config).  The returned lambda will be called when it is
+  # time to build the app.
+  def self.builder(ru, opts)
+    # allow Configurator to parse cli switches embedded in the ru file
+    Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
+
+    # always called after config file parsing, may be called after forking
+    lambda do ||
+      inner_app = case ru
+      when /\.ru$/
+        raw = File.read(ru)
+        raw.sub!(/^__END__\n.*/, '')
+        eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
       else
-        logger.error "Read error: #{e.inspect}"
-        logger.error e.backtrace.join("\n")
-        Const::ERROR_500_RESPONSE
-      end
-      client.write_nonblock(msg)
-      client.close
-      rescue
-        nil
-    end
-
-    # once a client is accepted, it is processed in its entirety here
-    # in 3 easy steps: read request, call app, write app response
-    def process_client(client)
-      client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      response = app.call(env = REQUEST.read(client))
-
-      if 100 == response[0].to_i
-        client.write(Const::EXPECT_100_RESPONSE)
-        env.delete(Const::HTTP_EXPECT)
-        response = app.call(env)
-      end
-      HttpResponse.write(client, response, HttpRequest::PARSER.headers?)
-    rescue => e
-      handle_error(client, e)
-    end
-
-    # gets rid of stuff the worker has no business keeping track of
-    # to free some resources and drops all sig handlers.
-    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
-    # by the user.
-    def init_worker_process(worker)
-      QUEUE_SIGS.each { |sig| trap(sig, nil) }
-      trap(:CHLD, 'DEFAULT')
-      SIG_QUEUE.clear
-      proc_name "worker[#{worker.nr}]"
-      START_CTX.clear
-      init_self_pipe!
-      WORKERS.values.each { |other| other.tmp.close rescue nil }
-      WORKERS.clear
-      LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
-      worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      after_fork.call(self, worker) # can drop perms
-      worker.user(*user) if user.kind_of?(Array) && ! worker.switched
-      self.timeout /= 2.0 # halve it for select()
-      build_app! unless preload_app
-    end
-
-    def reopen_worker_logs(worker_nr)
-      logger.info "worker=#{worker_nr} reopening logs..."
-      Unicorn::Util.reopen_logs
-      logger.info "worker=#{worker_nr} done reopening logs"
-      init_self_pipe!
-    end
-
-    # runs inside each forked worker, this sits around and waits
-    # for connections and doesn't die until the parent dies (or is
-    # given a INT, QUIT, or TERM signal)
-    def worker_loop(worker)
-      ppid = master_pid
-      init_worker_process(worker)
-      nr = 0 # this becomes negative if we need to reopen logs
-      alive = worker.tmp # tmp is our lifeline to the master process
-      ready = LISTENERS
-
-      # closing anything we IO.select on will raise EBADF
-      trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
-      trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
-      [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
-      logger.info "worker=#{worker.nr} ready"
-      m = 0
-
-      begin
-        nr < 0 and reopen_worker_logs(worker.nr)
-        nr = 0
-
-        # we're a goner in timeout seconds anyways if alive.chmod
-        # breaks, so don't trap the exception.  Using fchmod() since
-        # futimes() is not available in base Ruby and I very strongly
-        # prefer temporary files to be unlinked for security,
-        # performance and reliability reasons, so utime is out.  No-op
-        # changes with chmod doesn't update ctime on all filesystems; so
-        # we change our counter each and every time (after process_client
-        # and before IO.select).
-        alive.chmod(m = 0 == m ? 1 : 0)
-
-        ready.each do |sock|
-          begin
-            process_client(sock.accept_nonblock)
-            nr += 1
-            alive.chmod(m = 0 == m ? 1 : 0)
-          rescue Errno::EAGAIN, Errno::ECONNABORTED
-          end
-          break if nr < 0
-        end
-
-        # make the following bet: if we accepted clients this round,
-        # we're probably reasonably busy, so avoid calling select()
-        # and do a speculative accept_nonblock on ready listeners
-        # before we sleep again in select().
-        redo unless nr == 0 # (nr < 0) => reopen logs
-
-        ppid == Process.ppid or return
-        alive.chmod(m = 0 == m ? 1 : 0)
-        begin
-          # timeout used so we can detect parent death:
-          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
-          ready = ret[0]
-        rescue Errno::EINTR
-          ready = LISTENERS
-        rescue Errno::EBADF
-          nr < 0 or return
-        end
-      rescue => e
-        if alive
-          logger.error "Unhandled listen loop exception #{e.inspect}."
-          logger.error e.backtrace.join("\n")
-        end
-      end while alive
-    end
-
-    # delivers a signal to a worker and fails gracefully if the worker
-    # is no longer running.
-    def kill_worker(signal, wpid)
-      begin
-        Process.kill(signal, wpid)
-      rescue Errno::ESRCH
-        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-      end
-    end
-
-    # delivers a signal to each worker
-    def kill_each_worker(signal)
-      WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
-    end
-
-    # unlinks a PID file at given +path+ if it contains the current PID
-    # still potentially racy without locking the directory (which is
-    # non-portable and may interact badly with other programs), but the
-    # window for hitting the race condition is small
-    def unlink_pid_safe(path)
-      (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
-    end
-
-    # returns a PID if a given path contains a non-stale PID file,
-    # nil otherwise.
-    def valid_pid?(path)
-      wpid = File.read(path).to_i
-      wpid <= 0 and return nil
-      begin
-        Process.kill(0, wpid)
-        wpid
-      rescue Errno::ESRCH
-        # don't unlink stale pid files, racy without non-portable locking...
-      end
-      rescue Errno::ENOENT
-    end
-
-    def load_config!
-      loaded_app = app
-      begin
-        logger.info "reloading config_file=#{config.config_file}"
-        config[:listeners].replace(init_listeners)
-        config.reload
-        config.commit!(self)
-        kill_each_worker(:QUIT)
-        Unicorn::Util.reopen_logs
-        self.app = orig_app
-        build_app! if preload_app
-        logger.info "done reloading config_file=#{config.config_file}"
-      rescue StandardError, LoadError, SyntaxError => e
-        logger.error "error reloading config_file=#{config.config_file}: " \
-                     "#{e.class} #{e.message} #{e.backtrace}"
-        self.app = loaded_app
-      end
-    end
-
-    # returns an array of string names for the given listener array
-    def listener_names(listeners = LISTENERS)
-      listeners.map { |io| sock_name(io) }
-    end
-
-    def build_app!
-      if app.respond_to?(:arity) && app.arity == 0
-        if defined?(Gem) && Gem.respond_to?(:refresh)
-          logger.info "Refreshing Gem list"
-          Gem.refresh
-        end
-        self.app = app.call
+        require ru
+        Object.const_get(File.basename(ru, '.rb').capitalize)
+      end
+
+      pp({ :inner_app => inner_app }) if $DEBUG
+
+      # return value, matches rackup defaults based on env
+      case ENV["RACK_ENV"]
+      when "development"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          use Rack::ShowExceptions
+          use Rack::Lint
+          run inner_app
+        end.to_app
+      when "deployment"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          run inner_app
+        end.to_app
+      else
+        inner_app
       end
     end
+  end
 
-    def proc_name(tag)
-      $0 = ([ File.basename(START_CTX[0]), tag
-            ]).concat(START_CTX[:argv]).join(' ')
-    end
-
-    def redirect_io(io, path)
-      File.open(path, 'ab') { |fp| io.reopen(fp) } if path
-      io.sync = true
-    end
-
-    def init_self_pipe!
-      SELF_PIPE.each { |io| io.close rescue nil }
-      SELF_PIPE.replace(IO.pipe)
-      SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  # returns an array of strings representing TCP listen socket addresses
+  # and Unix domain socket paths.  This is useful with
+  # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
+  def self.listener_names
+    Unicorn::HttpServer::LISTENERS.map do |io|
+      Unicorn::SocketHelper.sock_name(io)
     end
-
   end
 end
+
+# raised inside TeeInput when a client closes the socket inside the
+# application dispatch.  This is always raised with an empty backtrace
+# since there is nothing in the application stack that is responsible
+# for client shutdowns/disconnects.
+class Unicorn::ClientShutdown < EOFError; end
+
+require 'unicorn/const'
+require 'unicorn/socket_helper'
+require 'unicorn/tee_input'
+require 'unicorn/http_request'
+require 'unicorn/configurator'
+require 'unicorn/tmpio'
+require 'unicorn/util'
+require 'unicorn/http_response'
+require 'unicorn/worker'
+require 'unicorn/http_server'
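
Since Unicorn::ClientShutdown subclasses EOFError and is raised with an
empty backtrace, applications reading the request body can rescue it
cheaply.  A minimal sketch of a middleware doing so (the class name and
the 499 status are illustrative, not part of this change):

    require 'unicorn'

    # hypothetical middleware that tolerates clients aborting uploads
    class UploadGuard
      def initialize(app)
        @app = app
      end

      def call(env)
        @app.call(env)
      rescue Unicorn::ClientShutdown
        # client disconnected mid-upload; 499 is borrowed from the
        # nginx "client closed request" convention, mainly for logging
        [ 499, { "Content-Type" => "text/plain" }, [] ]
      end
    end
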
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
index 412c1d9..fea22f6 100644
--- a/lib/unicorn/app/exec_cgi.rb
+++ b/lib/unicorn/app/exec_cgi.rb
@@ -43,7 +43,7 @@ module Unicorn::App
 
     # Calls the app
     def call(env)
-      out, err = Unicorn::Util.tmpio, Unicorn::Util.tmpio
+      out, err = Unicorn::TmpIO.new, Unicorn::TmpIO.new
       inp = force_file_input(env)
       pid = fork { run_child(inp, out, err, env) }
       inp.close
@@ -124,7 +124,7 @@ module Unicorn::App
       if inp.respond_to?(:size) && inp.size == 0
         ::File.open('/dev/null', 'rb')
       else
-        tmp = Unicorn::Util.tmpio
+        tmp = Unicorn::TmpIO.new
 
         buf = inp.read(CHUNK_SIZE)
         begin
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index fb37c56..dd515a7 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -8,14 +8,22 @@ require 'logger'
 # example configuration files.  An example config file for use with
 # nginx is also available at
 # http://unicorn.bogomips.org/examples/nginx.conf
-class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
+class Unicorn::Configurator
+  attr_accessor :set, :config_file, :after_reload
+
+  # :stopdoc:
   # used to stash stuff for deferred processing of cli options in
   # config.ru after "working_directory" is bound.  Do not rely on
   # this being around later on...
-  RACKUP = {} # :nodoc:
+  RACKUP = {
+    :daemonize => false,
+    :host => Unicorn::Const::DEFAULT_HOST,
+    :port => Unicorn::Const::DEFAULT_PORT,
+    :set_listener => false,
+    :options => { :listeners => [] }
+  }
 
   # Default settings for Unicorn
-  # :stopdoc:
   DEFAULTS = {
     :timeout => 60,
     :logger => Logger.new($stderr),
@@ -58,6 +66,9 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
 
     parse_rackup_file
 
+    RACKUP[:set_listener] and
+      set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}"
+
     # unicorn_rails creates dirs here after working_directory is bound
     after_reload.call if after_reload
 
@@ -495,23 +506,15 @@ private
     /^#\\(.*)/ =~ File.read(ru) or return
     RACKUP[:optparse].parse!($1.split(/\s+/))
 
-    # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
-    host, port, set_listener, options, daemonize =
-                    eval("[ host, port, set_listener, options, daemonize ]",
-                         TOPLEVEL_BINDING)
-
-    # XXX duplicate code from bin/unicorn{,_rails}
-    set[:listeners] << "#{host}:#{port}" if set_listener
-
-    if daemonize
+    if RACKUP[:daemonize]
       # unicorn_rails wants a default pid path, (not plain 'unicorn')
       if after_reload
         spid = set[:pid]
         pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
       end
       unless RACKUP[:daemonized]
-        Unicorn::Launcher.daemonize!(options)
-        RACKUP[:ready_pipe] = options.delete(:ready_pipe)
+        Unicorn::Launcher.daemonize!(RACKUP[:options])
+        RACKUP[:ready_pipe] = RACKUP[:options].delete(:ready_pipe)
       end
     end
   end
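
The parse_rackup_file change above reads deferred command-line switches
from a "#\" comment on the first line of config.ru (a rackup convention)
and stashes the results in RACKUP instead of eval-ing local variables in
TOPLEVEL_BINDING.  A hedged sketch of such a config.ru, assuming the
usual unicorn switches:

    #\ -o 127.0.0.1 -p 2007
    # the line above is parsed by RACKUP[:optparse]; with
    # RACKUP[:set_listener] true, it becomes an entry in set[:listeners]
    run lambda { |env|
      [ 200, { 'Content-Type' => 'text/plain' }, [ "hi\n" ] ]
    }
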
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 2f0b106..e7908d6 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -1,36 +1,37 @@
 # -*- encoding: binary -*-
 
-module Unicorn
+# Frequently used constants when constructing requests or responses.
+# Many times the constant just refers to a string with the same
+# contents.  Using these constants gave about a 3% to 10% performance
+# improvement over using the strings directly.  Symbols did not really
+# improve things much compared to constants.
+module Unicorn::Const
 
-  # Frequently used constants when constructing requests or responses.  Many times
-  # the constant just refers to a string with the same contents.  Using these constants
-  # gave about a 3% to 10% performance improvement over using the strings directly.
-  # Symbols did not really improve things much compared to constants.
-  module Const
+  # The current version of Unicorn, currently 2.0.0pre3
+  UNICORN_VERSION = "2.0.0pre3"
 
-    # The current version of Unicorn, currently 1.1.5
-    UNICORN_VERSION="1.1.5"
+  # default TCP listen host address (0.0.0.0, all interfaces)
+  DEFAULT_HOST = "0.0.0.0"
 
-    DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
-    DEFAULT_PORT = 8080      # default TCP listen port
-    DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
+  # default TCP listen port (8080)
+  DEFAULT_PORT = 8080
 
-    # The basic max request size we'll try to read.
-    CHUNK_SIZE=(16 * 1024)
+  # default TCP listen address and port (0.0.0.0:8080)
+  DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
 
-    # Maximum request body size before it is moved out of memory and into a
-    # temporary file for reading (112 kilobytes).
-    MAX_BODY=1024 * 112
+  # The basic request body size we'll try to read at once (16 kilobytes).
+  CHUNK_SIZE = 16 * 1024
 
-    # common errors we'll send back
-    ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
-    ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
-    EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
+  # Maximum request body size before it is moved out of memory and into a
+  # temporary file for reading (112 kilobytes).
+  MAX_BODY = 1024 * 112
 
-    # A frozen format for this is about 15% faster
-    REMOTE_ADDR="REMOTE_ADDR".freeze
-    RACK_INPUT="rack.input".freeze
-    HTTP_EXPECT="HTTP_EXPECT"
-  end
+  # :stopdoc:
+  # common errors we'll send back
+  ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
+  ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
+  EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
 
+  HTTP_EXPECT = "HTTP_EXPECT"
+  # :startdoc:
 end
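
A quick sanity check of the documented defaults; the values follow
directly from the constants above:

    require 'unicorn'
    Unicorn::Const::DEFAULT_LISTEN # => "0.0.0.0:8080"
    Unicorn::Const::CHUNK_SIZE     # => 16384
    Unicorn::Const::MAX_BODY       # => 114688 (112 kilobytes)
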
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index 65870ed..2dcd839 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -2,70 +2,68 @@
 
 require 'unicorn_http'
 
-module Unicorn
-  class HttpRequest
+# TODO: remove redundant names
+Unicorn.const_set(:HttpRequest, Unicorn::HttpParser)
+class Unicorn::HttpParser
 
-    # default parameters we merge into the request env for Rack handlers
-    DEFAULTS = {
-      "rack.errors" => $stderr,
-      "rack.multiprocess" => true,
-      "rack.multithread" => false,
-      "rack.run_once" => false,
-      "rack.version" => [1, 1],
-      "SCRIPT_NAME" => "",
+  # default parameters we merge into the request env for Rack handlers
+  DEFAULTS = {
+    "rack.errors" => $stderr,
+    "rack.multiprocess" => true,
+    "rack.multithread" => false,
+    "rack.run_once" => false,
+    "rack.version" => [1, 1],
+    "SCRIPT_NAME" => "",
 
-      # this is not in the Rack spec, but some apps may rely on it
-      "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}"
-    }
+    # this is not in the Rack spec, but some apps may rely on it
+    "SERVER_SOFTWARE" => "Unicorn #{Unicorn::Const::UNICORN_VERSION}"
+  }
 
-    NULL_IO = StringIO.new("")
-    LOCALHOST = '127.0.0.1'
+  NULL_IO = StringIO.new("")
 
-    # Being explicitly single-threaded, we have certain advantages in
-    # not having to worry about variables being clobbered :)
-    BUF = ""
-    PARSER = HttpParser.new
-    REQ = {}
+  # :stopdoc:
+  # A frozen format for this is about 15% faster
+  REMOTE_ADDR = 'REMOTE_ADDR'.freeze
+  RACK_INPUT = 'rack.input'.freeze
+  TeeInput = Unicorn::TeeInput
+  # :startdoc:
 
-    # Does the majority of the IO processing.  It has been written in
-    # Ruby using about 8 different IO processing strategies.
-    #
-    # It is currently carefully constructed to make sure that it gets
-    # the best possible performance for the common case: GET requests
-    # that are fully complete after a single read(2)
-    #
-    # Anyone who thinks they can make it faster is more than welcome to
-    # take a crack at it.
-    #
-    # returns an environment hash suitable for Rack if successful
-    # This does minimal exception trapping and it is up to the caller
-    # to handle any socket errors (e.g. user aborted upload).
-    def read(socket)
-      REQ.clear
-      PARSER.reset
+  # Does the majority of the IO processing.  It has been written in
+  # Ruby using about 8 different IO processing strategies.
+  #
+  # It is currently carefully constructed to make sure that it gets
+  # the best possible performance for the common case: GET requests
+  # that are fully complete after a single read(2)
+  #
+  # Anyone who thinks they can make it faster is more than welcome to
+  # take a crack at it.
+  #
+  # returns an environment hash suitable for Rack if successful
+  # This does minimal exception trapping and it is up to the caller
+  # to handle any socket errors (e.g. user aborted upload).
+  def read(socket)
+    reset
+    e = env
 
-      # From http://www.ietf.org/rfc/rfc3875:
-      # "Script authors should be aware that the REMOTE_ADDR and
-      #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
-      #  may not identify the ultimate source of the request.  They
-      #  identify the client for the immediate request to the server;
-      #  that client may be a proxy, gateway, or other intermediary
-      #  acting on behalf of the actual source client."
-      REQ[Const::REMOTE_ADDR] =
-                    TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
+    # From http://www.ietf.org/rfc/rfc3875:
+    # "Script authors should be aware that the REMOTE_ADDR and
+    #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
+    #  may not identify the ultimate source of the request.  They
+    #  identify the client for the immediate request to the server;
+    #  that client may be a proxy, gateway, or other intermediary
+    #  acting on behalf of the actual source client."
+    e[REMOTE_ADDR] = socket.kgio_addr
 
-      # short circuit the common case with small GET requests first
-      if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
-        # Parser is not done, queue up more data to read and continue parsing
-        # an Exception thrown from the PARSER will throw us out of the loop
-        begin
-          BUF << socket.readpartial(Const::CHUNK_SIZE)
-        end while PARSER.headers(REQ, BUF).nil?
-      end
-      REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ?
-                   NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF)
-      REQ.update(DEFAULTS)
+    # short circuit the common case with small GET requests first
+    socket.kgio_read!(16384, buf)
+    if parse.nil?
+      # Parser is not done, queue up more data to read and continue parsing
+      # an Exception thrown from the parser will throw us out of the loop
+      begin
+        buf << socket.kgio_read!(16384)
+      end while parse.nil?
     end
-
+    e[RACK_INPUT] = 0 == content_length ? NULL_IO : TeeInput.new(socket, self)
+    e.merge!(DEFAULTS)
   end
 end
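
With the parser now owning its buffer and env, feeding it by hand is
straightforward.  A hedged sketch (assumes the unicorn_http C extension
is built; parse returns nil until the headers are complete):

    require 'unicorn'

    parser = Unicorn::HttpParser.new
    parser.buf << "GET /path HTTP/1.1\r\nHost: example.com\r\n\r\n"
    env = parser.parse
    env["REQUEST_METHOD"] # => "GET"
    parser.content_length # => 0, so #read above would use NULL_IO
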
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 6f1cd48..5725e25 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -5,19 +5,12 @@ require 'time'
 # You use it by simply doing:
 #
 #   status, headers, body = rack_app.call(env)
-#   HttpResponse.write(socket, [ status, headers, body ])
+#   http_response_write(socket, [ status, headers, body ])
 #
 # Most header correctness (including Content-Length and Content-Type)
-# is the job of Rack, with the exception of the "Connection: close"
-# and "Date" headers.
+# is the job of Rack, with the exception of the "Date" and "Status" headers.
 #
-# A design decision was made to force the client to not pipeline or
-# keepalive requests.  HTTP/1.1 pipelining really kills the
-# performance due to how it has to be handled and how unclear the
-# standard is.  To fix this the HttpResponse always gives a
-# "Connection: close" header which forces the client to close right
-# away.  The bonus for this is that it gives a pretty nice speed boost
-# to most clients since they can close their connection immediately.
+# TODO: allow keepalive
 module Unicorn::HttpResponse
 
   # Every standard HTTP code mapped to the appropriate message.
@@ -25,41 +18,28 @@ module Unicorn::HttpResponse
     hash[code] = "#{code} #{msg}"
     hash
   }
-
-  # Rack does not set/require a Date: header.  We always override the
-  # Connection: and Date: headers no matter what (if anything) our
-  # Rack application sent us.
-  SKIP = { 'connection' => true, 'date' => true, 'status' => true }
+  CRLF = "\r\n"
 
   # writes the rack_response to socket as an HTTP response
-  def self.write(socket, rack_response, have_header = true)
+  def http_response_write(socket, rack_response)
     status, headers, body = rack_response
+    status = CODES[status.to_i] || status
 
-    if have_header
-      status = CODES[status.to_i] || status
-      out = []
-
-      # Don't bother enforcing duplicate supression, it's a Hash most of
-      # the time anyways so just hope our app knows what it's doing
+    if headers
+      buf = "HTTP/1.1 #{status}\r\n" \
+            "Date: #{Time.now.httpdate}\r\n" \
+            "Status: #{status}\r\n" \
+            "Connection: close\r\n"
       headers.each do |key, value|
-        next if SKIP.include?(key.downcase)
+        next if %r{\A(?:Date\z|Status\z|Connection\z)}i =~ key
         if value =~ /\n/
           # avoiding blank, key-only cookies with /\n+/
-          out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+          buf << value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }.join('')
         else
-          out << "#{key}: #{value}\r\n"
+          buf << "#{key}: #{value}\r\n"
         end
       end
-
-      # Rack should enforce Content-Length or chunked transfer encoding,
-      # so don't worry or care about them.
-      # Date is required by HTTP/1.1 as long as our clock can be trusted.
-      # Some broken clients require a "Status" header so we accomodate them
-      socket.write("HTTP/1.1 #{status}\r\n" \
-                   "Date: #{Time.now.httpdate}\r\n" \
-                   "Status: #{status}\r\n" \
-                   "Connection: close\r\n" \
-                   "#{out.join('')}\r\n")
+      socket.write(buf << CRLF)
     end
 
     body.each { |chunk| socket.write(chunk) }
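
One detail worth illustrating: Rack applications may join repeated
header values with "\n" (a Rack 1.x convention), and the loop above
splits them back into one header line per value while dropping any
Date, Status or Connection headers the app supplied.  A small sketch
of the transformation:

    headers = { "Set-Cookie" => "a=1\nb=2", "Date" => "ignored" }
    # http_response_write(socket, [ 200, headers, [] ]) emits:
    #
    #   HTTP/1.1 200 OK
    #   Date: <generated via Time.now.httpdate>
    #   Status: 200 OK
    #   Connection: close
    #   Set-Cookie: a=1
    #   Set-Cookie: b=2
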
diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb
new file mode 100644
index 0000000..69b7cc8
--- /dev/null
+++ b/lib/unicorn/http_server.rb
@@ -0,0 +1,695 @@
+# -*- encoding: binary -*-
+
+# This is the process manager of Unicorn. This manages worker
+# processes which in turn handle the I/O and application dispatch.
+# Listener sockets are started in the master process and shared with
+# forked worker children.
+class Unicorn::HttpServer
+  attr_accessor :app, :request, :timeout, :worker_processes,
+                :before_fork, :after_fork, :before_exec,
+                :listener_opts, :preload_app,
+                :reexec_pid, :orig_app, :init_listeners,
+                :master_pid, :config, :ready_pipe, :user
+  attr_reader :pid, :logger
+
+  # :stopdoc:
+  include Unicorn::SocketHelper
+  include Unicorn::HttpResponse
+
+  # backwards compatibility with 1.x
+  Worker = Unicorn::Worker
+
+  # prevents IO objects in here from being GC-ed
+  IO_PURGATORY = []
+
+  # all bound listener sockets
+  LISTENERS = []
+
+  # This hash maps PIDs to Workers
+  WORKERS = {}
+
+  # We use SELF_PIPE differently in the master and worker processes:
+  #
+  # * The master process never closes or reinitializes this once
+  # initialized.  Signal handlers in the master process will write to
+  # it to wake up the master from IO.select in exactly the same manner
+  # djb describes in http://cr.yp.to/docs/selfpipe.html
+  #
+  # * The workers immediately close the pipe they inherit from the
+  # master and replace it with a new pipe after forking.  This new
+  # pipe is also used to wakeup from IO.select from inside (worker)
+  # signal handlers.  However, workers *close* the pipe descriptors in
+  # the signal handlers to raise EBADF in IO.select instead of writing
+  # like we do in the master.  We cannot easily use the reader set for
+  # IO.select because LISTENERS is already that set, and it's extra
+  # work (and cycles) to distinguish the pipe FD from the reader set
+  # once IO.select returns.  So we're lazy and just close the pipe when
+  # a (rare) signal arrives in the worker and reinitialize the pipe later.
+  SELF_PIPE = []
+
+  # signal queue used for self-piping
+  SIG_QUEUE = []
+
+  # list of signals we care about and trap in master.
+  QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
+
+  # :startdoc:
+  # We populate this at startup so we can figure out how to reexecute
+  # and upgrade the currently running instance of Unicorn.
+  # This Hash is considered a stable interface and changing its contents
+  # will allow you to switch between different installations of Unicorn
+  # or even different installations of the same application without
+  # downtime.  Keys of this constant Hash are described as follows:
+  #
+  # * 0 - the path to the unicorn/unicorn_rails executable
+  # * :argv - a deep copy of the ARGV array the executable originally saw
+  # * :cwd - the working directory of the application; this is where
+  # you originally started Unicorn.
+  #
+  # To change your unicorn executable to a different path without downtime,
+  # you can set the following in your Unicorn config file, HUP and then
+  # continue with the traditional USR2 + QUIT upgrade steps:
+  #
+  #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
+  START_CTX = {
+    :argv => ARGV.map { |arg| arg.dup },
+    :cwd => lambda {
+        # favor ENV['PWD'] since it is (usually) symlink aware for
+        # Capistrano and like systems
+        begin
+          a = File.stat(pwd = ENV['PWD'])
+          b = File.stat(Dir.pwd)
+          a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
+        rescue
+          Dir.pwd
+        end
+      }.call,
+    0 => $0.dup,
+  }
+
+  # Creates a working server on host:port (strange things happen if
+  # port isn't a Number).  Use HttpServer#start to start the server and
+  # HttpServer#join to join the thread that's processing
+  # incoming requests on the socket.
+  def initialize(app, options = {})
+    @app = app
+    @request = Unicorn::HttpRequest.new
+    self.reexec_pid = 0
+    options = options.dup
+    self.ready_pipe = options.delete(:ready_pipe)
+    self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+    options[:use_defaults] = true
+    self.config = Unicorn::Configurator.new(options)
+    self.listener_opts = {}
+
+    # we try inheriting listeners first, so we bind them later.
+    # we don't write the pid file until we've bound listeners in case
+    # unicorn was started twice by mistake.  Even though our #pid= method
+    # checks for stale/existing pid files, race conditions are still
+    # possible (and difficult/non-portable to avoid) and can be likely
+    # to clobber the pid if the second start was in quick succession
+    # after the first, so we rely on the listener binding to fail in
+    # that case.  Some tests (in and outside of this source tree) and
+    # monitoring tools may also rely on pid files existing before we
+    # attempt to connect to the listener(s)
+    config.commit!(self, :skip => [:listeners, :pid])
+    self.orig_app = app
+  end
+
+  # Runs the thing.  Returns self so you can run join on it
+  def start
+    BasicSocket.do_not_reverse_lookup = true
+
+    # inherit sockets from parents, they need to be plain Socket objects
+    # before they become Kgio::UNIXServer or Kgio::TCPServer
+    inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
+      io = Socket.for_fd(fd.to_i)
+      set_server_sockopt(io, listener_opts[sock_name(io)])
+      IO_PURGATORY << io
+      logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
+      server_cast(io)
+    end
+
+    config_listeners = config[:listeners].dup
+    LISTENERS.replace(inherited)
+
+    # we start out with generic Socket objects that get cast to either
+    # Kgio::TCPServer or Kgio::UNIXServer objects; but since the Socket
+    # objects share the same OS-level file descriptor as the higher-level
+    # *Server objects; we need to prevent Socket objects from being
+    # garbage-collected
+    config_listeners -= listener_names
+    if config_listeners.empty? && LISTENERS.empty?
+      config_listeners << Unicorn::Const::DEFAULT_LISTEN
+      init_listeners << Unicorn::Const::DEFAULT_LISTEN
+      START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
+    end
+    config_listeners.each { |addr| listen(addr) }
+    raise ArgumentError, "no listeners" if LISTENERS.empty?
+
+    # this pipe is used to wake us up from select(2) in #join when signals
+    # are trapped.  See trap_deferred.
+    init_self_pipe!
+
+    # setup signal handlers before writing pid file in case people get
+    # trigger happy and send signals as soon as the pid file exists.
+    # Note that signals don't actually get handled until the #join method
+    QUEUE_SIGS.each { |sig| trap(sig) { SIG_QUEUE << sig; awaken_master } }
+    trap(:CHLD) { awaken_master }
+    self.pid = config[:pid]
+
+    self.master_pid = $$
+    build_app! if preload_app
+    maintain_worker_count
+    self
+  end
+
+  # replaces current listener set with +listeners+.  This will
+  # close the socket if it will not exist in the new listener set
+  def listeners=(listeners)
+    cur_names, dead_names = [], []
+    listener_names.each do |name|
+      if ?/ == name[0]
+        # mark unlinked sockets as dead so we can rebind them
+        (File.socket?(name) ? cur_names : dead_names) << name
+      else
+        cur_names << name
+      end
+    end
+    set_names = listener_names(listeners)
+    dead_names.concat(cur_names - set_names).uniq!
+
+    LISTENERS.delete_if do |io|
+      if dead_names.include?(sock_name(io))
+        IO_PURGATORY.delete_if do |pio|
+          pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
+        end
+        (io.close rescue nil).nil? # true
+      else
+        set_server_sockopt(io, listener_opts[sock_name(io)])
+        false
+      end
+    end
+
+    (set_names - cur_names).each { |addr| listen(addr) }
+  end
+
+  def stdout_path=(path); redirect_io($stdout, path); end
+  def stderr_path=(path); redirect_io($stderr, path); end
+
+  def logger=(obj)
+    Unicorn::HttpRequest::DEFAULTS["rack.logger"] = @logger = obj
+  end
+
+  # sets the path for the PID file of the master process
+  def pid=(path)
+    if path
+      if x = valid_pid?(path)
+        return path if pid && path == pid && x == $$
+        if x == reexec_pid && pid =~ /\.oldbin\z/
+          logger.warn("will not set pid=#{path} while reexec-ed "\
+                      "child is running PID:#{x}")
+          return
+        end
+        raise ArgumentError, "Already running on PID:#{x} " \
+                             "(or pid=#{path} is stale)"
+      end
+    end
+    unlink_pid_safe(pid) if pid
+
+    if path
+      fp = begin
+        tmp = "#{File.dirname(path)}/#{rand}.#$$"
+        File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
+      rescue Errno::EEXIST
+        retry
+      end
+      fp.syswrite("#$$\n")
+      File.rename(fp.path, path)
+      fp.close
+    end
+    @pid = path
+  end
+
+  # add a given address to the +listeners+ set, idempotently
+  # Allows workers to add a private, per-process listener via the
+  # after_fork hook.  Very useful for debugging and testing.
+  # +:tries+ may be specified as an option for the number of times
+  # to retry, and +:delay+ may be specified as the time in seconds
+  # to delay between retries.
+  # A negative value for +:tries+ indicates the listen will be
+  # retried indefinitely; this is useful when workers belonging to
+  # different masters are spawned during a transparent upgrade.
+  def listen(address, opt = {}.merge(listener_opts[address] || {}))
+    address = config.expand_addr(address)
+    return if String === address && listener_names.include?(address)
+
+    delay = opt[:delay] || 0.5
+    tries = opt[:tries] || 5
+    begin
+      io = bind_listen(address, opt)
+      unless Kgio::TCPServer === io || Kgio::UNIXServer === io
+        IO_PURGATORY << io
+        io = server_cast(io)
+      end
+      logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
+      LISTENERS << io
+      io
+    rescue Errno::EADDRINUSE => err
+      logger.error "adding listener failed addr=#{address} (in use)"
+      raise err if tries == 0
+      tries -= 1
+      logger.error "retrying in #{delay} seconds " \
+                   "(#{tries < 0 ? 'infinite' : tries} tries left)"
+      sleep(delay)
+      retry
+    rescue => err
+      logger.fatal "error adding listener addr=#{address}"
+      raise err
+    end
+  end
+
+  # monitors children and receives signals forever
+  # (or until a termination signal is sent).  This handles signals
+  # one at a time, and we'll happily drop signals in case somebody
+  # is signalling us too often.
+  def join
+    respawn = true
+    last_check = Time.now
+
+    proc_name 'master'
+    logger.info "master process ready" # test_exec.rb relies on this message
+    if ready_pipe
+      ready_pipe.syswrite($$.to_s)
+      ready_pipe.close rescue nil
+      self.ready_pipe = nil
+    end
+    begin
+      reap_all_workers
+      case SIG_QUEUE.shift
+      when nil
+        # avoid murdering workers after our master process (or the
+        # machine) comes out of suspend/hibernation
+        if (last_check + @timeout) >= (last_check = Time.now)
+          sleep_time = murder_lazy_workers
+        else
+          # wait for workers to wakeup on suspend
+          sleep_time = @timeout/2.0 + 1
+        end
+        maintain_worker_count if respawn
+        master_sleep(sleep_time)
+      when :QUIT # graceful shutdown
+        break
+      when :TERM, :INT # immediate shutdown
+        stop(false)
+        break
+      when :USR1 # rotate logs
+        logger.info "master reopening logs..."
+        Unicorn::Util.reopen_logs
+        logger.info "master done reopening logs"
+        kill_each_worker(:USR1)
+      when :USR2 # exec binary, stay alive in case something went wrong
+        reexec
+      when :WINCH
+        if Process.ppid == 1 || Process.getpgrp != $$
+          respawn = false
+          logger.info "gracefully stopping all workers"
+          kill_each_worker(:QUIT)
+          self.worker_processes = 0
+        else
+          logger.info "SIGWINCH ignored because we're not daemonized"
+        end
+      when :TTIN
+        respawn = true
+        self.worker_processes += 1
+      when :TTOU
+        self.worker_processes -= 1 if self.worker_processes > 0
+      when :HUP
+        respawn = true
+        if config.config_file
+          load_config!
+        else # exec binary and exit if there's no config file
+          logger.info "config_file not present, reexecuting binary"
+          reexec
+        end
+      end
+    rescue Errno::EINTR
+    rescue => e
+      logger.error "Unhandled master loop exception #{e.inspect}."
+      logger.error e.backtrace.join("\n")
+    end while true
+    stop # gracefully shutdown all workers on our way out
+    logger.info "master complete"
+    unlink_pid_safe(pid) if pid
+  end
+
+  # Terminates all workers, but does not exit the master process
+  def stop(graceful = true)
+    self.listeners = []
+    limit = Time.now + timeout
+    until WORKERS.empty? || Time.now > limit
+      kill_each_worker(graceful ? :QUIT : :TERM)
+      sleep(0.1)
+      reap_all_workers
+    end
+    kill_each_worker(:KILL)
+  end
+
+  private
+
+  # wait for a signal handler to wake us up and then consume the pipe
+  def master_sleep(sec)
+    IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
+    SELF_PIPE[0].kgio_tryread(11)
+  end
+
+  def awaken_master
+    SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
+  end
+
+  # reaps all unreaped workers
+  def reap_all_workers
+    begin
+      wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+      wpid or return
+      if reexec_pid == wpid
+        logger.error "reaped #{status.inspect} exec()-ed"
+        self.reexec_pid = 0
+        self.pid = pid.chomp('.oldbin') if pid
+        proc_name 'master'
+      else
+        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+        m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
+        status.success? ? logger.info(m) : logger.error(m)
+      end
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  # reexecutes the START_CTX with a new binary
+  def reexec
+    if reexec_pid > 0
+      begin
+        Process.kill(0, reexec_pid)
+        logger.error "reexec-ed child already running PID:#{reexec_pid}"
+        return
+      rescue Errno::ESRCH
+        self.reexec_pid = 0
+      end
+    end
+
+    if pid
+      old_pid = "#{pid}.oldbin"
+      prev_pid = pid.dup
+      begin
+        self.pid = old_pid  # clear the path for a new pid file
+      rescue ArgumentError
+        logger.error "old PID:#{valid_pid?(old_pid)} running with " \
+                     "existing pid=#{old_pid}, refusing reexec"
+        return
+      rescue => e
+        logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
+        return
+      end
+    end
+
+    self.reexec_pid = fork do
+      listener_fds = LISTENERS.map { |sock| sock.fileno }
+      ENV['UNICORN_FD'] = listener_fds.join(',')
+      Dir.chdir(START_CTX[:cwd])
+      cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
+
+      # avoid leaking FDs we don't know about, but let before_exec
+      # unset FD_CLOEXEC, if anything else in the app eventually
+      # relies on FD inheritance.
+      (3..1024).each do |io|
+        next if listener_fds.include?(io)
+        io = IO.for_fd(io) rescue next
+        IO_PURGATORY << io
+        io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+      end
+      logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
+      before_exec.call(self)
+      exec(*cmd)
+    end
+    proc_name 'master (old)'
+  end
+
+  # forcibly terminate all workers that haven't checked in within timeout
+  # seconds.  The timeout is implemented using an unlinked File
+  # shared between the parent process and each worker.  The worker
+  # runs File#chmod to modify the ctime of the File.  If the ctime
+  # is stale for >timeout seconds, then we'll kill the corresponding
+  # worker.
+  def murder_lazy_workers
+    t = @timeout
+    next_sleep = 1
+    WORKERS.dup.each_pair do |wpid, worker|
+      stat = worker.tmp.stat
+      # skip workers that disable fchmod or have never fchmod-ed
+      stat.mode == 0100600 and next
+      diff = Time.now - stat.ctime
+      if diff <= t
+        tmp = t - diff
+        next_sleep < tmp and next_sleep = tmp
+        next
+      end
+      logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
+                   "(#{diff}s > #{t}s), killing"
+      kill_worker(:KILL, wpid) # take no prisoners for timeout violations
+    end
+    next_sleep
+  end
+
+  def spawn_missing_workers
+    (0...worker_processes).each do |worker_nr|
+      WORKERS.values.include?(worker_nr) and next
+      worker = Worker.new(worker_nr, Unicorn::TmpIO.new)
+      before_fork.call(self, worker)
+      WORKERS[fork {
+        ready_pipe.close if ready_pipe
+        self.ready_pipe = nil
+        worker_loop(worker)
+      }] = worker
+    end
+  end
+
+  def maintain_worker_count
+    (off = WORKERS.size - worker_processes) == 0 and return
+    off < 0 and return spawn_missing_workers
+    WORKERS.dup.each_pair { |wpid,w|
+      w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
+    }
+  end
+
+  # if we get any error, try to write something back to the client
+  # assuming we haven't closed the socket, but don't get hung up
+  # if the socket is already closed or broken.  We'll always ensure
+  # the socket is closed at the end of this function
+  def handle_error(client, e)
+    msg = case e
+    when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+      Unicorn::Const::ERROR_500_RESPONSE
+    when Unicorn::HttpParserError # try to tell the client they're bad
+      Unicorn::Const::ERROR_400_RESPONSE
+    else
+      logger.error "Read error: #{e.inspect}"
+      logger.error e.backtrace.join("\n")
+      Unicorn::Const::ERROR_500_RESPONSE
+    end
+    client.kgio_trywrite(msg)
+    client.close
+    rescue
+      nil
+  end
+
+  # once a client is accepted, it is processed in its entirety here
+  # in 3 easy steps: read request, call app, write app response
+  def process_client(client)
+    r = @app.call(env = @request.read(client))
+
+    if 100 == r[0].to_i
+      client.write(Unicorn::Const::EXPECT_100_RESPONSE)
+      env.delete(Unicorn::Const::HTTP_EXPECT)
+      r = @app.call(env)
+    end
+    # r may be frozen or const, so don't modify it
+    @request.headers? or r = [ r[0], nil, r[2] ]
+    http_response_write(client, r)
+  rescue => e
+    handle_error(client, e)
+  end
+
+  # gets rid of stuff the worker has no business keeping track of
+  # to free some resources and drops all sig handlers.
+  # traps for USR1, USR2, and HUP may be set in the after_fork Proc
+  # by the user.
+  def init_worker_process(worker)
+    QUEUE_SIGS.each { |sig| trap(sig, nil) }
+    trap(:CHLD, 'DEFAULT')
+    SIG_QUEUE.clear
+    proc_name "worker[#{worker.nr}]"
+    START_CTX.clear
+    init_self_pipe!
+    WORKERS.values.each { |other| other.tmp.close rescue nil }
+    WORKERS.clear
+    LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    after_fork.call(self, worker) # can drop perms
+    worker.user(*user) if user.kind_of?(Array) && ! worker.switched
+    self.timeout /= 2.0 # halve it for select()
+    build_app! unless preload_app
+  end
+
+  def reopen_worker_logs(worker_nr)
+    logger.info "worker=#{worker_nr} reopening logs..."
+    Unicorn::Util.reopen_logs
+    logger.info "worker=#{worker_nr} done reopening logs"
+    init_self_pipe!
+  end
+
+  # runs inside each forked worker; this sits around and waits
+  # for connections and doesn't die until the parent dies (or it is
+  # given an INT, QUIT, or TERM signal)
+  def worker_loop(worker)
+    ppid = master_pid
+    init_worker_process(worker)
+    nr = 0 # this becomes negative if we need to reopen logs
+    alive = worker.tmp # tmp is our lifeline to the master process
+    ready = LISTENERS
+
+    # closing anything we IO.select on will raise EBADF
+    trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
+    trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
+    [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+    logger.info "worker=#{worker.nr} ready"
+    m = 0
+
+    begin
+      nr < 0 and reopen_worker_logs(worker.nr)
+      nr = 0
+
+      # we're a goner in timeout seconds anyways if alive.chmod
+      # breaks, so don't trap the exception.  Using fchmod() since
+      # futimes() is not available in base Ruby and I very strongly
+      # prefer temporary files to be unlinked for security,
+      # performance and reliability reasons, so utime is out.  No-op
+      # changes with chmod don't update ctime on all filesystems, so
+      # we change our counter each and every time (after process_client
+      # and before IO.select).
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      ready.each do |sock|
+        if client = sock.kgio_tryaccept
+          process_client(client)
+          nr += 1
+          alive.chmod(m = 0 == m ? 1 : 0)
+        end
+        break if nr < 0
+      end
+
+      # make the following bet: if we accepted clients this round,
+      # we're probably reasonably busy, so avoid calling select()
+      # and do a speculative non-blocking accept() on ready listeners
+      # before we sleep again in select().
+      redo unless nr == 0 # (nr < 0) => reopen logs
+
+      ppid == Process.ppid or return
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      # timeout used so we can detect parent death:
+      ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) and ready = ret[0]
+    rescue Errno::EINTR
+      ready = LISTENERS
+    rescue Errno::EBADF
+      nr < 0 or return
+    rescue => e
+      if alive
+        logger.error "Unhandled listen loop exception #{e.inspect}."
+        logger.error e.backtrace.join("\n")
+      end
+    end while alive
+  end
+
+  # delivers a signal to a worker and fails gracefully if the worker
+  # is no longer running.
+  def kill_worker(signal, wpid)
+    Process.kill(signal, wpid)
+    rescue Errno::ESRCH
+      worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+  end
+
+  # delivers a signal to each worker
+  def kill_each_worker(signal)
+    WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
+  end
+
+  # unlinks a PID file at given +path+ if it contains the current PID
+  # still potentially racy without locking the directory (which is
+  # non-portable and may interact badly with other programs), but the
+  # window for hitting the race condition is small
+  def unlink_pid_safe(path)
+    (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
+  end
+
+  # returns a PID if a given path contains a non-stale PID file,
+  # nil otherwise.
+  def valid_pid?(path)
+    wpid = File.read(path).to_i
+    wpid <= 0 and return
+    Process.kill(0, wpid)
+    wpid
+    rescue Errno::ESRCH, Errno::ENOENT
+      # don't unlink stale pid files, racy without non-portable locking...
+  end
+
+  def load_config!
+    loaded_app = app
+    logger.info "reloading config_file=#{config.config_file}"
+    config[:listeners].replace(init_listeners)
+    config.reload
+    config.commit!(self)
+    kill_each_worker(:QUIT)
+    Unicorn::Util.reopen_logs
+    self.app = orig_app
+    build_app! if preload_app
+    logger.info "done reloading config_file=#{config.config_file}"
+  rescue StandardError, LoadError, SyntaxError => e
+    logger.error "error reloading config_file=#{config.config_file}: " \
+                 "#{e.class} #{e.message} #{e.backtrace}"
+    self.app = loaded_app
+  end
+
+  # returns an array of string names for the given listener array
+  def listener_names(listeners = LISTENERS)
+    listeners.map { |io| sock_name(io) }
+  end
+
+  def build_app!
+    if app.respond_to?(:arity) && app.arity == 0
+      if defined?(Gem) && Gem.respond_to?(:refresh)
+        logger.info "Refreshing Gem list"
+        Gem.refresh
+      end
+      self.app = app.call
+    end
+  end
+
+  def proc_name(tag)
+    $0 = ([ File.basename(START_CTX[0]), tag
+          ]).concat(START_CTX[:argv]).join(' ')
+  end
+
+  def redirect_io(io, path)
+    File.open(path, 'ab') { |fp| io.reopen(fp) } if path
+    io.sync = true
+  end
+
+  def init_self_pipe!
+    SELF_PIPE.each { |io| io.close rescue nil }
+    SELF_PIPE.replace(Kgio::Pipe.new)
+    SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  end
+end
+
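The join loop above is driven entirely by signals; from another process
a running master can be managed with plain Process.kill.  A sketch of
the documented signals (the pid file path is illustrative):

    master_pid = File.read("/path/to/unicorn.pid").to_i
    Process.kill(:TTIN, master_pid)  # spawn one more worker
    Process.kill(:TTOU, master_pid)  # retire one worker
    Process.kill(:USR1, master_pid)  # reopen logs in master and workers
    Process.kill(:USR2, master_pid)  # re-exec the running binary
    Process.kill(:QUIT, master_pid)  # graceful shutdown
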
diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb
index 0d415dd..662b603 100644
--- a/lib/unicorn/launcher.rb
+++ b/lib/unicorn/launcher.rb
@@ -20,6 +20,7 @@ module Unicorn::Launcher
   #     to pickup code changes if the original deployment directory
   #     is a symlink or otherwise got replaced.
   def self.daemonize!(options)
+    cfg = Unicorn::Configurator
     $stdin.reopen("/dev/null")
 
     # We only start a new process group if we're not being reexecuted
@@ -52,9 +53,9 @@ module Unicorn::Launcher
       end
     end
     # $stderr/$stderr can/will be redirected separately in the Unicorn config
-    Unicorn::Configurator::DEFAULTS[:stderr_path] ||= "/dev/null"
-    Unicorn::Configurator::DEFAULTS[:stdout_path] ||= "/dev/null"
-    Unicorn::Configurator::RACKUP[:daemonized] = true
+    cfg::DEFAULTS[:stderr_path] ||= "/dev/null"
+    cfg::DEFAULTS[:stdout_path] ||= "/dev/null"
+    cfg::RACKUP[:daemonized] = true
   end
 
 end
diff --git a/lib/unicorn/preread_input.rb b/lib/unicorn/preread_input.rb
new file mode 100644
index 0000000..ec83cb2
--- /dev/null
+++ b/lib/unicorn/preread_input.rb
@@ -0,0 +1,30 @@
+# -*- encoding: binary -*-
+
+module Unicorn
+# This middleware is used to ensure input is buffered to memory
+# or disk (depending on size) by entirely consuming it (from
+# TeeInput) before the application is dispatched.
+#
+# Usage (in config.ru):
+#
+#     require 'unicorn/preread_input'
+#     if defined?(Unicorn)
+#       use Unicorn::PrereadInput
+#     end
+#     run YourApp.new
+class PrereadInput
+  def initialize(app)
+    @app = app
+  end
+
+  def call(env)
+    input = env["rack.input"]
+    if buf = input.read(16384) # nil if the request body is empty
+      true while input.read(16384, buf) # drain the body, reusing buf
+      input.rewind
+    end
+    @app.call(env)
+  end
+end
+end
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index 1d03eab..7364937 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -126,12 +126,12 @@ module Unicorn
         end
         old_umask = File.umask(opt[:umask] || 0)
         begin
-          UNIXServer.new(address)
+          Kgio::UNIXServer.new(address)
         ensure
           File.umask(old_umask)
         end
       elsif address =~ /^(\d+\.\d+\.\d+\.\d+):(\d+)$/
-        TCPServer.new($1, $2.to_i)
+        Kgio::TCPServer.new($1, $2.to_i)
       else
         raise ArgumentError, "Don't know how to bind: #{address}"
       end
@@ -166,9 +166,9 @@ module Unicorn
     def server_cast(sock)
       begin
         Socket.unpack_sockaddr_in(sock.getsockname)
-        TCPServer.for_fd(sock.fileno)
+        Kgio::TCPServer.for_fd(sock.fileno)
       rescue ArgumentError
-        UNIXServer.for_fd(sock.fileno)
+        Kgio::UNIXServer.for_fd(sock.fileno)
       end
     end
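
The kgio server classes returned by bind_listen and server_cast accept
connections without raising on EAGAIN, which is what lets the worker
loop use kgio_tryaccept instead of rescuing Errno::EAGAIN around
accept_nonblock.  A minimal sketch:

    require 'kgio'

    srv = Kgio::TCPServer.new("127.0.0.1", 0) # port 0: kernel picks one
    client = srv.kgio_tryaccept # => nil when no client is waiting
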
 
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
index 540cfe0..a3e01d2 100644
--- a/lib/unicorn/tee_input.rb
+++ b/lib/unicorn/tee_input.rb
@@ -11,8 +11,8 @@
 #
 # When processing uploads, Unicorn exposes a TeeInput object under
 # "rack.input" of the Rack environment.
-class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
-                                     :buf, :len, :tmp, :buf2)
+class Unicorn::TeeInput
+  attr_accessor :tmp, :socket, :parser, :env, :buf, :len, :buf2
 
   # The maximum size (in +bytes+) to buffer in memory before
   # resorting to a temporary file.  Default is 112 kilobytes.
@@ -25,16 +25,19 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
 
   # Initializes a new TeeInput object.  You normally do not have to call
   # this unless you are writing an HTTP server.
-  def initialize(*args)
-    super(*args)
-    self.len = parser.content_length
-    self.tmp = len && len < @@client_body_buffer_size ?
-               StringIO.new("") : Unicorn::Util.tmpio
-    self.buf2 = ""
-    if buf.size > 0
-      parser.filter_body(buf2, buf) and finalize_input
-      tmp.write(buf2)
-      tmp.rewind
+  def initialize(socket, request)
+    @socket = socket
+    @parser = request
+    @buf = request.buf
+    @env = request.env
+    @len = request.content_length
+    @tmp = @len && @len < @@client_body_buffer_size ?
+           StringIO.new("") : Unicorn::TmpIO.new
+    @buf2 = ""
+    if @buf.size > 0
+      @parser.filter_body(@buf2, @buf) and finalize_input
+      @tmp.write(@buf2)
+      @tmp.rewind
     end
   end
 
@@ -55,16 +58,16 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # earlier.  Most applications should only need to call +read+ with a
   # specified +length+ in a loop until it returns +nil+.
   def size
-    len and return len
+    @len and return @len
 
     if socket
-      pos = tmp.pos
-      while tee(@@io_chunk_size, buf2)
+      pos = @tmp.pos
+      while tee(@@io_chunk_size, @buf2)
       end
-      tmp.seek(pos)
+      @tmp.seek(pos)
     end
 
-    self.len = tmp.size
+    @len = @tmp.size
   end
 
   # :call-seq:
@@ -87,22 +90,22 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # any data and only block when nothing is available (providing
   # IO#readpartial semantics).
   def read(*args)
-    socket or return tmp.read(*args)
+    @socket or return @tmp.read(*args)
 
     length = args.shift
     if nil == length
-      rv = tmp.read || ""
-      while tee(@@io_chunk_size, buf2)
-        rv << buf2
+      rv = @tmp.read || ""
+      while tee(@@io_chunk_size, @buf2)
+        rv << @buf2
       end
       rv
     else
       rv = args.shift || ""
-      diff = tmp.size - tmp.pos
+      diff = @tmp.size - @tmp.pos
       if 0 == diff
         ensure_length(tee(length, rv), length)
       else
-        ensure_length(tmp.read(diff > length ? length : diff, rv), length)
+        ensure_length(@tmp.read(diff > length ? length : diff, rv), length)
       end
     end
   end
@@ -117,27 +120,27 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # This takes zero arguments for strict Rack::Lint compatibility,
   # unlike IO#gets.
   def gets
-    socket or return tmp.gets
+    @socket or return @tmp.gets
     sep = $/ or return read
 
-    orig_size = tmp.size
-    if tmp.pos == orig_size
-      tee(@@io_chunk_size, buf2) or return nil
-      tmp.seek(orig_size)
+    orig_size = @tmp.size
+    if @tmp.pos == orig_size
+      tee(@@io_chunk_size, @buf2) or return nil
+      @tmp.seek(orig_size)
     end
 
     sep_size = Rack::Utils.bytesize(sep)
-    line = tmp.gets # cannot be nil here since size > pos
+    line = @tmp.gets # cannot be nil here since size > pos
     sep == line[-sep_size, sep_size] and return line
 
-    # unlikely, if we got here, then tmp is at EOF
+    # unlikely, if we got here, then @tmp is at EOF
     begin
-      orig_size = tmp.pos
-      tee(@@io_chunk_size, buf2) or break
-      tmp.seek(orig_size)
-      line << tmp.gets
+      orig_size = @tmp.pos
+      tee(@@io_chunk_size, @buf2) or break
+      @tmp.seek(orig_size)
+      line << @tmp.gets
       sep == line[-sep_size, sep_size] and return line
-      # tmp is at EOF again here, retry the loop
+      # @tmp is at EOF again here, retry the loop
     end while true
 
     line
@@ -163,51 +166,32 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # the offset (zero) of the +ios+ pointer.  Subsequent reads will
   # start from the beginning of the previously-buffered input.
   def rewind
-    tmp.rewind # Rack does not specify what the return value is here
+    @tmp.rewind # Rack does not specify what the return value is here
   end
 
 private
 
-  def client_error(e)
-    case e
-    when EOFError
-      # in case client only did a premature shutdown(SHUT_WR)
-      # we do support clients that shutdown(SHUT_WR) after the
-      # _entire_ request has been sent, and those will not have
-      # raised EOFError on us.
-      socket.close if socket
-      raise Unicorn::ClientShutdown, "bytes_read=#{tmp.size}", []
-    when Unicorn::HttpParserError
-      e.set_backtrace([])
-    end
-    raise e
-  end
-
   # tees off a +length+ chunk of data from the input into the IO
   # backing store as well as returning it.  +dst+ must be specified.
   # returns nil if reading from the input returns nil
   def tee(length, dst)
-    unless parser.body_eof?
-      if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
-        tmp.write(dst)
-        tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+    unless @parser.body_eof?
+      @socket.kgio_read(length, @buf) or eof!
+      unless @parser.filter_body(dst, @buf)
+        @tmp.write(dst)
+        @tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
         return dst
       end
     end
     finalize_input
-    rescue => e
-      client_error(e)
   end
 
   def finalize_input
-    while parser.trailers(req, buf).nil?
-      # Don't worry about raising ClientShutdown here on EOFError, tee()
-      # will catch EOFError when app is processing it, otherwise in
-      # initialize we never get any chance to enter the app so the
-      # EOFError will just get trapped by Unicorn and not the Rack app
-      buf << socket.readpartial(@@io_chunk_size)
+    while @parser.trailers(@env, @buf).nil?
+      r = @socket.kgio_read(@@io_chunk_size) or eof!
+      @buf << r
     end
-    self.socket = nil
+    @socket = nil
   end
 
   # tee()s into +dst+ until it is of +length+ bytes (or until
@@ -220,13 +204,21 @@ private
     # len is nil for chunked bodies, so we can't ensure length for those
     # since they could be streaming bidirectionally and we don't want to
     # block the caller in that case.
-    return dst if dst.nil? || len.nil?
+    return dst if dst.nil? || @len.nil?
 
-    while dst.size < length && tee(length - dst.size, buf2)
-      dst << buf2
+    while dst.size < length && tee(length - dst.size, @buf2)
+      dst << @buf2
     end
 
     dst
   end
 
+  def eof!
+    # in case client only did a premature shutdown(SHUT_WR)
+    # we do support clients that shutdown(SHUT_WR) after the
+    # _entire_ request has been sent, and those will not have
+    # raised EOFError on us.
+    @socket.close if @socket
+    raise Unicorn::ClientShutdown, "bytes_read=#{@tmp.size}", []
+  end
 end
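
From the application side the buffering is invisible; env["rack.input"]
can be read in a loop and rewound as documented above.  A sketch of the
recommended pattern:

    require 'digest/sha1'

    # inside a Rack application dispatched by Unicorn:
    input = env["rack.input"] # Unicorn::TeeInput for non-empty bodies
    sha1 = Digest::SHA1.new
    buf = ""
    sha1.update(buf) while input.read(16384, buf) # returns nil at EOF
    input.rewind # later consumers start from the beginning again
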
diff --git a/lib/unicorn/tmpio.rb b/lib/unicorn/tmpio.rb
new file mode 100644
index 0000000..a3c530d
--- /dev/null
+++ b/lib/unicorn/tmpio.rb
@@ -0,0 +1,29 @@
+# -*- encoding: binary -*-
+# :stopdoc:
+require 'tmpdir'
+
+# some versions of Ruby had a broken Tempfile which didn't work
+# well with unlinked files.  This one is much shorter, easier
+# to understand, and slightly faster.
+class Unicorn::TmpIO < File
+
+  # creates and returns a new File object.  The File is unlinked
+  # immediately, switched to binary mode, and userspace output
+  # buffering is disabled
+  def self.new
+    fp = begin
+      super("#{Dir::tmpdir}/#{rand}", RDWR|CREAT|EXCL, 0600)
+    rescue Errno::EEXIST
+      retry
+    end
+    unlink(fp.path)
+    fp.binmode
+    fp.sync = true
+    fp
+  end
+
+  # for easier env["rack.input"] compatibility with Rack <= 1.1
+  def size
+    stat.size
+  end
+end
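
Usage is the same as any File, except the path is already gone by the
time new returns:

    tmp = Unicorn::TmpIO.new # unlinked, binary, sync = true
    tmp.write "hello"
    tmp.size   # => 5, no flush needed since sync is enabled
    tmp.rewind
    tmp.read   # => "hello"
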
diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb
index e9dd57f..82329eb 100644
--- a/lib/unicorn/util.rb
+++ b/lib/unicorn/util.rb
@@ -1,101 +1,67 @@
 # -*- encoding: binary -*-
 
-require 'fcntl'
-require 'tmpdir'
-
-module Unicorn
-
-  class TmpIO < ::File
+module Unicorn::Util
+
+# :stopdoc:
+  def self.is_log?(fp)
+    append_flags = File::WRONLY | File::APPEND
+
+    ! fp.closed? &&
+      fp.sync &&
+      fp.path[0] == ?/ &&
+      (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+    rescue IOError, Errno::EBADF
+      false
+  end
 
-    # for easier env["rack.input"] compatibility
-    def size
-      # flush if sync
-      stat.size
+  def self.chown_logs(uid, gid)
+    ObjectSpace.each_object(File) do |fp|
+      fp.chown(uid, gid) if is_log?(fp)
     end
   end
-
-  module Util
-    class << self
-
-      def is_log?(fp)
-        append_flags = File::WRONLY | File::APPEND
-
-        ! fp.closed? &&
-          fp.sync &&
-          fp.path[0] == ?/ &&
-          (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
-        rescue IOError, Errno::EBADF
-          false
+# :startdoc:
+
+  # This reopens ALL logfiles in the process that have been rotated
+  # using logrotate(8) (without copytruncate) or similar tools.
+  # A +File+ object is considered for reopening if it is:
+  #   1) opened with the O_APPEND and O_WRONLY flags
+  #   2) opened with an absolute path (starts with "/")
+  #   3) the current open file handle does not match its original open path
+  #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
+  # Returns the number of files reopened
+  def self.reopen_logs
+    to_reopen = []
+    nr = 0
+    ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
+
+    to_reopen.each do |fp|
+      orig_st = begin
+        fp.stat
+      rescue IOError, Errno::EBADF
+        next
       end
 
-      def chown_logs(uid, gid)
-        ObjectSpace.each_object(File) do |fp|
-          fp.chown(uid, gid) if is_log?(fp)
-        end
+      begin
+        b = File.stat(fp.path)
+        next if orig_st.ino == b.ino && orig_st.dev == b.dev
+      rescue Errno::ENOENT
       end
 
-      # This reopens ALL logfiles in the process that have been rotated
-      # using logrotate(8) (without copytruncate) or similar tools.
-      # A +File+ object is considered for reopening if it is:
-      #   1) opened with the O_APPEND and O_WRONLY flags
-      #   2) opened with an absolute path (starts with "/")
-      #   3) the current open file handle does not match its original open path
-      #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
-      # Returns the number of files reopened
-      def reopen_logs
-        to_reopen = []
-        nr = 0
-        ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
-
-        to_reopen.each do |fp|
-          orig_st = begin
-            fp.stat
-          rescue IOError, Errno::EBADF
-            next
-          end
-
-          begin
-            b = File.stat(fp.path)
-            next if orig_st.ino == b.ino && orig_st.dev == b.dev
-          rescue Errno::ENOENT
-          end
-
-          begin
-            File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
-            fp.sync = true
-            new_st = fp.stat
-
-            # this should only happen in the master:
-            if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
-              fp.chown(orig_st.uid, orig_st.gid)
-            end
+      begin
+        File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
+        fp.sync = true
+        new_st = fp.stat
 
-            nr += 1
-          rescue IOError, Errno::EBADF
-            # not much we can do...
-          end
+        # this should only happen in the master:
+        if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
+          fp.chown(orig_st.uid, orig_st.gid)
         end
 
-        nr
+        nr += 1
+      rescue IOError, Errno::EBADF
+        # not much we can do...
       end
-
-      # creates and returns a new File object.  The File is unlinked
-      # immediately, switched to binary mode, and userspace output
-      # buffering is disabled
-      def tmpio
-        fp = begin
-          TmpIO.open("#{Dir::tmpdir}/#{rand}",
-                     File::RDWR|File::CREAT|File::EXCL, 0600)
-        rescue Errno::EEXIST
-          retry
-        end
-        File.unlink(fp.path)
-        fp.binmode
-        fp.sync = true
-        fp
-      end
-
     end
-
+    nr
   end
 end
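
To illustrate the four criteria above, a sketch of a handle that
qualifies for reopening (the path is an example only):

    logger = File.open("/var/log/unicorn.log", "a") # absolute path,
    logger.sync = true      # O_WRONLY|O_APPEND, and unbuffered
    # ... logrotate(8) renames it to /var/log/unicorn.log.1 ...
    Unicorn::Util.reopen_logs # => 1; logger now writes to a fresh
                              # file created at the original path
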
diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb
new file mode 100644
index 0000000..fd8d20e
--- /dev/null
+++ b/lib/unicorn/worker.rb
@@ -0,0 +1,40 @@
+# -*- encoding: binary -*-
+
+# This class and its members can be considered a stable interface
+# and will not change in a backwards-incompatible fashion between
+# releases of Unicorn.  You may need to access it in the
+# before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
+# for examples.
+class Unicorn::Worker < Struct.new(:nr, :tmp, :switched)
+
+  # worker objects may be compared to just plain numbers
+  def ==(other_nr)
+    self.nr == other_nr
+  end
+
+  # Changes the worker process to the specified +user+ and +group+.
+  # This is only intended to be called from within the worker
+  # process from the +after_fork+ hook, after any privileged
+  # functions have been run (e.g. to set per-worker CPU affinity,
+  # niceness, etc.)
+  #
+  # Any and all errors raised within this method will be propagated
+  # directly back to the caller (usually the +after_fork+ hook).
+  # These errors commonly include ArgumentError for specifying an
+  # invalid user/group and Errno::EPERM for insufficient privileges
+  def user(user, group = nil)
+    # we do not protect the caller; checking Process.euid == 0 is
+    # insufficient because modern systems have fine-grained
+    # capabilities.  Let the caller handle any and all errors.
+    uid = Etc.getpwnam(user).uid
+    gid = Etc.getgrnam(group).gid if group
+    Unicorn::Util.chown_logs(uid, gid)
+    tmp.chown(uid, gid)
+    if gid && Process.egid != gid
+      Process.initgroups(user, gid)
+      Process::GID.change_privilege(gid)
+    end
+    Process.euid != uid and Process::UID.change_privilege(uid)
+    self.switched = true
+  end
+end
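
A configuration sketch using the new class from the after_fork hook
(user and group names are placeholders):

    # in a unicorn config file:
    after_fork do |server, worker|
      # run privileged per-worker setup (CPU affinity, niceness, etc.)
      # before this call, since it drops privileges:
      worker.user("www-data", "www-data") if Process.euid == 0
    end
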
diff --git a/local.mk.sample b/local.mk.sample
index c950d87..25bca5d 100644
--- a/local.mk.sample
+++ b/local.mk.sample
@@ -37,15 +37,6 @@ else
   RUBY := $(prefix)/bin/ruby --disable-gems
 endif
 
-# FIXME: use isolate more
-ifndef RUBYLIB
-  gems := rack-1.1.0
-  gem_paths := $(addprefix $(HOME)/lib/ruby/gems/1.8/gems/,$(gems))
-  sp :=
-  sp +=
-  export RUBYLIB := $(subst $(sp),:,$(addsuffix /lib,$(gem_paths)))
-endif
-
 # pipefail is THE reason to use bash (v3+) or newer revisions of ksh93
 # SHELL := /bin/bash -e -o pipefail
 SHELL := /bin/ksh93 -e -o pipefail
diff --git a/script/isolate_for_tests b/script/isolate_for_tests
new file mode 100755
index 0000000..5cea47c
--- /dev/null
+++ b/script/isolate_for_tests
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+# scripts/Makefiles can read and eval the output of this script and
+# use it as RUBYLIB
+require 'rubygems'
+require 'isolate'
+fp = File.open(__FILE__, "rb")
+fp.flock(File::LOCK_EX)
+
+ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
+opts = {
+  :system => false,
+  # we want "ruby-1.8.7" and not "ruby-1.8", so disable :multiruby
+  :multiruby => false,
+  :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
+}
+
+pid = fork do
+  Isolate.now!(opts) do
+    gem 'sqlite3-ruby', '1.2.5'
+    gem 'kgio', '1.3.1'
+    gem 'rack', '1.1.0'
+  end
+end
+_, status = Process.waitpid2(pid)
+status.success? or abort status.inspect
+lib_paths = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) }
+dst = "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}.mk"
+File.open("#{dst}.#$$", "w") do |fp|
+  fp.puts "ISOLATE_LIBS=#{lib_paths.join(':')}"
+end
+File.rename("#{dst}.#$$", dst)
+
+# pure Ruby gems can be shared across all Rubies
+%w(3.0.0).each do |rails_ver|
+  opts[:path] = "tmp/isolate/rails-#{rails_ver}"
+  pid = fork do
+    Isolate.now!(opts) do
+      gem 'rails', rails_ver
+    end
+  end
+  _, status = Process.waitpid2(pid)
+  status.success? or abort status.inspect
+  more = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) }
+  lib_paths.concat(more)
+end
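
The generated tmp/isolate/<engine>-<version>.mk holds a single
ISOLATE_LIBS= assignment for the Makefiles to -include; a sketch of
reading it from Ruby instead of make (path reconstructed from the
script above):

    ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
    mk = "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}.mk"
    libs = File.read(mk)[/^ISOLATE_LIBS=(.*)/, 1]
    ENV['RUBYLIB'] = [ ENV['RUBYLIB'], libs ].compact.join(':')
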
diff --git a/t/GNUmakefile b/t/GNUmakefile
index e80c43a..8f2668c 100644
--- a/t/GNUmakefile
+++ b/t/GNUmakefile
@@ -17,6 +17,12 @@ endif
 RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 export RUBY_ENGINE
 
+isolate_libs := ../tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk
+$(isolate_libs): ../script/isolate_for_tests
+        @cd .. && $(RUBY) script/isolate_for_tests
+-include $(isolate_libs)
+MYLIBS := $(RUBYLIB):$(ISOLATE_LIBS)
+
 T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)
 
 all:: $(T)
@@ -58,7 +64,7 @@ $(test_prefix)/.stamp:
 $(T): export RUBY := $(RUBY)
 $(T): export RAKE := $(RAKE)
 $(T): export PATH := $(test_prefix)/bin:$(PATH)
-$(T): export RUBYLIB := $(test_prefix)/lib:$(RUBYLIB)
+$(T): export RUBYLIB := $(test_prefix)/lib:$(MYLIBS)
 $(T): dep $(test_prefix)/.stamp trash/.gitignore
         $(TRACER) $(SHELL) $(SH_TEST_OPTS) $@ $(TEST_OPTS)
 
diff --git a/t/preread_input.ru b/t/preread_input.ru
new file mode 100644
index 0000000..79685c4
--- /dev/null
+++ b/t/preread_input.ru
@@ -0,0 +1,17 @@
+#\-E none
+require 'digest/sha1'
+require 'unicorn/preread_input'
+use Rack::ContentLength
+use Rack::ContentType, "text/plain"
+use Unicorn::PrereadInput
+nr = 0
+run lambda { |env|
+  $stderr.write "app dispatch: #{nr += 1}\n"
+  input = env["rack.input"]
+  dig = Digest::SHA1.new
+  while buf = input.read(16384)
+    dig.update(buf)
+  end
+
+  [ 200, {}, [ "#{dig.hexdigest}\n" ] ]
+}
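
Unicorn::PrereadInput itself ships in lib/unicorn/preread_input.rb
(not shown in this section); sketched from its usage here, the idea is
to drain "rack.input" to its backing store before calling the app so a
slow upload cannot hold a dispatch slot.  A rough approximation, not
the canonical implementation:

    class PrereadInputSketch
      def initialize(app)
        @app = app
      end

      def call(env)
        buf = ""
        input = env["rack.input"]
        true while input.read(16384, buf) # buffer the entire body
        input.rewind
        @app.call(env)
      end
    end
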
diff --git a/t/t0003-working_directory.sh b/t/t0003-working_directory.sh
index 53345ae..79988d8 100755
--- a/t/t0003-working_directory.sh
+++ b/t/t0003-working_directory.sh
@@ -1,9 +1,4 @@
 #!/bin/sh
-if test -n "$RBX_SKIP"
-then
-        echo "$0 is broken under Rubinius for now"
-        exit 0
-fi
 . ./test-lib.sh
 
 t_plan 4 "config.ru inside alt working_directory"
diff --git a/t/t0010-reap-logging.sh b/t/t0010-reap-logging.sh
new file mode 100755
index 0000000..93d8c60
--- /dev/null
+++ b/t/t0010-reap-logging.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 9 "reap worker logging messages"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        cat >> $unicorn_config <<EOF
+after_fork { |s,w| File.open('$fifo','w') { |f| f.write '.' } }
+EOF
+        unicorn -c $unicorn_config pid.ru &
+        test '.' = $(cat $fifo)
+        unicorn_wait_start
+}
+
+t_begin "kill 1st worker=0" && {
+        pid_1=$(curl http://$listen/)
+        kill -9 $pid_1
+}
+
+t_begin "wait for 2nd worker to start" && {
+        test '.' = $(cat $fifo)
+}
+
+t_begin "ensure log of 1st reap is an ERROR" && {
+        dbgcat r_err
+        grep 'ERROR.*reaped.*worker=0' $r_err | grep $pid_1
+        dbgcat r_err
+        > $r_err
+}
+
+t_begin "kill 2nd worker gracefully" && {
+        pid_2=$(curl http://$listen/)
+        kill -QUIT $pid_2
+}
+
+t_begin "wait for 3rd worker=0 to start " && {
+        test '.' = $(cat $fifo)
+}
+
+t_begin "ensure log of 2nd reap is a INFO" && {
+        grep 'INFO.*reaped.*worker=0' $r_err | grep $pid_2
+        > $r_err
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+        wait
+        kill -0 $unicorn_pid && false
+}
+
+t_begin "check stderr" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0303-rails3-alt-working_directory_config.ru.sh b/t/t0303-rails3-alt-working_directory_config.ru.sh
index 444f05a..1433f94 100755
--- a/t/t0303-rails3-alt-working_directory_config.ru.sh
+++ b/t/t0303-rails3-alt-working_directory_config.ru.sh
@@ -1,9 +1,4 @@
 #!/bin/sh
-if test -n "$RBX_SKIP"
-then
-        echo "$0 is broken under Rubinius for now"
-        exit 0
-fi
 . ./test-rails3.sh
 
 t_plan 5 "Rails 3 (beta) inside alt working_directory (w/ config.ru)"
diff --git a/t/t9000-preread-input.sh b/t/t9000-preread-input.sh
new file mode 100755
index 0000000..b9da05e
--- /dev/null
+++ b/t/t9000-preread-input.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 9 "PrereadInput middleware tests"
+
+t_begin "setup and start" && {
+        random_blob_sha1=$(rsha1 < random_blob)
+        unicorn_setup
+        unicorn -D -c $unicorn_config preread_input.ru
+        unicorn_wait_start
+}
+
+t_begin "single identity request" && {
+        curl -sSf -T random_blob http://$listen/ > $tmp
+}
+
+t_begin "sha1 matches" && {
+        test x"$(cat $tmp)" = x"$random_blob_sha1"
+}
+
+t_begin "single chunked request" && {
+        curl -sSf -T- < random_blob http://$listen/ > $tmp
+}
+
+t_begin "sha1 matches" && {
+        test x"$(cat $tmp)" = x"$random_blob_sha1"
+}
+
+t_begin "app only dispatched twice" && {
+        test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )"
+}
+
+t_begin "aborted chunked request" && {
+        rm -f $tmp
+        curl -sSf -T- < $fifo http://$listen/ > $tmp &
+        curl_pid=$!
+        kill -9 $curl_pid
+        wait
+}
+
+t_begin "app only dispatched twice" && {
+        test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )"
+}
+
+t_begin "killing succeeds" && {
+        kill -QUIT $unicorn_pid
+}
+
+t_done
diff --git a/t/test-rails3.sh b/t/test-rails3.sh
index b398f03..907ef0d 100644
--- a/t/test-rails3.sh
+++ b/t/test-rails3.sh
@@ -13,7 +13,7 @@ rails_gems=../tmp/isolate/rails-$RAILS_VERSION/gems
 rails_bin="$rails_gems/rails-$RAILS_VERSION/bin/rails"
 if ! test -d "$arch_gems" || ! test -d "$rails_gems" || ! test -x "$rails_bin"
 then
-        ( cd ../ && $RAKE isolate )
+        ( cd ../ && ./script/isolate_for_tests )
 fi
 
 for i in $arch_gems/*-* $rails_gems/*-*
diff --git a/test/exec/test_exec.rb b/test/exec/test_exec.rb
index 1d24ca3..581d5d5 100644
--- a/test/exec/test_exec.rb
+++ b/test/exec/test_exec.rb
@@ -614,7 +614,7 @@ EOF
     results = retry_hit(["http://#{@addr}:#{@port}/"])
     assert_equal String, results[0].class
     assert_shutdown(pid)
-  end unless ENV['RBX_SKIP']
+  end
 
   def test_config_ru_alt_path
     config_path = "#{@tmpdir}/foo.ru"
diff --git a/test/unit/test_http_parser_ng.rb b/test/unit/test_http_parser_ng.rb
index cb30f32..65b843e 100644
--- a/test/unit/test_http_parser_ng.rb
+++ b/test/unit/test_http_parser_ng.rb
@@ -388,6 +388,7 @@ class HttpParserNgTest < Test::Unit::TestCase
       "*" => { qs => "", pi => "" },
     }.each do |uri,expect|
       assert_equal req, @parser.headers(req.clear, str % [ uri ])
+      req = req.dup
       @parser.reset
       assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
       assert_equal expect[qs], req[qs], "#{qs} mismatch"
@@ -412,6 +413,7 @@ class HttpParserNgTest < Test::Unit::TestCase
       "/1?a=b;c=d&e=f" => { qs => "a=b;c=d&e=f", pi => "/1" },
     }.each do |uri,expect|
       assert_equal req, @parser.headers(req.clear, str % [ uri ])
+      req = req.dup
       @parser.reset
       assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
       assert_equal "example.com", req["HTTP_HOST"], "Host: mismatch"
@@ -440,6 +442,17 @@ class HttpParserNgTest < Test::Unit::TestCase
     end
   end
 
+  def test_backtrace_is_empty
+    begin
+      @parser.headers({}, "AAADFSFDSFD\r\n\r\n")
+      assert false, "should never get here line:#{__LINE__}"
+    rescue HttpParserError => e
+      assert_equal [], e.backtrace
+      return
+    end
+    assert false, "should never get here line:#{__LINE__}"
+  end
+
   def test_ignore_version_header
     http = "GET / HTTP/1.1\r\nVersion: hello\r\n\r\n"
     req = {}
@@ -460,4 +473,29 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_equal expect, req
   end
 
+  def test_pipelined_requests
+    expect = {
+      "HTTP_HOST" => "example.com",
+      "SERVER_NAME" => "example.com",
+      "REQUEST_PATH" => "/",
+      "rack.url_scheme" => "http",
+      "SERVER_PROTOCOL" => "HTTP/1.1",
+      "PATH_INFO" => "/",
+      "HTTP_VERSION" => "HTTP/1.1",
+      "REQUEST_URI" => "/",
+      "SERVER_PORT" => "80",
+      "REQUEST_METHOD" => "GET",
+      "QUERY_STRING" => ""
+    }
+    str = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
+    @parser.buf << (str * 2)
+    env1 = @parser.parse.dup
+    assert_equal expect, env1
+    assert_equal str, @parser.buf
+    assert @parser.keepalive?
+    @parser.reset
+    env2 = @parser.parse.dup
+    assert_equal expect, env2
+    assert_equal "", @parser.buf
+  end
 end
diff --git a/test/unit/test_request.rb b/test/unit/test_request.rb
index 1896300..67ac1b9 100644
--- a/test/unit/test_request.rb
+++ b/test/unit/test_request.rb
@@ -11,7 +11,11 @@ class RequestTest < Test::Unit::TestCase
 
   class MockRequest < StringIO
     alias_method :readpartial, :sysread
+    alias_method :kgio_read!, :sysread
     alias_method :read_nonblock, :sysread
+    def kgio_addr
+      '127.0.0.1'
+    end
   end
 
   def setup
@@ -159,6 +163,14 @@ class RequestTest < Test::Unit::TestCase
     buf = (' ' * bs).freeze
     length = bs * count
     client = Tempfile.new('big_put')
+    def client.kgio_addr; '127.0.0.1'; end
+    def client.kgio_read(*args)
+      readpartial(*args)
+    rescue EOFError
+    end
+    def client.kgio_read!(*args)
+      readpartial(*args)
+    end
     client.syswrite(
       "PUT / HTTP/1.1\r\n" \
       "Host: foo\r\n" \
diff --git a/test/unit/test_response.rb b/test/unit/test_response.rb
index f9eda8e..e5245e8 100644
--- a/test/unit/test_response.rb
+++ b/test/unit/test_response.rb
@@ -11,10 +11,11 @@ require 'test/test_helper'
 include Unicorn
 
 class ResponseTest < Test::Unit::TestCase
-  
+  include Unicorn::HttpResponse
+
   def test_response_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]])
+    http_response_write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]])
     assert out.closed?
 
     assert out.length > 0, "output didn't have data"
@@ -22,7 +23,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_response_string_status
     out = StringIO.new
-    HttpResponse.write(out,['200', {}, []])
+    http_response_write(out,['200', {}, []])
     assert out.closed?
     assert out.length > 0, "output didn't have data"
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/).size
@@ -32,7 +33,7 @@ class ResponseTest < Test::Unit::TestCase
     old_ofs = $,
     $, = "\f\v"
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]])
+    http_response_write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]])
     assert out.closed?
     resp = out.string
     assert ! resp.include?("\f\v"), "output didn't use $, ($OFS)"
@@ -42,7 +43,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_response_200
     io = StringIO.new
-    HttpResponse.write(io, [200, {}, []])
+    http_response_write(io, [200, {}, []])
     assert io.closed?
     assert io.length > 0, "output didn't have data"
   end
@@ -50,7 +51,7 @@ class ResponseTest < Test::Unit::TestCase
   def test_response_with_default_reason
     code = 400
     io = StringIO.new
-    HttpResponse.write(io, [code, {}, []])
+    http_response_write(io, [code, {}, []])
     assert io.closed?
     lines = io.string.split(/\r\n/)
     assert_match(/.* Bad Request$/, lines.first,
@@ -59,7 +60,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_rack_multivalue_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []])
+    http_response_write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []])
     assert out.closed?
     assert_match(/^X-Whatever: stuff\r\nX-Whatever: bleh\r\n/, out.string)
   end
@@ -68,7 +69,7 @@ class ResponseTest < Test::Unit::TestCase
   # some broken clients still rely on it
   def test_status_header_added
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, []])
+    http_response_write(out,[200, {"X-Whatever" => "stuff"}, []])
     assert out.closed?
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
   end
@@ -79,7 +80,7 @@ class ResponseTest < Test::Unit::TestCase
   def test_status_header_ignores_app_hash
     out = StringIO.new
     header_hash = {"X-Whatever" => "stuff", 'StaTus' => "666" }
-    HttpResponse.write(out,[200, header_hash, []])
+    http_response_write(out,[200, header_hash, []])
     assert out.closed?
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status:/i).size
@@ -90,7 +91,7 @@ class ResponseTest < Test::Unit::TestCase
     body = StringIO.new(expect_body)
     body.rewind
     out = StringIO.new
-    HttpResponse.write(out,[200, {}, body])
+    http_response_write(out,[200, {}, body])
     assert out.closed?
     assert body.closed?
     assert_match(expect_body, out.string.split(/\r\n/).last)
@@ -98,7 +99,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_unknown_status_pass_through
     out = StringIO.new
-    HttpResponse.write(out,["666 I AM THE BEAST", {}, [] ])
+    http_response_write(out,["666 I AM THE BEAST", {}, [] ])
     assert out.closed?
     headers = out.string.split(/\r\n\r\n/).first.split(/\r\n/)
     assert %r{\AHTTP/\d\.\d 666 I AM THE BEAST\z}.match(headers[0])
diff --git a/test/unit/test_signals.rb b/test/unit/test_signals.rb
index 7c78b44..71cf8f4 100644
--- a/test/unit/test_signals.rb
+++ b/test/unit/test_signals.rb
@@ -166,7 +166,7 @@ class SignalsTest < Test::Unit::TestCase
     expect = @bs * @count
     assert_equal(expect, got, "expect=#{expect} got=#{got}")
     assert_nothing_raised { sock.close }
-  end unless ENV['RBX_SKIP']
+  end
 
   def test_request_read
     app = lambda { |env|
diff --git a/test/unit/test_tee_input.rb b/test/unit/test_tee_input.rb
index a127882..a10ca34 100644
--- a/test/unit/test_tee_input.rb
+++ b/test/unit/test_tee_input.rb
@@ -9,7 +9,7 @@ class TestTeeInput < Test::Unit::TestCase
   def setup
     @rs = $/
     @env = {}
-    @rd, @wr = IO.pipe
+    @rd, @wr = Kgio::UNIXSocket.pair
     @rd.sync = @wr.sync = true
     @start_pid = $$
   end
@@ -27,8 +27,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_long
-    init_parser("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
+    ti = Unicorn::TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -48,8 +48,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_short
-    init_parser("hello", 5 + "#$/foo".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + "#$/foo".size)
+    ti = Unicorn::TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -67,8 +67,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_small_body
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_equal StringIO, ti.tmp.class
@@ -80,8 +80,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_read_with_buffer
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = Unicorn::TeeInput.new(@rd, r)
     buf = ''
     rv = ti.read(4, buf)
     assert_equal 'hell', rv
@@ -95,8 +95,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body
-    init_parser('.' * Unicorn::Const::MAX_BODY << 'a')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.' * Unicorn::Const::MAX_BODY << 'a')
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_kind_of File, ti.tmp
@@ -106,9 +106,9 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_read_in_full_if_content_length
     a, b = 300, 3
-    init_parser('.' * b, 300)
+    r = init_request('.' * b, 300)
     assert_equal 300, @parser.content_length
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, r)
     pid = fork {
       @wr.write('.' * 197)
       sleep 1 # still a *potential* race here that would make the test moot...
@@ -121,8 +121,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body_multi
-    init_parser('.', Unicorn::Const::MAX_BODY + 1)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.', Unicorn::Const::MAX_BODY + 1)
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal Unicorn::Const::MAX_BODY, @parser.content_length
     assert ! @parser.body_eof?
     assert_kind_of File, ti.tmp
@@ -163,7 +163,7 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("0\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -201,7 +201,7 @@ class TestTeeInput < Test::Unit::TestCase
       end
       @wr.write("0\r\n\r\n")
     }
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -230,7 +230,7 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("Hello: World\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -243,7 +243,7 @@ class TestTeeInput < Test::Unit::TestCase
 
 private
 
-  def init_parser(body, size = nil)
+  def init_request(body, size = nil)
     @parser = Unicorn::HttpParser.new
     body = body.to_s.freeze
     @buf = "POST / HTTP/1.1\r\n" \
@@ -252,6 +252,7 @@ private
            "\r\n#{body}"
     assert_equal @env, @parser.headers(@env, @buf)
     assert_equal body, @buf
+    @parser
   end
 
 end
diff --git a/unicorn.gemspec b/unicorn.gemspec
index 973ca09..fd8f0e6 100644
--- a/unicorn.gemspec
+++ b/unicorn.gemspec
@@ -48,6 +48,7 @@ Gem::Specification.new do |s|
   # commented out.  Nevertheless, upgrading to Rails 2.3.4 or later is
   # *strongly* recommended for security reasons.
   s.add_dependency(%q<rack>)
+  s.add_dependency(%q<kgio>, '~> 1.3.1')
 
   s.add_development_dependency('isolate', '~> 3.0.0')