about summary refs log tree commit homepage
diff options
context:
space:
mode:
-rw-r--r--.document2
-rw-r--r--.gitignore1
-rw-r--r--.wrongdoc.yml8
-rw-r--r--DESIGN2
-rwxr-xr-xGIT-VERSION-GEN2
-rw-r--r--GNUmakefile110
-rw-r--r--KNOWN_ISSUES35
-rw-r--r--README12
-rw-r--r--Rakefile144
-rw-r--r--TODO6
-rw-r--r--TUNING40
-rwxr-xr-xbin/unicorn30
-rwxr-xr-xbin/unicorn_rails36
-rw-r--r--examples/logrotate.conf29
-rw-r--r--examples/nginx.conf33
-rw-r--r--examples/unicorn.conf.rb12
-rw-r--r--ext/unicorn_http/ext_help.h4
-rw-r--r--ext/unicorn_http/extconf.rb3
-rw-r--r--ext/unicorn_http/global_variables.h12
-rw-r--r--ext/unicorn_http/httpdate.c82
-rw-r--r--ext/unicorn_http/unicorn_http.rl461
-rw-r--r--ext/unicorn_http/unicorn_http_common.rl2
-rw-r--r--lib/unicorn.rb890
-rw-r--r--lib/unicorn/app/exec_cgi.rb48
-rw-r--r--lib/unicorn/app/inetd.rb5
-rw-r--r--lib/unicorn/app/old_rails.rb1
-rw-r--r--lib/unicorn/app/old_rails/static.rb2
-rw-r--r--lib/unicorn/cgi_wrapper.rb1
-rw-r--r--lib/unicorn/configurator.rb332
-rw-r--r--lib/unicorn/const.rb69
-rw-r--r--lib/unicorn/http_request.rb124
-rw-r--r--lib/unicorn/http_response.rb56
-rw-r--r--lib/unicorn/http_server.rb733
-rw-r--r--lib/unicorn/launcher.rb8
-rw-r--r--lib/unicorn/oob_gc.rb2
-rw-r--r--lib/unicorn/preread_input.rb33
-rw-r--r--lib/unicorn/socket_helper.rb74
-rw-r--r--lib/unicorn/stream_input.rb145
-rw-r--r--lib/unicorn/tee_input.rb176
-rw-r--r--lib/unicorn/tmpio.rb29
-rw-r--r--lib/unicorn/util.rb133
-rw-r--r--lib/unicorn/worker.rb47
-rw-r--r--local.mk.sample9
-rwxr-xr-xscript/isolate_for_tests45
-rw-r--r--t/GNUmakefile8
-rw-r--r--t/preread_input.ru17
-rw-r--r--t/rack-input-tests.ru21
-rwxr-xr-xt/t0002-parser-error.sh31
-rwxr-xr-xt/t0003-working_directory.sh5
-rwxr-xr-xt/t0010-reap-logging.sh55
-rwxr-xr-xt/t0012-reload-empty-config.sh5
-rwxr-xr-xt/t0013-rewindable-input-false.sh24
-rw-r--r--t/t0013.ru12
-rwxr-xr-xt/t0014-rewindable-input-true.sh24
-rw-r--r--t/t0014.ru12
-rwxr-xr-xt/t0015-configurator-internals.sh25
-rwxr-xr-xt/t0016-trust-x-forwarded-false.sh30
-rwxr-xr-xt/t0017-trust-x-forwarded-true.sh30
-rwxr-xr-xt/t0100-rack-input-tests.sh124
-rwxr-xr-xt/t0116-client_body_buffer_size.sh80
-rw-r--r--t/t0116.ru16
-rwxr-xr-xt/t0303-rails3-alt-working_directory_config.ru.sh5
-rwxr-xr-xt/t9000-preread-input.sh48
-rw-r--r--t/test-rails3.sh2
-rw-r--r--test/exec/test_exec.rb5
-rw-r--r--test/rails/app-2.3.8/.gitignore2
-rw-r--r--test/rails/app-2.3.8/Rakefile7
-rw-r--r--test/rails/app-2.3.8/app/controllers/application_controller.rb5
-rw-r--r--test/rails/app-2.3.8/app/controllers/foo_controller.rb36
-rw-r--r--test/rails/app-2.3.8/app/helpers/application_helper.rb4
-rw-r--r--test/rails/app-2.3.8/config/boot.rb109
-rw-r--r--test/rails/app-2.3.8/config/database.yml12
-rw-r--r--test/rails/app-2.3.8/config/environment.rb17
-rw-r--r--test/rails/app-2.3.8/config/environments/development.rb7
-rw-r--r--test/rails/app-2.3.8/config/environments/production.rb6
-rw-r--r--test/rails/app-2.3.8/config/routes.rb6
-rw-r--r--test/rails/app-2.3.8/db/.gitignore0
-rw-r--r--test/rails/app-2.3.8/log/.gitignore1
-rw-r--r--test/rails/app-2.3.8/public/404.html1
-rw-r--r--test/rails/app-2.3.8/public/500.html1
-rw-r--r--test/rails/app-2.3.8/public/x.txt1
-rw-r--r--test/rails/test_rails.rb2
-rw-r--r--test/test_helper.rb2
-rw-r--r--test/unit/test_configurator.rb8
-rw-r--r--test/unit/test_http_parser.rb446
-rw-r--r--test/unit/test_http_parser_ng.rb463
-rw-r--r--test/unit/test_http_parser_xftrust.rb38
-rw-r--r--test/unit/test_request.rb18
-rw-r--r--test/unit/test_response.rb55
-rw-r--r--test/unit/test_server.rb11
-rw-r--r--test/unit/test_signals.rb2
-rw-r--r--test/unit/test_stream_input.rb204
-rw-r--r--test/unit/test_tee_input.rb117
-rw-r--r--test/unit/test_upload.rb12
-rw-r--r--unicorn.gemspec36
95 files changed, 3867 insertions, 2369 deletions
diff --git a/.document b/.document
index 7f77691..317e36b 100644
--- a/.document
+++ b/.document
@@ -11,8 +11,8 @@ KNOWN_ISSUES
 TODO
 NEWS
 ChangeLog
+LATEST
 lib
-ext/unicorn_http/unicorn_http.c
 unicorn_1
 unicorn_rails_1
 ISSUES
diff --git a/.gitignore b/.gitignore
index 78a2a53..50c2736 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,4 @@ pkg/
 /GIT-VERSION-FILE
 /man
 /tmp
+/LATEST
diff --git a/.wrongdoc.yml b/.wrongdoc.yml
new file mode 100644
index 0000000..10f10b5
--- /dev/null
+++ b/.wrongdoc.yml
@@ -0,0 +1,8 @@
+---
+cgit_url: http://bogomips.org/unicorn.git
+git_url: git://bogomips.org/unicorn.git
+rdoc_url: http://unicorn.bogomips.org/
+changelog_start: v1.1.5
+merge_html:
+  unicorn_1: Documentation/unicorn.1.html
+  unicorn_rails_1: Documentation/unicorn_rails.1.html
diff --git a/DESIGN b/DESIGN
index 1d195c7..eb9fbea 100644
--- a/DESIGN
+++ b/DESIGN
@@ -11,7 +11,7 @@
   only non-Ruby part and there are no plans to add any more
   non-Ruby components.
 
-* All HTTP protocol parsing and I/O is done much like Mongrel:
+* All HTTP parsing and I/O is done much like Mongrel:
     1. read/parse HTTP request headers in full
     2. call Rack application
     3. write HTTP response back to the client
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 7a97c16..f5fd878 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v1.1.7.GIT
+DEF_VER=v3.6.1.GIT
 
 LF='
 '
diff --git a/GNUmakefile b/GNUmakefile
index a3b761a..c0016ef 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,7 +1,6 @@
 # use GNU Make to run tests in parallel, and without depending on RubyGems
 all:: test
 
-GIT_URL = git://git.bogomips.org/unicorn.git
 RLFLAGS = -G2
 
 MRI = ruby
@@ -16,7 +15,7 @@ GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE
 -include local.mk
 ruby_bin := $(shell which $(RUBY))
 ifeq ($(DLEXT),) # "so" for Linux
-  DLEXT := $(shell $(RUBY) -rrbconfig -e 'puts Config::CONFIG["DLEXT"]')
+  DLEXT := $(shell $(RUBY) -rrbconfig -e 'puts RbConfig::CONFIG["DLEXT"]')
 endif
 ifeq ($(RUBY_VERSION),)
   RUBY_VERSION := $(shell $(RUBY) -e 'puts RUBY_VERSION')
@@ -24,6 +23,12 @@ endif
 
 RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 
+isolate_libs := tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk
+$(isolate_libs): script/isolate_for_tests
+        @$(RUBY) script/isolate_for_tests
+-include $(isolate_libs)
+MYLIBS = $(RUBYLIB):$(ISOLATE_LIBS)
+
 # dunno how to implement this as concisely in Ruby, and hell, I love awk
 awk_slow := awk '/def test_/{print FILENAME"--"$$2".n"}' 2>/dev/null
 
@@ -40,7 +45,7 @@ T_r_log := $(subst .r,$(log_suffix),$(T_r))
 test_prefix = $(CURDIR)/test/$(RUBY_ENGINE)-$(RUBY_VERSION)
 
 ext := ext/unicorn_http
-c_files := $(ext)/unicorn_http.c $(wildcard $(ext)/*.h)
+c_files := $(ext)/unicorn_http.c $(ext)/httpdate.c $(wildcard $(ext)/*.h)
 rl_files := $(wildcard $(ext)/*.rl)
 base_bins := unicorn unicorn_rails
 bins := $(addprefix bin/, $(base_bins))
@@ -117,14 +122,14 @@ run_test = $(quiet_pre) \
 %.n: arg = $(subst .n,,$(subst --, -n ,$@))
 %.n: t = $(subst .n,$(log_suffix),$@)
 %.n: export PATH := $(test_prefix)/bin:$(PATH)
-%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 %.n: $(test_prefix)/.stamp
         $(run_test)
 
 $(T): arg = $@
 $(T): t = $(subst .rb,$(log_suffix),$@)
 $(T): export PATH := $(test_prefix)/bin:$(PATH)
-$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T): $(test_prefix)/.stamp
         $(run_test)
 
@@ -150,77 +155,39 @@ clean:
         $(RM) $(setup_rb_files) $(t_log)
         $(RM) -r $(test_prefix) man
 
-man:
-        $(MAKE) -C Documentation install-man
+man html:
+        $(MAKE) -C Documentation install-$@
+
+pkg_extra := GIT-VERSION-FILE ChangeLog LATEST NEWS \
+             $(ext)/unicorn_http.c $(man1_paths)
 
-pkg_extra := GIT-VERSION-FILE NEWS ChangeLog $(ext)/unicorn_http.c
-manifest: $(pkg_extra) man
-        $(RM) .manifest
-        $(MAKE) .manifest
+ChangeLog: GIT-VERSION-FILE .wrongdoc.yml
+        wrongdoc prepare
 
-.manifest:
-        (git ls-files && \
-         for i in $@ $(pkg_extra) $(man1_paths); \
-         do echo $$i; done) | LC_ALL=C sort > $@+
+.manifest: ChangeLog $(ext)/unicorn_http.c
+        (git ls-files && for i in $@ $(pkg_extra); do echo $$i; done) | \
+          LC_ALL=C sort > $@+
         cmp $@+ $@ || mv $@+ $@
         $(RM) $@+
 
-NEWS: GIT-VERSION-FILE .manifest
-        $(RAKE) -s news_rdoc > $@+
-        mv $@+ $@
-
-SINCE = 1.0.0
-ChangeLog: LOG_VERSION = \
-  $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
-          echo $(GIT_VERSION) || git describe)
-ChangeLog: log_range = v$(SINCE)..$(LOG_VERSION)
-ChangeLog: GIT-VERSION-FILE
-        @echo "ChangeLog from $(GIT_URL) ($(log_range))" > $@+
-        @echo >> $@+
-        git log $(log_range) | sed -e 's/^/    /' >> $@+
-        mv $@+ $@
-
-news_atom := http://unicorn.bogomips.org/NEWS.atom.xml
-cgit_atom := http://git.bogomips.org/cgit/unicorn.git/atom/?h=master
-atom = <link rel="alternate" title="Atom feed" href="$(1)" \
-             type="application/atom+xml"/>
-
-# using rdoc 2.5.x+
-doc: .document $(ext)/unicorn_http.c NEWS ChangeLog
+doc: .document $(ext)/unicorn_http.c man html .wrongdoc.yml
         for i in $(man1_rdoc); do echo > $$i; done
         find bin lib -type f -name '*.rbc' -exec rm -f '{}' ';'
-        rdoc -t "$(shell sed -ne '1s/^= //p' README)"
+        $(RM) -r doc
+        wrongdoc all
         install -m644 COPYING doc/COPYING
-        install -m644 $(shell grep '^[A-Z]' .document)  doc/
-        $(MAKE) -C Documentation install-html install-man
+        install -m644 $(shell grep '^[A-Z]' .document) doc/
         install -m644 $(man1_paths) doc/
-        cd doc && for i in $(base_bins); do \
-          $(RM) 1.html $${i}.1.html; \
-          sed -e '/"documentation">/r man1/'$$i'.1.html' \
-                < $${i}_1.html > tmp && mv tmp $${i}_1.html; \
-          ln $${i}_1.html $${i}.1.html; \
-          done
-        $(RUBY) -i -p -e \
-          '$$_.gsub!("</title>",%q{\&$(call atom,$(cgit_atom))})' \
-          doc/ChangeLog.html
-        $(RUBY) -i -p -e \
-          '$$_.gsub!("</title>",%q{\&$(call atom,$(news_atom))})' \
-          doc/NEWS.html doc/README.html
-        $(RAKE) -s news_atom > doc/NEWS.atom.xml
-        cd doc && ln README.html tmp && mv tmp index.html
+        tar cf - $$(git ls-files examples/) | (cd doc && tar xf -)
         $(RM) $(man1_rdoc)
 
 # publishes docs to http://unicorn.bogomips.org
 publish_doc:
         -git set-file-times
-        $(RM) -r doc ChangeLog NEWS
-        $(MAKE) doc LOG_VERSION=$(shell git tag -l | tail -1)
-        @awk 'BEGIN{RS="=== ";ORS=""}NR==2{sub(/\n$$/,"");print RS""$$0 }' \
-         < NEWS > doc/LATEST
-        find doc/images doc/js -type f | \
-                TZ=UTC xargs touch -d '1970-01-01 00:00:00' doc/rdoc.css
+        $(MAKE) doc
+        find doc/images -type f | \
+                TZ=UTC xargs touch -d '1970-01-01 00:00:02' doc/rdoc.css
         $(MAKE) doc_gz
-        tar cf - $$(git ls-files examples/) | (cd doc && tar xf -)
         chmod 644 $$(find doc -type f)
         $(RSYNC) -av doc/ unicorn.bogomips.org:/srv/unicorn/
         git ls-files | xargs touch
@@ -229,7 +196,6 @@ publish_doc:
 # "gzip_static on" can serve the gzipped versions directly.
 doc_gz: docs = $(shell find doc -type f ! -regex '^.*\.\(gif\|jpg\|png\|gz\)$$')
 doc_gz:
-        touch doc/NEWS.atom.xml -d "$$(awk 'NR==1{print $$4,$$5,$$6}' NEWS)"
         for i in $(docs); do \
           gzip --rsyncable -9 < $$i > $$i.gz; touch -r $$i $$i.gz; done
 
@@ -239,9 +205,9 @@ $(rails_git)/info/cloned-stamp:
         git clone --mirror -q $(rails_git_url) $(rails_git)
         > $@
 
-$(rails_git)/info/v2.3.8-stamp: $(rails_git)/info/cloned-stamp
+$(rails_git)/info/v2.2.3-stamp: $(rails_git)/info/cloned-stamp
         cd $(rails_git) && git fetch
-        cd $(rails_git) && git rev-parse --verify refs/tags/v2.3.8
+        cd $(rails_git) && git rev-parse --verify refs/tags/v2.2.3
         > $@
 
 rails_tests := $(addsuffix .r,$(addprefix $(T_r).,$(rails_vers)))
@@ -251,10 +217,10 @@ $(T_r).%.r: rv = $(subst .r,,$(subst $(T_r).,,$@))
 $(T_r).%.r: extra = ' 'v$(rv)
 $(T_r).%.r: arg = $(T_r)
 $(T_r).%.r: export PATH := $(test_prefix)/bin:$(PATH)
-$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T_r).%.r: export UNICORN_RAILS_TEST_VERSION = $(rv)
 $(T_r).%.r: export RAILS_GIT_REPO = $(CURDIR)/$(rails_git)
-$(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/v2.3.8-stamp
+$(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/v2.2.3-stamp
         $(run_test)
 
 ifneq ($(VERSION),)
@@ -268,10 +234,10 @@ release_changes := release_changes-$(VERSION)
 release-notes: $(release_notes)
 release-changes: $(release_changes)
 $(release_changes):
-        $(RAKE) -s release_changes > $@+
+        wrongdoc release_changes > $@+
         $(VISUAL) $@+ && test -s $@+ && mv $@+ $@
 $(release_notes):
-        GIT_URL=$(GIT_URL) $(RAKE) -s release_notes > $@+
+        wrongdoc release_notes > $@+
         $(VISUAL) $@+ && test -s $@+ && mv $@+ $@
 
 # ensures we're actually on the tagged $(VERSION), only used for release
@@ -291,18 +257,18 @@ gem: $(pkggem)
 install-gem: $(pkggem)
         gem install $(CURDIR)/$<
 
-$(pkggem): manifest fix-perms
+$(pkggem): .manifest fix-perms
         gem build $(rfpackage).gemspec
         mkdir -p pkg
         mv $(@F) $@
 
 $(pkgtgz): distdir = $(basename $@)
 $(pkgtgz): HEAD = v$(VERSION)
-$(pkgtgz): manifest fix-perms
+$(pkgtgz): .manifest fix-perms
         @test -n "$(distdir)"
         $(RM) -r $(distdir)
         mkdir -p $(distdir)
-        tar cf - `cat .manifest` | (cd $(distdir) && tar xf -)
+        tar cf - $$(cat .manifest) | (cd $(distdir) && tar xf -)
         cd pkg && tar cf - $(basename $(@F)) | gzip -9 > $(@F)+
         mv $@+ $@
 
@@ -324,5 +290,5 @@ gem install-gem: GIT-VERSION-FILE
         $(MAKE) $@ VERSION=$(GIT_VERSION)
 endif
 
-.PHONY: .FORCE-GIT-VERSION-FILE doc $(T) $(slow_tests) manifest man
+.PHONY: .FORCE-GIT-VERSION-FILE doc $(T) $(slow_tests) man
 .PHONY: test-install
diff --git a/KNOWN_ISSUES b/KNOWN_ISSUES
index bc3dac5..2bd4151 100644
--- a/KNOWN_ISSUES
+++ b/KNOWN_ISSUES
@@ -3,16 +3,28 @@
 Occasionally odd {issues}[link:ISSUES.html] arise without a transparent or
 acceptable solution.  Those issues are documented here.
 
+* PRNGs (pseudo-random number generators) loaded before forking
+  (e.g. "preload_app true") may need to have their internal state
+  reset in the after_fork hook.  Starting with \Unicorn 3.6.1, we
+  have builtin workarounds for Kernel#rand and OpenSSL::Random users,
+  but applications may use other PRNGs.
+
+* Under some versions of Ruby 1.8, it is necessary to call +srand+ in an
+  after_fork hook to get correct random number generation.  We have a builtin
+  workaround for this starting with \Unicorn 3.6.1
+
+  See http://redmine.ruby-lang.org/issues/show/4338
+
+* On Ruby 1.8 prior to Ruby 1.8.7-p248, *BSD platforms have a broken
+  stdio that causes failure for file uploads larger than 112K.  Upgrade
+  your version of Ruby or continue using Unicorn 1.x/3.4.x.
+
 * For notes on sandboxing tools such as Bundler or Isolate,
   see the {Sandbox}[link:Sandbox.html] page.
 
-* Under Ruby 1.9.1, methods like Array#shuffle and Array#sample will
-  segfault if called after forking.  This is fixed in trunk (r26936) and
-  should be backported to the next 1.9.1 stable release (after p378).
-  Until then, it is advisable to call "Kernel.rand" in your after_fork
-  hook to reinitialize the random number generator.
-
-  See http://redmine.ruby-lang.org/issues/show/2962 for more details
+* nginx with "sendfile on" under FreeBSD 8 is broken when
+  uploads are buffered to disk.  Disabling sendfile is required to
+  work around this bug which should be fixed in newer versions of FreeBSD.
 
 * When using "preload_app true", with apps using background threads
   need to restart them in the after_fork hook because threads are never
@@ -22,6 +34,15 @@ acceptable solution.  Those issues are documented here.
   deadlocks.  The core Ruby Logger class needlessly uses a MonitorMutex
   which can be disabled with a {monkey patch}[link:examples/logger_mp_safe.rb]
 
+== Known Issues (Old)
+
+* Under Ruby 1.9.1, methods like Array#shuffle and Array#sample will
+  segfault if called after forking.  Upgrade to Ruby 1.9.2 or call
+  "Kernel.rand" in your after_fork hook to reinitialize the random
+  number generator.
+
+  See http://redmine.ruby-lang.org/issues/show/2962 for more details
+
 * Rails 2.3.2 bundles its own version of Rack.  This may cause subtle
   bugs when simultaneously loaded with the system-wide Rack Rubygem
   which Unicorn depends on.  Upgrading to Rails 2.3.4 (or later) is
diff --git a/README b/README
index b4bbae2..4cd5d3e 100644
--- a/README
+++ b/README
@@ -60,11 +60,11 @@ both the the request and response in between \Unicorn and slow clients.
 == License
 
 \Unicorn is copyright 2009 by all contributors (see logs in git).
-It is based on Mongrel and carries the same license.
+It is based on Mongrel 1.1.5 and carries the same license.
 
 Mongrel is copyright 2007 Zed A. Shaw and contributors. It is licensed
-under the Ruby license and the GPL2. See the included LICENSE file for
-details.
+under the Ruby (1.8) license and the GPL2. See the included LICENSE file
+for details.
 
 \Unicorn is 100% Free Software.
 
@@ -78,20 +78,20 @@ and run setup.rb after unpacking it:
 
 http://rubyforge.org/frs/?group_id=1306
 
-You may also install it via RubyGems on Gemcutter:
+You may also install it via RubyGems on RubyGems.org:
 
   gem install unicorn
 
 You can get the latest source via git from the following locations
 (these versions may not be stable):
 
-  git://git.bogomips.org/unicorn.git
+  git://bogomips.org/unicorn.git
   git://repo.or.cz/unicorn.git (mirror)
 
 You may browse the code from the web and download the latest snapshot
 tarballs here:
 
-* http://git.bogomips.org/cgit/unicorn.git (cgit)
+* http://bogomips.org/unicorn.git (cgit)
 * http://repo.or.cz/w/unicorn.git (gitweb)
 
 See the HACKING guide on how to contribute and build prerelease gems
diff --git a/Rakefile b/Rakefile
index 15a0f61..ffdf982 100644
--- a/Rakefile
+++ b/Rakefile
@@ -1,111 +1,9 @@
 # -*- encoding: binary -*-
 autoload :Gem, 'rubygems'
+require 'wrongdoc'
 
-# most tasks are in the GNUmakefile which offers better parallelism
-
-def old_summaries
-  @old_summaries ||= File.readlines(".CHANGELOG.old").inject({}) do |hash, line|
-    version, summary = line.split(/ - /, 2)
-    hash[version] = summary
-    hash
-  end
-end
-
-def tags
-  timefmt = '%Y-%m-%dT%H:%M:%SZ'
-  @tags ||= `git tag -l`.split(/\n/).map do |tag|
-    next if tag == "v0.0.0"
-    if %r{\Av[\d\.]+} =~ tag
-      header, subject, body = `git cat-file tag #{tag}`.split(/\n\n/, 3)
-      header = header.split(/\n/)
-      tagger = header.grep(/\Atagger /).first
-      body ||= "initial"
-      {
-        :time => Time.at(tagger.split(/ /)[-2].to_i).utc.strftime(timefmt),
-        :tagger_name => %r{^tagger ([^<]+)}.match(tagger)[1].strip,
-        :tagger_email => %r{<([^>]+)>}.match(tagger)[1].strip,
-        :id => `git rev-parse refs/tags/#{tag}`.chomp!,
-        :tag => tag,
-        :subject => subject,
-        :body => (old = old_summaries[tag]) ? "#{old}\n#{body}" : body,
-      }
-    end
-  end.compact.sort { |a,b| b[:time] <=> a[:time] }
-end
-
-cgit_url = "http://git.bogomips.org/cgit/unicorn.git"
-git_url = ENV['GIT_URL'] || 'git://git.bogomips.org/unicorn.git'
-
-desc 'prints news as an Atom feed'
-task :news_atom do
-  require 'nokogiri'
-  new_tags = tags[0,10]
-  puts(Nokogiri::XML::Builder.new do
-    feed :xmlns => "http://www.w3.org/2005/Atom" do
-      id! "http://unicorn.bogomips.org/NEWS.atom.xml"
-      title "Unicorn news"
-      subtitle "Rack HTTP server for Unix and fast clients"
-      link! :rel => 'alternate', :type => 'text/html',
-            :href => 'http://unicorn.bogomips.org/NEWS.html'
-      updated new_tags.first[:time]
-      new_tags.each do |tag|
-        entry do
-          title tag[:subject]
-          updated tag[:time]
-          published tag[:time]
-          author {
-            name tag[:tagger_name]
-            email tag[:tagger_email]
-          }
-          url = "#{cgit_url}/tag/?id=#{tag[:tag]}"
-          link! :rel => "alternate", :type => "text/html", :href =>url
-          id! url
-          message_only = tag[:body].split(/\n.+\(\d+\):\n {6}/s).first.strip
-          content({:type =>:text}, message_only)
-          content(:type =>:xhtml) { pre tag[:body] }
-        end
-      end
-    end
-  end.to_xml)
-end
-
-desc 'prints RDoc-formatted news'
-task :news_rdoc do
-  tags.each do |tag|
-    time = tag[:time].tr!('T', ' ').gsub!(/:\d\dZ/, ' UTC')
-    puts "=== #{tag[:tag].sub(/^v/, '')} / #{time}"
-    puts ""
-
-    body = tag[:body]
-    puts tag[:body].gsub(/^/sm, "  ").gsub(/[ \t]+$/sm, "")
-    puts ""
-  end
-end
-
-desc "print release changelog for Rubyforge"
-task :release_changes do
-  version = ENV['VERSION'] or abort "VERSION= needed"
-  version = "v#{version}"
-  vtags = tags.map { |tag| tag[:tag] =~ /\Av/ and tag[:tag] }.sort
-  prev = vtags[vtags.index(version) - 1]
-  system('git', 'diff', '--stat', prev, version) or abort $?
-  puts ""
-  system('git', 'log', "#{prev}..#{version}") or abort $?
-end
-
-desc "print release notes for Rubyforge"
-task :release_notes do
-  spec = Gem::Specification.load('unicorn.gemspec')
-  puts spec.description.strip
-  puts ""
-  puts "* #{spec.homepage}"
-  puts "* #{spec.email}"
-  puts "* #{git_url}"
-
-  _, _, body = `git cat-file tag v#{spec.version}`.split(/\n\n/, 3)
-  print "\nChanges:\n\n"
-  puts body
-end
+cgit_url = Wrongdoc.config[:cgit_url]
+git_url = Wrongdoc.config[:git_url]
 
 desc "post to RAA"
 task :raa_update do
@@ -154,20 +52,24 @@ task :fm_update do
   uri = URI.parse('http://freshmeat.net/projects/unicorn/releases.json')
   rc = Net::Netrc.locate('unicorn-fm') or abort "~/.netrc not found"
   api_token = rc.password
-  changelog = tags.find { |t| t[:tag] == "v#{version}" }[:body]
+  _, subject, body = `git cat-file tag v#{version}`.split(/\n\n/, 3)
   tmp = Tempfile.new('fm-changelog')
-  tmp.syswrite(changelog)
+  tmp.puts subject
+  tmp.puts
+  tmp.puts body
+  tmp.flush
   system(ENV["VISUAL"], tmp.path) or abort "#{ENV["VISUAL"]} failed: #$?"
   changelog = File.read(tmp.path).strip
 
   req = {
     "auth_code" => api_token,
     "release" => {
-      "tag_list" => "Stable",
+      "tag_list" => "Experimental",
       "version" => version,
       "changelog" => changelog,
     },
   }.to_json
+
   if ! changelog.strip.empty? && version =~ %r{\A[\d\.]+\d+\z}
     Net::HTTP.start(uri.host, uri.port) do |http|
       p http.post(uri.path, req, {'Content-Type'=>'application/json'})
@@ -193,29 +95,3 @@ begin
   end
 rescue LoadError
 end
-
-task :isolate do
-  require 'isolate'
-  ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
-  opts = {
-    :system => false,
-    :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
-    :multiruby => false, # we want "1.8.7" instead of "1.8"
-  }
-  fp = File.open(__FILE__, "rb")
-  fp.flock(File::LOCK_EX)
-
-  # C extensions aren't binary-compatible across Ruby versions
-  pid = fork { Isolate.now!(opts) { gem 'sqlite3-ruby', '1.2.5' } }
-  _, status = Process.waitpid2(pid)
-  status.success? or abort status.inspect
-
-  # pure Ruby gems can be shared across all Rubies
-  %w(3.0.0).each do |rails_ver|
-    opts[:path] = "tmp/isolate/rails-#{rails_ver}"
-    pid = fork { Isolate.now!(opts) { gem 'rails', rails_ver } }
-    _, status = Process.waitpid2(pid)
-    status.success? or abort status.inspect
-  end
-  fp.flock(File::LOCK_UN)
-end
diff --git a/TODO b/TODO
index 166a2a0..a20e6c4 100644
--- a/TODO
+++ b/TODO
@@ -1,5 +1,7 @@
 * Documentation improvements
 
-* performance validation (esp. TeeInput)
-
 * improve test suite
+
+* scalability to >= 1024 worker processes for crazy NUMA systems
+
+* Rack 2.x support (when Rack 2.x exists)
diff --git a/TUNING b/TUNING
index 9a54a01..ca291ad 100644
--- a/TUNING
+++ b/TUNING
@@ -1,12 +1,34 @@
-= Tuning Unicorn
+= Tuning \Unicorn
 
-Unicorn performance is generally as good as a (mostly) Ruby web server
+\Unicorn performance is generally as good as a (mostly) Ruby web server
 can provide.  Most often the performance bottleneck is in the web
 application running on Unicorn rather than Unicorn itself.
 
-== Unicorn Configuration
+== \Unicorn Configuration
 
 See Unicorn::Configurator for details on the config file format.
++worker_processes+ is the most-commonly needed tuning parameter.
+
+=== Unicorn::Configurator#worker_processes
+
+* worker_processes should be scaled to the number of processes your
+  backend system(s) can support.  DO NOT scale it to the number of
+  external network clients your application expects to be serving.
+  \Unicorn is NOT for serving slow clients, that is the job of nginx.
+
+* worker_processes should be *at* *least* the number of CPU cores on
+  a dedicated server.  If your application has occasionally slow
+  responses that are /not/ CPU-intensive, you may increase this to
+  workaround those inefficiencies.
+
+* worker_processes may be increased for Unicorn::OobGC users to provide
+  more consistent response times.
+
+* Never, ever, increase worker_processes to the point where the system
+  runs out of physical memory and hits swap.  Production servers should
+  never see heavy swap activity.
+
+=== Unicorn::Configurator#listen Options
 
 * Setting a very low value for the :backlog parameter in "listen"
   directives can allow failover to happen more quickly if your
@@ -30,6 +52,11 @@ See Unicorn::Configurator for details on the config file format.
   and may also thrash CPU caches, cancelling out performance gains
   one would normally expect.
 
+* UNIX domain sockets are slighly faster than TCP sockets, but only
+  work if nginx is on the same machine.
+
+== Other \Unicorn settings
+
 * Setting "preload_app true" can allow copy-on-write-friendly GC to
   be used to save memory.  It will probably not work out of the box with
   applications that open sockets or perform random I/O on files.
@@ -40,12 +67,7 @@ See Unicorn::Configurator for details on the config file format.
 * On POSIX-compliant filesystems, it is safe for multiple threads or
   processes to append to one log file as long as all the processes are
   have them unbuffered (File#sync = true) or they are
-  record(line)-buffered in userspace.
-
-* worker_processes should be scaled to the number of processes your
-  backend system(s) can support.  DO NOT scale it to the number of
-  external network clients your application expects to be serving.
-  Unicorn is NOT for serving slow clients, that is the job of nginx.
+  record(line)-buffered in userspace before any writes.
 
 == Kernel Parameters (Linux sysctl)
 
diff --git a/bin/unicorn b/bin/unicorn
index 8d984bd..f8c20dc 100755
--- a/bin/unicorn
+++ b/bin/unicorn
@@ -4,16 +4,13 @@ require 'unicorn/launcher'
 require 'optparse'
 
 ENV["RACK_ENV"] ||= "development"
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
-opts = OptionParser.new("", 24, '  ') do |opts|
+op = OptionParser.new("", 24, '  ') do |opts|
   cmd = File.basename($0)
   opts.banner = "Usage: #{cmd} " \
                 "[ruby options] [#{cmd} options] [rackup config file]"
-
   opts.separator "Ruby options:"
 
   lineno = 1
@@ -46,23 +43,23 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-o", "--host HOST",
           "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-p", "--port PORT",
           "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
-    port = p.to_i
-    set_listener = true
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
   end
 
-  opts.on("-E", "--env ENVIRONMENT",
-          "use ENVIRONMENT for defaults (default: development)") do |e|
+  opts.on("-E", "--env RACK_ENV",
+          "use RACK_ENV for defaults (default: development)") do |e|
     ENV["RACK_ENV"] = e
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
@@ -108,17 +105,16 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   opts.parse! ARGV
 end
 
-app = Unicorn.builder(ARGV[0] || 'config.ru', opts)
-options[:listeners] << "#{host}:#{port}" if set_listener
+app = Unicorn.builder(ARGV[0] || 'config.ru', op)
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
   })
 end
 
-Unicorn::Launcher.daemonize!(options) if daemonize
+Unicorn::Launcher.daemonize!(options) if rackup_opts[:daemonize]
 Unicorn.run(app, options)
diff --git a/bin/unicorn_rails b/bin/unicorn_rails
index 0b2d92f..58c232b 100755
--- a/bin/unicorn_rails
+++ b/bin/unicorn_rails
@@ -4,13 +4,11 @@ require 'unicorn/launcher'
 require 'optparse'
 require 'fileutils'
 
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
 ENV['RAILS_ENV'] ||= "development"
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
-opts = OptionParser.new("", 24, '  ') do |opts|
+op = OptionParser.new("", 24, '  ') do |opts|
   cmd = File.basename($0)
   opts.banner = "Usage: #{cmd} " \
                 "[ruby options] [#{cmd} options] [rackup config file]"
@@ -46,13 +44,14 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-o", "--host HOST",
           "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
-  opts.on("-p", "--port PORT", "use PORT (default: #{port})") do |p|
-    port = p.to_i
-    set_listener = true
+  opts.on("-p", "--port PORT",
+          "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-E", "--env RAILS_ENV",
@@ -61,7 +60,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   # Unicorn-specific stuff
@@ -125,11 +124,11 @@ def rails_dispatcher
   result || abort("Unable to locate the application dispatcher class")
 end
 
-def rails_builder(ru, opts, daemonize)
-  return Unicorn.builder(ru, opts) if ru
+def rails_builder(ru, op, daemonize)
+  return Unicorn.builder(ru, op) if ru
 
   # allow Configurator to parse cli switches embedded in the ru file
-  Unicorn::Configurator::RACKUP.update(:file => :rails, :optparse => opts)
+  Unicorn::Configurator::RACKUP.update(:file => :rails, :optparse => op)
 
   # this lambda won't run until after forking if preload_app is false
   # this runs after config file reloading
@@ -137,7 +136,7 @@ def rails_builder(ru, opts, daemonize)
     # Rails 3 includes a config.ru, use it if we find it after
     # working_directory is bound.
     ::File.exist?('config.ru') and
-      return Unicorn.builder('config.ru', opts).call
+      return Unicorn.builder('config.ru', op).call
 
     # Load Rails and (possibly) the private version of Rack it bundles.
     begin
@@ -186,15 +185,14 @@ def rails_builder(ru, opts, daemonize)
   end
 end
 
-app = rails_builder(ARGV[0], opts, daemonize)
-options[:listeners] << "#{host}:#{port}" if set_listener
+app = rails_builder(ARGV[0], op, rackup_opts[:daemonize])
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
   })
 end
 
@@ -203,7 +201,7 @@ options[:after_reload] = lambda do
   FileUtils.mkdir_p(%w(cache pids sessions sockets).map! { |d| "tmp/#{d}" })
 end
 
-if daemonize
+if rackup_opts[:daemonize]
   options[:pid] = "tmp/pids/unicorn.pid"
   Unicorn::Launcher.daemonize!(options)
 end
diff --git a/examples/logrotate.conf b/examples/logrotate.conf
new file mode 100644
index 0000000..03fefc6
--- /dev/null
+++ b/examples/logrotate.conf
@@ -0,0 +1,29 @@
+# example logrotate config file, I usually keep this in
+# /etc/logrotate.d/unicorn_app on my Debian systems
+#
+# See the logrotate(8) manpage for more information:
+#    http://linux.die.net/man/8/logrotate
+
+# Modify the following glob to match the logfiles your app writes to:
+/var/log/unicorn_app/*.log {
+        # this first block is mostly just personal preference, though
+        # I wish logrotate offered an "hourly" option...
+        daily
+        missingok
+        rotate 180
+        compress # must use with delaycompress below
+        dateext
+
+        # this is important if using "compress" since we need to call
+        # the "lastaction" script below before compressing:
+        delaycompress
+
+        # note the lack of the evil "copytruncate" option in this
+        # config.  Unicorn supports the USR1 signal and we send it
+        # as our "lastaction" action:
+        lastaction
+                # assuming your pid file is in /var/run/unicorn_app/pid
+                pid=/var/run/unicorn_app/pid
+                test -s $pid && kill -USR1 "$(cat $pid)"
+        endscript
+}
diff --git a/examples/nginx.conf b/examples/nginx.conf
index d42ade8..9f245c8 100644
--- a/examples/nginx.conf
+++ b/examples/nginx.conf
@@ -83,9 +83,9 @@ http {
   }
 
   server {
+    # enable one of the following if you're on Linux or FreeBSD
     # listen 80 default deferred; # for Linux
     # listen 80 default accept_filter=httpready; # for FreeBSD
-    listen 80 default;
 
     client_max_body_size 4G;
     server_name _;
@@ -98,7 +98,16 @@ http {
     # path for static files
     root /path/to/app/current/public;
 
-    location / {
+    # Prefer to serve static files directly from nginx to avoid unnecessary
+    # data copies from the application server.
+    #
+    # try_files directive appeared in nginx 0.7.27 and has stabilized
+    # over time.  Older versions of nginx (e.g. 0.6.x) require
+    # "if (!-f $request_filename)" which was less efficient:
+    # http://bogomips.org/unicorn.git/tree/examples/nginx.conf?id=v3.3.1#n127
+    try_files $uri/index.html $uri.html $uri @app;
+
+    location @app {
       # an HTTP header important enough to have its own Wikipedia entry:
       #   http://en.wikipedia.org/wiki/X-Forwarded-For
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -116,18 +125,18 @@ http {
       proxy_redirect off;
 
       # set "proxy_buffering off" *only* for Rainbows! when doing
-      # Comet/long-poll stuff.  It's also safe to set if you're
-      # using only serving fast clients with Unicorn + nginx.
-      # Otherwise you _want_ nginx to buffer responses to slow
-      # clients, really.
+      # Comet/long-poll/streaming.  It's also safe to set if you're
+      # only serving fast clients with Unicorn + nginx, but not slow
+      # clients.  You normally want nginx to buffer responses to slow
+      # clients, even with Rails 3.1 streaming because otherwise a slow
+      # client can become a bottleneck of Unicorn.
+      #
+      # The Rack application may also set "X-Accel-Buffering (yes|no)"
+      # in the response headers to disable/enable buffering on a
+      # per-response basis.
       # proxy_buffering off;
 
-      # Try to serve static files from nginx, no point in making an
-      # *application* server like Unicorn/Rainbows! serve static files.
-      if (!-f $request_filename) {
-        proxy_pass http://app_server;
-        break;
-      }
+      proxy_pass http://app_server;
     }
 
     # Rails error pages
diff --git a/examples/unicorn.conf.rb b/examples/unicorn.conf.rb
index 37c3e81..61f0b4b 100644
--- a/examples/unicorn.conf.rb
+++ b/examples/unicorn.conf.rb
@@ -12,6 +12,13 @@
 # more will usually help for _short_ waits on databases/caches.
 worker_processes 4
 
+# Since Unicorn is never exposed to outside clients, it does not need to
+# run on the standard HTTP port (80); there is no reason to start Unicorn
+# as root unless it's from system init scripts.
+# If running the master process as root and the workers as an unprivileged
+# user, do this to switch euid/egid in the workers (also chowns logs):
+# user "unprivileged_user", "unprivileged_group"
+
 # Help ensure your application will always spawn in the symlinked
 # "current" directory that Capistrano sets up.
 working_directory "/path/to/app/current" # available in 0.94.0+
@@ -63,7 +70,10 @@ before_fork do |server, worker|
   #   end
   # end
   #
-  # # *optionally* throttle the master from forking too quickly by sleeping
+  # Throttle the master from forking too quickly by sleeping.  Due
+  # to the implementation of standard Unix signal handlers, this
+  # helps (but does not completely) prevent identical, repeated signals
+  # from being lost when the receiving process is busy.
   # sleep 1
 end
 
diff --git a/ext/unicorn_http/ext_help.h b/ext/unicorn_http/ext_help.h
index 3aa24a8..1f76f54 100644
--- a/ext/unicorn_http/ext_help.h
+++ b/ext/unicorn_http/ext_help.h
@@ -8,10 +8,6 @@
 #define RSTRING_LEN(s) (RSTRING(s)->len)
 #endif /* !defined(RSTRING_LEN) */
 
-#ifndef RUBINIUS
-#  define rb_str_update(x) do {} while (0)
-#endif /* !RUBINIUS */
-
 #ifndef HAVE_RB_STR_SET_LEN
 #  ifdef RUBINIUS
 #    error we should never get here with current Rubinius (1.x)
diff --git a/ext/unicorn_http/extconf.rb b/ext/unicorn_http/extconf.rb
index fabe507..7da82e7 100644
--- a/ext/unicorn_http/extconf.rb
+++ b/ext/unicorn_http/extconf.rb
@@ -1,10 +1,9 @@
 # -*- encoding: binary -*-
 require 'mkmf'
 
-dir_config("unicorn_http")
-
 have_macro("SIZEOF_OFF_T", "ruby.h") or check_sizeof("off_t", "sys/types.h")
 have_macro("SIZEOF_LONG", "ruby.h") or check_sizeof("long", "sys/types.h")
 have_func("rb_str_set_len", "ruby.h")
+have_func("gmtime_r", "time.h")
 
 create_makefile("unicorn_http")
diff --git a/ext/unicorn_http/global_variables.h b/ext/unicorn_http/global_variables.h
index 7319bcd..cdbc42d 100644
--- a/ext/unicorn_http/global_variables.h
+++ b/ext/unicorn_http/global_variables.h
@@ -15,6 +15,7 @@ static VALUE g_server_port;
 static VALUE g_server_protocol;
 static VALUE g_http_host;
 static VALUE g_http_x_forwarded_proto;
+static VALUE g_http_x_forwarded_ssl;
 static VALUE g_http_transfer_encoding;
 static VALUE g_content_length;
 static VALUE g_http_trailer;
@@ -23,11 +24,10 @@ static VALUE g_port_80;
 static VALUE g_port_443;
 static VALUE g_localhost;
 static VALUE g_http;
+static VALUE g_https;
 static VALUE g_http_09;
 static VALUE g_http_10;
 static VALUE g_http_11;
-static VALUE g_GET;
-static VALUE g_HEAD;
 
 /** Defines common length and error messages for input length validation. */
 #define DEF_MAX_LENGTH(N, length) \
@@ -35,13 +35,15 @@ static VALUE g_HEAD;
   static const char * const MAX_##N##_LENGTH_ERR = \
     "HTTP element " # N  " is longer than the " # length " allowed length."
 
+NORETURN(static void parser_error(const char *));
+
 /**
  * Validates the max length of given input and throws an HttpParserError
  * exception if over.
  */
 #define VALIDATE_MAX_LENGTH(len, N) do { \
   if (len > MAX_##N##_LENGTH) \
-    rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \
+    parser_error(MAX_##N##_LENGTH_ERR); \
 } while (0)
 
 /** Defines global strings in the init method. */
@@ -73,15 +75,15 @@ static void init_globals(void)
   DEF_GLOBAL(server_port, "SERVER_PORT");
   DEF_GLOBAL(server_protocol, "SERVER_PROTOCOL");
   DEF_GLOBAL(http_x_forwarded_proto, "HTTP_X_FORWARDED_PROTO");
+  DEF_GLOBAL(http_x_forwarded_ssl, "HTTP_X_FORWARDED_SSL");
   DEF_GLOBAL(port_80, "80");
   DEF_GLOBAL(port_443, "443");
   DEF_GLOBAL(localhost, "localhost");
   DEF_GLOBAL(http, "http");
+  DEF_GLOBAL(https, "https");
   DEF_GLOBAL(http_11, "HTTP/1.1");
   DEF_GLOBAL(http_10, "HTTP/1.0");
   DEF_GLOBAL(http_09, "HTTP/0.9");
-  DEF_GLOBAL(GET, "GET");
-  DEF_GLOBAL(HEAD, "HEAD");
 }
 
 #undef DEF_GLOBAL
diff --git a/ext/unicorn_http/httpdate.c b/ext/unicorn_http/httpdate.c
new file mode 100644
index 0000000..bfa11ca
--- /dev/null
+++ b/ext/unicorn_http/httpdate.c
@@ -0,0 +1,82 @@
+#include <ruby.h>
+#include <time.h>
+#include <stdio.h>
+
+static const size_t buf_capa = sizeof("Thu, 01 Jan 1970 00:00:00 GMT");
+static VALUE buf;
+static char *buf_ptr;
+static const char *const week[] = {
+        "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+};
+static const char *const months[] = {
+        "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+/* for people on wonky systems only */
+#ifndef HAVE_GMTIME_R
+static struct tm * my_gmtime_r(time_t *now, struct tm *tm)
+{
+        struct tm *global = gmtime(now);
+        if (global)
+                *tm = *global;
+        return tm;
+}
+#  define gmtime_r my_gmtime_r
+#endif
+
+
+/*
+ * Returns a string which represents the time as rfc1123-date of HTTP-date
+ * defined by RFC 2616:
+ *
+ *   day-of-week, DD month-name CCYY hh:mm:ss GMT
+ *
+ * Note that the result is always GMT.
+ *
+ * This method is identical to Time#httpdate in the Ruby standard library,
+ * except it is implemented in C for performance.  We always saw
+ * Time#httpdate at or near the top of the profiler output so we
+ * decided to rewrite this in C.
+ *
+ * Caveats: it relies on a Ruby implementation with the global VM lock,
+ * a thread-safe version will be provided when a Unix-only, GVL-free Ruby
+ * implementation becomes viable.
+ */
+static VALUE httpdate(VALUE self)
+{
+        static time_t last;
+        time_t now = time(NULL); /* not a syscall on modern 64-bit systems */
+        struct tm tm;
+
+        if (last == now)
+                return buf;
+        last = now;
+        gmtime_r(&now, &tm);
+
+        /* we can make this thread-safe later if our Ruby loses the GVL */
+        snprintf(buf_ptr, buf_capa,
+                 "%s, %02d %s %4d %02d:%02d:%02d GMT",
+                 week[tm.tm_wday],
+                 tm.tm_mday,
+                 months[tm.tm_mon],
+                 tm.tm_year + 1900,
+                 tm.tm_hour,
+                 tm.tm_min,
+                 tm.tm_sec);
+
+        return buf;
+}
+
+void init_unicorn_httpdate(void)
+{
+        VALUE mod = rb_const_get(rb_cObject, rb_intern("Unicorn"));
+        mod = rb_define_module_under(mod, "HttpResponse");
+
+        buf = rb_str_new(0, buf_capa - 1);
+        rb_global_variable(&buf);
+        buf_ptr = RSTRING_PTR(buf);
+        httpdate(Qnil);
+
+        rb_define_method(mod, "httpdate", httpdate, 0);
+}
diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl
index 1ad2a5d..971cdc1 100644
--- a/ext/unicorn_http/unicorn_http.rl
+++ b/ext/unicorn_http/unicorn_http.rl
@@ -12,23 +12,81 @@
 #include "global_variables.h"
 #include "c_util.h"
 
+void init_unicorn_httpdate(void);
+
 #define UH_FL_CHUNKED  0x1
 #define UH_FL_HASBODY  0x2
 #define UH_FL_INBODY   0x4
 #define UH_FL_HASTRAILER 0x8
 #define UH_FL_INTRAILER 0x10
 #define UH_FL_INCHUNK  0x20
-#define UH_FL_KAMETHOD 0x40
+#define UH_FL_REQEOF 0x40
 #define UH_FL_KAVERSION 0x80
 #define UH_FL_HASHEADER 0x100
+#define UH_FL_TO_CLEAR 0x200
+
+/* all of these flags need to be set for keepalive to be supported */
+#define UH_FL_KEEPALIVE (UH_FL_KAVERSION | UH_FL_REQEOF | UH_FL_HASHEADER)
+
+/*
+ * whether or not to trust X-Forwarded-Proto and X-Forwarded-SSL when
+ * setting rack.url_scheme
+ */
+static VALUE trust_x_forward = Qtrue;
+
+static unsigned long keepalive_requests = 100; /* same as nginx */
+
+/*
+ * Returns the maximum number of keepalive requests a client may make
+ * before the parser refuses to continue.
+ */
+static VALUE ka_req(VALUE self)
+{
+  return ULONG2NUM(keepalive_requests);
+}
 
-/* both of these flags need to be set for keepalive to be supported */
-#define UH_FL_KEEPALIVE (UH_FL_KAMETHOD | UH_FL_KAVERSION)
+/*
+ * Sets the maximum number of keepalive requests a client may make.
+ * A special value of +nil+ causes this to be the maximum value
+ * possible (this is architecture-dependent).
+ */
+static VALUE set_ka_req(VALUE self, VALUE val)
+{
+  keepalive_requests = NIL_P(val) ? ULONG_MAX : NUM2ULONG(val);
+
+  return ka_req(self);
+}
+
+/*
+ * Sets whether or not the parser will trust X-Forwarded-Proto and
+ * X-Forwarded-SSL headers and set "rack.url_scheme" to "https" accordingly.
+ * Rainbows!/Zbatery installations facing untrusted clients directly
+ * should set this to +false+.
+ */
+static VALUE set_xftrust(VALUE self, VALUE val)
+{
+  if (Qtrue == val || Qfalse == val)
+    trust_x_forward = val;
+  else
+    rb_raise(rb_eTypeError, "must be true or false");
+
+  return val;
+}
+
+/*
+ * returns whether or not the parser will trust X-Forwarded-Proto and
+ * X-Forwarded-SSL headers and set "rack.url_scheme" to "https" accordingly
+ */
+static VALUE xftrust(VALUE self)
+{
+  return trust_x_forward;
+}
 
 /* keep this small for Rainbows! since every client has one */
 struct http_parser {
   int cs; /* Ragel internal state */
   unsigned int flags;
+  unsigned long nr_requests;
   size_t mark;
   size_t offset;
   union { /* these 2 fields don't nest */
@@ -39,6 +97,8 @@ struct http_parser {
     size_t field_len; /* only used during header processing */
     size_t dest_offset; /* only used during body processing */
   } s;
+  VALUE buf;
+  VALUE env;
   VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */
   union {
     off_t content;
@@ -46,7 +106,18 @@ struct http_parser {
   } len;
 };
 
-static void finalize_header(struct http_parser *hp, VALUE req);
+static ID id_clear;
+
+static void finalize_header(struct http_parser *hp);
+
+static void parser_error(const char *msg)
+{
+  VALUE exc = rb_exc_new2(eHttpParserError, msg);
+  VALUE bt = rb_ary_new();
+
+  rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
+  rb_exc_raise(exc);
+}
 
 #define REMAINING (unsigned long)(pe - p)
 #define LEN(AT, FPC) (FPC - buffer - hp->AT)
@@ -66,51 +137,34 @@ static void finalize_header(struct http_parser *hp, VALUE req);
  */
 static void hp_keepalive_connection(struct http_parser *hp, VALUE val)
 {
-  /* REQUEST_METHOD is always set before any headers */
-  if (HP_FL_TEST(hp, KAMETHOD)) {
-    if (STR_CSTR_CASE_EQ(val, "keep-alive")) {
-      /* basically have HTTP/1.0 masquerade as HTTP/1.1+ */
-      HP_FL_SET(hp, KAVERSION);
-    } else if (STR_CSTR_CASE_EQ(val, "close")) {
-      /*
-       * it doesn't matter what HTTP version or request method we have,
-       * if a client says "Connection: close", we disable keepalive
-       */
-      HP_FL_UNSET(hp, KEEPALIVE);
-    } else {
-      /*
-       * client could've sent anything, ignore it for now.  Maybe
-       * "HP_FL_UNSET(hp, KEEPALIVE);" just in case?
-       * Raising an exception might be too mean...
-       */
-    }
+  if (STR_CSTR_CASE_EQ(val, "keep-alive")) {
+    /* basically have HTTP/1.0 masquerade as HTTP/1.1+ */
+    HP_FL_SET(hp, KAVERSION);
+  } else if (STR_CSTR_CASE_EQ(val, "close")) {
+    /*
+     * it doesn't matter what HTTP version or request method we have,
+     * if a client says "Connection: close", we disable keepalive
+     */
+    HP_FL_UNSET(hp, KAVERSION);
+  } else {
+    /*
+     * client could've sent anything, ignore it for now.  Maybe
+     * "HP_FL_UNSET(hp, KAVERSION);" just in case?
+     * Raising an exception might be too mean...
+     */
   }
 }
 
 static void
-request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+request_method(struct http_parser *hp, const char *ptr, size_t len)
 {
-  VALUE v;
+  VALUE v = rb_str_new(ptr, len);
 
-  /*
-   * we only support keepalive for GET and HEAD requests for now other
-   * methods are too rarely seen to be worth optimizing.  POST is unsafe
-   * since some clients send extra bytes after POST bodies.
-   */
-  if (CONST_MEM_EQ("GET", ptr, len)) {
-    HP_FL_SET(hp, KAMETHOD);
-    v = g_GET;
-  } else if (CONST_MEM_EQ("HEAD", ptr, len)) {
-    HP_FL_SET(hp, KAMETHOD);
-    v = g_HEAD;
-  } else {
-    v = rb_str_new(ptr, len);
-  }
-  rb_hash_aset(req, g_request_method, v);
+  rb_hash_aset(hp->env, g_request_method, v);
 }
 
 static void
-http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+http_version(struct http_parser *hp, const char *ptr, size_t len)
 {
   VALUE v;
 
@@ -125,14 +179,14 @@ http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
   } else {
     v = rb_str_new(ptr, len);
   }
-  rb_hash_aset(req, g_server_protocol, v);
-  rb_hash_aset(req, g_http_version, v);
+  rb_hash_aset(hp->env, g_server_protocol, v);
+  rb_hash_aset(hp->env, g_http_version, v);
 }
 
 static inline void hp_invalid_if_trailer(struct http_parser *hp)
 {
   if (HP_FL_TEST(hp, INTRAILER))
-    rb_raise(eHttpParserError, "invalid Trailer");
+    parser_error("invalid Trailer");
 }
 
 static void write_cont_value(struct http_parser *hp,
@@ -141,7 +195,7 @@ static void write_cont_value(struct http_parser *hp,
   char *vptr;
 
   if (hp->cont == Qfalse)
-     rb_raise(eHttpParserError, "invalid continuation line");
+     parser_error("invalid continuation line");
   if (NIL_P(hp->cont))
      return; /* we're ignoring this header (probably Host:) */
 
@@ -163,7 +217,7 @@ static void write_cont_value(struct http_parser *hp,
   rb_str_buf_cat(hp->cont, vptr, LEN(mark, p));
 }
 
-static void write_value(VALUE req, struct http_parser *hp,
+static void write_value(struct http_parser *hp,
                         const char *buffer, const char *p)
 {
   VALUE f = find_common_field(PTR_TO(start.field), hp->s.field_len);
@@ -192,8 +246,9 @@ static void write_value(VALUE req, struct http_parser *hp,
   } else if (f == g_content_length) {
     hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v));
     if (hp->len.content < 0)
-      rb_raise(eHttpParserError, "invalid Content-Length");
-    HP_FL_SET(hp, HASBODY);
+      parser_error("invalid Content-Length");
+    if (hp->len.content != 0)
+      HP_FL_SET(hp, HASBODY);
     hp_invalid_if_trailer(hp);
   } else if (f == g_http_transfer_encoding) {
     if (STR_CSTR_CASE_EQ(v, "chunked")) {
@@ -209,9 +264,9 @@ static void write_value(VALUE req, struct http_parser *hp,
     assert_frozen(f);
   }
 
-  e = rb_hash_aref(req, f);
+  e = rb_hash_aref(hp->env, f);
   if (NIL_P(e)) {
-    hp->cont = rb_hash_aset(req, f, v);
+    hp->cont = rb_hash_aset(hp->env, f, v);
   } else if (f == g_http_host) {
     /*
      * ignored, absolute URLs in REQUEST_URI take precedence over
@@ -236,59 +291,55 @@ static void write_value(VALUE req, struct http_parser *hp,
   action downcase_char { downcase_char(deconst(fpc)); }
   action write_field { hp->s.field_len = LEN(start.field, fpc); }
   action start_value { MARK(mark, fpc); }
-  action write_value { write_value(req, hp, buffer, fpc); }
+  action write_value { write_value(hp, buffer, fpc); }
   action write_cont_value { write_cont_value(hp, buffer, fpc); }
-  action request_method {
-    request_method(hp, req, PTR_TO(mark), LEN(mark, fpc));
-  }
+  action request_method { request_method(hp, PTR_TO(mark), LEN(mark, fpc)); }
   action scheme {
-    rb_hash_aset(req, g_rack_url_scheme, STR_NEW(mark, fpc));
-  }
-  action host {
-    rb_hash_aset(req, g_http_host, STR_NEW(mark, fpc));
+    rb_hash_aset(hp->env, g_rack_url_scheme, STR_NEW(mark, fpc));
   }
+  action host { rb_hash_aset(hp->env, g_http_host, STR_NEW(mark, fpc)); }
   action request_uri {
     VALUE str;
 
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_URI);
-    str = rb_hash_aset(req, g_request_uri, STR_NEW(mark, fpc));
+    str = rb_hash_aset(hp->env, g_request_uri, STR_NEW(mark, fpc));
     /*
      * "OPTIONS * HTTP/1.1\r\n" is a valid request, but we can't have '*'
      * in REQUEST_PATH or PATH_INFO or else Rack::Lint will complain
      */
     if (STR_CSTR_EQ(str, "*")) {
       str = rb_str_new(NULL, 0);
-      rb_hash_aset(req, g_path_info, str);
-      rb_hash_aset(req, g_request_path, str);
+      rb_hash_aset(hp->env, g_path_info, str);
+      rb_hash_aset(hp->env, g_request_path, str);
     }
   }
   action fragment {
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), FRAGMENT);
-    rb_hash_aset(req, g_fragment, STR_NEW(mark, fpc));
+    rb_hash_aset(hp->env, g_fragment, STR_NEW(mark, fpc));
   }
   action start_query {MARK(start.query, fpc); }
   action query_string {
     VALIDATE_MAX_LENGTH(LEN(start.query, fpc), QUERY_STRING);
-    rb_hash_aset(req, g_query_string, STR_NEW(start.query, fpc));
+    rb_hash_aset(hp->env, g_query_string, STR_NEW(start.query, fpc));
   }
-  action http_version { http_version(hp, req, PTR_TO(mark), LEN(mark, fpc)); }
+  action http_version { http_version(hp, PTR_TO(mark), LEN(mark, fpc)); }
   action request_path {
     VALUE val;
 
     VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_PATH);
-    val = rb_hash_aset(req, g_request_path, STR_NEW(mark, fpc));
+    val = rb_hash_aset(hp->env, g_request_path, STR_NEW(mark, fpc));
 
     /* rack says PATH_INFO must start with "/" or be empty */
     if (!STR_CSTR_EQ(val, "*"))
-      rb_hash_aset(req, g_path_info, val);
+      rb_hash_aset(hp->env, g_path_info, val);
   }
   action add_to_chunk_size {
     hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
     if (hp->len.chunk < 0)
-      rb_raise(eHttpParserError, "invalid chunk size");
+      parser_error("invalid chunk size");
   }
   action header_done {
-    finalize_header(hp, req);
+    finalize_header(hp);
 
     cs = http_parser_first_final;
     if (HP_FL_TEST(hp, HASBODY)) {
@@ -296,6 +347,7 @@ static void write_value(VALUE req, struct http_parser *hp,
       if (HP_FL_TEST(hp, CHUNKED))
         cs = http_parser_en_ChunkedBody;
     } else {
+      HP_FL_SET(hp, REQEOF);
       assert(!HP_FL_TEST(hp, CHUNKED) && "chunked encoding without body!");
     }
     /*
@@ -321,7 +373,7 @@ static void write_value(VALUE req, struct http_parser *hp,
   action skip_chunk_data {
   skip_chunk_data_hack: {
     size_t nr = MIN((size_t)hp->len.chunk, REMAINING);
-    memcpy(RSTRING_PTR(req) + hp->s.dest_offset, fpc, nr);
+    memcpy(RSTRING_PTR(hp->cont) + hp->s.dest_offset, fpc, nr);
     hp->s.dest_offset += nr;
     hp->len.chunk -= nr;
     p += nr;
@@ -344,15 +396,20 @@ static void write_value(VALUE req, struct http_parser *hp,
 static void http_parser_init(struct http_parser *hp)
 {
   int cs = 0;
-  memset(hp, 0, sizeof(struct http_parser));
+  hp->flags = 0;
+  hp->mark = 0;
+  hp->offset = 0;
+  hp->start.field = 0;
+  hp->s.field_len = 0;
+  hp->len.content = 0;
   hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */
   %% write init;
   hp->cs = cs;
 }
 
 /** exec **/
-static void http_parser_execute(struct http_parser *hp,
-  VALUE req, char *buffer, size_t len)
+static void
+http_parser_execute(struct http_parser *hp, char *buffer, size_t len)
 {
   const char *p, *pe;
   int cs = hp->cs;
@@ -392,54 +449,110 @@ static struct http_parser *data_get(VALUE self)
   return hp;
 }
 
-static void finalize_header(struct http_parser *hp, VALUE req)
+/*
+ * set rack.url_scheme to "https" or "http", no others are allowed by Rack
+ * this resembles the Rack::Request#scheme method as of rack commit
+ * 35bb5ba6746b5d346de9202c004cc926039650c7
+ */
+static void set_url_scheme(VALUE env, VALUE *server_port)
 {
-  VALUE temp = rb_hash_aref(req, g_rack_url_scheme);
-  VALUE server_name = g_localhost;
-  VALUE server_port = g_port_80;
+  VALUE scheme = rb_hash_aref(env, g_rack_url_scheme);
 
-  /* set rack.url_scheme to "https" or "http", no others are allowed by Rack */
-  if (NIL_P(temp)) {
-    temp = rb_hash_aref(req, g_http_x_forwarded_proto);
-    if (!NIL_P(temp) && STR_CSTR_EQ(temp, "https"))
-      server_port = g_port_443;
-    else
-      temp = g_http;
-    rb_hash_aset(req, g_rack_url_scheme, temp);
-  } else if (STR_CSTR_EQ(temp, "https")) {
-    server_port = g_port_443;
+  if (NIL_P(scheme)) {
+    if (trust_x_forward == Qfalse) {
+      scheme = g_http;
+    } else {
+      scheme = rb_hash_aref(env, g_http_x_forwarded_ssl);
+      if (!NIL_P(scheme) && STR_CSTR_EQ(scheme, "on")) {
+        *server_port = g_port_443;
+        scheme = g_https;
+      } else {
+        scheme = rb_hash_aref(env, g_http_x_forwarded_proto);
+        if (NIL_P(scheme)) {
+          scheme = g_http;
+        } else {
+          long len = RSTRING_LEN(scheme);
+          if (len >= 5 && !memcmp(RSTRING_PTR(scheme), "https", 5)) {
+            if (len != 5)
+              scheme = g_https;
+            *server_port = g_port_443;
+          } else {
+            scheme = g_http;
+          }
+        }
+      }
+    }
+    rb_hash_aset(env, g_rack_url_scheme, scheme);
+  } else if (STR_CSTR_EQ(scheme, "https")) {
+    *server_port = g_port_443;
   } else {
-    assert(server_port == g_port_80 && "server_port not set");
+    assert(*server_port == g_port_80 && "server_port not set");
   }
+}
+
+/*
+ * Parse and set the SERVER_NAME and SERVER_PORT variables
+ * Not supporting X-Forwarded-Host/X-Forwarded-Port in here since
+ * anybody who needs them is using an unsupported configuration and/or
+ * incompetent.  Rack::Request will handle X-Forwarded-{Port,Host} just
+ * fine.
+ */
+static void set_server_vars(VALUE env, VALUE *server_port)
+{
+  VALUE server_name = g_localhost;
+  VALUE host = rb_hash_aref(env, g_http_host);
+
+  if (!NIL_P(host)) {
+    char *host_ptr = RSTRING_PTR(host);
+    long host_len = RSTRING_LEN(host);
+    char *colon;
+
+    if (*host_ptr == '[') { /* ipv6 address format */
+      char *rbracket = memchr(host_ptr + 1, ']', host_len - 1);
+
+      if (rbracket)
+        colon = (rbracket[1] == ':') ? rbracket + 1 : NULL;
+      else
+        colon = memchr(host_ptr + 1, ':', host_len - 1);
+    } else {
+      colon = memchr(host_ptr, ':', host_len);
+    }
 
-  /* parse and set the SERVER_NAME and SERVER_PORT variables */
-  temp = rb_hash_aref(req, g_http_host);
-  if (!NIL_P(temp)) {
-    char *colon = memchr(RSTRING_PTR(temp), ':', RSTRING_LEN(temp));
     if (colon) {
-      long port_start = colon - RSTRING_PTR(temp) + 1;
+      long port_start = colon - host_ptr + 1;
 
-      server_name = rb_str_substr(temp, 0, colon - RSTRING_PTR(temp));
-      if ((RSTRING_LEN(temp) - port_start) > 0)
-        server_port = rb_str_substr(temp, port_start, RSTRING_LEN(temp));
+      server_name = rb_str_substr(host, 0, colon - host_ptr);
+      if ((host_len - port_start) > 0)
+        *server_port = rb_str_substr(host, port_start, host_len);
     } else {
-      server_name = temp;
+      server_name = host;
     }
   }
-  rb_hash_aset(req, g_server_name, server_name);
-  rb_hash_aset(req, g_server_port, server_port);
+  rb_hash_aset(env, g_server_name, server_name);
+  rb_hash_aset(env, g_server_port, *server_port);
+}
+
+static void finalize_header(struct http_parser *hp)
+{
+  VALUE server_port = g_port_80;
+
+  set_url_scheme(hp->env, &server_port);
+  set_server_vars(hp->env, &server_port);
+
   if (!HP_FL_TEST(hp, HASHEADER))
-    rb_hash_aset(req, g_server_protocol, g_http_09);
+    rb_hash_aset(hp->env, g_server_protocol, g_http_09);
 
   /* rack requires QUERY_STRING */
-  if (NIL_P(rb_hash_aref(req, g_query_string)))
-    rb_hash_aset(req, g_query_string, rb_str_new(NULL, 0));
+  if (NIL_P(rb_hash_aref(hp->env, g_query_string)))
+    rb_hash_aset(hp->env, g_query_string, rb_str_new(NULL, 0));
 }
 
 static void hp_mark(void *ptr)
 {
   struct http_parser *hp = ptr;
 
+  rb_gc_mark(hp->buf);
+  rb_gc_mark(hp->env);
   rb_gc_mark(hp->cont);
 }
 
@@ -458,7 +571,29 @@ static VALUE HttpParser_alloc(VALUE klass)
  */
 static VALUE HttpParser_init(VALUE self)
 {
-  http_parser_init(data_get(self));
+  struct http_parser *hp = data_get(self);
+
+  http_parser_init(hp);
+  hp->buf = rb_str_new(NULL, 0);
+  hp->env = rb_hash_new();
+  hp->nr_requests = keepalive_requests;
+
+  return self;
+}
+
+/**
+ * call-seq:
+ *    parser.clear => parser
+ *
+ * Resets the parser to its initial state so that you can reuse it
+ * rather than making new ones.
+ */
+static VALUE HttpParser_clear(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  http_parser_init(hp);
+  rb_funcall(hp->env, id_clear, 0);
 
   return self;
 }
@@ -469,11 +604,18 @@ static VALUE HttpParser_init(VALUE self)
  *
  * Resets the parser to it's initial state so that you can reuse it
  * rather than making new ones.
+ *
+ * This method is deprecated and to be removed in Unicorn 4.x
  */
 static VALUE HttpParser_reset(VALUE self)
 {
-  http_parser_init(data_get(self));
+  static int warned;
 
+  if (!warned) {
+    rb_warn("Unicorn::HttpParser#reset is deprecated; "
+            "use Unicorn::HttpParser#clear instead");
+  }
+  HttpParser_clear(self);
   return Qnil;
 }
 
@@ -513,48 +655,67 @@ static VALUE HttpParser_content_length(VALUE self)
 }
 
 /**
- * Document-method: trailers
+ * Document-method: parse
  * call-seq:
- *    parser.trailers(req, data) => req or nil
- *
- * This is an alias for HttpParser#headers
- */
-
-/**
- * Document-method: headers
- * call-seq:
- *    parser.headers(req, data) => req or nil
+ *    parser.parse => env or nil
  *
  * Takes a Hash and a String of data, parses the String of data filling
  * in the Hash returning the Hash if parsing is finished, nil otherwise
- * When returning the req Hash, it may modify data to point to where
+ * When returning the env Hash, it may modify data to point to where
  * body processing should begin.
  *
  * Raises HttpParserError if there are parsing errors.
  */
-static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data)
+static VALUE HttpParser_parse(VALUE self)
 {
   struct http_parser *hp = data_get(self);
+  VALUE data = hp->buf;
 
-  rb_str_update(data);
+  if (HP_FL_TEST(hp, TO_CLEAR)) {
+    http_parser_init(hp);
+    rb_funcall(hp->env, id_clear, 0);
+  }
 
-  http_parser_execute(hp, req, RSTRING_PTR(data), RSTRING_LEN(data));
+  http_parser_execute(hp, RSTRING_PTR(data), RSTRING_LEN(data));
   VALIDATE_MAX_LENGTH(hp->offset, HEADER);
 
   if (hp->cs == http_parser_first_final ||
       hp->cs == http_parser_en_ChunkedBody) {
     advance_str(data, hp->offset + 1);
     hp->offset = 0;
+    if (HP_FL_TEST(hp, INTRAILER))
+      HP_FL_SET(hp, REQEOF);
 
-    return req;
+    return hp->env;
   }
 
   if (hp->cs == http_parser_error)
-    rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+    parser_error("Invalid HTTP format, parsing fails.");
 
   return Qnil;
 }
 
+/**
+ * Document-method: trailers
+ * call-seq:
+ *    parser.trailers(req, data) => req or nil
+ *
+ * This is an alias for HttpParser#headers
+ */
+
+/**
+ * Document-method: headers
+ */
+static VALUE HttpParser_headers(VALUE self, VALUE env, VALUE buf)
+{
+  struct http_parser *hp = data_get(self);
+
+  hp->env = env;
+  hp->buf = buf;
+
+  return HttpParser_parse(self);
+}
+
 static int chunked_eof(struct http_parser *hp)
 {
   return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER));
@@ -597,6 +758,26 @@ static VALUE HttpParser_keepalive(VALUE self)
 
 /**
  * call-seq:
+ *    parser.next? => true or false
+ *
+ * Exactly like HttpParser#keepalive?, except it will reset the internal
+ * parser state on next parse if it returns true.  It will also respect
+ * the maximum *keepalive_requests* value and return false if that is
+ * reached.
+ */
+static VALUE HttpParser_next(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  if ((HP_FL_ALL(hp, KEEPALIVE)) && (hp->nr_requests-- != 0)) {
+    HP_FL_SET(hp, TO_CLEAR);
+    return Qtrue;
+  }
+  return Qfalse;
+}
+
+/**
+ * call-seq:
  *    parser.headers? => true or false
  *
  * This should be used to detect if a request has headers (and if
@@ -610,6 +791,16 @@ static VALUE HttpParser_has_headers(VALUE self)
   return HP_FL_TEST(hp, HASHEADER) ? Qtrue : Qfalse;
 }
 
+static VALUE HttpParser_buf(VALUE self)
+{
+  return data_get(self)->buf;
+}
+
+static VALUE HttpParser_env(VALUE self)
+{
+  return data_get(self)->env;
+}
+
 /**
  * call-seq:
  *    parser.filter_body(buf, data) => nil/data
@@ -630,7 +821,6 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
   char *dptr;
   long dlen;
 
-  rb_str_update(data);
   dptr = RSTRING_PTR(data);
   dlen = RSTRING_LEN(data);
 
@@ -641,9 +831,11 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
   if (HP_FL_TEST(hp, CHUNKED)) {
     if (!chunked_eof(hp)) {
       hp->s.dest_offset = 0;
-      http_parser_execute(hp, buf, dptr, dlen);
+      hp->cont = buf;
+      hp->buf = data;
+      http_parser_execute(hp, dptr, dlen);
       if (hp->cs == http_parser_error)
-        rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+        parser_error("Invalid HTTP format, parsing fails.");
 
       assert(hp->s.dest_offset <= hp->offset &&
              "destination buffer overflow");
@@ -662,10 +854,13 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
     if (hp->len.content > 0) {
       long nr = MIN(dlen, hp->len.content);
 
+      hp->buf = data;
       memcpy(RSTRING_PTR(buf), dptr, nr);
       hp->len.content -= nr;
-      if (hp->len.content == 0)
+      if (hp->len.content == 0) {
+        HP_FL_SET(hp, REQEOF);
         hp->cs = http_parser_first_final;
+      }
       advance_str(data, nr);
       rb_str_set_len(buf, nr);
       data = Qnil;
@@ -691,15 +886,20 @@ void Init_unicorn_http(void)
 
   init_globals();
   rb_define_alloc_func(cHttpParser, HttpParser_alloc);
-  rb_define_method(cHttpParser, "initialize", HttpParser_init,0);
-  rb_define_method(cHttpParser, "reset", HttpParser_reset,0);
+  rb_define_method(cHttpParser, "initialize", HttpParser_init, 0);
+  rb_define_method(cHttpParser, "clear", HttpParser_clear, 0);
+  rb_define_method(cHttpParser, "reset", HttpParser_reset, 0);
+  rb_define_method(cHttpParser, "parse", HttpParser_parse, 0);
   rb_define_method(cHttpParser, "headers", HttpParser_headers, 2);
-  rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
   rb_define_method(cHttpParser, "trailers", HttpParser_headers, 2);
+  rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
   rb_define_method(cHttpParser, "content_length", HttpParser_content_length, 0);
   rb_define_method(cHttpParser, "body_eof?", HttpParser_body_eof, 0);
   rb_define_method(cHttpParser, "keepalive?", HttpParser_keepalive, 0);
   rb_define_method(cHttpParser, "headers?", HttpParser_has_headers, 0);
+  rb_define_method(cHttpParser, "next?", HttpParser_next, 0);
+  rb_define_method(cHttpParser, "buf", HttpParser_buf, 0);
+  rb_define_method(cHttpParser, "env", HttpParser_env, 0);
 
   /*
    * The maximum size a single chunk when using chunked transfer encoding.
@@ -716,11 +916,22 @@ void Init_unicorn_http(void)
    */
   rb_define_const(cHttpParser, "LENGTH_MAX", OFFT2NUM(UH_OFF_T_MAX));
 
+  /* default value for keepalive_requests */
+  rb_define_const(cHttpParser, "KEEPALIVE_REQUESTS_DEFAULT",
+                  ULONG2NUM(keepalive_requests));
+
+  rb_define_singleton_method(cHttpParser, "keepalive_requests", ka_req, 0);
+  rb_define_singleton_method(cHttpParser, "keepalive_requests=", set_ka_req, 1);
+  rb_define_singleton_method(cHttpParser, "trust_x_forwarded=", set_xftrust, 1);
+  rb_define_singleton_method(cHttpParser, "trust_x_forwarded?", xftrust, 0);
+
   init_common_fields();
   SET_GLOBAL(g_http_host, "HOST");
   SET_GLOBAL(g_http_trailer, "TRAILER");
   SET_GLOBAL(g_http_transfer_encoding, "TRANSFER_ENCODING");
   SET_GLOBAL(g_content_length, "CONTENT_LENGTH");
   SET_GLOBAL(g_http_connection, "CONNECTION");
+  id_clear = rb_intern("clear");
+  init_unicorn_httpdate();
 }
 #undef SET_GLOBAL
diff --git a/ext/unicorn_http/unicorn_http_common.rl b/ext/unicorn_http/unicorn_http_common.rl
index f165e3f..cf93fec 100644
--- a/ext/unicorn_http/unicorn_http_common.rl
+++ b/ext/unicorn_http/unicorn_http_common.rl
@@ -26,7 +26,7 @@
 
 # URI schemes and absolute paths
   scheme = ( "http"i ("s"i)? ) $downcase_char >mark %scheme;
-  hostname = (alnum | "-" | "." | "_")+;
+  hostname = ((alnum | "-" | "." | "_")+ | ("[" (":" | xdigit)+ "]"));
   host_with_port = (hostname (":" digit*)?) >mark %host;
   userinfo = ((unreserved | escape | ";" | ":" | "&" | "=" | "+")+ "@")*;
 
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index 31332c9..8a5fdcc 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,836 +1,96 @@
 # -*- encoding: binary -*-
-
 require 'fcntl'
 require 'etc'
 require 'stringio'
 require 'rack'
-require 'unicorn/socket_helper'
-require 'unicorn/const'
-require 'unicorn/http_request'
-require 'unicorn/configurator'
-require 'unicorn/util'
-require 'unicorn/tee_input'
-require 'unicorn/http_response'
-
-# Unicorn module containing all of the classes (include C extensions) for running
-# a Unicorn web server.  It contains a minimalist HTTP server with just enough
-# functionality to service web application requests fast as possible.
+require 'kgio'
+
+# :stopdoc:
+# Unicorn module containing all of the classes (include C extensions) for
+# running a Unicorn web server.  It contains a minimalist HTTP server with just
+# enough functionality to service web application requests fast as possible.
+# :startdoc:
+
+# \Unicorn exposes very little of an user-visible API and most of its
+# internals are subject to change.  \Unicorn is designed to host Rack
+# applications, so applications should be written against the Rack SPEC
+# and not \Unicorn internals.
 module Unicorn
 
-  # raised inside TeeInput when a client closes the socket inside the
+  # Raised inside TeeInput when a client closes the socket inside the
   # application dispatch.  This is always raised with an empty backtrace
   # since there is nothing in the application stack that is responsible
-  # for client shutdowns/disconnects.
+  # for client shutdowns/disconnects.  This exception is visible to Rack
+  # applications unless PrereadInput middleware is loaded.
   class ClientShutdown < EOFError
   end
 
-  class << self
-    def run(app, options = {})
-      HttpServer.new(app, options).start.join
-    end
-
-    # This returns a lambda to pass in as the app, this does not "build" the
-    # app (which we defer based on the outcome of "preload_app" in the
-    # Unicorn config).  The returned lambda will be called when it is
-    # time to build the app.
-    def builder(ru, opts)
-      # allow Configurator to parse cli switches embedded in the ru file
-      Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
-
-      # always called after config file parsing, may be called after forking
-      lambda do ||
-        inner_app = case ru
-        when /\.ru$/
-          raw = File.read(ru)
-          raw.sub!(/^__END__\n.*/, '')
-          eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
-        else
-          require ru
-          Object.const_get(File.basename(ru, '.rb').capitalize)
-        end
-
-        pp({ :inner_app => inner_app }) if $DEBUG
-
-        # return value, matches rackup defaults based on env
-        case ENV["RACK_ENV"]
-        when "development"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            use Rack::ShowExceptions
-            use Rack::Lint
-            run inner_app
-          end.to_app
-        when "deployment"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            run inner_app
-          end.to_app
-        else
-          inner_app
-        end
-      end
-    end
-
-    # returns an array of strings representing TCP listen socket addresses
-    # and Unix domain socket paths.  This is useful for use with
-    # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
-    def listener_names
-      HttpServer::LISTENERS.map { |io| SocketHelper.sock_name(io) }
-    end
+  # :stopdoc:
+  def self.run(app, options = {})
+    Unicorn::HttpServer.new(app, options).start.join
   end
 
-  # This is the process manager of Unicorn. This manages worker
-  # processes which in turn handle the I/O and application process.
-  # Listener sockets are started in the master process and shared with
-  # forked worker children.
-
-  class HttpServer < Struct.new(:app, :timeout, :worker_processes,
-                                :before_fork, :after_fork, :before_exec,
-                                :logger, :pid, :listener_opts, :preload_app,
-                                :reexec_pid, :orig_app, :init_listeners,
-                                :master_pid, :config, :ready_pipe, :user)
-    include ::Unicorn::SocketHelper
-
-    # prevents IO objects in here from being GC-ed
-    IO_PURGATORY = []
-
-    # all bound listener sockets
-    LISTENERS = []
-
-    # This hash maps PIDs to Workers
-    WORKERS = {}
-
-    # We use SELF_PIPE differently in the master and worker processes:
-    #
-    # * The master process never closes or reinitializes this once
-    # initialized.  Signal handlers in the master process will write to
-    # it to wake up the master from IO.select in exactly the same manner
-    # djb describes in http://cr.yp.to/docs/selfpipe.html
-    #
-    # * The workers immediately close the pipe they inherit from the
-    # master and replace it with a new pipe after forking.  This new
-    # pipe is also used to wakeup from IO.select from inside (worker)
-    # signal handlers.  However, workers *close* the pipe descriptors in
-    # the signal handlers to raise EBADF in IO.select instead of writing
-    # like we do in the master.  We cannot easily use the reader set for
-    # IO.select because LISTENERS is already that set, and it's extra
-    # work (and cycles) to distinguish the pipe FD from the reader set
-    # once IO.select returns.  So we're lazy and just close the pipe when
-    # a (rare) signal arrives in the worker and reinitialize the pipe later.
-    SELF_PIPE = []
-
-    # signal queue used for self-piping
-    SIG_QUEUE = []
-
-    # constant lookups are faster and we're single-threaded/non-reentrant
-    REQUEST = HttpRequest.new
-
-    # We populate this at startup so we can figure out how to reexecute
-    # and upgrade the currently running instance of Unicorn
-    # This Hash is considered a stable interface and changing its contents
-    # will allow you to switch between different installations of Unicorn
-    # or even different installations of the same applications without
-    # downtime.  Keys of this constant Hash are described as follows:
-    #
-    # * 0 - the path to the unicorn/unicorn_rails executable
-    # * :argv - a deep copy of the ARGV array the executable originally saw
-    # * :cwd - the working directory of the application, this is where
-    # you originally started Unicorn.
-    #
-    # To change your unicorn executable to a different path without downtime,
-    # you can set the following in your Unicorn config file, HUP and then
-    # continue with the traditional USR2 + QUIT upgrade steps:
-    #
-    #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
-    START_CTX = {
-      :argv => ARGV.map { |arg| arg.dup },
-      :cwd => lambda {
-          # favor ENV['PWD'] since it is (usually) symlink aware for
-          # Capistrano and like systems
-          begin
-            a = File.stat(pwd = ENV['PWD'])
-            b = File.stat(Dir.pwd)
-            a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
-          rescue
-            Dir.pwd
-          end
-        }.call,
-      0 => $0.dup,
-    }
-
-    # This class and its members can be considered a stable interface
-    # and will not change in a backwards-incompatible fashion between
-    # releases of Unicorn.  You may need to access it in the
-    # before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
-    # for examples.
-    class Worker < Struct.new(:nr, :tmp, :switched)
-
-      # worker objects may be compared to just plain numbers
-      def ==(other_nr)
-        self.nr == other_nr
-      end
-
-      # Changes the worker process to the specified +user+ and +group+
-      # This is only intended to be called from within the worker
-      # process from the +after_fork+ hook.  This should be called in
-      # the +after_fork+ hook after any priviledged functions need to be
-      # run (e.g. to set per-worker CPU affinity, niceness, etc)
-      #
-      # Any and all errors raised within this method will be propagated
-      # directly back to the caller (usually the +after_fork+ hook.
-      # These errors commonly include ArgumentError for specifying an
-      # invalid user/group and Errno::EPERM for insufficient priviledges
-      def user(user, group = nil)
-        # we do not protect the caller, checking Process.euid == 0 is
-        # insufficient because modern systems have fine-grained
-        # capabilities.  Let the caller handle any and all errors.
-        uid = Etc.getpwnam(user).uid
-        gid = Etc.getgrnam(group).gid if group
-        Unicorn::Util.chown_logs(uid, gid)
-        tmp.chown(uid, gid)
-        if gid && Process.egid != gid
-          Process.initgroups(user, gid)
-          Process::GID.change_privilege(gid)
-        end
-        Process.euid != uid and Process::UID.change_privilege(uid)
-        self.switched = true
-      end
-
-    end
-
-    # Creates a working server on host:port (strange things happen if
-    # port isn't a Number).  Use HttpServer::run to start the server and
-    # HttpServer.run.join to join the thread that's processing
-    # incoming requests on the socket.
-    def initialize(app, options = {})
-      self.app = app
-      self.reexec_pid = 0
-      self.ready_pipe = options.delete(:ready_pipe)
-      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
-      self.config = Configurator.new(options.merge(:use_defaults => true))
-      self.listener_opts = {}
-
-      # we try inheriting listeners first, so we bind them later.
-      # we don't write the pid file until we've bound listeners in case
-      # unicorn was started twice by mistake.  Even though our #pid= method
-      # checks for stale/existing pid files, race conditions are still
-      # possible (and difficult/non-portable to avoid) and can be likely
-      # to clobber the pid if the second start was in quick succession
-      # after the first, so we rely on the listener binding to fail in
-      # that case.  Some tests (in and outside of this source tree) and
-      # monitoring tools may also rely on pid files existing before we
-      # attempt to connect to the listener(s)
-      config.commit!(self, :skip => [:listeners, :pid])
-      self.orig_app = app
-    end
-
-    # Runs the thing.  Returns self so you can run join on it
-    def start
-      BasicSocket.do_not_reverse_lookup = true
-
-      # inherit sockets from parents, they need to be plain Socket objects
-      # before they become UNIXServer or TCPServer
-      inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
-        io = Socket.for_fd(fd.to_i)
-        set_server_sockopt(io, listener_opts[sock_name(io)])
-        IO_PURGATORY << io
-        logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
-        server_cast(io)
-      end
-
-      config_listeners = config[:listeners].dup
-      LISTENERS.replace(inherited)
-
-      # we start out with generic Socket objects that get cast to either
-      # TCPServer or UNIXServer objects; but since the Socket objects
-      # share the same OS-level file descriptor as the higher-level *Server
-      # objects; we need to prevent Socket objects from being garbage-collected
-      config_listeners -= listener_names
-      if config_listeners.empty? && LISTENERS.empty?
-        config_listeners << Unicorn::Const::DEFAULT_LISTEN
-        init_listeners << Unicorn::Const::DEFAULT_LISTEN
-        START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
-      end
-      config_listeners.each { |addr| listen(addr) }
-      raise ArgumentError, "no listeners" if LISTENERS.empty?
-
-      # this pipe is used to wake us up from select(2) in #join when signals
-      # are trapped.  See trap_deferred.
-      init_self_pipe!
-
-      # setup signal handlers before writing pid file in case people get
-      # trigger happy and send signals as soon as the pid file exists.
-      # Note that signals don't actually get handled until the #join method
-      QUEUE_SIGS.each { |sig| trap_deferred(sig) }
-      trap(:CHLD) { |_| awaken_master }
-      self.pid = config[:pid]
-
-      self.master_pid = $$
-      build_app! if preload_app
-      maintain_worker_count
-      self
-    end
-
-    # replaces current listener set with +listeners+.  This will
-    # close the socket if it will not exist in the new listener set
-    def listeners=(listeners)
-      cur_names, dead_names = [], []
-      listener_names.each do |name|
-        if ?/ == name[0]
-          # mark unlinked sockets as dead so we can rebind them
-          (File.socket?(name) ? cur_names : dead_names) << name
-        else
-          cur_names << name
-        end
-      end
-      set_names = listener_names(listeners)
-      dead_names.concat(cur_names - set_names).uniq!
-
-      LISTENERS.delete_if do |io|
-        if dead_names.include?(sock_name(io))
-          IO_PURGATORY.delete_if do |pio|
-            pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
-          end
-          (io.close rescue nil).nil? # true
-        else
-          set_server_sockopt(io, listener_opts[sock_name(io)])
-          false
-        end
-      end
-
-      (set_names - cur_names).each { |addr| listen(addr) }
-    end
-
-    def stdout_path=(path); redirect_io($stdout, path); end
-    def stderr_path=(path); redirect_io($stderr, path); end
-
-    def logger=(obj)
-      HttpRequest::DEFAULTS["rack.logger"] = super
-    end
-
-    # sets the path for the PID file of the master process
-    def pid=(path)
-      if path
-        if x = valid_pid?(path)
-          return path if pid && path == pid && x == $$
-          if x == reexec_pid && pid =~ /\.oldbin\z/
-            logger.warn("will not set pid=#{path} while reexec-ed "\
-                        "child is running PID:#{x}")
-            return
-          end
-          raise ArgumentError, "Already running on PID:#{x} " \
-                               "(or pid=#{path} is stale)"
-        end
-      end
-      unlink_pid_safe(pid) if pid
-
-      if path
-        fp = begin
-          tmp = "#{File.dirname(path)}/#{rand}.#$$"
-          File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
-        rescue Errno::EEXIST
-          retry
-        end
-        fp.syswrite("#$$\n")
-        File.rename(fp.path, path)
-        fp.close
-      end
-      super(path)
-    end
-
-    # add a given address to the +listeners+ set, idempotently
-    # Allows workers to add a private, per-process listener via the
-    # after_fork hook.  Very useful for debugging and testing.
-    # +:tries+ may be specified as an option for the number of times
-    # to retry, and +:delay+ may be specified as the time in seconds
-    # to delay between retries.
-    # A negative value for +:tries+ indicates the listen will be
-    # retried indefinitely, this is useful when workers belonging to
-    # different masters are spawned during a transparent upgrade.
-    def listen(address, opt = {}.merge(listener_opts[address] || {}))
-      address = config.expand_addr(address)
-      return if String === address && listener_names.include?(address)
-
-      delay = opt[:delay] || 0.5
-      tries = opt[:tries] || 5
-      begin
-        io = bind_listen(address, opt)
-        unless TCPServer === io || UNIXServer === io
-          IO_PURGATORY << io
-          io = server_cast(io)
-        end
-        logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
-        LISTENERS << io
-        io
-      rescue Errno::EADDRINUSE => err
-        logger.error "adding listener failed addr=#{address} (in use)"
-        raise err if tries == 0
-        tries -= 1
-        logger.error "retrying in #{delay} seconds " \
-                     "(#{tries < 0 ? 'infinite' : tries} tries left)"
-        sleep(delay)
-        retry
-      rescue => err
-        logger.fatal "error adding listener addr=#{address}"
-        raise err
-      end
-    end
-
-    # monitors children and receives signals forever
-    # (or until a termination signal is sent).  This handles signals
-    # one-at-a-time time and we'll happily drop signals in case somebody
-    # is signalling us too often.
-    def join
-      respawn = true
-      last_check = Time.now
-
-      proc_name 'master'
-      logger.info "master process ready" # test_exec.rb relies on this message
-      if ready_pipe
-        ready_pipe.syswrite($$.to_s)
-        ready_pipe.close rescue nil
-        self.ready_pipe = nil
-      end
-      begin
-        loop do
-          reap_all_workers
-          case SIG_QUEUE.shift
-          when nil
-            # avoid murdering workers after our master process (or the
-            # machine) comes out of suspend/hibernation
-            if (last_check + timeout) >= (last_check = Time.now)
-              murder_lazy_workers
-            else
-              # wait for workers to wakeup on suspend
-              master_sleep(timeout/2.0 + 1)
-            end
-            maintain_worker_count if respawn
-            master_sleep(1)
-          when :QUIT # graceful shutdown
-            break
-          when :TERM, :INT # immediate shutdown
-            stop(false)
-            break
-          when :USR1 # rotate logs
-            logger.info "master reopening logs..."
-            Unicorn::Util.reopen_logs
-            logger.info "master done reopening logs"
-            kill_each_worker(:USR1)
-          when :USR2 # exec binary, stay alive in case something went wrong
-            reexec
-          when :WINCH
-            if Process.ppid == 1 || Process.getpgrp != $$
-              respawn = false
-              logger.info "gracefully stopping all workers"
-              kill_each_worker(:QUIT)
-              self.worker_processes = 0
-            else
-              logger.info "SIGWINCH ignored because we're not daemonized"
-            end
-          when :TTIN
-            respawn = true
-            self.worker_processes += 1
-          when :TTOU
-            self.worker_processes -= 1 if self.worker_processes > 0
-          when :HUP
-            respawn = true
-            if config.config_file
-              load_config!
-              redo # immediate reaping since we may have QUIT workers
-            else # exec binary and exit if there's no config file
-              logger.info "config_file not present, reexecuting binary"
-              reexec
-              break
-            end
-          end
-        end
-      rescue Errno::EINTR
-        retry
-      rescue => e
-        logger.error "Unhandled master loop exception #{e.inspect}."
-        logger.error e.backtrace.join("\n")
-        retry
-      end
-      stop # gracefully shutdown all workers on our way out
-      logger.info "master complete"
-      unlink_pid_safe(pid) if pid
-    end
-
-    # Terminates all workers, but does not exit master process
-    def stop(graceful = true)
-      self.listeners = []
-      limit = Time.now + timeout
-      until WORKERS.empty? || Time.now > limit
-        kill_each_worker(graceful ? :QUIT : :TERM)
-        sleep(0.1)
-        reap_all_workers
-      end
-      kill_each_worker(:KILL)
-    end
-
-    private
-
-    # list of signals we care about and trap in master.
-    QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
-                   :TTIN, :TTOU ]
-
-    # defer a signal for later processing in #join (master process)
-    def trap_deferred(signal)
-      trap(signal) do |sig_nr|
-        if SIG_QUEUE.size < 5
-          SIG_QUEUE << signal
-          awaken_master
-        else
-          logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
-        end
-      end
-    end
-
-    # wait for a signal hander to wake us up and then consume the pipe
-    # Wake up every second anyways to run murder_lazy_workers
-    def master_sleep(sec)
-      IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
-      SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
-      rescue Errno::EAGAIN, Errno::EINTR
-    end
-
-    def awaken_master
-      begin
-        SELF_PIPE[1].write_nonblock('.') # wakeup master process from select
-      rescue Errno::EAGAIN, Errno::EINTR
-        # pipe is full, master should wake up anyways
-        retry
-      end
-    end
-
-    # reaps all unreaped workers
-    def reap_all_workers
-      begin
-        loop do
-          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
-          wpid or break
-          if reexec_pid == wpid
-            logger.error "reaped #{status.inspect} exec()-ed"
-            self.reexec_pid = 0
-            self.pid = pid.chomp('.oldbin') if pid
-            proc_name 'master'
-          else
-            worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-            logger.info "reaped #{status.inspect} " \
-                        "worker=#{worker.nr rescue 'unknown'}"
-          end
-        end
-      rescue Errno::ECHILD
-      end
-    end
-
-    # reexecutes the START_CTX with a new binary
-    def reexec
-      if reexec_pid > 0
-        begin
-          Process.kill(0, reexec_pid)
-          logger.error "reexec-ed child already running PID:#{reexec_pid}"
-          return
-        rescue Errno::ESRCH
-          self.reexec_pid = 0
-        end
-      end
-
-      if pid
-        old_pid = "#{pid}.oldbin"
-        prev_pid = pid.dup
-        begin
-          self.pid = old_pid  # clear the path for a new pid file
-        rescue ArgumentError
-          logger.error "old PID:#{valid_pid?(old_pid)} running with " \
-                       "existing pid=#{old_pid}, refusing rexec"
-          return
-        rescue => e
-          logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
-          return
-        end
-      end
-
-      self.reexec_pid = fork do
-        listener_fds = LISTENERS.map { |sock| sock.fileno }
-        ENV['UNICORN_FD'] = listener_fds.join(',')
-        Dir.chdir(START_CTX[:cwd])
-        cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
-
-        # avoid leaking FDs we don't know about, but let before_exec
-        # unset FD_CLOEXEC, if anything else in the app eventually
-        # relies on FD inheritence.
-        (3..1024).each do |io|
-          next if listener_fds.include?(io)
-          io = IO.for_fd(io) rescue nil
-          io or next
-          IO_PURGATORY << io
-          io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-        end
-        logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
-        before_exec.call(self)
-        exec(*cmd)
-      end
-      proc_name 'master (old)'
-    end
-
-    # forcibly terminate all workers that haven't checked in in timeout
-    # seconds.  The timeout is implemented using an unlinked File
-    # shared between the parent process and each worker.  The worker
-    # runs File#chmod to modify the ctime of the File.  If the ctime
-    # is stale for >timeout seconds, then we'll kill the corresponding
-    # worker.
-    def murder_lazy_workers
-      WORKERS.dup.each_pair do |wpid, worker|
-        stat = worker.tmp.stat
-        # skip workers that disable fchmod or have never fchmod-ed
-        stat.mode == 0100600 and next
-        (diff = (Time.now - stat.ctime)) <= timeout and next
-        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
-                     "(#{diff}s > #{timeout}s), killing"
-        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
-      end
-    end
-
-    def spawn_missing_workers
-      (0...worker_processes).each do |worker_nr|
-        WORKERS.values.include?(worker_nr) and next
-        worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
-        before_fork.call(self, worker)
-        WORKERS[fork {
-          ready_pipe.close if ready_pipe
-          self.ready_pipe = nil
-          worker_loop(worker)
-        }] = worker
-      end
-    end
-
-    def maintain_worker_count
-      (off = WORKERS.size - worker_processes) == 0 and return
-      off < 0 and return spawn_missing_workers
-      WORKERS.dup.each_pair { |wpid,w|
-        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
-      }
-    end
-
-    # if we get any error, try to write something back to the client
-    # assuming we haven't closed the socket, but don't get hung up
-    # if the socket is already closed or broken.  We'll always ensure
-    # the socket is closed at the end of this function
-    def handle_error(client, e)
-      msg = case e
-      when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
-        Const::ERROR_500_RESPONSE
-      when HttpParserError # try to tell the client they're bad
-        Const::ERROR_400_RESPONSE
+  # This returns a lambda to pass in as the app, this does not "build" the
+  # app (which we defer based on the outcome of "preload_app" in the
+  # Unicorn config).  The returned lambda will be called when it is
+  # time to build the app.
+  def self.builder(ru, opts)
+    # allow Configurator to parse cli switches embedded in the ru file
+    Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
+
+    # always called after config file parsing, may be called after forking
+    lambda do ||
+      inner_app = case ru
+      when /\.ru$/
+        raw = File.read(ru)
+        raw.sub!(/^__END__\n.*/, '')
+        eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
       else
-        logger.error "Read error: #{e.inspect}"
-        logger.error e.backtrace.join("\n")
-        Const::ERROR_500_RESPONSE
-      end
-      client.write_nonblock(msg)
-      client.close
-      rescue
-        nil
-    end
-
-    # once a client is accepted, it is processed in its entirety here
-    # in 3 easy steps: read request, call app, write app response
-    def process_client(client)
-      client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      response = app.call(env = REQUEST.read(client))
-
-      if 100 == response[0].to_i
-        client.write(Const::EXPECT_100_RESPONSE)
-        env.delete(Const::HTTP_EXPECT)
-        response = app.call(env)
-      end
-      HttpResponse.write(client, response, HttpRequest::PARSER.headers?)
-      client.close # flushes and uncorks the socket immediately, no keepalive
-    rescue => e
-      handle_error(client, e)
-    end
-
-    # gets rid of stuff the worker has no business keeping track of
-    # to free some resources and drops all sig handlers.
-    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
-    # by the user.
-    def init_worker_process(worker)
-      QUEUE_SIGS.each { |sig| trap(sig, nil) }
-      trap(:CHLD, 'DEFAULT')
-      SIG_QUEUE.clear
-      proc_name "worker[#{worker.nr}]"
-      START_CTX.clear
-      init_self_pipe!
-      WORKERS.values.each { |other| other.tmp.close rescue nil }
-      WORKERS.clear
-      LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
-      worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      after_fork.call(self, worker) # can drop perms
-      worker.user(*user) if user.kind_of?(Array) && ! worker.switched
-      self.timeout /= 2.0 # halve it for select()
-      build_app! unless preload_app
-    end
-
-    def reopen_worker_logs(worker_nr)
-      logger.info "worker=#{worker_nr} reopening logs..."
-      Unicorn::Util.reopen_logs
-      logger.info "worker=#{worker_nr} done reopening logs"
-      init_self_pipe!
-    end
-
-    # runs inside each forked worker, this sits around and waits
-    # for connections and doesn't die until the parent dies (or is
-    # given a INT, QUIT, or TERM signal)
-    def worker_loop(worker)
-      ppid = master_pid
-      init_worker_process(worker)
-      nr = 0 # this becomes negative if we need to reopen logs
-      alive = worker.tmp # tmp is our lifeline to the master process
-      ready = LISTENERS
-
-      # closing anything we IO.select on will raise EBADF
-      trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
-      trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
-      [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
-      logger.info "worker=#{worker.nr} ready"
-      m = 0
-
-      begin
-        nr < 0 and reopen_worker_logs(worker.nr)
-        nr = 0
-
-        # we're a goner in timeout seconds anyways if alive.chmod
-        # breaks, so don't trap the exception.  Using fchmod() since
-        # futimes() is not available in base Ruby and I very strongly
-        # prefer temporary files to be unlinked for security,
-        # performance and reliability reasons, so utime is out.  No-op
-        # changes with chmod doesn't update ctime on all filesystems; so
-        # we change our counter each and every time (after process_client
-        # and before IO.select).
-        alive.chmod(m = 0 == m ? 1 : 0)
-
-        ready.each do |sock|
-          begin
-            process_client(sock.accept_nonblock)
-            nr += 1
-            alive.chmod(m = 0 == m ? 1 : 0)
-          rescue Errno::EAGAIN, Errno::ECONNABORTED
-          end
-          break if nr < 0
-        end
-
-        # make the following bet: if we accepted clients this round,
-        # we're probably reasonably busy, so avoid calling select()
-        # and do a speculative accept_nonblock on ready listeners
-        # before we sleep again in select().
-        redo unless nr == 0 # (nr < 0) => reopen logs
-
-        ppid == Process.ppid or return
-        alive.chmod(m = 0 == m ? 1 : 0)
-        begin
-          # timeout used so we can detect parent death:
-          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
-          ready = ret[0]
-        rescue Errno::EINTR
-          ready = LISTENERS
-        rescue Errno::EBADF
-          nr < 0 or return
-        end
-      rescue => e
-        if alive
-          logger.error "Unhandled listen loop exception #{e.inspect}."
-          logger.error e.backtrace.join("\n")
-        end
-      end while alive
-    end
-
-    # delivers a signal to a worker and fails gracefully if the worker
-    # is no longer running.
-    def kill_worker(signal, wpid)
-      begin
-        Process.kill(signal, wpid)
-      rescue Errno::ESRCH
-        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-      end
-    end
-
-    # delivers a signal to each worker
-    def kill_each_worker(signal)
-      WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
-    end
-
-    # unlinks a PID file at given +path+ if it contains the current PID
-    # still potentially racy without locking the directory (which is
-    # non-portable and may interact badly with other programs), but the
-    # window for hitting the race condition is small
-    def unlink_pid_safe(path)
-      (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
-    end
-
-    # returns a PID if a given path contains a non-stale PID file,
-    # nil otherwise.
-    def valid_pid?(path)
-      wpid = File.read(path).to_i
-      wpid <= 0 and return nil
-      begin
-        Process.kill(0, wpid)
-        wpid
-      rescue Errno::ESRCH
-        # don't unlink stale pid files, racy without non-portable locking...
-      end
-      rescue Errno::ENOENT
-    end
-
-    def load_config!
-      loaded_app = app
-      begin
-        logger.info "reloading config_file=#{config.config_file}"
-        config[:listeners].replace(init_listeners)
-        config.reload
-        config.commit!(self)
-        kill_each_worker(:QUIT)
-        Unicorn::Util.reopen_logs
-        self.app = orig_app
-        build_app! if preload_app
-        logger.info "done reloading config_file=#{config.config_file}"
-      rescue StandardError, LoadError, SyntaxError => e
-        logger.error "error reloading config_file=#{config.config_file}: " \
-                     "#{e.class} #{e.message} #{e.backtrace}"
-        self.app = loaded_app
-      end
-    end
-
-    # returns an array of string names for the given listener array
-    def listener_names(listeners = LISTENERS)
-      listeners.map { |io| sock_name(io) }
-    end
-
-    def build_app!
-      if app.respond_to?(:arity) && app.arity == 0
-        if defined?(Gem) && Gem.respond_to?(:refresh)
-          logger.info "Refreshing Gem list"
-          Gem.refresh
-        end
-        self.app = app.call
+        require ru
+        Object.const_get(File.basename(ru, '.rb').capitalize)
+      end
+
+      pp({ :inner_app => inner_app }) if $DEBUG
+
+      # return value, matches rackup defaults based on env
+      case ENV["RACK_ENV"]
+      when "development"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          use Rack::ShowExceptions
+          use Rack::Lint
+          run inner_app
+        end.to_app
+      when "deployment"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          run inner_app
+        end.to_app
+      else
+        inner_app
       end
     end
+  end
 
-    def proc_name(tag)
-      $0 = ([ File.basename(START_CTX[0]), tag
-            ]).concat(START_CTX[:argv]).join(' ')
-    end
-
-    def redirect_io(io, path)
-      File.open(path, 'ab') { |fp| io.reopen(fp) } if path
-      io.sync = true
-    end
-
-    def init_self_pipe!
-      SELF_PIPE.each { |io| io.close rescue nil }
-      SELF_PIPE.replace(IO.pipe)
-      SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  # returns an array of strings representing TCP listen socket addresses
+  # and Unix domain socket paths.  This is useful for use with
+  # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
+  def self.listener_names
+    Unicorn::HttpServer::LISTENERS.map do |io|
+      Unicorn::SocketHelper.sock_name(io)
     end
-
   end
+  # :startdoc:
 end
+# :enddoc:
+require 'unicorn/const'
+require 'unicorn/socket_helper'
+require 'unicorn/stream_input'
+require 'unicorn/tee_input'
+require 'unicorn/http_request'
+require 'unicorn/configurator'
+require 'unicorn/tmpio'
+require 'unicorn/util'
+require 'unicorn/http_response'
+require 'unicorn/worker'
+require 'unicorn/http_server'
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
index ff5f53a..232b681 100644
--- a/lib/unicorn/app/exec_cgi.rb
+++ b/lib/unicorn/app/exec_cgi.rb
@@ -1,5 +1,5 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
 require 'unicorn'
 
 module Unicorn::App
@@ -28,6 +28,24 @@ module Unicorn::App
       SERVER_SOFTWARE
     ).map { |x| x.freeze } # frozen strings are faster for Hash assignments
 
+    class Body < Unicorn::TmpIO
+      def body_offset=(n)
+        sysseek(@body_offset = n)
+      end
+
+      def each
+        sysseek @body_offset
+        # don't use a preallocated buffer for sysread since we can't
+        # guarantee an actual socket is consuming the yielded string
+        # (or if somebody is pushing to an array for eventual concatenation
+        begin
+          yield sysread(CHUNK_SIZE)
+        rescue EOFError
+          break
+        end while true
+      end
+    end
+
     # Intializes the app, example of usage in a config.ru
     #   map "/cgit" do
     #     run Unicorn::App::ExecCgi.new("/path/to/cgit.cgi")
@@ -43,7 +61,7 @@ module Unicorn::App
 
     # Calls the app
     def call(env)
-      out, err = Unicorn::Util.tmpio, Unicorn::Util.tmpio
+      out, err = Body.new, Unicorn::TmpIO.new
       inp = force_file_input(env)
       pid = fork { run_child(inp, out, err, env) }
       inp.close
@@ -67,9 +85,9 @@ module Unicorn::App
       ENV['GATEWAY_INTERFACE'] = 'CGI/1.1'
       env.keys.grep(/^HTTP_/) { |key| ENV[key] = env[key] }
 
-      a = IO.new(0).reopen(inp)
-      b = IO.new(1).reopen(out)
-      c = IO.new(2).reopen(err)
+      $stdin.reopen(inp)
+      $stdout.reopen(out)
+      $stderr.reopen(err)
       exec(*args)
     end
 
@@ -87,23 +105,7 @@ module Unicorn::App
         offset = 4
       end
       offset += head.length
-
-      # Allows +out+ to be used as a Rack body.
-      out.instance_eval { class << self; self; end }.instance_eval {
-        define_method(:each) { |&blk|
-          sysseek(offset)
-
-          # don't use a preallocated buffer for sysread since we can't
-          # guarantee an actual socket is consuming the yielded string
-          # (or if somebody is pushing to an array for eventual concatenation
-          begin
-            blk.call(sysread(CHUNK_SIZE))
-          rescue EOFError
-            break
-          end while true
-        }
-      }
-
+      out.body_offset = offset
       size -= offset
       prev = nil
       headers = Rack::Utils::HeaderHash.new
@@ -125,7 +127,7 @@ module Unicorn::App
       if inp.respond_to?(:size) && inp.size == 0
         ::File.open('/dev/null', 'rb')
       else
-        tmp = Unicorn::Util.tmpio
+        tmp = Unicorn::TmpIO.new
 
         buf = inp.read(CHUNK_SIZE)
         begin
diff --git a/lib/unicorn/app/inetd.rb b/lib/unicorn/app/inetd.rb
index 9bfa7cb..2a212a2 100644
--- a/lib/unicorn/app/inetd.rb
+++ b/lib/unicorn/app/inetd.rb
@@ -1,10 +1,9 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
 # Copyright (c) 2009 Eric Wong
 # You can redistribute it and/or modify it under the same terms as Ruby.
 
 # this class *must* be used with Rack::Chunked
-
 module Unicorn::App
   class Inetd < Struct.new(:cmd)
 
@@ -47,7 +46,7 @@ module Unicorn::App
         }
       end
 
-      def each(&block)
+      def each
         begin
           rd, = IO.select([err_rd, out_rd])
           rd && rd.first or next
diff --git a/lib/unicorn/app/old_rails.rb b/lib/unicorn/app/old_rails.rb
index e674d78..5f04ce7 100644
--- a/lib/unicorn/app/old_rails.rb
+++ b/lib/unicorn/app/old_rails.rb
@@ -1,5 +1,6 @@
 # -*- encoding: binary -*-
 
+# :enddoc:
 # This code is based on the original Rails handler in Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
diff --git a/lib/unicorn/app/old_rails/static.rb b/lib/unicorn/app/old_rails/static.rb
index 13a435e..1d53717 100644
--- a/lib/unicorn/app/old_rails/static.rb
+++ b/lib/unicorn/app/old_rails/static.rb
@@ -1,5 +1,5 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
 # This code is based on the original Rails handler in Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
diff --git a/lib/unicorn/cgi_wrapper.rb b/lib/unicorn/cgi_wrapper.rb
index b6eeb33..0dc3f33 100644
--- a/lib/unicorn/cgi_wrapper.rb
+++ b/lib/unicorn/cgi_wrapper.rb
@@ -1,5 +1,6 @@
 # -*- encoding: binary -*-
 
+# :enddoc:
 # This code is based on the original CGIWrapper from Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index fb37c56..b6ad022 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -8,14 +8,26 @@ require 'logger'
 # example configuration files.  An example config file for use with
 # nginx is also available at
 # http://unicorn.bogomips.org/examples/nginx.conf
-class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
+#
+# See the link:/TUNING.html document for more information on tuning unicorn.
+class Unicorn::Configurator
+  include Unicorn
+
+  # :stopdoc:
+  attr_accessor :set, :config_file, :after_reload
+
   # used to stash stuff for deferred processing of cli options in
   # config.ru after "working_directory" is bound.  Do not rely on
   # this being around later on...
-  RACKUP = {} # :nodoc:
+  RACKUP = {
+    :daemonize => false,
+    :host => Unicorn::Const::DEFAULT_HOST,
+    :port => Unicorn::Const::DEFAULT_PORT,
+    :set_listener => false,
+    :options => { :listeners => [] }
+  }
 
   # Default settings for Unicorn
-  # :stopdoc:
   DEFAULTS = {
     :timeout => 60,
     :logger => Logger.new($stderr),
@@ -31,6 +43,9 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
       },
     :pid => nil,
     :preload_app => false,
+    :rewindable_input => true, # for Rack 2.x: (Rack::VERSION[0] <= 1),
+    :client_body_buffer_size => Unicorn::Const::MAX_BODY,
+    :trust_x_forwarded => true,
   }
   #:startdoc:
 
@@ -58,6 +73,9 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
 
     parse_rackup_file
 
+    RACKUP[:set_listener] and
+      set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}"
+
     # unicorn_rails creates dirs here after working_directory is bound
     after_reload.call if after_reload
 
@@ -87,20 +105,24 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
     set[key]
   end
 
-  # sets object to the +new+ Logger-like object.  The new logger-like
+  # sets object to the +obj+ Logger-like object.  The new Logger-like
   # object must respond to the following methods:
-  #  +debug+, +info+, +warn+, +error+, +fatal+
+  # * debug
+  # * info
+  # * warn
+  # * error
+  # * fatal
   # The default Logger will log its output to the path specified
   # by +stderr_path+.  If you're running Unicorn daemonized, then
   # you must specify a path to prevent error messages from going
   # to /dev/null.
-  def logger(new)
+  def logger(obj)
     %w(debug info warn error fatal).each do |m|
-      new.respond_to?(m) and next
-      raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
+      obj.respond_to?(m) and next
+      raise ArgumentError, "logger=#{obj} does not respond to method=#{m}"
     end
 
-    set[:logger] = new
+    set[:logger] = obj
   end
 
   # sets after_fork hook to a given block.  This block will be called by
@@ -116,11 +138,6 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
   #    # Existing options for Unicorn::Configurator#listen such as
   #    # :backlog, :rcvbuf, :sndbuf are available here as well.
   #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
-  #
-  #    # drop permissions to "www-data" in the worker
-  #    # generally there's no reason to start Unicorn as a priviledged user
-  #    # as it is not recommended to expose Unicorn to public clients.
-  #    worker.user('www-data', 'www-data') if Process.euid == 0
   #  end
   def after_fork(*args, &block)
     set_hook(:after_fork, block_given? ? block : args[0])
@@ -168,11 +185,7 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
   #      server 192.168.0.9:8080 fail_timeout=0;
   #    }
   def timeout(seconds)
-    Numeric === seconds or raise ArgumentError,
-                                "not numeric: timeout=#{seconds.inspect}"
-    seconds >= 3 or raise ArgumentError,
-                                "too low: timeout=#{seconds.inspect}"
-    set[:timeout] = seconds
+    set_int(:timeout, seconds, 3)
   end
 
   # sets the current number of worker_processes to +nr+.  Each worker
@@ -182,11 +195,7 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
   # the rest of your Unicorn configuration.  See the SIGNALS document
   # for more information.
   def worker_processes(nr)
-    Integer === nr or raise ArgumentError,
-                           "not an integer: worker_processes=#{nr.inspect}"
-    nr >= 0 or raise ArgumentError,
-                           "not non-negative: worker_processes=#{nr.inspect}"
-    set[:worker_processes] = nr
+    set_int(:worker_processes, nr, 1)
   end
 
   # sets listeners to the given +addresses+, replacing or augmenting the
@@ -200,131 +209,165 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
     set[:listeners] = addresses
   end
 
-  # adds an +address+ to the existing listener set.
+  # Adds an +address+ to the existing listener set.  May be specified more
+  # than once.  +address+ may be an Integer port number for a TCP port, an
+  # "IP_ADDRESS:PORT" for TCP listeners or a pathname for UNIX domain sockets.
+  #
+  #   listen 3000 # listen to port 3000 on all TCP interfaces
+  #   listen "127.0.0.1:3000"  # listen to port 3000 on the loopback interface
+  #   listen "/tmp/.unicorn.sock" # listen on the given Unix domain socket
+  #   listen "[::1]:3000" # listen to port 3000 on the IPv6 loopback interface
   #
   # The following options may be specified (but are generally not needed):
   #
-  # +:backlog+: this is the backlog of the listen() syscall.
+  # [:backlog => number of clients]
+  #
+  #   This is the backlog of the listen() syscall.
+  #
+  #   Some operating systems allow negative values here to specify the
+  #   maximum allowable value.  In most cases, this number is only
+  #   recommendation and there are other OS-specific tunables and
+  #   variables that can affect this number.  See the listen(2)
+  #   syscall documentation of your OS for the exact semantics of
+  #   this.
+  #
+  #   If you are running unicorn on multiple machines, lowering this number
+  #   can help your load balancer detect when a machine is overloaded
+  #   and give requests to a different machine.
+  #
+  #   Default: 1024
+  #
+  # [:rcvbuf => bytes, :sndbuf => bytes]
+  #
+  #   Maximum receive and send buffer sizes (in bytes) of sockets.
+  #
+  #   These correspond to the SO_RCVBUF and SO_SNDBUF settings which
+  #   can be set via the setsockopt(2) syscall.  Some kernels
+  #   (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
+  #   there is no need (and it is sometimes detrimental) to specify them.
+  #
+  #   See the socket API documentation of your operating system
+  #   to determine the exact semantics of these settings and
+  #   other operating system-specific knobs where they can be
+  #   specified.
+  #
+  #   Defaults: operating system defaults
+  #
+  # [:tcp_nodelay => true or false]
+  #
+  #   Disables Nagle's algorithm on TCP sockets if +true+.
   #
-  # Some operating systems allow negative values here to specify the
-  # maximum allowable value.  In most cases, this number is only
-  # recommendation and there are other OS-specific tunables and
-  # variables that can affect this number.  See the listen(2)
-  # syscall documentation of your OS for the exact semantics of
-  # this.
+  #   Setting this to +true+ can make streaming responses in Rails 3.1
+  #   appear more quickly at the cost of slightly higher bandwidth usage.
+  #   The effect of this option is most visible if nginx is not used,
+  #   but nginx remains highly recommended with \Unicorn.
   #
-  # If you are running unicorn on multiple machines, lowering this number
-  # can help your load balancer detect when a machine is overloaded
-  # and give requests to a different machine.
+  #   This has no effect on UNIX sockets.
   #
-  # Default: 1024
+  #   Default: +false+ (Nagle's algorithm enabled) in \Unicorn,
+  #   +true+ in Rainbows!
   #
-  # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
+  # [:tcp_nopush => true or false]
   #
-  # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
-  # can be set via the setsockopt(2) syscall.  Some kernels
-  # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
-  # there is no need (and it is sometimes detrimental) to specify them.
+  #   Enables/disables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
   #
-  # See the socket API documentation of your operating system
-  # to determine the exact semantics of these settings and
-  # other operating system-specific knobs where they can be
-  # specified.
+  #   This prevents partial TCP frames from being sent out and reduces
+  #   wakeups in nginx if it is on a different machine.  Since \Unicorn
+  #   is only designed for applications that send the response body
+  #   quickly without keepalive, sockets will always be flushed on close
+  #   to prevent delays.
   #
-  # Defaults: operating system defaults
+  #   This has no effect on UNIX sockets.
   #
-  # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
+  #   Default: +true+ in \Unicorn 3.4+, +false+ in Rainbows!
   #
-  # This has no effect on UNIX sockets.
+  # [:tries => Integer]
   #
-  # Default: operating system defaults (usually Nagle's algorithm enabled)
+  #   Times to retry binding a socket if it is already in use
   #
-  # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
+  #   A negative number indicates we will retry indefinitely, this is
+  #   useful for migrations and upgrades when individual workers
+  #   are binding to different ports.
   #
-  # This will prevent partial TCP frames from being sent out.
-  # Enabling +tcp_nopush+ is generally not needed or recommended as
-  # controlling +tcp_nodelay+ already provides sufficient latency
-  # reduction whereas Unicorn does not know when the best times are
-  # for flushing corked sockets.
+  #   Default: 5
   #
-  # This has no effect on UNIX sockets.
+  # [:delay => seconds]
   #
-  # +:tries+: times to retry binding a socket if it is already in use
+  #   Seconds to wait between successive +tries+
   #
-  # A negative number indicates we will retry indefinitely, this is
-  # useful for migrations and upgrades when individual workers
-  # are binding to different ports.
+  #   Default: 0.5 seconds
   #
-  # Default: 5
+  # [:umask => mode]
   #
-  # +:delay+: seconds to wait between successive +tries+
+  #   Sets the file mode creation mask for UNIX sockets.  If specified,
+  #   this is usually in octal notation.
   #
-  # Default: 0.5 seconds
+  #   Typically UNIX domain sockets are created with more liberal
+  #   file permissions than the rest of the application.  By default,
+  #   we create UNIX domain sockets to be readable and writable by
+  #   all local users to give them the same accessibility as
+  #   locally-bound TCP listeners.
   #
-  # +:umask+: sets the file mode creation mask for UNIX sockets
+  #   This has no effect on TCP listeners.
   #
-  # Typically UNIX domain sockets are created with more liberal
-  # file permissions than the rest of the application.  By default,
-  # we create UNIX domain sockets to be readable and writable by
-  # all local users to give them the same accessibility as
-  # locally-bound TCP listeners.
+  #   Default: 0000 (world-read/writable)
   #
-  # This has no effect on TCP listeners.
+  # [:tcp_defer_accept => Integer]
   #
-  # Default: 0 (world read/writable)
+  #   Defer accept() until data is ready (Linux-only)
   #
-  # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only)
+  #   For Linux 2.6.32 and later, this is the number of retransmits to
+  #   defer an accept() for if no data arrives, but the client will
+  #   eventually be accepted after the specified number of retransmits
+  #   regardless of whether data is ready.
   #
-  # For Linux 2.6.32 and later, this is the number of retransmits to
-  # defer an accept() for if no data arrives, but the client will
-  # eventually be accepted after the specified number of retransmits
-  # regardless of whether data is ready.
+  #   For Linux before 2.6.32, this is a boolean option, and
+  #   accepts are _always_ deferred indefinitely if no data arrives.
+  #   This is similar to <code>:accept_filter => "dataready"</code>
+  #   under FreeBSD.
   #
-  # For Linux before 2.6.32, this is a boolean option, and
-  # accepts are _always_ deferred indefinitely if no data arrives.
-  # This is similar to <code>:accept_filter => "dataready"</code>
-  # under FreeBSD.
+  #   Specifying +true+ is synonymous for the default value(s) below,
+  #   and +false+ or +nil+ is synonymous for a value of zero.
   #
-  # Specifying +true+ is synonymous for the default value(s) below,
-  # and +false+ or +nil+ is synonymous for a value of zero.
+  #   A value of +1+ is a good optimization for local networks
+  #   and trusted clients.  For Rainbows! and Zbatery users, a higher
+  #   value (e.g. +60+) provides more protection against some
+  #   denial-of-service attacks.  There is no good reason to ever
+  #   disable this with a +zero+ value when serving HTTP.
   #
-  # A value of +1+ is a good optimization for local networks
-  # and trusted clients.  For Rainbows! and Zbatery users, a higher
-  # value (e.g. +60+) provides more protection against some
-  # denial-of-service attacks.  There is no good reason to ever
-  # disable this with a +zero+ value when serving HTTP.
+  #   Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+
   #
-  # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+
+  # [:accept_filter => String]
   #
-  # +:accept_filter: defer accept() until data is ready (FreeBSD-only)
+  #   defer accept() until data is ready (FreeBSD-only)
   #
-  # This enables either the "dataready" or (default) "httpready"
-  # accept() filter under FreeBSD.  This is intended as an
-  # optimization to reduce context switches with common GET/HEAD
-  # requests.  For Rainbows! and Zbatery users, this provides
-  # some protection against certain denial-of-service attacks, too.
+  #   This enables either the "dataready" or (default) "httpready"
+  #   accept() filter under FreeBSD.  This is intended as an
+  #   optimization to reduce context switches with common GET/HEAD
+  #   requests.  For Rainbows! and Zbatery users, this provides
+  #   some protection against certain denial-of-service attacks, too.
   #
-  # There is no good reason to change from the default.
+  #   There is no good reason to change from the default.
   #
-  # Default: "httpready"
-  def listen(address, opt = {})
+  #   Default: "httpready"
+  def listen(address, options = {})
     address = expand_addr(address)
     if String === address
       [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
-        value = opt[key] or next
+        value = options[key] or next
         Integer === value or
           raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
       end
       [ :tcp_nodelay, :tcp_nopush ].each do |key|
-        (value = opt[key]).nil? and next
+        (value = options[key]).nil? and next
         TrueClass === value || FalseClass === value or
           raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
       end
-      unless (value = opt[:delay]).nil?
+      unless (value = options[:delay]).nil?
         Numeric === value or
           raise ArgumentError, "not numeric: delay=#{value.inspect}"
       end
-      set[:listener_opts][address].merge!(opt)
+      set[:listener_opts][address].merge!(options)
     end
 
     set[:listeners] << address
@@ -362,12 +405,30 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
   # cause the master process to exit with an error.
 
   def preload_app(bool)
-    case bool
-    when TrueClass, FalseClass
-      set[:preload_app] = bool
-    else
-      raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
-    end
+    set_bool(:preload_app, bool)
+  end
+
+  # Toggles making \env[\"rack.input\"] rewindable.
+  # Disabling rewindability can improve performance by lowering
+  # I/O and memory usage for applications that accept uploads.
+  # Keep in mind that the Rack 1.x spec requires
+  # \env[\"rack.input\"] to be rewindable, so this allows
+  # intentionally violating the current Rack 1.x spec.
+  #
+  # +rewindable_input+ defaults to +true+ when used with Rack 1.x for
+  # Rack conformance.  When Rack 2.x is finalized, this will most
+  # likely default to +false+ while still conforming to the newer
+  # (less demanding) spec.
+  def rewindable_input(bool)
+    set_bool(:rewindable_input, bool)
+  end
+
+  # The maximum size (in +bytes+) to buffer in memory before
+  # resorting to a temporary file.  Default is 112 kilobytes.
+  # This option has no effect if "rewindable_input" is set to
+  # +false+.
+  def client_body_buffer_size(bytes)
+    set_int(:client_body_buffer_size, bytes, 0)
   end
 
   # Allow redirecting $stderr to a given path.  Unlike doing this from
@@ -417,6 +478,7 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
   # The master process always stays running as the user who started it.
   # This switch will occur after calling the after_fork hook, and only
   # if the Worker#user method is not called in the after_fork hook
+  # +group+ is optional and will not change if unspecified.
   def user(user, group = nil)
     # raises ArgumentError on invalid user/group
     Etc.getpwnam(user)
@@ -424,10 +486,22 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
     set[:user] = [ user, group ]
   end
 
+  # Sets whether or not the parser will trust X-Forwarded-Proto and
+  # X-Forwarded-SSL headers and set "rack.url_scheme" to "https" accordingly.
+  # Rainbows!/Zbatery installations facing untrusted clients directly
+  # should set this to +false+.  This is +true+ by default as Unicorn
+  # is designed to only sit behind trusted nginx proxies.
+  #
+  # This has never been publically documented and is subject to removal
+  # in future releases.
+  def trust_x_forwarded(bool) # :nodoc:
+    set_bool(:trust_x_forwarded, bool)
+  end
+
   # expands "unix:path/to/foo" to a socket relative to the current path
   # expands pathnames of sockets if relative to "~" or "~username"
   # expands "*:port and ":port" to "0.0.0.0:port"
-  def expand_addr(address) #:nodoc
+  def expand_addr(address) #:nodoc:
     return "0.0.0.0:#{address}" if Integer === address
     return address unless String === address
 
@@ -438,16 +512,27 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
       File.expand_path(address)
     when %r{\A(?:\*:)?(\d+)\z}
       "0.0.0.0:#$1"
-    when %r{\A(.*):(\d+)\z}
-      # canonicalize the name
-      packed = Socket.pack_sockaddr_in($2.to_i, $1)
-      Socket.unpack_sockaddr_in(packed).reverse!.join(':')
+    when %r{\A\[([a-fA-F0-9:]+)\]\z}, %r/\A((?:\d+\.){3}\d+)\z/
+      canonicalize_tcp($1, 80)
+    when %r{\A\[([a-fA-F0-9:]+)\]:(\d+)\z}, %r{\A(.*):(\d+)\z}
+      canonicalize_tcp($1, $2.to_i)
     else
       address
     end
   end
 
 private
+  def set_int(var, n, min) #:nodoc:
+    Integer === n or raise ArgumentError, "not an integer: #{var}=#{n.inspect}"
+    n >= min or raise ArgumentError, "too low (< #{min}): #{var}=#{n.inspect}"
+    set[var] = n
+  end
+
+  def canonicalize_tcp(addr, port)
+    packed = Socket.pack_sockaddr_in(port, addr)
+    port, addr = Socket.unpack_sockaddr_in(packed)
+    /:/ =~ addr ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
+  end
 
   def set_path(var, path) #:nodoc:
     case path
@@ -458,6 +543,15 @@ private
     end
   end
 
+  def set_bool(var, bool) #:nodoc:
+    case bool
+    when true, false
+      set[var] = bool
+    else
+      raise ArgumentError, "#{var}=#{bool.inspect} not a boolean"
+    end
+  end
+
   def set_hook(var, my_proc, req_arity = 2) #:nodoc:
     case my_proc
     when Proc
@@ -495,23 +589,15 @@ private
     /^#\\(.*)/ =~ File.read(ru) or return
     RACKUP[:optparse].parse!($1.split(/\s+/))
 
-    # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
-    host, port, set_listener, options, daemonize =
-                    eval("[ host, port, set_listener, options, daemonize ]",
-                         TOPLEVEL_BINDING)
-
-    # XXX duplicate code from bin/unicorn{,_rails}
-    set[:listeners] << "#{host}:#{port}" if set_listener
-
-    if daemonize
+    if RACKUP[:daemonize]
       # unicorn_rails wants a default pid path, (not plain 'unicorn')
       if after_reload
         spid = set[:pid]
         pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
       end
       unless RACKUP[:daemonized]
-        Unicorn::Launcher.daemonize!(options)
-        RACKUP[:ready_pipe] = options.delete(:ready_pipe)
+        Unicorn::Launcher.daemonize!(RACKUP[:options])
+        RACKUP[:ready_pipe] = RACKUP[:options].delete(:ready_pipe)
       end
     end
   end
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index b428be5..c65c242 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -1,36 +1,39 @@
 # -*- encoding: binary -*-
 
-module Unicorn
-
-  # Frequently used constants when constructing requests or responses.  Many times
-  # the constant just refers to a string with the same contents.  Using these constants
-  # gave about a 3% to 10% performance improvement over using the strings directly.
-  # Symbols did not really improve things much compared to constants.
-  module Const
-
-    # The current version of Unicorn, currently 1.1.7
-    UNICORN_VERSION="1.1.7"
-
-    DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
-    DEFAULT_PORT = 8080      # default TCP listen port
-    DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
-
-    # The basic max request size we'll try to read.
-    CHUNK_SIZE=(16 * 1024)
-
-    # Maximum request body size before it is moved out of memory and into a
-    # temporary file for reading (112 kilobytes).
-    MAX_BODY=1024 * 112
-
-    # common errors we'll send back
-    ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
-    ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
-    EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
-
-    # A frozen format for this is about 15% faster
-    REMOTE_ADDR="REMOTE_ADDR".freeze
-    RACK_INPUT="rack.input".freeze
-    HTTP_EXPECT="HTTP_EXPECT"
-  end
-
+# :enddoc:
+# Frequently used constants when constructing requests or responses.
+# Many times the constant just refers to a string with the same
+# contents.  Using these constants gave about a 3% to 10% performance
+# improvement over using the strings directly.  Symbols did not really
+# improve things much compared to constants.
+module Unicorn::Const
+
+  # The current version of Unicorn, currently 3.4.0
+  UNICORN_VERSION = "3.4.0"
+
+  # default TCP listen host address (0.0.0.0, all interfaces)
+  DEFAULT_HOST = "0.0.0.0"
+
+  # default TCP listen port (8080)
+  DEFAULT_PORT = 8080
+
+  # default TCP listen address and port (0.0.0.0:8080)
+  DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
+
+  # The basic request body size we'll try to read at once (16 kilobytes).
+  CHUNK_SIZE = 16 * 1024
+
+  # Maximum request body size before it is moved out of memory and into a
+  # temporary file for reading (112 kilobytes).  This is the default
+  # value of client_body_buffer_size.
+  MAX_BODY = 1024 * 112
+
+  # :stopdoc:
+  # common errors we'll send back
+  ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
+  ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
+  EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
+
+  HTTP_EXPECT = "HTTP_EXPECT"
+  # :startdoc:
 end
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index 65870ed..e72f571 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -1,71 +1,79 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
+# no stable API here
 require 'unicorn_http'
 
-module Unicorn
-  class HttpRequest
+# TODO: remove redundant names
+Unicorn.const_set(:HttpRequest, Unicorn::HttpParser)
+class Unicorn::HttpParser
+
+  # default parameters we merge into the request env for Rack handlers
+  DEFAULTS = {
+    "rack.errors" => $stderr,
+    "rack.multiprocess" => true,
+    "rack.multithread" => false,
+    "rack.run_once" => false,
+    "rack.version" => [1, 1],
+    "SCRIPT_NAME" => "",
+
+    # this is not in the Rack spec, but some apps may rely on it
+    "SERVER_SOFTWARE" => "Unicorn #{Unicorn::Const::UNICORN_VERSION}"
+  }
 
-    # default parameters we merge into the request env for Rack handlers
-    DEFAULTS = {
-      "rack.errors" => $stderr,
-      "rack.multiprocess" => true,
-      "rack.multithread" => false,
-      "rack.run_once" => false,
-      "rack.version" => [1, 1],
-      "SCRIPT_NAME" => "",
+  NULL_IO = StringIO.new("")
 
-      # this is not in the Rack spec, but some apps may rely on it
-      "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}"
-    }
+  # :stopdoc:
+  # A frozen format for this is about 15% faster
+  REMOTE_ADDR = 'REMOTE_ADDR'.freeze
+  RACK_INPUT = 'rack.input'.freeze
+  @@input_class = Unicorn::TeeInput
 
-    NULL_IO = StringIO.new("")
-    LOCALHOST = '127.0.0.1'
+  def self.input_class
+    @@input_class
+  end
 
-    # Being explicitly single-threaded, we have certain advantages in
-    # not having to worry about variables being clobbered :)
-    BUF = ""
-    PARSER = HttpParser.new
-    REQ = {}
+  def self.input_class=(klass)
+    @@input_class = klass
+  end
+  # :startdoc:
 
-    # Does the majority of the IO processing.  It has been written in
-    # Ruby using about 8 different IO processing strategies.
-    #
-    # It is currently carefully constructed to make sure that it gets
-    # the best possible performance for the common case: GET requests
-    # that are fully complete after a single read(2)
-    #
-    # Anyone who thinks they can make it faster is more than welcome to
-    # take a crack at it.
-    #
-    # returns an environment hash suitable for Rack if successful
-    # This does minimal exception trapping and it is up to the caller
-    # to handle any socket errors (e.g. user aborted upload).
-    def read(socket)
-      REQ.clear
-      PARSER.reset
+  # Does the majority of the IO processing.  It has been written in
+  # Ruby using about 8 different IO processing strategies.
+  #
+  # It is currently carefully constructed to make sure that it gets
+  # the best possible performance for the common case: GET requests
+  # that are fully complete after a single read(2)
+  #
+  # Anyone who thinks they can make it faster is more than welcome to
+  # take a crack at it.
+  #
+  # returns an environment hash suitable for Rack if successful
+  # This does minimal exception trapping and it is up to the caller
+  # to handle any socket errors (e.g. user aborted upload).
+  def read(socket)
+    clear
+    e = env
 
-      # From http://www.ietf.org/rfc/rfc3875:
-      # "Script authors should be aware that the REMOTE_ADDR and
-      #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
-      #  may not identify the ultimate source of the request.  They
-      #  identify the client for the immediate request to the server;
-      #  that client may be a proxy, gateway, or other intermediary
-      #  acting on behalf of the actual source client."
-      REQ[Const::REMOTE_ADDR] =
-                    TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
+    # From http://www.ietf.org/rfc/rfc3875:
+    # "Script authors should be aware that the REMOTE_ADDR and
+    #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
+    #  may not identify the ultimate source of the request.  They
+    #  identify the client for the immediate request to the server;
+    #  that client may be a proxy, gateway, or other intermediary
+    #  acting on behalf of the actual source client."
+    e[REMOTE_ADDR] = socket.kgio_addr
 
-      # short circuit the common case with small GET requests first
-      if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
-        # Parser is not done, queue up more data to read and continue parsing
-        # an Exception thrown from the PARSER will throw us out of the loop
-        begin
-          BUF << socket.readpartial(Const::CHUNK_SIZE)
-        end while PARSER.headers(REQ, BUF).nil?
-      end
-      REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ?
-                   NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF)
-      REQ.update(DEFAULTS)
+    # short circuit the common case with small GET requests first
+    socket.kgio_read!(16384, buf)
+    if parse.nil?
+      # Parser is not done, queue up more data to read and continue parsing
+      # an Exception thrown from the parser will throw us out of the loop
+      begin
+        buf << socket.kgio_read!(16384)
+      end while parse.nil?
     end
-
+    e[RACK_INPUT] = 0 == content_length ?
+                    NULL_IO : @@input_class.new(socket, self)
+    e.merge!(DEFAULTS)
   end
 end
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index f3b5a82..b781e20 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -1,23 +1,13 @@
 # -*- encoding: binary -*-
-require 'time'
-
+# :enddoc:
 # Writes a Rack response to your client using the HTTP/1.1 specification.
 # You use it by simply doing:
 #
 #   status, headers, body = rack_app.call(env)
-#   HttpResponse.write(socket, [ status, headers, body ])
+#   http_response_write(socket, status, headers, body)
 #
 # Most header correctness (including Content-Length and Content-Type)
-# is the job of Rack, with the exception of the "Connection: close"
-# and "Date" headers.
-#
-# A design decision was made to force the client to not pipeline or
-# keepalive requests.  HTTP/1.1 pipelining really kills the
-# performance due to how it has to be handled and how unclear the
-# standard is.  To fix this the HttpResponse always gives a
-# "Connection: close" header which forces the client to close right
-# away.  The bonus for this is that it gives a pretty nice speed boost
-# to most clients since they can close their connection immediately.
+# is the job of Rack, with the exception of the "Date" and "Status" header.
 module Unicorn::HttpResponse
 
   # Every standard HTTP code mapped to the appropriate message.
@@ -25,41 +15,27 @@ module Unicorn::HttpResponse
     hash[code] = "#{code} #{msg}"
     hash
   }
-
-  # Rack does not set/require a Date: header.  We always override the
-  # Connection: and Date: headers no matter what (if anything) our
-  # Rack application sent us.
-  SKIP = { 'connection' => true, 'date' => true, 'status' => true }
+  CRLF = "\r\n"
 
   # writes the rack_response to socket as an HTTP response
-  def self.write(socket, rack_response, have_header = true)
-    status, headers, body = rack_response
-
-    if have_header
-      status = CODES[status.to_i] || status
-      out = []
-
-      # Don't bother enforcing duplicate supression, it's a Hash most of
-      # the time anyways so just hope our app knows what it's doing
+  def http_response_write(socket, status, headers, body)
+    status = CODES[status.to_i] || status
+
+    if headers
+      buf = "HTTP/1.1 #{status}\r\n" \
+            "Date: #{httpdate}\r\n" \
+            "Status: #{status}\r\n" \
+            "Connection: close\r\n"
       headers.each do |key, value|
-        next if SKIP.include?(key.downcase)
+        next if %r{\A(?:Date\z|Connection\z)}i =~ key
         if value =~ /\n/
           # avoiding blank, key-only cookies with /\n+/
-          out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+          buf << value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }.join
         else
-          out << "#{key}: #{value}\r\n"
+          buf << "#{key}: #{value}\r\n"
         end
       end
-
-      # Rack should enforce Content-Length or chunked transfer encoding,
-      # so don't worry or care about them.
-      # Date is required by HTTP/1.1 as long as our clock can be trusted.
-      # Some broken clients require a "Status" header so we accomodate them
-      socket.write("HTTP/1.1 #{status}\r\n" \
-                   "Date: #{Time.now.httpdate}\r\n" \
-                   "Status: #{status}\r\n" \
-                   "Connection: close\r\n" \
-                   "#{out.join('')}\r\n")
+      socket.write(buf << CRLF)
     end
 
     body.each { |chunk| socket.write(chunk) }
diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb
new file mode 100644
index 0000000..3077b95
--- /dev/null
+++ b/lib/unicorn/http_server.rb
@@ -0,0 +1,733 @@
+# -*- encoding: binary -*-
+
+# This is the process manager of Unicorn. This manages worker
+# processes which in turn handle the I/O and application process.
+# Listener sockets are started in the master process and shared with
+# forked worker children.
+#
+# Users do not need to know the internals of this class, but reading the
+# {source}[http://bogomips.org/unicorn.git/tree/lib/unicorn/http_server.rb]
+# is educational for programmers wishing to learn how \Unicorn works.
+# See Unicorn::Configurator for information on how to configure \Unicorn.
+class Unicorn::HttpServer
+  # :stopdoc:
+  attr_accessor :app, :request, :timeout, :worker_processes,
+                :before_fork, :after_fork, :before_exec,
+                :listener_opts, :preload_app,
+                :reexec_pid, :orig_app, :init_listeners,
+                :master_pid, :config, :ready_pipe, :user
+  attr_reader :pid, :logger
+  include Unicorn::SocketHelper
+  include Unicorn::HttpResponse
+
+  # backwards compatibility with 1.x
+  Worker = Unicorn::Worker
+
+  # prevents IO objects in here from being GC-ed
+  IO_PURGATORY = []
+
+  # all bound listener sockets
+  LISTENERS = []
+
+  # This hash maps PIDs to Workers
+  WORKERS = {}
+
+  # We use SELF_PIPE differently in the master and worker processes:
+  #
+  # * The master process never closes or reinitializes this once
+  # initialized.  Signal handlers in the master process will write to
+  # it to wake up the master from IO.select in exactly the same manner
+  # djb describes in http://cr.yp.to/docs/selfpipe.html
+  #
+  # * The workers immediately close the pipe they inherit from the
+  # master and replace it with a new pipe after forking.  This new
+  # pipe is also used to wakeup from IO.select from inside (worker)
+  # signal handlers.  However, workers *close* the pipe descriptors in
+  # the signal handlers to raise EBADF in IO.select instead of writing
+  # like we do in the master.  We cannot easily use the reader set for
+  # IO.select because LISTENERS is already that set, and it's extra
+  # work (and cycles) to distinguish the pipe FD from the reader set
+  # once IO.select returns.  So we're lazy and just close the pipe when
+  # a (rare) signal arrives in the worker and reinitialize the pipe later.
+  SELF_PIPE = []
+
+  # signal queue used for self-piping
+  SIG_QUEUE = []
+
+  # list of signals we care about and trap in master.
+  QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
+
+  # :startdoc:
+  # We populate this at startup so we can figure out how to reexecute
+  # and upgrade the currently running instance of Unicorn
+  # This Hash is considered a stable interface and changing its contents
+  # will allow you to switch between different installations of Unicorn
+  # or even different installations of the same applications without
+  # downtime.  Keys of this constant Hash are described as follows:
+  #
+  # * 0 - the path to the unicorn/unicorn_rails executable
+  # * :argv - a deep copy of the ARGV array the executable originally saw
+  # * :cwd - the working directory of the application, this is where
+  # you originally started Unicorn.
+  #
+  # To change your unicorn executable to a different path without downtime,
+  # you can set the following in your Unicorn config file, HUP and then
+  # continue with the traditional USR2 + QUIT upgrade steps:
+  #
+  #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
+  START_CTX = {
+    :argv => ARGV.map { |arg| arg.dup },
+    0 => $0.dup,
+  }
+  # We favor ENV['PWD'] since it is (usually) symlink aware for Capistrano
+  # and like systems
+  START_CTX[:cwd] = begin
+    a = File.stat(pwd = ENV['PWD'])
+    b = File.stat(Dir.pwd)
+    a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
+  rescue
+    Dir.pwd
+  end
+  # :stopdoc:
+
+  # Creates a working server on host:port (strange things happen if
+  # port isn't a Number).  Use HttpServer::run to start the server and
+  # HttpServer.run.join to join the thread that's processing
+  # incoming requests on the socket.
+  def initialize(app, options = {})
+    @app = app
+    @request = Unicorn::HttpRequest.new
+    self.reexec_pid = 0
+    options = options.dup
+    @ready_pipe = options.delete(:ready_pipe)
+    self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+    options[:use_defaults] = true
+    self.config = Unicorn::Configurator.new(options)
+    self.listener_opts = {}
+
+    # we try inheriting listeners first, so we bind them later.
+    # we don't write the pid file until we've bound listeners in case
+    # unicorn was started twice by mistake.  Even though our #pid= method
+    # checks for stale/existing pid files, race conditions are still
+    # possible (and difficult/non-portable to avoid) and can be likely
+    # to clobber the pid if the second start was in quick succession
+    # after the first, so we rely on the listener binding to fail in
+    # that case.  Some tests (in and outside of this source tree) and
+    # monitoring tools may also rely on pid files existing before we
+    # attempt to connect to the listener(s)
+    config.commit!(self, :skip => [:listeners, :pid])
+    self.orig_app = app
+  end
+
+  # Runs the thing.  Returns self so you can run join on it
+  def start
+    BasicSocket.do_not_reverse_lookup = true
+
+    # inherit sockets from parents, they need to be plain Socket objects
+    # before they become Kgio::UNIXServer or Kgio::TCPServer
+    inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
+      io = Socket.for_fd(fd.to_i)
+      set_server_sockopt(io, listener_opts[sock_name(io)])
+      IO_PURGATORY << io
+      logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
+      server_cast(io)
+    end
+
+    config_listeners = config[:listeners].dup
+    LISTENERS.replace(inherited)
+
+    # we start out with generic Socket objects that get cast to either
+    # Kgio::TCPServer or Kgio::UNIXServer objects; but since the Socket
+    # objects share the same OS-level file descriptor as the higher-level
+    # *Server objects; we need to prevent Socket objects from being
+    # garbage-collected
+    config_listeners -= listener_names
+    if config_listeners.empty? && LISTENERS.empty?
+      config_listeners << Unicorn::Const::DEFAULT_LISTEN
+      init_listeners << Unicorn::Const::DEFAULT_LISTEN
+      START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
+    end
+    config_listeners.each { |addr| listen(addr) }
+    raise ArgumentError, "no listeners" if LISTENERS.empty?
+
+    # this pipe is used to wake us up from select(2) in #join when signals
+    # are trapped.  See trap_deferred.
+    init_self_pipe!
+
+    # setup signal handlers before writing pid file in case people get
+    # trigger happy and send signals as soon as the pid file exists.
+    # Note that signals don't actually get handled until the #join method
+    QUEUE_SIGS.each { |sig| trap(sig) { SIG_QUEUE << sig; awaken_master } }
+    trap(:CHLD) { awaken_master }
+    self.pid = config[:pid]
+
+    self.master_pid = $$
+    build_app! if preload_app
+    maintain_worker_count
+    self
+  end
+
+  # replaces current listener set with +listeners+.  This will
+  # close the socket if it will not exist in the new listener set
+  def listeners=(listeners)
+    cur_names, dead_names = [], []
+    listener_names.each do |name|
+      if ?/ == name[0]
+        # mark unlinked sockets as dead so we can rebind them
+        (File.socket?(name) ? cur_names : dead_names) << name
+      else
+        cur_names << name
+      end
+    end
+    set_names = listener_names(listeners)
+    dead_names.concat(cur_names - set_names).uniq!
+
+    LISTENERS.delete_if do |io|
+      if dead_names.include?(sock_name(io))
+        IO_PURGATORY.delete_if do |pio|
+          pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
+        end
+        (io.close rescue nil).nil? # true
+      else
+        set_server_sockopt(io, listener_opts[sock_name(io)])
+        false
+      end
+    end
+
+    (set_names - cur_names).each { |addr| listen(addr) }
+  end
+
+  def stdout_path=(path); redirect_io($stdout, path); end
+  def stderr_path=(path); redirect_io($stderr, path); end
+
+  def logger=(obj)
+    Unicorn::HttpRequest::DEFAULTS["rack.logger"] = @logger = obj
+  end
+
+  # sets the path for the PID file of the master process
+  def pid=(path)
+    if path
+      if x = valid_pid?(path)
+        return path if pid && path == pid && x == $$
+        if x == reexec_pid && pid =~ /\.oldbin\z/
+          logger.warn("will not set pid=#{path} while reexec-ed "\
+                      "child is running PID:#{x}")
+          return
+        end
+        raise ArgumentError, "Already running on PID:#{x} " \
+                             "(or pid=#{path} is stale)"
+      end
+    end
+    unlink_pid_safe(pid) if pid
+
+    if path
+      fp = begin
+        tmp = "#{File.dirname(path)}/#{rand}.#$$"
+        File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
+      rescue Errno::EEXIST
+        retry
+      end
+      fp.syswrite("#$$\n")
+      File.rename(fp.path, path)
+      fp.close
+    end
+    @pid = path
+  end
+
+  # add a given address to the +listeners+ set, idempotently
+  # Allows workers to add a private, per-process listener via the
+  # after_fork hook.  Very useful for debugging and testing.
+  # +:tries+ may be specified as an option for the number of times
+  # to retry, and +:delay+ may be specified as the time in seconds
+  # to delay between retries.
+  # A negative value for +:tries+ indicates the listen will be
+  # retried indefinitely, this is useful when workers belonging to
+  # different masters are spawned during a transparent upgrade.
+  def listen(address, opt = {}.merge(listener_opts[address] || {}))
+    address = config.expand_addr(address)
+    return if String === address && listener_names.include?(address)
+
+    delay = opt[:delay] || 0.5
+    tries = opt[:tries] || 5
+    begin
+      io = bind_listen(address, opt)
+      unless Kgio::TCPServer === io || Kgio::UNIXServer === io
+        IO_PURGATORY << io
+        io = server_cast(io)
+      end
+      logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
+      LISTENERS << io
+      io
+    rescue Errno::EADDRINUSE => err
+      logger.error "adding listener failed addr=#{address} (in use)"
+      raise err if tries == 0
+      tries -= 1
+      logger.error "retrying in #{delay} seconds " \
+                   "(#{tries < 0 ? 'infinite' : tries} tries left)"
+      sleep(delay)
+      retry
+    rescue => err
+      logger.fatal "error adding listener addr=#{address}"
+      raise err
+    end
+  end
+
+  # monitors children and receives signals forever
+  # (or until a termination signal is sent).  This handles signals
+  # one-at-a-time and we'll happily drop signals in case somebody
+  # is signalling us too often.
+  def join
+    respawn = true
+    last_check = Time.now
+
+    proc_name 'master'
+    logger.info "master process ready" # test_exec.rb relies on this message
+    if @ready_pipe
+      @ready_pipe.syswrite($$.to_s)
+      @ready_pipe.close rescue nil
+      @ready_pipe = nil
+    end
+    begin
+      reap_all_workers
+      case SIG_QUEUE.shift
+      when nil
+        # avoid murdering workers after our master process (or the
+        # machine) comes out of suspend/hibernation
+        if (last_check + @timeout) >= (last_check = Time.now)
+          sleep_time = murder_lazy_workers
+        else
+          # wait for workers to wakeup on suspend
+          sleep_time = @timeout/2.0 + 1
+        end
+        maintain_worker_count if respawn
+        master_sleep(sleep_time)
+      when :QUIT # graceful shutdown
+        break
+      when :TERM, :INT # immediate shutdown
+        stop(false)
+        break
+      when :USR1 # rotate logs
+        logger.info "master reopening logs..."
+        Unicorn::Util.reopen_logs
+        logger.info "master done reopening logs"
+        kill_each_worker(:USR1)
+      when :USR2 # exec binary, stay alive in case something went wrong
+        reexec
+      when :WINCH
+        if Process.ppid == 1 || Process.getpgrp != $$
+          respawn = false
+          logger.info "gracefully stopping all workers"
+          kill_each_worker(:QUIT)
+          self.worker_processes = 0
+        else
+          logger.info "SIGWINCH ignored because we're not daemonized"
+        end
+      when :TTIN
+        respawn = true
+        self.worker_processes += 1
+      when :TTOU
+        self.worker_processes -= 1 if self.worker_processes > 0
+      when :HUP
+        respawn = true
+        if config.config_file
+          load_config!
+        else # exec binary and exit if there's no config file
+          logger.info "config_file not present, reexecuting binary"
+          reexec
+        end
+      end
+    rescue Errno::EINTR
+    rescue => e
+      logger.error "Unhandled master loop exception #{e.inspect}."
+      logger.error e.backtrace.join("\n")
+    end while true
+    stop # gracefully shutdown all workers on our way out
+    logger.info "master complete"
+    unlink_pid_safe(pid) if pid
+  end
+
+  # Terminates all workers, but does not exit master process
+  def stop(graceful = true)
+    self.listeners = []
+    limit = Time.now + timeout
+    until WORKERS.empty? || Time.now > limit
+      kill_each_worker(graceful ? :QUIT : :TERM)
+      sleep(0.1)
+      reap_all_workers
+    end
+    kill_each_worker(:KILL)
+  end
+
+  def rewindable_input
+    Unicorn::HttpRequest.input_class.method_defined?(:rewind)
+  end
+
+  def rewindable_input=(bool)
+    Unicorn::HttpRequest.input_class = bool ?
+                                Unicorn::TeeInput : Unicorn::StreamInput
+  end
+
+  def client_body_buffer_size
+    Unicorn::TeeInput.client_body_buffer_size
+  end
+
+  def client_body_buffer_size=(bytes)
+    Unicorn::TeeInput.client_body_buffer_size = bytes
+  end
+
+  def trust_x_forwarded
+    Unicorn::HttpParser.trust_x_forwarded?
+  end
+
+  def trust_x_forwarded=(bool)
+    Unicorn::HttpParser.trust_x_forwarded = bool
+  end
+
+  private
+
+  # wait for a signal handler to wake us up and then consume the pipe
+  def master_sleep(sec)
+    IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
+    SELF_PIPE[0].kgio_tryread(11)
+  end
+
+  def awaken_master
+    SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
+  end
+
+  # reaps all unreaped workers
+  def reap_all_workers
+    begin
+      wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+      wpid or return
+      if reexec_pid == wpid
+        logger.error "reaped #{status.inspect} exec()-ed"
+        self.reexec_pid = 0
+        self.pid = pid.chomp('.oldbin') if pid
+        proc_name 'master'
+      else
+        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+        m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
+        status.success? ? logger.info(m) : logger.error(m)
+      end
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  # reexecutes the START_CTX with a new binary
+  def reexec
+    if reexec_pid > 0
+      begin
+        Process.kill(0, reexec_pid)
+        logger.error "reexec-ed child already running PID:#{reexec_pid}"
+        return
+      rescue Errno::ESRCH
+        self.reexec_pid = 0
+      end
+    end
+
+    if pid
+      old_pid = "#{pid}.oldbin"
+      begin
+        self.pid = old_pid  # clear the path for a new pid file
+      rescue ArgumentError
+        logger.error "old PID:#{valid_pid?(old_pid)} running with " \
+                     "existing pid=#{old_pid}, refusing rexec"
+        return
+      rescue => e
+        logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
+        return
+      end
+    end
+
+    self.reexec_pid = fork do
+      listener_fds = LISTENERS.map { |sock| sock.fileno }
+      ENV['UNICORN_FD'] = listener_fds.join(',')
+      Dir.chdir(START_CTX[:cwd])
+      cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
+
+      # avoid leaking FDs we don't know about, but let before_exec
+      # unset FD_CLOEXEC, if anything else in the app eventually
+      # relies on FD inheritance.
+      (3..1024).each do |io|
+        next if listener_fds.include?(io)
+        io = IO.for_fd(io) rescue next
+        IO_PURGATORY << io
+        io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+      end
+      logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
+      before_exec.call(self)
+      exec(*cmd)
+    end
+    proc_name 'master (old)'
+  end
+
+  # forcibly terminate all workers that haven't checked in in timeout
+  # seconds.  The timeout is implemented using an unlinked File
+  # shared between the parent process and each worker.  The worker
+  # runs File#chmod to modify the ctime of the File.  If the ctime
+  # is stale for >timeout seconds, then we'll kill the corresponding
+  # worker.
+  def murder_lazy_workers
+    t = @timeout
+    next_sleep = 1
+    WORKERS.dup.each_pair do |wpid, worker|
+      stat = worker.tmp.stat
+      # skip workers that disable fchmod or have never fchmod-ed
+      stat.mode == 0100600 and next
+      diff = Time.now - stat.ctime
+      if diff <= t
+        tmp = t - diff
+        next_sleep < tmp and next_sleep = tmp
+        next
+      end
+      logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
+                   "(#{diff}s > #{t}s), killing"
+      kill_worker(:KILL, wpid) # take no prisoners for timeout violations
+    end
+    next_sleep
+  end
+
+  def after_fork_internal
+    @ready_pipe.close if @ready_pipe
+    self.ready_pipe = nil # XXX Rainbows! compat, change for Unicorn 4.x
+    srand # http://redmine.ruby-lang.org/issues/4338
+
+    # The OpenSSL PRNG is seeded with only the pid, and apps with frequently
+    # dying workers can recycle pids
+    OpenSSL::Random.seed(rand.to_s) if defined?(OpenSSL::Random)
+  end
+
+  def spawn_missing_workers
+    (0...worker_processes).each do |worker_nr|
+      WORKERS.values.include?(worker_nr) and next
+      worker = Worker.new(worker_nr, Unicorn::TmpIO.new)
+      before_fork.call(self, worker)
+      WORKERS[fork {
+        after_fork_internal
+        worker_loop(worker)
+      }] = worker
+    end
+  end
+
+  def maintain_worker_count
+    (off = WORKERS.size - worker_processes) == 0 and return
+    off < 0 and return spawn_missing_workers
+    WORKERS.dup.each_pair { |wpid,w|
+      w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
+    }
+  end
+
+  # if we get any error, try to write something back to the client
+  # assuming we haven't closed the socket, but don't get hung up
+  # if the socket is already closed or broken.  We'll always ensure
+  # the socket is closed at the end of this function
+  def handle_error(client, e)
+    msg = case e
+    when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+      Unicorn::Const::ERROR_500_RESPONSE
+    when Unicorn::HttpParserError # try to tell the client they're bad
+      Unicorn::Const::ERROR_400_RESPONSE
+    else
+      logger.error "Read error: #{e.inspect}"
+      logger.error e.backtrace.join("\n")
+      Unicorn::Const::ERROR_500_RESPONSE
+    end
+    client.kgio_trywrite(msg)
+    client.close
+    rescue
+  end
+
+  # once a client is accepted, it is processed in its entirety here
+  # in 3 easy steps: read request, call app, write app response
+  def process_client(client)
+    status, headers, body = @app.call(env = @request.read(client))
+
+    if 100 == status.to_i
+      client.write(Unicorn::Const::EXPECT_100_RESPONSE)
+      env.delete(Unicorn::Const::HTTP_EXPECT)
+      status, headers, body = @app.call(env)
+    end
+    @request.headers? or headers = nil
+    http_response_write(client, status, headers, body)
+    client.close # flush and uncork socket immediately, no keepalive
+  rescue => e
+    handle_error(client, e)
+  end
+
+  # gets rid of stuff the worker has no business keeping track of
+  # to free some resources and drops all sig handlers.
+  # traps for USR1, USR2, and HUP may be set in the after_fork Proc
+  # by the user.
+  def init_worker_process(worker)
+    QUEUE_SIGS.each { |sig| trap(sig, nil) }
+    trap(:CHLD, 'DEFAULT')
+    SIG_QUEUE.clear
+    proc_name "worker[#{worker.nr}]"
+    START_CTX.clear
+    init_self_pipe!
+    WORKERS.values.each { |other| other.tmp.close rescue nil }
+    WORKERS.clear
+    LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    after_fork.call(self, worker) # can drop perms
+    worker.user(*user) if user.kind_of?(Array) && ! worker.switched
+    self.timeout /= 2.0 # halve it for select()
+    build_app! unless preload_app
+  end
+
+  def reopen_worker_logs(worker_nr)
+    logger.info "worker=#{worker_nr} reopening logs..."
+    Unicorn::Util.reopen_logs
+    logger.info "worker=#{worker_nr} done reopening logs"
+    init_self_pipe!
+    rescue => e
+      logger.error(e) rescue nil
+      exit!(77) # EX_NOPERM in sysexits.h
+  end
+
+  # runs inside each forked worker, this sits around and waits
+  # for connections and doesn't die until the parent dies (or is
+  # given a INT, QUIT, or TERM signal)
+  def worker_loop(worker)
+    ppid = master_pid
+    init_worker_process(worker)
+    nr = 0 # this becomes negative if we need to reopen logs
+    alive = worker.tmp # tmp is our lifeline to the master process
+    ready = LISTENERS
+
+    # closing anything we IO.select on will raise EBADF
+    trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
+    trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil }.clear }
+    [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+    logger.info "worker=#{worker.nr} ready"
+    m = 0
+
+    begin
+      nr < 0 and reopen_worker_logs(worker.nr)
+      nr = 0
+
+      # we're a goner in timeout seconds anyways if alive.chmod
+      # breaks, so don't trap the exception.  Using fchmod() since
+      # futimes() is not available in base Ruby and I very strongly
+      # prefer temporary files to be unlinked for security,
+      # performance and reliability reasons, so utime is out.  No-op
+      # changes with chmod doesn't update ctime on all filesystems; so
+      # we change our counter each and every time (after process_client
+      # and before IO.select).
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      ready.each do |sock|
+        if client = sock.kgio_tryaccept
+          process_client(client)
+          nr += 1
+          alive.chmod(m = 0 == m ? 1 : 0)
+        end
+        break if nr < 0
+      end
+
+      # make the following bet: if we accepted clients this round,
+      # we're probably reasonably busy, so avoid calling select()
+      # and do a speculative non-blocking accept() on ready listeners
+      # before we sleep again in select().
+      redo unless nr == 0 # (nr < 0) => reopen logs
+
+      ppid == Process.ppid or return
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      # timeout used so we can detect parent death:
+      ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) and ready = ret[0]
+    rescue Errno::EINTR
+      ready = LISTENERS
+    rescue Errno::EBADF
+      nr < 0 or return
+    rescue => e
+      if alive
+        logger.error "Unhandled listen loop exception #{e.inspect}."
+        logger.error e.backtrace.join("\n")
+      end
+    end while alive
+  end
+
+  # delivers a signal to a worker and fails gracefully if the worker
+  # is no longer running.
+  def kill_worker(signal, wpid)
+    Process.kill(signal, wpid)
+    rescue Errno::ESRCH
+      worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+  end
+
+  # delivers a signal to each worker
+  def kill_each_worker(signal)
+    WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
+  end
+
+  # unlinks a PID file at given +path+ if it contains the current PID
+  # still potentially racy without locking the directory (which is
+  # non-portable and may interact badly with other programs), but the
+  # window for hitting the race condition is small
+  def unlink_pid_safe(path)
+    (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
+  end
+
+  # returns a PID if a given path contains a non-stale PID file,
+  # nil otherwise.
+  def valid_pid?(path)
+    wpid = File.read(path).to_i
+    wpid <= 0 and return
+    Process.kill(0, wpid)
+    wpid
+    rescue Errno::ESRCH, Errno::ENOENT
+      # don't unlink stale pid files, racy without non-portable locking...
+  end
+
+  def load_config!
+    loaded_app = app
+    logger.info "reloading config_file=#{config.config_file}"
+    config[:listeners].replace(init_listeners)
+    config.reload
+    config.commit!(self)
+    kill_each_worker(:QUIT)
+    Unicorn::Util.reopen_logs
+    self.app = orig_app
+    build_app! if preload_app
+    logger.info "done reloading config_file=#{config.config_file}"
+  rescue StandardError, LoadError, SyntaxError => e
+    logger.error "error reloading config_file=#{config.config_file}: " \
+                 "#{e.class} #{e.message} #{e.backtrace}"
+    self.app = loaded_app
+  end
+
+  # returns an array of string names for the given listener array
+  def listener_names(listeners = LISTENERS)
+    listeners.map { |io| sock_name(io) }
+  end
+
+  def build_app!
+    if app.respond_to?(:arity) && app.arity == 0
+      if defined?(Gem) && Gem.respond_to?(:refresh)
+        logger.info "Refreshing Gem list"
+        Gem.refresh
+      end
+      self.app = app.call
+    end
+  end
+
+  def proc_name(tag)
+    $0 = ([ File.basename(START_CTX[0]), tag
+          ]).concat(START_CTX[:argv]).join(' ')
+  end
+
+  def redirect_io(io, path)
+    File.open(path, 'ab') { |fp| io.reopen(fp) } if path
+    io.sync = true
+  end
+
+  def init_self_pipe!
+    SELF_PIPE.each { |io| io.close rescue nil }
+    SELF_PIPE.replace(Kgio::Pipe.new)
+    SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  end
+end
+
diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb
index 0d415dd..5eafe5b 100644
--- a/lib/unicorn/launcher.rb
+++ b/lib/unicorn/launcher.rb
@@ -1,5 +1,6 @@
 # -*- encoding: binary -*-
 
+# :enddoc:
 $stdout.sync = $stderr.sync = true
 $stdin.binmode
 $stdout.binmode
@@ -20,6 +21,7 @@ module Unicorn::Launcher
   #     to pickup code changes if the original deployment directory
   #     is a symlink or otherwise got replaced.
   def self.daemonize!(options)
+    cfg = Unicorn::Configurator
     $stdin.reopen("/dev/null")
 
     # We only start a new process group if we're not being reexecuted
@@ -52,9 +54,9 @@ module Unicorn::Launcher
       end
     end
     # $stderr/$stderr can/will be redirected separately in the Unicorn config
-    Unicorn::Configurator::DEFAULTS[:stderr_path] ||= "/dev/null"
-    Unicorn::Configurator::DEFAULTS[:stdout_path] ||= "/dev/null"
-    Unicorn::Configurator::RACKUP[:daemonized] = true
+    cfg::DEFAULTS[:stderr_path] ||= "/dev/null"
+    cfg::DEFAULTS[:stdout_path] ||= "/dev/null"
+    cfg::RACKUP[:daemonized] = true
   end
 
 end
diff --git a/lib/unicorn/oob_gc.rb b/lib/unicorn/oob_gc.rb
index a0e8f1d..312b44c 100644
--- a/lib/unicorn/oob_gc.rb
+++ b/lib/unicorn/oob_gc.rb
@@ -47,9 +47,9 @@ module Unicorn::OobGC
     @@nr = interval
     self.const_set :OOBGC_PATH, path
     self.const_set :OOBGC_INTERVAL, interval
-    self.const_set :OOBGC_ENV, Unicorn::HttpRequest::REQ
     ObjectSpace.each_object(Unicorn::HttpServer) do |s|
       s.extend(self)
+      self.const_set :OOBGC_ENV, s.instance_variable_get(:@request).env
     end
     app # pretend to be Rack middleware since it was in the past
   end
diff --git a/lib/unicorn/preread_input.rb b/lib/unicorn/preread_input.rb
new file mode 100644
index 0000000..12eb3e8
--- /dev/null
+++ b/lib/unicorn/preread_input.rb
@@ -0,0 +1,33 @@
+# -*- encoding: binary -*-
+
+module Unicorn
+# This middleware is used to ensure input is buffered to memory
+# or disk (depending on size) before the application is dispatched
+# by entirely consuming it (from TeeInput) beforehand.
+#
+# Usage (in config.ru):
+#
+#     require 'unicorn/preread_input'
+#     if defined?(Unicorn)
+#       use Unicorn::PrereadInput
+#     end
+#     run YourApp.new
+class PrereadInput
+
+  # :stopdoc:
+  def initialize(app)
+    @app = app
+  end
+
+  def call(env)
+    buf = ""
+    input = env["rack.input"]
+    if input.respond_to?(:rewind)
+      true while input.read(16384, buf)
+      input.rewind
+    end
+    @app.call(env)
+  end
+  # :startdoc:
+end
+end
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index 1d03eab..9f2d55c 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -17,9 +17,16 @@ module Unicorn
       # denial-of-service attacks
       :tcp_defer_accept => 1,
 
-      # FreeBSD, we need to override this to 'dataready' when we
+      # FreeBSD, we need to override this to 'dataready' if we
       # eventually get HTTPS support
       :accept_filter => 'httpready',
+
+      # same default value as Mongrel
+      :backlog => 1024,
+
+      # since we don't do keepalive, we'll always flush-on-close and
+      # this saves packets for everyone.
+      :tcp_nopush => true,
     }
     #:startdoc:
 
@@ -41,19 +48,20 @@ module Unicorn
     end
 
     def set_tcp_sockopt(sock, opt)
-
       # highly portable, but off by default because we don't do keepalive
-      if defined?(TCP_NODELAY) && ! (val = opt[:tcp_nodelay]).nil?
+      if defined?(TCP_NODELAY)
+        val = opt[:tcp_nodelay]
+        val = DEFAULTS[:tcp_nodelay] if nil == val
         sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, val ? 1 : 0)
       end
 
-      unless (val = opt[:tcp_nopush]).nil?
-        val = val ? 1 : 0
-        if defined?(TCP_CORK) # Linux
-          sock.setsockopt(IPPROTO_TCP, TCP_CORK, val)
-        elsif defined?(TCP_NOPUSH) # TCP_NOPUSH is untested (FreeBSD)
-          sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val)
-        end
+      val = opt[:tcp_nopush]
+      val = DEFAULTS[:tcp_nopush] if nil == val
+      val = val ? 1 : 0
+      if defined?(TCP_CORK) # Linux
+        sock.setsockopt(IPPROTO_TCP, TCP_CORK, val)
+      elsif defined?(TCP_NOPUSH) # TCP_NOPUSH is untested (FreeBSD)
+        sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val)
       end
 
       # No good reason to ever have deferred accepts off
@@ -61,26 +69,24 @@ module Unicorn
       if defined?(TCP_DEFER_ACCEPT)
         # this differs from nginx, since nginx doesn't allow us to
         # configure the the timeout...
-        tmp = DEFAULTS.merge(opt)
-        seconds = tmp[:tcp_defer_accept]
-        seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
+        seconds = opt[:tcp_defer_accept]
+        seconds = DEFAULTS[:tcp_defer_accept] if [true,nil].include?(seconds)
         seconds = 0 unless seconds # nil/false means disable this
         sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
       elsif respond_to?(:accf_arg)
-        tmp = DEFAULTS.merge(opt)
-        if name = tmp[:accept_filter]
-          begin
-            sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
-          rescue => e
-            logger.error("#{sock_name(sock)} " \
-                         "failed to set accept_filter=#{name} (#{e.inspect})")
-          end
+        name = opt[:accept_filter]
+        name = DEFAULTS[:accept_filter] if nil == name
+        begin
+          sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+        rescue => e
+          logger.error("#{sock_name(sock)} " \
+                       "failed to set accept_filter=#{name} (#{e.inspect})")
         end
       end
     end
 
     def set_server_sockopt(sock, opt)
-      opt ||= {}
+      opt = DEFAULTS.merge(opt || {})
 
       TCPSocket === sock and set_tcp_sockopt(sock, opt)
 
@@ -90,7 +96,7 @@ module Unicorn
         sock.setsockopt(SOL_SOCKET, SO_SNDBUF, opt[:sndbuf]) if opt[:sndbuf]
         log_buffer_sizes(sock, " after: ")
       end
-      sock.listen(opt[:backlog] || 1024)
+      sock.listen(opt[:backlog])
       rescue => e
         logger.error "error setting socket options: #{e.inspect}"
         logger.error e.backtrace.join("\n")
@@ -126,12 +132,13 @@ module Unicorn
         end
         old_umask = File.umask(opt[:umask] || 0)
         begin
-          UNIXServer.new(address)
+          Kgio::UNIXServer.new(address)
         ensure
           File.umask(old_umask)
         end
-      elsif address =~ /^(\d+\.\d+\.\d+\.\d+):(\d+)$/
-        TCPServer.new($1, $2.to_i)
+      elsif /\A(\d+\.\d+\.\d+\.\d+):(\d+)\z/ =~ address ||
+            /\A\[([a-fA-F0-9:]+)\]:(\d+)\z/ =~ address
+        Kgio::TCPServer.new($1, $2.to_i)
       else
         raise ArgumentError, "Don't know how to bind: #{address}"
       end
@@ -139,6 +146,13 @@ module Unicorn
       sock
     end
 
+    # returns rfc2732-style (e.g. "[::1]:666") addresses for IPv6
+    def tcp_name(sock)
+      port, addr = Socket.unpack_sockaddr_in(sock.getsockname)
+      /:/ =~ addr ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
+    end
+    module_function :tcp_name
+
     # Returns the configuration name of a socket as a string.  sock may
     # be a string value, in which case it is returned as-is
     # Warning: TCP sockets may not always return the name given to it.
@@ -148,10 +162,10 @@ module Unicorn
       when UNIXServer
         Socket.unpack_sockaddr_un(sock.getsockname)
       when TCPServer
-        Socket.unpack_sockaddr_in(sock.getsockname).reverse!.join(':')
+        tcp_name(sock)
       when Socket
         begin
-          Socket.unpack_sockaddr_in(sock.getsockname).reverse!.join(':')
+          tcp_name(sock)
         rescue ArgumentError
           Socket.unpack_sockaddr_un(sock.getsockname)
         end
@@ -166,9 +180,9 @@ module Unicorn
     def server_cast(sock)
       begin
         Socket.unpack_sockaddr_in(sock.getsockname)
-        TCPServer.for_fd(sock.fileno)
+        Kgio::TCPServer.for_fd(sock.fileno)
       rescue ArgumentError
-        UNIXServer.for_fd(sock.fileno)
+        Kgio::UNIXServer.for_fd(sock.fileno)
       end
     end
 
diff --git a/lib/unicorn/stream_input.rb b/lib/unicorn/stream_input.rb
new file mode 100644
index 0000000..4ca5a04
--- /dev/null
+++ b/lib/unicorn/stream_input.rb
@@ -0,0 +1,145 @@
+# -*- encoding: binary -*-
+
+# When processing uploads, Unicorn may expose a StreamInput object under
+# "rack.input" of the (future) Rack (2.x) environment.
+class Unicorn::StreamInput
+  # The I/O chunk size (in +bytes+) for I/O operations where
+  # the size cannot be user-specified when a method is called.
+  # The default is 16 kilobytes.
+  @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+
+  # Initializes a new StreamInput object.  You normally do not have to call
+  # this unless you are writing an HTTP server.
+  def initialize(socket, request)
+    @chunked = request.content_length.nil?
+    @socket = socket
+    @parser = request
+    @buf = request.buf
+    @rbuf = ''
+    @bytes_read = 0
+    filter_body(@rbuf, @buf) unless @buf.empty?
+  end
+
+  # :call-seq:
+  #   ios.read([length [, buffer ]]) => string, buffer, or nil
+  #
+  # Reads at most length bytes from the I/O stream, or to the end of
+  # file if length is omitted or is nil. length must be a non-negative
+  # integer or nil. If the optional buffer argument is present, it
+  # must reference a String, which will receive the data.
+  #
+  # At end of file, it returns nil or '' depending on length.
+  # ios.read() and ios.read(nil) return ''.
+  # ios.read(length [, buffer]) returns nil.
+  #
+  # If the Content-Length of the HTTP request is known (as is the common
+  # case for POST requests), then ios.read(length [, buffer]) will block
+  # until the specified length is read (or it is the last chunk).
+  # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+  # ios.read(length [, buffer]) will return immediately if there is
+  # any data and only block when nothing is available (providing
+  # IO#readpartial semantics).
+  def read(length = nil, rv = '')
+    if length
+      if length <= @rbuf.size
+        length < 0 and raise ArgumentError, "negative length #{length} given"
+        rv.replace(@rbuf.slice!(0, length))
+      else
+        to_read = length - @rbuf.size
+        rv.replace(@rbuf.slice!(0, @rbuf.size))
+        until to_read == 0 || eof? || (rv.size > 0 && @chunked)
+          @socket.kgio_read(to_read, @buf) or eof!
+          filter_body(@rbuf, @buf)
+          rv << @rbuf
+          to_read -= @rbuf.size
+        end
+        @rbuf.replace('')
+      end
+      rv = nil if rv.empty? && length != 0
+    else
+      read_all(rv)
+    end
+    rv
+  end
+
+  # :call-seq:
+  #   ios.gets   => string or nil
+  #
+  # Reads the next ``line'' from the I/O stream; lines are separated
+  # by the global record separator ($/, typically "\n"). A global
+  # record separator of nil reads the entire unread contents of ios.
+  # Returns nil if called at the end of file.
+  # This takes zero arguments for strict Rack::Lint compatibility,
+  # unlike IO#gets.
+  def gets
+    sep = $/
+    if sep.nil?
+      read_all(rv = '')
+      return rv.empty? ? nil : rv
+    end
+    re = /\A(.*?#{Regexp.escape(sep)})/
+
+    begin
+      @rbuf.sub!(re, '') and return $1
+      return @rbuf.empty? ? nil : @rbuf.slice!(0, @rbuf.size) if eof?
+      @socket.kgio_read(@@io_chunk_size, @buf) or eof!
+      filter_body(once = '', @buf)
+      @rbuf << once
+    end while true
+  end
+
+  # :call-seq:
+  #   ios.each { |line| block }  => ios
+  #
+  # Executes the block for every ``line'' in *ios*, where lines are
+  # separated by the global record separator ($/, typically "\n").
+  def each
+    while line = gets
+      yield line
+    end
+
+    self # Rack does not specify what the return value is here
+  end
+
+private
+
+  def eof?
+    if @parser.body_eof?
+      while @chunked && ! @parser.parse
+        once = @socket.kgio_read(@@io_chunk_size) or eof!
+        @buf << once
+      end
+      @socket = nil
+      true
+    else
+      false
+    end
+  end
+
+  def filter_body(dst, src)
+    rv = @parser.filter_body(dst, src)
+    @bytes_read += dst.size
+    rv
+  end
+
+  def read_all(dst)
+    dst.replace(@rbuf)
+    @socket or return
+    until eof?
+      @socket.kgio_read(@@io_chunk_size, @buf) or eof!
+      filter_body(@rbuf, @buf)
+      dst << @rbuf
+    end
+    ensure
+      @rbuf.replace('')
+  end
+
+  def eof!
+    # in case client only did a premature shutdown(SHUT_WR)
+    # we do support clients that shutdown(SHUT_WR) after the
+    # _entire_ request has been sent, and those will not have
+    # raised EOFError on us.
+    @socket.close if @socket
+    raise Unicorn::ClientShutdown, "bytes_read=#{@bytes_read}", []
+  end
+end
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
index 540cfe0..637c583 100644
--- a/lib/unicorn/tee_input.rb
+++ b/lib/unicorn/tee_input.rb
@@ -11,31 +11,30 @@
 #
 # When processing uploads, Unicorn exposes a TeeInput object under
 # "rack.input" of the Rack environment.
-class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
-                                     :buf, :len, :tmp, :buf2)
-
+class Unicorn::TeeInput < Unicorn::StreamInput
   # The maximum size (in +bytes+) to buffer in memory before
   # resorting to a temporary file.  Default is 112 kilobytes.
   @@client_body_buffer_size = Unicorn::Const::MAX_BODY
 
-  # The I/O chunk size (in +bytes+) for I/O operations where
-  # the size cannot be user-specified when a method is called.
-  # The default is 16 kilobytes.
-  @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+  # sets the maximum size of request bodies to buffer in memory,
+  # amounts larger than this are buffered to the filesystem
+  def self.client_body_buffer_size=(bytes)
+    @@client_body_buffer_size = bytes
+  end
+
+  # returns the maximum size of request bodies to buffer in memory,
+  # amounts larger than this are buffered to the filesystem
+  def self.client_body_buffer_size
+    @@client_body_buffer_size
+  end
 
   # Initializes a new TeeInput object.  You normally do not have to call
   # this unless you are writing an HTTP server.
-  def initialize(*args)
-    super(*args)
-    self.len = parser.content_length
-    self.tmp = len && len < @@client_body_buffer_size ?
-               StringIO.new("") : Unicorn::Util.tmpio
-    self.buf2 = ""
-    if buf.size > 0
-      parser.filter_body(buf2, buf) and finalize_input
-      tmp.write(buf2)
-      tmp.rewind
-    end
+  def initialize(socket, request)
+    @len = request.content_length
+    super
+    @tmp = @len && @len <= @@client_body_buffer_size ?
+           StringIO.new("") : Unicorn::TmpIO.new
   end
 
   # :call-seq:
@@ -55,16 +54,11 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # earlier.  Most applications should only need to call +read+ with a
   # specified +length+ in a loop until it returns +nil+.
   def size
-    len and return len
-
-    if socket
-      pos = tmp.pos
-      while tee(@@io_chunk_size, buf2)
-      end
-      tmp.seek(pos)
-    end
-
-    self.len = tmp.size
+    @len and return @len
+    pos = @tmp.pos
+    consume!
+    @tmp.pos = pos
+    @len = @tmp.size
   end
 
   # :call-seq:
@@ -87,24 +81,7 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # any data and only block when nothing is available (providing
   # IO#readpartial semantics).
   def read(*args)
-    socket or return tmp.read(*args)
-
-    length = args.shift
-    if nil == length
-      rv = tmp.read || ""
-      while tee(@@io_chunk_size, buf2)
-        rv << buf2
-      end
-      rv
-    else
-      rv = args.shift || ""
-      diff = tmp.size - tmp.pos
-      if 0 == diff
-        ensure_length(tee(length, rv), length)
-      else
-        ensure_length(tmp.read(diff > length ? length : diff, rv), length)
-      end
-    end
+    @socket ? tee(super) : @tmp.read(*args)
   end
 
   # :call-seq:
@@ -117,43 +94,7 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # This takes zero arguments for strict Rack::Lint compatibility,
   # unlike IO#gets.
   def gets
-    socket or return tmp.gets
-    sep = $/ or return read
-
-    orig_size = tmp.size
-    if tmp.pos == orig_size
-      tee(@@io_chunk_size, buf2) or return nil
-      tmp.seek(orig_size)
-    end
-
-    sep_size = Rack::Utils.bytesize(sep)
-    line = tmp.gets # cannot be nil here since size > pos
-    sep == line[-sep_size, sep_size] and return line
-
-    # unlikely, if we got here, then tmp is at EOF
-    begin
-      orig_size = tmp.pos
-      tee(@@io_chunk_size, buf2) or break
-      tmp.seek(orig_size)
-      line << tmp.gets
-      sep == line[-sep_size, sep_size] and return line
-      # tmp is at EOF again here, retry the loop
-    end while true
-
-    line
-  end
-
-  # :call-seq:
-  #   ios.each { |line| block }  => ios
-  #
-  # Executes the block for every ``line'' in *ios*, where lines are
-  # separated by the global record separator ($/, typically "\n").
-  def each(&block)
-    while line = gets
-      yield line
-    end
-
-    self # Rack does not specify what the return value is here
+    @socket ? tee(super) : @tmp.gets
   end
 
   # :call-seq:
@@ -163,70 +104,23 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
   # the offset (zero) of the +ios+ pointer.  Subsequent reads will
   # start from the beginning of the previously-buffered input.
   def rewind
-    tmp.rewind # Rack does not specify what the return value is here
+    return 0 if 0 == @tmp.size
+    consume! if @socket
+    @tmp.rewind # Rack does not specify what the return value is here
   end
 
 private
 
-  def client_error(e)
-    case e
-    when EOFError
-      # in case client only did a premature shutdown(SHUT_WR)
-      # we do support clients that shutdown(SHUT_WR) after the
-      # _entire_ request has been sent, and those will not have
-      # raised EOFError on us.
-      socket.close if socket
-      raise Unicorn::ClientShutdown, "bytes_read=#{tmp.size}", []
-    when Unicorn::HttpParserError
-      e.set_backtrace([])
-    end
-    raise e
+  # consumes the stream of the socket
+  def consume!
+    junk = ""
+    nil while read(@@io_chunk_size, junk)
   end
 
-  # tees off a +length+ chunk of data from the input into the IO
-  # backing store as well as returning it.  +dst+ must be specified.
-  # returns nil if reading from the input returns nil
-  def tee(length, dst)
-    unless parser.body_eof?
-      if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
-        tmp.write(dst)
-        tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
-        return dst
-      end
+  def tee(buffer)
+    if buffer && buffer.size > 0
+      @tmp.write(buffer)
     end
-    finalize_input
-    rescue => e
-      client_error(e)
+    buffer
   end
-
-  def finalize_input
-    while parser.trailers(req, buf).nil?
-      # Don't worry about raising ClientShutdown here on EOFError, tee()
-      # will catch EOFError when app is processing it, otherwise in
-      # initialize we never get any chance to enter the app so the
-      # EOFError will just get trapped by Unicorn and not the Rack app
-      buf << socket.readpartial(@@io_chunk_size)
-    end
-    self.socket = nil
-  end
-
-  # tee()s into +dst+ until it is of +length+ bytes (or until
-  # we've reached the Content-Length of the request body).
-  # Returns +dst+ (the exact object, not a duplicate)
-  # To continue supporting applications that need near-real-time
-  # streaming input bodies, this is a no-op for
-  # "Transfer-Encoding: chunked" requests.
-  def ensure_length(dst, length)
-    # len is nil for chunked bodies, so we can't ensure length for those
-    # since they could be streaming bidirectionally and we don't want to
-    # block the caller in that case.
-    return dst if dst.nil? || len.nil?
-
-    while dst.size < length && tee(length - dst.size, buf2)
-      dst << buf2
-    end
-
-    dst
-  end
-
 end
diff --git a/lib/unicorn/tmpio.rb b/lib/unicorn/tmpio.rb
new file mode 100644
index 0000000..2da05a2
--- /dev/null
+++ b/lib/unicorn/tmpio.rb
@@ -0,0 +1,29 @@
+# -*- encoding: binary -*-
+# :stopdoc:
+require 'tmpdir'
+
+# some versions of Ruby had a broken Tempfile which didn't work
+# well with unlinked files.  This one is much shorter, easier
+# to understand, and slightly faster.
+class Unicorn::TmpIO < File
+
+  # creates and returns a new File object.  The File is unlinked
+  # immediately, switched to binary mode, and userspace output
+  # buffering is disabled
+  def self.new
+    fp = begin
+      super("#{Dir::tmpdir}/#{rand}", RDWR|CREAT|EXCL, 0600)
+    rescue Errno::EEXIST
+      retry
+    end
+    unlink(fp.path)
+    fp.binmode
+    fp.sync = true
+    fp
+  end
+
+  # for easier env["rack.input"] compatibility with Rack <= 1.1
+  def size
+    stat.size
+  end unless File.method_defined?(:size)
+end
diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb
index e9dd57f..cde2563 100644
--- a/lib/unicorn/util.rb
+++ b/lib/unicorn/util.rb
@@ -1,101 +1,68 @@
 # -*- encoding: binary -*-
 
-require 'fcntl'
-require 'tmpdir'
+module Unicorn::Util
 
-module Unicorn
+# :stopdoc:
+  def self.is_log?(fp)
+    append_flags = File::WRONLY | File::APPEND
 
-  class TmpIO < ::File
+    ! fp.closed? &&
+      fp.sync &&
+      (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+    rescue IOError, Errno::EBADF
+      false
+  end
 
-    # for easier env["rack.input"] compatibility
-    def size
-      # flush if sync
-      stat.size
+  def self.chown_logs(uid, gid)
+    ObjectSpace.each_object(File) do |fp|
+      fp.chown(uid, gid) if is_log?(fp)
     end
   end
-
-  module Util
-    class << self
-
-      def is_log?(fp)
-        append_flags = File::WRONLY | File::APPEND
-
-        ! fp.closed? &&
-          fp.sync &&
-          fp.path[0] == ?/ &&
-          (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
-        rescue IOError, Errno::EBADF
-          false
+# :startdoc:
+
+  # This reopens ALL logfiles in the process that have been rotated
+  # using logrotate(8) (without copytruncate) or similar tools.
+  # A +File+ object is considered for reopening if it is:
+  #   1) opened with the O_APPEND and O_WRONLY flags
+  #   2) the current open file handle does not match its original open path
+  #   3) unbuffered (as far as userspace buffering goes, not O_SYNC)
+  # Returns the number of files reopened
+  #
+  # In Unicorn 3.5.x and earlier, files must be opened with an absolute
+  # path to be considered a log file.
+  def self.reopen_logs
+    to_reopen = []
+    nr = 0
+    ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
+
+    to_reopen.each do |fp|
+      orig_st = begin
+        fp.stat
+      rescue IOError, Errno::EBADF
+        next
       end
 
-      def chown_logs(uid, gid)
-        ObjectSpace.each_object(File) do |fp|
-          fp.chown(uid, gid) if is_log?(fp)
-        end
+      begin
+        b = File.stat(fp.path)
+        next if orig_st.ino == b.ino && orig_st.dev == b.dev
+      rescue Errno::ENOENT
       end
 
-      # This reopens ALL logfiles in the process that have been rotated
-      # using logrotate(8) (without copytruncate) or similar tools.
-      # A +File+ object is considered for reopening if it is:
-      #   1) opened with the O_APPEND and O_WRONLY flags
-      #   2) opened with an absolute path (starts with "/")
-      #   3) the current open file handle does not match its original open path
-      #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
-      # Returns the number of files reopened
-      def reopen_logs
-        to_reopen = []
-        nr = 0
-        ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
-
-        to_reopen.each do |fp|
-          orig_st = begin
-            fp.stat
-          rescue IOError, Errno::EBADF
-            next
-          end
-
-          begin
-            b = File.stat(fp.path)
-            next if orig_st.ino == b.ino && orig_st.dev == b.dev
-          rescue Errno::ENOENT
-          end
-
-          begin
-            File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
-            fp.sync = true
-            new_st = fp.stat
-
-            # this should only happen in the master:
-            if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
-              fp.chown(orig_st.uid, orig_st.gid)
-            end
+      begin
+        File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
+        fp.sync = true
+        new_st = fp.stat
 
-            nr += 1
-          rescue IOError, Errno::EBADF
-            # not much we can do...
-          end
+        # this should only happen in the master:
+        if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
+          fp.chown(orig_st.uid, orig_st.gid)
         end
 
-        nr
-      end
-
-      # creates and returns a new File object.  The File is unlinked
-      # immediately, switched to binary mode, and userspace output
-      # buffering is disabled
-      def tmpio
-        fp = begin
-          TmpIO.open("#{Dir::tmpdir}/#{rand}",
-                     File::RDWR|File::CREAT|File::EXCL, 0600)
-        rescue Errno::EEXIST
-          retry
-        end
-        File.unlink(fp.path)
-        fp.binmode
-        fp.sync = true
-        fp
+        nr += 1
+      rescue IOError, Errno::EBADF
+        # not much we can do...
       end
-
     end
-
+    nr
   end
 end
diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb
new file mode 100644
index 0000000..39e9e32
--- /dev/null
+++ b/lib/unicorn/worker.rb
@@ -0,0 +1,47 @@
+# -*- encoding: binary -*-
+
+# This class and its members can be considered a stable interface
+# and will not change in a backwards-incompatible fashion between
+# releases of \Unicorn.  Knowledge of this class is generally not
+# not needed for most users of \Unicorn.
+#
+# Some users may want to access it in the before_fork/after_fork hooks.
+# See the Unicorn::Configurator RDoc for examples.
+class Unicorn::Worker < Struct.new(:nr, :tmp, :switched)
+
+  # worker objects may be compared to just plain Integers
+  def ==(other_nr) # :nodoc:
+    nr == other_nr
+  end
+
+  # In most cases, you should be using the Unicorn::Configurator#user
+  # directive instead.  This method should only be used if you need
+  # fine-grained control of exactly when you want to change permissions
+  # in your after_fork hooks.
+  #
+  # Changes the worker process to the specified +user+ and +group+
+  # This is only intended to be called from within the worker
+  # process from the +after_fork+ hook.  This should be called in
+  # the +after_fork+ hook after any priviledged functions need to be
+  # run (e.g. to set per-worker CPU affinity, niceness, etc)
+  #
+  # Any and all errors raised within this method will be propagated
+  # directly back to the caller (usually the +after_fork+ hook.
+  # These errors commonly include ArgumentError for specifying an
+  # invalid user/group and Errno::EPERM for insufficient priviledges
+  def user(user, group = nil)
+    # we do not protect the caller, checking Process.euid == 0 is
+    # insufficient because modern systems have fine-grained
+    # capabilities.  Let the caller handle any and all errors.
+    uid = Etc.getpwnam(user).uid
+    gid = Etc.getgrnam(group).gid if group
+    Unicorn::Util.chown_logs(uid, gid)
+    tmp.chown(uid, gid)
+    if gid && Process.egid != gid
+      Process.initgroups(user, gid)
+      Process::GID.change_privilege(gid)
+    end
+    Process.euid != uid and Process::UID.change_privilege(uid)
+    self.switched = true
+  end
+end
diff --git a/local.mk.sample b/local.mk.sample
index c950d87..25bca5d 100644
--- a/local.mk.sample
+++ b/local.mk.sample
@@ -37,15 +37,6 @@ else
   RUBY := $(prefix)/bin/ruby --disable-gems
 endif
 
-# FIXME: use isolate more
-ifndef RUBYLIB
-  gems := rack-1.1.0
-  gem_paths := $(addprefix $(HOME)/lib/ruby/gems/1.8/gems/,$(gems))
-  sp :=
-  sp +=
-  export RUBYLIB := $(subst $(sp),:,$(addsuffix /lib,$(gem_paths)))
-endif
-
 # pipefail is THE reason to use bash (v3+) or never revisions of ksh93
 # SHELL := /bin/bash -e -o pipefail
 SHELL := /bin/ksh93 -e -o pipefail
diff --git a/script/isolate_for_tests b/script/isolate_for_tests
new file mode 100755
index 0000000..b277a1f
--- /dev/null
+++ b/script/isolate_for_tests
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+# scripts/Makefiles can read and eval the output of this script and
+# use it as RUBYLIB
+require 'rubygems'
+require 'isolate'
+fp = File.open(__FILE__, "rb")
+fp.flock(File::LOCK_EX)
+
+ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
+opts = {
+  :system => false,
+  # we want "ruby-1.8.7" and not "ruby-1.8", so disable :multiruby
+  :multiruby => false,
+  :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
+}
+
+pid = fork do
+  Isolate.now!(opts) do
+    gem 'sqlite3-ruby', '1.2.5'
+    gem 'kgio', '2.3.3'
+    gem 'rack', '1.2.2'
+  end
+end
+_, status = Process.waitpid2(pid)
+status.success? or abort status.inspect
+lib_paths = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) }
+dst = "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}.mk"
+File.open("#{dst}.#$$", "w") do |fp|
+  fp.puts "ISOLATE_LIBS=#{lib_paths.join(':')}"
+end
+File.rename("#{dst}.#$$", dst)
+
+# pure Ruby gems can be shared across all Rubies
+%w(3.0.0).each do |rails_ver|
+  opts[:path] = "tmp/isolate/rails-#{rails_ver}"
+  pid = fork do
+    Isolate.now!(opts) do
+      gem 'rails', rails_ver
+    end
+  end
+  _, status = Process.waitpid2(pid)
+  status.success? or abort status.inspect
+  more = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) }
+  lib_paths.concat(more)
+end
diff --git a/t/GNUmakefile b/t/GNUmakefile
index e80c43a..8f2668c 100644
--- a/t/GNUmakefile
+++ b/t/GNUmakefile
@@ -17,6 +17,12 @@ endif
 RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 export RUBY_ENGINE
 
+isolate_libs := ../tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk
+$(isolate_libs): ../script/isolate_for_tests
+        @cd .. && $(RUBY) script/isolate_for_tests
+-include $(isolate_libs)
+MYLIBS := $(RUBYLIB):$(ISOLATE_LIBS)
+
 T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)
 
 all:: $(T)
@@ -58,7 +64,7 @@ $(test_prefix)/.stamp:
 $(T): export RUBY := $(RUBY)
 $(T): export RAKE := $(RAKE)
 $(T): export PATH := $(test_prefix)/bin:$(PATH)
-$(T): export RUBYLIB := $(test_prefix)/lib:$(RUBYLIB)
+$(T): export RUBYLIB := $(test_prefix)/lib:$(MYLIBS)
 $(T): dep $(test_prefix)/.stamp trash/.gitignore
         $(TRACER) $(SHELL) $(SH_TEST_OPTS) $@ $(TEST_OPTS)
 
diff --git a/t/preread_input.ru b/t/preread_input.ru
new file mode 100644
index 0000000..79685c4
--- /dev/null
+++ b/t/preread_input.ru
@@ -0,0 +1,17 @@
+#\-E none
+require 'digest/sha1'
+require 'unicorn/preread_input'
+use Rack::ContentLength
+use Rack::ContentType, "text/plain"
+use Unicorn::PrereadInput
+nr = 0
+run lambda { |env|
+  $stderr.write "app dispatch: #{nr += 1}\n"
+  input = env["rack.input"]
+  dig = Digest::SHA1.new
+  while buf = input.read(16384)
+    dig.update(buf)
+  end
+
+  [ 200, {}, [ "#{dig.hexdigest}\n" ] ]
+}
diff --git a/t/rack-input-tests.ru b/t/rack-input-tests.ru
new file mode 100644
index 0000000..8c35630
--- /dev/null
+++ b/t/rack-input-tests.ru
@@ -0,0 +1,21 @@
+# SHA1 checksum generator
+require 'digest/sha1'
+use Rack::ContentLength
+cap = 16384
+app = lambda do |env|
+  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and
+    return [ 100, {}, [] ]
+  digest = Digest::SHA1.new
+  input = env['rack.input']
+  input.size if env["PATH_INFO"] == "/size_first"
+  input.rewind if env["PATH_INFO"] == "/rewind_first"
+  if buf = input.read(rand(cap))
+    begin
+      raise "#{buf.size} > #{cap}" if buf.size > cap
+      digest.update(buf)
+    end while input.read(rand(cap), buf)
+  end
+
+  [ 200, {'Content-Type' => 'text/plain'}, [ digest.hexdigest << "\n" ] ]
+end
+run app
diff --git a/t/t0002-parser-error.sh b/t/t0002-parser-error.sh
new file mode 100755
index 0000000..9a3e7cf
--- /dev/null
+++ b/t/t0002-parser-error.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 5 "parser error test"
+
+t_begin "setup and startup" && {
+        unicorn_setup
+        unicorn -D env.ru -c $unicorn_config
+        unicorn_wait_start
+}
+
+t_begin "send a bad request" && {
+        (
+                printf 'GET / HTTP/1/1\r\nHost: example.com\r\n\r\n'
+                cat $fifo > $tmp &
+                wait
+                echo ok > $ok
+        ) | socat - TCP:$listen > $fifo
+        test xok = x$(cat $ok)
+}
+
+dbgcat tmp
+
+t_begin "response should be a 400" && {
+        grep -F 'HTTP/1.1 400 Bad Request' $tmp
+}
+
+t_begin "server stderr should be clean" && check_stderr
+
+t_begin "term signal sent" && kill $unicorn_pid
+
+t_done
diff --git a/t/t0003-working_directory.sh b/t/t0003-working_directory.sh
index 53345ae..79988d8 100755
--- a/t/t0003-working_directory.sh
+++ b/t/t0003-working_directory.sh
@@ -1,9 +1,4 @@
 #!/bin/sh
-if test -n "$RBX_SKIP"
-then
-        echo "$0 is broken under Rubinius for now"
-        exit 0
-fi
 . ./test-lib.sh
 
 t_plan 4 "config.ru inside alt working_directory"
diff --git a/t/t0010-reap-logging.sh b/t/t0010-reap-logging.sh
new file mode 100755
index 0000000..93d8c60
--- /dev/null
+++ b/t/t0010-reap-logging.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 9 "reap worker logging messages"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        cat >> $unicorn_config <<EOF
+after_fork { |s,w| File.open('$fifo','w') { |f| f.write '.' } }
+EOF
+        unicorn -c $unicorn_config pid.ru &
+        test '.' = $(cat $fifo)
+        unicorn_wait_start
+}
+
+t_begin "kill 1st worker=0" && {
+        pid_1=$(curl http://$listen/)
+        kill -9 $pid_1
+}
+
+t_begin "wait for 2nd worker to start" && {
+        test '.' = $(cat $fifo)
+}
+
+t_begin "ensure log of 1st reap is an ERROR" && {
+        dbgcat r_err
+        grep 'ERROR.*reaped.*worker=0' $r_err | grep $pid_1
+        dbgcat r_err
+        > $r_err
+}
+
+t_begin "kill 2nd worker gracefully" && {
+        pid_2=$(curl http://$listen/)
+        kill -QUIT $pid_2
+}
+
+t_begin "wait for 3rd worker=0 to start " && {
+        test '.' = $(cat $fifo)
+}
+
+t_begin "ensure log of 2nd reap is a INFO" && {
+        grep 'INFO.*reaped.*worker=0' $r_err | grep $pid_2
+        > $r_err
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+        wait
+        kill -0 $unicorn_pid && false
+}
+
+t_begin "check stderr" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0012-reload-empty-config.sh b/t/t0012-reload-empty-config.sh
index c18c030..81e1fb3 100755
--- a/t/t0012-reload-empty-config.sh
+++ b/t/t0012-reload-empty-config.sh
@@ -50,7 +50,10 @@ t_begin "reload signal succeeds" && {
         do
                 sleep 1
         done
-
+        while ! grep reaped < $r_err >/dev/null
+        do
+                sleep 1
+        done
         grep 'done reloading' $r_err >/dev/null
 }
 
diff --git a/t/t0013-rewindable-input-false.sh b/t/t0013-rewindable-input-false.sh
new file mode 100755
index 0000000..0e89631
--- /dev/null
+++ b/t/t0013-rewindable-input-false.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 4 "rewindable_input toggled to false"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        echo rewindable_input false >> $unicorn_config
+        unicorn -D -c $unicorn_config t0013.ru
+        unicorn_wait_start
+}
+
+t_begin "ensure worker is started" && {
+        test xOK = x$(curl -T t0013.ru -H Expect: -vsSf http://$listen/)
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "check stderr" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0013.ru b/t/t0013.ru
new file mode 100644
index 0000000..48a3a34
--- /dev/null
+++ b/t/t0013.ru
@@ -0,0 +1,12 @@
+#\ -E none
+use Rack::ContentLength
+use Rack::ContentType, 'text/plain'
+app = lambda do |env|
+  case env['rack.input']
+  when Unicorn::StreamInput
+    [ 200, {}, %w(OK) ]
+  else
+    [ 500, {}, %w(NO) ]
+  end
+end
+run app
diff --git a/t/t0014-rewindable-input-true.sh b/t/t0014-rewindable-input-true.sh
new file mode 100755
index 0000000..dd48bc6
--- /dev/null
+++ b/t/t0014-rewindable-input-true.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 4 "rewindable_input toggled to true"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        echo rewindable_input true >> $unicorn_config
+        unicorn -D -c $unicorn_config t0014.ru
+        unicorn_wait_start
+}
+
+t_begin "ensure worker is started" && {
+        test xOK = x$(curl -T t0014.ru -sSf http://$listen/)
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "check stderr" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0014.ru b/t/t0014.ru
new file mode 100644
index 0000000..b0bd2b7
--- /dev/null
+++ b/t/t0014.ru
@@ -0,0 +1,12 @@
+#\ -E none
+use Rack::ContentLength
+use Rack::ContentType, 'text/plain'
+app = lambda do |env|
+  case env['rack.input']
+  when Unicorn::TeeInput
+    [ 200, {}, %w(OK) ]
+  else
+    [ 500, {}, %w(NO) ]
+  end
+end
+run app
diff --git a/t/t0015-configurator-internals.sh b/t/t0015-configurator-internals.sh
new file mode 100755
index 0000000..4e3acbe
--- /dev/null
+++ b/t/t0015-configurator-internals.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 4 "configurator internals tests (from FAQ)"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        cat >> $unicorn_config <<EOF
+HttpRequest::DEFAULTS["rack.url_scheme"] = "https"
+Configurator::DEFAULTS[:logger].formatter = Logger::Formatter.new
+EOF
+        unicorn -D -c $unicorn_config env.ru
+        unicorn_wait_start
+}
+
+t_begin "single request" && {
+        curl -sSfv http://$listen/ | grep '"rack.url_scheme"=>"https"'
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "no errors" && check_stderr
+
+t_done
diff --git a/t/t0016-trust-x-forwarded-false.sh b/t/t0016-trust-x-forwarded-false.sh
new file mode 100755
index 0000000..3163690
--- /dev/null
+++ b/t/t0016-trust-x-forwarded-false.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 5 "trust_x_forwarded=false configuration test"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        echo "trust_x_forwarded false" >> $unicorn_config
+        unicorn -D -c $unicorn_config env.ru
+        unicorn_wait_start
+}
+
+t_begin "spoofed request with X-Forwarded-Proto does not trigger" && {
+        curl -H 'X-Forwarded-Proto: https' http://$listen/ | \
+                grep -F '"rack.url_scheme"=>"http"'
+}
+
+t_begin "spoofed request with X-Forwarded-SSL does not trigger" && {
+        curl -H 'X-Forwarded-SSL: on' http://$listen/ | \
+                grep -F '"rack.url_scheme"=>"http"'
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "check stderr has no errors" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0017-trust-x-forwarded-true.sh b/t/t0017-trust-x-forwarded-true.sh
new file mode 100755
index 0000000..11103c5
--- /dev/null
+++ b/t/t0017-trust-x-forwarded-true.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 5 "trust_x_forwarded=true configuration test"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        echo "trust_x_forwarded true " >> $unicorn_config
+        unicorn -D -c $unicorn_config env.ru
+        unicorn_wait_start
+}
+
+t_begin "spoofed request with X-Forwarded-Proto sets 'https'" && {
+        curl -H 'X-Forwarded-Proto: https' http://$listen/ | \
+                grep -F '"rack.url_scheme"=>"https"'
+}
+
+t_begin "spoofed request with X-Forwarded-SSL sets 'https'" && {
+        curl -H 'X-Forwarded-SSL: on' http://$listen/ | \
+                grep -F '"rack.url_scheme"=>"https"'
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "check stderr has no errors" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0100-rack-input-tests.sh b/t/t0100-rack-input-tests.sh
new file mode 100755
index 0000000..13d4207
--- /dev/null
+++ b/t/t0100-rack-input-tests.sh
@@ -0,0 +1,124 @@
+#!/bin/sh
+. ./test-lib.sh
+test -r random_blob || die "random_blob required, run with 'make $0'"
+
+t_plan 10 "rack.input read tests"
+
+t_begin "setup and startup" && {
+        rtmpfiles curl_out curl_err
+        unicorn_setup
+        unicorn -E none -D rack-input-tests.ru -c $unicorn_config
+        blob_sha1=$(rsha1 < random_blob)
+        blob_size=$(wc -c < random_blob)
+        t_info "blob_sha1=$blob_sha1"
+        unicorn_wait_start
+}
+
+t_begin "corked identity request" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                printf 'PUT / HTTP/1.0\r\n'
+                printf 'Content-Length: %d\r\n\r\n' $blob_size
+                cat random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "corked chunked request" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                content-md5-put < random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "corked identity request (input#size first)" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                printf 'PUT /size_first HTTP/1.0\r\n'
+                printf 'Content-Length: %d\r\n\r\n' $blob_size
+                cat random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "corked identity request (input#rewind first)" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                printf 'PUT /rewind_first HTTP/1.0\r\n'
+                printf 'Content-Length: %d\r\n\r\n' $blob_size
+                cat random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "corked chunked request (input#size first)" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                printf 'PUT /size_first HTTP/1.1\r\n'
+                printf 'Host: example.com\r\n'
+                printf 'Transfer-Encoding: chunked\r\n'
+                printf 'Trailer: Content-MD5\r\n'
+                printf '\r\n'
+                content-md5-put --no-headers < random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "corked chunked request (input#rewind first)" && {
+        rm -f $tmp
+        (
+                cat $fifo > $tmp &
+                printf 'PUT /rewind_first HTTP/1.1\r\n'
+                printf 'Host: example.com\r\n'
+                printf 'Transfer-Encoding: chunked\r\n'
+                printf 'Trailer: Content-MD5\r\n'
+                printf '\r\n'
+                content-md5-put --no-headers < random_blob
+                wait
+                echo ok > $ok
+        ) | ( sleep 1 && socat - TCP4:$listen > $fifo )
+        test 1 -eq $(grep $blob_sha1 $tmp |wc -l)
+        test x"$(cat $ok)" = xok
+}
+
+t_begin "regular request" && {
+        curl -sSf -T random_blob http://$listen/ > $curl_out 2> $curl_err
+        test x$blob_sha1 = x$(cat $curl_out)
+        test ! -s $curl_err
+}
+
+t_begin "chunked request" && {
+        curl -sSf -T- < random_blob http://$listen/ > $curl_out 2> $curl_err
+        test x$blob_sha1 = x$(cat $curl_out)
+        test ! -s $curl_err
+}
+
+dbgcat r_err
+
+t_begin "shutdown" && {
+        kill $unicorn_pid
+}
+
+t_done
diff --git a/t/t0116-client_body_buffer_size.sh b/t/t0116-client_body_buffer_size.sh
new file mode 100755
index 0000000..c9e17c7
--- /dev/null
+++ b/t/t0116-client_body_buffer_size.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 12 "client_body_buffer_size settings"
+
+t_begin "setup and start" && {
+        unicorn_setup
+        rtmpfiles unicorn_config_tmp one_meg
+        dd if=/dev/zero bs=1M count=1 of=$one_meg
+        cat >> $unicorn_config <<EOF
+after_fork do |server, worker|
+  File.open("$fifo", "wb") { |fp| fp.syswrite "START" }
+end
+EOF
+        cat $unicorn_config > $unicorn_config_tmp
+        echo client_body_buffer_size 0 >> $unicorn_config
+        unicorn -D -c $unicorn_config t0116.ru
+        unicorn_wait_start
+        fs_class=Unicorn::TmpIO
+        mem_class=StringIO
+
+        test x"$(cat $fifo)" = xSTART
+}
+
+t_begin "class for a zero-byte file should be StringIO" && {
+        > $tmp
+        test xStringIO = x"$(curl -T $tmp -sSf http://$listen/input_class)"
+}
+
+t_begin "class for a 1 byte file should be filesystem-backed" && {
+        echo > $tmp
+        test x$fs_class = x"$(curl -T $tmp -sSf http://$listen/tmp_class)"
+}
+
+t_begin "reload with default client_body_buffer_size" && {
+        mv $unicorn_config_tmp $unicorn_config
+        kill -HUP $unicorn_pid
+        test x"$(cat $fifo)" = xSTART
+}
+
+t_begin "class for a 1 byte file should be memory-backed" && {
+        echo > $tmp
+        test x$mem_class = x"$(curl -T $tmp -sSf http://$listen/tmp_class)"
+}
+
+t_begin "class for a random blob file should be filesystem-backed" && {
+        resp="$(curl -T random_blob -sSf http://$listen/tmp_class)"
+        test x$fs_class = x"$resp"
+}
+
+t_begin "one megabyte file should be filesystem-backed" && {
+        resp="$(curl -T $one_meg -sSf http://$listen/tmp_class)"
+        test x$fs_class = x"$resp"
+}
+
+t_begin "reload with a big client_body_buffer_size" && {
+        echo "client_body_buffer_size(1024 * 1024)" >> $unicorn_config
+        kill -HUP $unicorn_pid
+        test x"$(cat $fifo)" = xSTART
+}
+
+t_begin "one megabyte file should be memory-backed" && {
+        resp="$(curl -T $one_meg -sSf http://$listen/tmp_class)"
+        test x$mem_class = x"$resp"
+}
+
+t_begin "one megabyte + 1 byte file should be filesystem-backed" && {
+        echo >> $one_meg
+        resp="$(curl -T $one_meg -sSf http://$listen/tmp_class)"
+        test x$fs_class = x"$resp"
+}
+
+t_begin "killing succeeds" && {
+        kill $unicorn_pid
+}
+
+t_begin "check stderr" && {
+        check_stderr
+}
+
+t_done
diff --git a/t/t0116.ru b/t/t0116.ru
new file mode 100644
index 0000000..fab5fce
--- /dev/null
+++ b/t/t0116.ru
@@ -0,0 +1,16 @@
+#\ -E none
+use Rack::ContentLength
+use Rack::ContentType, 'text/plain'
+app = lambda do |env|
+  input = env['rack.input']
+  case env["PATH_INFO"]
+  when "/tmp_class"
+    body = input.instance_variable_get(:@tmp).class.name
+  when "/input_class"
+    body = input.class.name
+  else
+    return [ 500, {}, [] ]
+  end
+  [ 200, {}, [ body ] ]
+end
+run app
diff --git a/t/t0303-rails3-alt-working_directory_config.ru.sh b/t/t0303-rails3-alt-working_directory_config.ru.sh
index 444f05a..1433f94 100755
--- a/t/t0303-rails3-alt-working_directory_config.ru.sh
+++ b/t/t0303-rails3-alt-working_directory_config.ru.sh
@@ -1,9 +1,4 @@
 #!/bin/sh
-if test -n "$RBX_SKIP"
-then
-        echo "$0 is broken under Rubinius for now"
-        exit 0
-fi
 . ./test-rails3.sh
 
 t_plan 5 "Rails 3 (beta) inside alt working_directory (w/ config.ru)"
diff --git a/t/t9000-preread-input.sh b/t/t9000-preread-input.sh
new file mode 100755
index 0000000..b9da05e
--- /dev/null
+++ b/t/t9000-preread-input.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+. ./test-lib.sh
+t_plan 9 "PrereadInput middleware tests"
+
+t_begin "setup and start" && {
+        random_blob_sha1=$(rsha1 < random_blob)
+        unicorn_setup
+        unicorn  -D -c $unicorn_config preread_input.ru
+        unicorn_wait_start
+}
+
+t_begin "single identity request" && {
+        curl -sSf -T random_blob http://$listen/ > $tmp
+}
+
+t_begin "sha1 matches" && {
+        test x"$(cat $tmp)" = x"$random_blob_sha1"
+}
+
+t_begin "single chunked request" && {
+        curl -sSf -T- < random_blob http://$listen/ > $tmp
+}
+
+t_begin "sha1 matches" && {
+        test x"$(cat $tmp)" = x"$random_blob_sha1"
+}
+
+t_begin "app only dispatched twice" && {
+        test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )"
+}
+
+t_begin "aborted chunked request" && {
+        rm -f $tmp
+        curl -sSf -T- < $fifo http://$listen/ > $tmp &
+        curl_pid=$!
+        kill -9 $curl_pid
+        wait
+}
+
+t_begin "app only dispatched twice" && {
+        test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )"
+}
+
+t_begin "killing succeeds" && {
+        kill -QUIT $unicorn_pid
+}
+
+t_done
diff --git a/t/test-rails3.sh b/t/test-rails3.sh
index b398f03..907ef0d 100644
--- a/t/test-rails3.sh
+++ b/t/test-rails3.sh
@@ -13,7 +13,7 @@ rails_gems=../tmp/isolate/rails-$RAILS_VERSION/gems
 rails_bin="$rails_gems/rails-$RAILS_VERSION/bin/rails"
 if ! test -d "$arch_gems" || ! test -d "$rails_gems" || ! test -x "$rails_bin"
 then
-        ( cd ../ && $RAKE isolate )
+        ( cd ../ && ./script/isolate_for_tests )
 fi
 
 for i in $arch_gems/*-* $rails_gems/*-*
diff --git a/test/exec/test_exec.rb b/test/exec/test_exec.rb
index 1d24ca3..0f6b083 100644
--- a/test/exec/test_exec.rb
+++ b/test/exec/test_exec.rb
@@ -614,7 +614,7 @@ EOF
     results = retry_hit(["http://#{@addr}:#{@port}/"])
     assert_equal String, results[0].class
     assert_shutdown(pid)
-  end unless ENV['RBX_SKIP']
+  end
 
   def test_config_ru_alt_path
     config_path = "#{@tmpdir}/foo.ru"
@@ -797,7 +797,6 @@ EOF
 
   def test_daemonize_redirect_fail
     pid_file = "#{@tmpdir}/test.pid"
-    log = Tempfile.new('unicorn_test_log')
     ucfg = Tempfile.new('unicorn_test_config')
     ucfg.syswrite("pid #{pid_file}\"\n")
     err = Tempfile.new('stderr')
@@ -1040,7 +1039,7 @@ EOF
       lock_path = "#{Dir::tmpdir}/unicorn_test." \
                   "#{Unicorn::Const::DEFAULT_LISTEN}.lock"
       begin
-        lock = File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600)
+        File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600)
         yield
       rescue Errno::EEXIST
         lock_path = nil
diff --git a/test/rails/app-2.3.8/.gitignore b/test/rails/app-2.3.8/.gitignore
deleted file mode 100644
index f451f91..0000000
--- a/test/rails/app-2.3.8/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/tmp
-/vendor
diff --git a/test/rails/app-2.3.8/Rakefile b/test/rails/app-2.3.8/Rakefile
deleted file mode 100644
index fbebfca..0000000
--- a/test/rails/app-2.3.8/Rakefile
+++ /dev/null
@@ -1,7 +0,0 @@
-require(File.join(File.dirname(__FILE__), 'config', 'boot'))
-
-require 'rake'
-require 'rake/testtask'
-require 'rake/rdoctask'
-
-require 'tasks/rails'
diff --git a/test/rails/app-2.3.8/app/controllers/application_controller.rb b/test/rails/app-2.3.8/app/controllers/application_controller.rb
deleted file mode 100644
index 07c333e..0000000
--- a/test/rails/app-2.3.8/app/controllers/application_controller.rb
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- encoding: binary -*-
-
-class ApplicationController < ActionController::Base
-  helper :all
-end
diff --git a/test/rails/app-2.3.8/app/controllers/foo_controller.rb b/test/rails/app-2.3.8/app/controllers/foo_controller.rb
deleted file mode 100644
index 54ca1ed..0000000
--- a/test/rails/app-2.3.8/app/controllers/foo_controller.rb
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- encoding: binary -*-
-
-require 'digest/sha1'
-class FooController < ApplicationController
-  def index
-    render :text => "FOO\n"
-  end
-
-  def xcookie
-    cookies["foo"] = "cookie-#$$-#{session[:gotta_use_the_session_in_2_3]}"
-    render :text => ""
-  end
-
-  def xnotice
-    flash[:notice] = "session #$$"
-    render :text => ""
-  end
-
-  def xpost
-    if request.post?
-      digest = Digest::SHA1.new
-      out = "params: #{params.inspect}\n"
-      if file = params[:file]
-        loop do
-          buf = file.read(4096) or break
-          digest.update(buf)
-        end
-        out << "sha1: #{digest.to_s}\n"
-      end
-      headers['content-type'] = 'text/plain'
-      render :text => out
-    else
-      render :status => 403, :text => "need post\n"
-    end
-  end
-end
diff --git a/test/rails/app-2.3.8/app/helpers/application_helper.rb b/test/rails/app-2.3.8/app/helpers/application_helper.rb
deleted file mode 100644
index d9889b3..0000000
--- a/test/rails/app-2.3.8/app/helpers/application_helper.rb
+++ /dev/null
@@ -1,4 +0,0 @@
-# -*- encoding: binary -*-
-
-module ApplicationHelper
-end
diff --git a/test/rails/app-2.3.8/config/boot.rb b/test/rails/app-2.3.8/config/boot.rb
deleted file mode 100644
index b6c80d5..0000000
--- a/test/rails/app-2.3.8/config/boot.rb
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- encoding: binary -*-
-
-RAILS_ROOT = "#{File.dirname(__FILE__)}/.." unless defined?(RAILS_ROOT)
-
-module Rails
-  class << self
-    def boot!
-      unless booted?
-        preinitialize
-        pick_boot.run
-      end
-    end
-
-    def booted?
-      defined? Rails::Initializer
-    end
-
-    def pick_boot
-      (vendor_rails? ? VendorBoot : GemBoot).new
-    end
-
-    def vendor_rails?
-      File.exist?("#{RAILS_ROOT}/vendor/rails")
-    end
-
-    def preinitialize
-      load(preinitializer_path) if File.exist?(preinitializer_path)
-    end
-
-    def preinitializer_path
-      "#{RAILS_ROOT}/config/preinitializer.rb"
-    end
-  end
-
-  class Boot
-    def run
-      load_initializer
-      Rails::Initializer.run(:set_load_path)
-    end
-  end
-
-  class VendorBoot < Boot
-    def load_initializer
-      require "#{RAILS_ROOT}/vendor/rails/railties/lib/initializer"
-      Rails::Initializer.run(:install_gem_spec_stubs)
-      Rails::GemDependency.add_frozen_gem_path
-    end
-  end
-
-  class GemBoot < Boot
-    def load_initializer
-      self.class.load_rubygems
-      load_rails_gem
-      require 'initializer'
-    end
-
-    def load_rails_gem
-      if version = self.class.gem_version
-        gem 'rails', version
-      else
-        gem 'rails'
-      end
-    rescue Gem::LoadError => load_error
-      $stderr.puts %(Missing the Rails #{version} gem. Please `gem install -v=#{version} rails`, update your RAILS_GEM_VERSION setting in config/environment.rb for the Rails version you do have installed, or comment out RAILS_GEM_VERSION to use the latest version installed.)
-      exit 1
-    end
-
-    class << self
-      def rubygems_version
-        Gem::RubyGemsVersion rescue nil
-      end
-
-      def gem_version
-        if defined? RAILS_GEM_VERSION
-          RAILS_GEM_VERSION
-        elsif ENV.include?('RAILS_GEM_VERSION')
-          ENV['RAILS_GEM_VERSION']
-        else
-          parse_gem_version(read_environment_rb)
-        end
-      end
-
-      def load_rubygems
-        require 'rubygems'
-        min_version = '1.3.1'
-        unless rubygems_version >= min_version
-          $stderr.puts %Q(Rails requires RubyGems >= #{min_version} (you have #{rubygems_version}). Please `gem update --system` and try again.)
-          exit 1
-        end
-
-      rescue LoadError
-        $stderr.puts %Q(Rails requires RubyGems >= #{min_version}. Please install RubyGems and try again: http://rubygems.rubyforge.org)
-        exit 1
-      end
-
-      def parse_gem_version(text)
-        $1 if text =~ /^[^#]*RAILS_GEM_VERSION\s*=\s*["']([!~<>=]*\s*[\d.]+)["']/
-      end
-
-      private
-        def read_environment_rb
-          File.read("#{RAILS_ROOT}/config/environment.rb")
-        end
-    end
-  end
-end
-
-# All that for this:
-Rails.boot!
diff --git a/test/rails/app-2.3.8/config/database.yml b/test/rails/app-2.3.8/config/database.yml
deleted file mode 100644
index 9f77843..0000000
--- a/test/rails/app-2.3.8/config/database.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-development:
-  adapter: sqlite3
-  database: db/development.sqlite3
-  timeout: 5000
-test:
-  adapter: sqlite3
-  database: db/test.sqlite3
-  timeout: 5000
-production:
-  adapter: sqlite3
-  database: db/production.sqlite3
-  timeout: 5000
diff --git a/test/rails/app-2.3.8/config/environment.rb b/test/rails/app-2.3.8/config/environment.rb
deleted file mode 100644
index 6eb092c..0000000
--- a/test/rails/app-2.3.8/config/environment.rb
+++ /dev/null
@@ -1,17 +0,0 @@
-# -*- encoding: binary -*-
-
-unless defined? RAILS_GEM_VERSION
-  RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION']
-end
-
-# Bootstrap the Rails environment, frameworks, and default configuration
-require File.join(File.dirname(__FILE__), 'boot')
-
-Rails::Initializer.run do |config|
-  config.frameworks -= [ :active_resource, :action_mailer ]
-  config.action_controller.session_store = :active_record_store
-  config.action_controller.session = {
-    :session_key => "_unicorn_rails_test.#{rand}",
-    :secret => "#{rand}#{rand}#{rand}#{rand}",
-  }
-end
diff --git a/test/rails/app-2.3.8/config/environments/development.rb b/test/rails/app-2.3.8/config/environments/development.rb
deleted file mode 100644
index 3d381d2..0000000
--- a/test/rails/app-2.3.8/config/environments/development.rb
+++ /dev/null
@@ -1,7 +0,0 @@
-# -*- encoding: binary -*-
-
-config.cache_classes = false
-config.whiny_nils = true
-config.action_controller.consider_all_requests_local = true
-config.action_view.debug_rjs                         = true
-config.action_controller.perform_caching             = false
diff --git a/test/rails/app-2.3.8/config/environments/production.rb b/test/rails/app-2.3.8/config/environments/production.rb
deleted file mode 100644
index 08710a4..0000000
--- a/test/rails/app-2.3.8/config/environments/production.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# -*- encoding: binary -*-
-
-config.cache_classes = true
-config.action_controller.consider_all_requests_local = false
-config.action_controller.perform_caching             = true
-config.action_view.cache_template_loading            = true
diff --git a/test/rails/app-2.3.8/config/routes.rb b/test/rails/app-2.3.8/config/routes.rb
deleted file mode 100644
index ac7877c..0000000
--- a/test/rails/app-2.3.8/config/routes.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# -*- encoding: binary -*-
-
-ActionController::Routing::Routes.draw do |map|
-  map.connect ':controller/:action/:id'
-  map.connect ':controller/:action/:id.:format'
-end
diff --git a/test/rails/app-2.3.8/db/.gitignore b/test/rails/app-2.3.8/db/.gitignore
deleted file mode 100644
index e69de29..0000000
--- a/test/rails/app-2.3.8/db/.gitignore
+++ /dev/null
diff --git a/test/rails/app-2.3.8/log/.gitignore b/test/rails/app-2.3.8/log/.gitignore
deleted file mode 100644
index 397b4a7..0000000
--- a/test/rails/app-2.3.8/log/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.log
diff --git a/test/rails/app-2.3.8/public/404.html b/test/rails/app-2.3.8/public/404.html
deleted file mode 100644
index 44d986c..0000000
--- a/test/rails/app-2.3.8/public/404.html
+++ /dev/null
@@ -1 +0,0 @@
-404 Not Found
diff --git a/test/rails/app-2.3.8/public/500.html b/test/rails/app-2.3.8/public/500.html
deleted file mode 100644
index e534a49..0000000
--- a/test/rails/app-2.3.8/public/500.html
+++ /dev/null
@@ -1 +0,0 @@
-500 Internal Server Error
diff --git a/test/rails/app-2.3.8/public/x.txt b/test/rails/app-2.3.8/public/x.txt
deleted file mode 100644
index e427984..0000000
--- a/test/rails/app-2.3.8/public/x.txt
+++ /dev/null
@@ -1 +0,0 @@
-HELLO
diff --git a/test/rails/test_rails.rb b/test/rails/test_rails.rb
index 4b3857f..ab8f0a2 100644
--- a/test/rails/test_rails.rb
+++ b/test/rails/test_rails.rb
@@ -265,7 +265,7 @@ logger Logger.new('#{COMMON_TMP.path}')
 
     if @pid
       Process.kill(:QUIT, @pid)
-      pid2, status = Process.waitpid2(@pid)
+      _, status = Process.waitpid2(@pid)
       assert status.success?
     end
 
diff --git a/test/test_helper.rb b/test/test_helper.rb
index c4e56a2..92195e6 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -123,7 +123,7 @@ def unused_port(addr = '127.0.0.1')
     # when running tests in parallel with gmake.  Create a lock file while
     # we have the port here to ensure that does not happen .
     lock_path = "#{Dir::tmpdir}/unicorn_test.#{addr}:#{port}.lock"
-    lock = File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600)
+    File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600).close
     at_exit { File.unlink(lock_path) rescue nil }
   rescue Errno::EEXIST
     sock.close rescue nil
diff --git a/test/unit/test_configurator.rb b/test/unit/test_configurator.rb
index ac1efa8..c19c427 100644
--- a/test/unit/test_configurator.rb
+++ b/test/unit/test_configurator.rb
@@ -33,6 +33,14 @@ class TestConfigurator < Test::Unit::TestCase
     assert_equal "0.0.0.0:2007", meth.call('2007')
     assert_equal "0.0.0.0:2007", meth.call(2007)
 
+    %w([::1]:2007 [::]:2007).each do |addr|
+      assert_equal addr, meth.call(addr.dup)
+    end
+
+    # for Rainbows! users only
+    assert_equal "[::]:80", meth.call("[::]")
+    assert_equal "127.6.6.6:80", meth.call("127.6.6.6")
+
     # the next two aren't portable, consider them unsupported for now
     # assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('1:2007')
     # assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('2:2007')
diff --git a/test/unit/test_http_parser.rb b/test/unit/test_http_parser.rb
index 222c227..dc1aab7 100644
--- a/test/unit/test_http_parser.rb
+++ b/test/unit/test_http_parser.rb
@@ -14,9 +14,10 @@ class HttpParserTest < Test::Unit::TestCase
 
   def test_parse_simple
     parser = HttpParser.new
-    req = {}
-    http = "GET / HTTP/1.1\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    req = parser.env
+    http = parser.buf
+    http << "GET / HTTP/1.1\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal '', http
 
     assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
@@ -28,17 +29,17 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal '', req['QUERY_STRING']
 
     assert parser.keepalive?
-    parser.reset
+    parser.clear
     req.clear
 
-    http = "G"
-    assert_nil parser.headers(req, http)
+    http << "G"
+    assert_nil parser.parse
     assert_equal "G", http
     assert req.empty?
 
     # try parsing again to ensure we were reset correctly
-    http = "GET /hello-world HTTP/1.1\r\n\r\n"
-    assert parser.headers(req, http)
+    http << "ET /hello-world HTTP/1.1\r\n\r\n"
+    assert parser.parse
 
     assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
     assert_equal '/hello-world', req['REQUEST_PATH']
@@ -53,96 +54,161 @@ class HttpParserTest < Test::Unit::TestCase
 
   def test_tab_lws
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nHost:\tfoo.bar\r\n\r\n"
-    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost:\tfoo.bar\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
     assert_equal "foo.bar", req['HTTP_HOST']
   end
 
   def test_connection_close_no_ka
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n"
-    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nConnection: close\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
     assert_equal "GET", req['REQUEST_METHOD']
     assert ! parser.keepalive?
   end
 
   def test_connection_keep_alive_ka
     parser = HttpParser.new
-    req = {}
-    tmp = "HEAD / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
-    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    req = parser.env
+    parser.buf << "HEAD / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
     assert parser.keepalive?
   end
 
-  def test_connection_keep_alive_ka_bad_method
+  def test_connection_keep_alive_no_body
     parser = HttpParser.new
-    req = {}
-    tmp = "POST / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
-    assert_equal req.object_id, parser.headers(req, tmp).object_id
-    assert ! parser.keepalive?
+    req = parser.env
+    parser.buf << "POST / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
+    assert parser.keepalive?
+  end
+
+  def test_connection_keep_alive_no_body_empty
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "POST / HTTP/1.1\r\n" \
+                  "Content-Length: 0\r\n" \
+                  "Connection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
+    assert parser.keepalive?
   end
 
   def test_connection_keep_alive_ka_bad_version
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n"
-    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.parse.object_id
     assert parser.keepalive?
   end
 
   def test_parse_server_host_default_port
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"
-    assert_equal req, parser.headers(req, tmp)
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
-    assert_equal '', tmp
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
   def test_parse_server_host_alt_port
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nHost: foo:999\r\n\r\n"
-    assert_equal req, parser.headers(req, tmp)
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo:999\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '999', req['SERVER_PORT']
-    assert_equal '', tmp
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
   def test_parse_server_host_empty_port
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nHost: foo:\r\n\r\n"
-    assert_equal req, parser.headers(req, tmp)
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo:\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
-    assert_equal '', tmp
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
   def test_parse_server_host_xfp_https
     parser = HttpParser.new
-    req = {}
-    tmp = "GET / HTTP/1.1\r\nHost: foo:\r\n" \
-          "X-Forwarded-Proto: https\r\n\r\n"
-    assert_equal req, parser.headers(req, tmp)
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo:\r\n" \
+                  "X-Forwarded-Proto: https\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
-    assert_equal '', tmp
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
+  def test_parse_xfp_https_chained
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\n" \
+                  "X-Forwarded-Proto: https,http\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal '443', req['SERVER_PORT'], req.inspect
+    assert_equal 'https', req['rack.url_scheme'], req.inspect
+    assert_equal '', parser.buf
+  end
+
+  def test_parse_xfp_https_chained_backwards
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\n" \
+          "X-Forwarded-Proto: http,https\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal '80', req['SERVER_PORT'], req.inspect
+    assert_equal 'http', req['rack.url_scheme'], req.inspect
+    assert_equal '', parser.buf
+  end
+
+  def test_parse_xfp_gopher_is_ignored
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\n" \
+                  "X-Forwarded-Proto: gopher\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal '80', req['SERVER_PORT'], req.inspect
+    assert_equal 'http', req['rack.url_scheme'], req.inspect
+    assert_equal '', parser.buf
+  end
+
+  def test_parse_x_forwarded_ssl_on
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\n" \
+                  "X-Forwarded-Ssl: on\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal '443', req['SERVER_PORT'], req.inspect
+    assert_equal 'https', req['rack.url_scheme'], req.inspect
+    assert_equal '', parser.buf
+  end
+
+  def test_parse_x_forwarded_ssl_off
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.0\r\nX-Forwarded-Ssl: off\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal '80', req['SERVER_PORT'], req.inspect
+    assert_equal 'http', req['rack.url_scheme'], req.inspect
+    assert_equal '', parser.buf
+  end
+
   def test_parse_strange_headers
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     should_be_good = "GET / HTTP/1.1\r\naaaaaaaaaaaaa:++++++++++\r\n\r\n"
-    assert_equal req, parser.headers(req, should_be_good)
-    assert_equal '', should_be_good
+    parser.buf << should_be_good
+    assert_equal req, parser.parse
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
@@ -152,14 +218,14 @@ class HttpParserTest < Test::Unit::TestCase
   def test_nasty_pound_header
     parser = HttpParser.new
     nasty_pound_header = "GET / HTTP/1.1\r\nX-SSL-Bullshit:   -----BEGIN CERTIFICATE-----\r\n\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n\tdWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n\tSzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n\tBAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n\tBQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n\tW51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n\tgW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n\t0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n\tu2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n\twgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n\tA1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n\tBglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n\tVR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n\tloCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n\taWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n\t9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n\tIjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n\tBgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n\tcHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4QgEDBDAWLmh0\r\n\tdHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC5jcmwwPwYD\r\n\tVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n\tY3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n\tXCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n\tUO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n\thTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n\twTC6o2xq5y0qZ0
3JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n\tYhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n\tRA==\r\n\t-----END CERTIFICATE-----\r\n\r\n"
-    req = {}
-    buf = nasty_pound_header.dup
+    req = parser.env
+    parser.buf << nasty_pound_header.dup
 
     assert nasty_pound_header =~ /(-----BEGIN .*--END CERTIFICATE-----)/m
     expect = $1.dup
     expect.gsub!(/\r\n\t/, ' ')
-    assert_equal req, parser.headers(req, buf)
-    assert_equal '', buf
+    assert_equal req, parser.parse
+    assert_equal '', parser.buf
     assert_equal expect, req['HTTP_X_SSL_BULLSHIT']
   end
 
@@ -170,9 +236,10 @@ class HttpParserTest < Test::Unit::TestCase
              "\t\r\n" \
              "    \r\n" \
              "  ASDF\r\n\r\n"
-    req = {}
-    assert_equal req, parser.headers(req, header)
-    assert_equal '', header
+    parser.buf << header
+    req = parser.env
+    assert_equal req, parser.parse
+    assert_equal '', parser.buf
     assert_equal 'ASDF', req['HTTP_X_ASDF']
   end
 
@@ -184,9 +251,10 @@ class HttpParserTest < Test::Unit::TestCase
              "\t\r\n" \
              "       x\r\n" \
              "  ASDF\r\n\r\n"
-    req = {}
-    assert_equal req, parser.headers(req, header)
-    assert_equal '', header
+    req = parser.env
+    parser.buf << header
+    assert_equal req, parser.parse
+    assert_equal '', parser.buf
     assert_equal 'hi y x ASDF', req['HTTP_X_ASDF']
   end
 
@@ -196,8 +264,9 @@ class HttpParserTest < Test::Unit::TestCase
              "Host: \r\n" \
              "    YHBT.net\r\n" \
              "\r\n"
-    req = {}
-    assert_equal req, parser.headers(req, header)
+    parser.buf << header
+    req = parser.env
+    assert_equal req, parser.parse
     assert_equal 'example.com', req['HTTP_HOST']
   end
 
@@ -206,21 +275,22 @@ class HttpParserTest < Test::Unit::TestCase
   # in case we ever go multithreaded/evented...
   def test_resumable_continuations
     nr = 1000
-    req = {}
     header = "GET / HTTP/1.1\r\n" \
              "X-ASDF:      \r\n" \
              "  hello\r\n"
     tmp = []
     nr.times { |i|
       parser = HttpParser.new
-      assert parser.headers(req, "#{header} #{i}\r\n").nil?
+      req = parser.env
+      parser.buf << "#{header} #{i}\r\n"
+      assert parser.parse.nil?
       asdf = req['HTTP_X_ASDF']
       assert_equal "hello #{i}", asdf
       tmp << [ parser, asdf ]
-      req.clear
     }
     tmp.each_with_index { |(parser, asdf), i|
-      assert_equal req, parser.headers(req, "#{header} #{i}\r\n .\r\n\r\n")
+      parser.buf << " .\r\n\r\n"
+      assert parser.parse
       assert_equal "hello #{i} .", asdf
     }
   end
@@ -231,8 +301,8 @@ class HttpParserTest < Test::Unit::TestCase
              "    y\r\n" \
              "Host: hello\r\n" \
              "\r\n"
-    req = {}
-    assert_raises(HttpParserError) { parser.headers(req, header) }
+    parser.buf << header
+    assert_raises(HttpParserError) { parser.parse }
   end
 
   def test_parse_ie6_urls
@@ -244,7 +314,7 @@ class HttpParserTest < Test::Unit::TestCase
        /mal"formed"?
     ).each do |path|
       parser = HttpParser.new
-      req = {}
+      req = parser.env
       sorta_safe = %(GET #{path} HTTP/1.1\r\n\r\n)
       assert_equal req, parser.headers(req, sorta_safe)
       assert_equal path, req['REQUEST_URI']
@@ -255,13 +325,13 @@ class HttpParserTest < Test::Unit::TestCase
   
   def test_parse_error
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     bad_http = "GET / SsUTF/1.1"
 
     assert_raises(HttpParserError) { parser.headers(req, bad_http) }
 
     # make sure we can recover
-    parser.reset
+    parser.clear
     req.clear
     assert_equal req, parser.headers(req, "GET / HTTP/1.0\r\n\r\n")
     assert ! parser.keepalive?
@@ -269,7 +339,7 @@ class HttpParserTest < Test::Unit::TestCase
 
   def test_piecemeal
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     http = "GET"
     assert_nil parser.headers(req, http)
     assert_nil parser.headers(req, http)
@@ -291,9 +361,10 @@ class HttpParserTest < Test::Unit::TestCase
   # not common, but underscores do appear in practice
   def test_absolute_uri_underscores
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     http = "GET http://under_score.example.com/foo?q=bar HTTP/1.0\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    parser.buf << http
+    assert_equal req, parser.parse
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -302,16 +373,17 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'under_score.example.com', req['HTTP_HOST']
     assert_equal 'under_score.example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert ! parser.keepalive?
   end
 
   # some dumb clients add users because they're stupid
   def test_absolute_uri_w_user
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     http = "GET http://user%20space@example.com/foo?q=bar HTTP/1.0\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    parser.buf << http
+    assert_equal req, parser.parse
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -320,7 +392,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert ! parser.keepalive?
   end
 
@@ -329,7 +401,7 @@ class HttpParserTest < Test::Unit::TestCase
   def test_absolute_uri_uri_parse
     "#{URI::REGEXP::PATTERN::UNRESERVED};:&=+$,".split(//).each do |char|
       parser = HttpParser.new
-      req = {}
+      req = parser.env
       http = "GET http://#{char}@example.com/ HTTP/1.0\r\n\r\n"
       assert_equal req, parser.headers(req, http)
       assert_equal 'http', req['rack.url_scheme']
@@ -347,9 +419,9 @@ class HttpParserTest < Test::Unit::TestCase
 
   def test_absolute_uri
     parser = HttpParser.new
-    req = {}
-    http = "GET http://example.com/foo?q=bar HTTP/1.0\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    req = parser.env
+    parser.buf << "GET http://example.com/foo?q=bar HTTP/1.0\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -358,17 +430,18 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert ! parser.keepalive?
   end
 
   # X-Forwarded-Proto is not in rfc2616, absolute URIs are, however...
   def test_absolute_uri_https
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     http = "GET https://example.com/foo?q=bar HTTP/1.1\r\n" \
            "X-Forwarded-Proto: http\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    parser.buf << http
+    assert_equal req, parser.parse
     assert_equal 'https', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -377,17 +450,17 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert parser.keepalive?
   end
 
   # Host: header should be ignored for absolute URIs
   def test_absolute_uri_with_port
     parser = HttpParser.new
-    req = {}
-    http = "GET http://example.com:8080/foo?q=bar HTTP/1.2\r\n" \
+    req = parser.env
+    parser.buf << "GET http://example.com:8080/foo?q=bar HTTP/1.2\r\n" \
            "Host: bad.example.com\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    assert_equal req, parser.parse
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -396,16 +469,16 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com:8080', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '8080', req['SERVER_PORT']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_absolute_uri_with_empty_port
     parser = HttpParser.new
-    req = {}
-    http = "GET https://example.com:/foo?q=bar HTTP/1.1\r\n" \
+    req = parser.env
+    parser.buf << "GET https://example.com:/foo?q=bar HTTP/1.1\r\n" \
            "Host: bad.example.com\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    assert_equal req, parser.parse
     assert_equal 'https', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -414,42 +487,192 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com:', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
+    assert_equal "", parser.buf
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_absolute_ipv6_uri
+    parser = HttpParser.new
+    req = parser.env
+    url = "http://[::1]/foo?q=bar"
+    http = "GET #{url} HTTP/1.1\r\n" \
+           "Host: bad.example.com\r\n\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal 'http', req['rack.url_scheme']
+    assert_equal '/foo?q=bar', req['REQUEST_URI']
+    assert_equal '/foo', req['REQUEST_PATH']
+    assert_equal 'q=bar', req['QUERY_STRING']
+
+    uri = URI.parse(url)
+    assert_equal "[::1]", uri.host,
+                 "URI.parse changed upstream for #{url}? host=#{uri.host}"
+    assert_equal "[::1]", req['HTTP_HOST']
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
     assert_equal "", http
     assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
-  def test_put_body_oneshot
+  def test_absolute_ipv6_uri_alpha
     parser = HttpParser.new
-    req = {}
-    http = "PUT / HTTP/1.0\r\nContent-Length: 5\r\n\r\nabcde"
+    req = parser.env
+    url = "http://[::a]/"
+    http = "GET #{url} HTTP/1.1\r\n" \
+           "Host: bad.example.com\r\n\r\n"
     assert_equal req, parser.headers(req, http)
+    assert_equal 'http', req['rack.url_scheme']
+
+    uri = URI.parse(url)
+    assert_equal "[::a]", uri.host,
+                 "URI.parse changed upstream for #{url}? host=#{uri.host}"
+    assert_equal "[::a]", req['HTTP_HOST']
+    assert_equal "[::a]", req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
+  end
+
+  def test_absolute_ipv6_uri_alpha_2
+    parser = HttpParser.new
+    req = parser.env
+    url = "http://[::B]/"
+    http = "GET #{url} HTTP/1.1\r\n" \
+           "Host: bad.example.com\r\n\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal 'http', req['rack.url_scheme']
+
+    uri = URI.parse(url)
+    assert_equal "[::B]", uri.host,
+                 "URI.parse changed upstream for #{url}? host=#{uri.host}"
+    assert_equal "[::B]", req['HTTP_HOST']
+    assert_equal "[::B]", req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
+  end
+
+  def test_absolute_ipv6_uri_with_empty_port
+    parser = HttpParser.new
+    req = parser.env
+    url = "https://[::1]:/foo?q=bar"
+    http = "GET #{url} HTTP/1.1\r\n" \
+           "Host: bad.example.com\r\n\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal 'https', req['rack.url_scheme']
+    assert_equal '/foo?q=bar', req['REQUEST_URI']
+    assert_equal '/foo', req['REQUEST_PATH']
+    assert_equal 'q=bar', req['QUERY_STRING']
+
+    uri = URI.parse(url)
+    assert_equal "[::1]", uri.host,
+                 "URI.parse changed upstream for #{url}? host=#{uri.host}"
+    assert_equal "[::1]:", req['HTTP_HOST']
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '443', req['SERVER_PORT']
+    assert_equal "", http
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_absolute_ipv6_uri_with_port
+    parser = HttpParser.new
+    req = parser.env
+    url = "https://[::1]:666/foo?q=bar"
+    http = "GET #{url} HTTP/1.1\r\n" \
+           "Host: bad.example.com\r\n\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal 'https', req['rack.url_scheme']
+    assert_equal '/foo?q=bar', req['REQUEST_URI']
+    assert_equal '/foo', req['REQUEST_PATH']
+    assert_equal 'q=bar', req['QUERY_STRING']
+
+    uri = URI.parse(url)
+    assert_equal "[::1]", uri.host,
+                 "URI.parse changed upstream for #{url}? host=#{uri.host}"
+    assert_equal "[::1]:666", req['HTTP_HOST']
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '666', req['SERVER_PORT']
+    assert_equal "", http
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_ipv6_host_header
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\n" \
+                  "Host: [::1]\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal "[::1]", req['HTTP_HOST']
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
+    assert_equal "", parser.buf
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_ipv6_host_header_with_port
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\n" \
+                  "Host: [::1]:666\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '666', req['SERVER_PORT']
+    assert_equal "[::1]:666", req['HTTP_HOST']
+    assert_equal "", parser.buf
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_ipv6_host_header_with_empty_port
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: [::1]:\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal "[::1]", req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
+    assert_equal "[::1]:", req['HTTP_HOST']
+    assert_equal "", parser.buf
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  # XXX Highly unlikely..., just make sure we don't segfault or assert on it
+  def test_broken_ipv6_host_header
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "GET / HTTP/1.1\r\nHost: [::1:\r\n\r\n"
+    assert_equal req, parser.parse
+    assert_equal "[", req['SERVER_NAME']
+    assert_equal ':1:', req['SERVER_PORT']
+    assert_equal "[::1:", req['HTTP_HOST']
+    assert_equal "", parser.buf
+  end
+
+  def test_put_body_oneshot
+    parser = HttpParser.new
+    req = parser.env
+    parser.buf << "PUT / HTTP/1.0\r\nContent-Length: 5\r\n\r\nabcde"
+    assert_equal req, parser.parse
     assert_equal '/', req['REQUEST_PATH']
     assert_equal '/', req['REQUEST_URI']
     assert_equal 'PUT', req['REQUEST_METHOD']
     assert_equal 'HTTP/1.0', req['HTTP_VERSION']
     assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL']
-    assert_equal "abcde", http
+    assert_equal "abcde", parser.buf
     assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_put_body_later
     parser = HttpParser.new
-    req = {}
-    http = "PUT /l HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
-    assert_equal req, parser.headers(req, http)
+    req = parser.env
+    parser.buf << "PUT /l HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
+    assert_equal req, parser.parse
     assert_equal '/l', req['REQUEST_PATH']
     assert_equal '/l', req['REQUEST_URI']
     assert_equal 'PUT', req['REQUEST_METHOD']
     assert_equal 'HTTP/1.0', req['HTTP_VERSION']
     assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL']
-    assert_equal "", http
+    assert_equal "", parser.buf
     assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_unknown_methods
     %w(GETT HEADR XGET XHEAD).each { |m|
       parser = HttpParser.new
-      req = {}
+      req = parser.env
       s = "#{m} /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n"
       ok = false
       assert_nothing_raised do
@@ -461,23 +684,24 @@ class HttpParserTest < Test::Unit::TestCase
       assert_equal 'page=1', req['QUERY_STRING']
       assert_equal "", s
       assert_equal m, req['REQUEST_METHOD']
-      assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
+      assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
     }
   end
 
   def test_fragment_in_uri
     parser = HttpParser.new
-    req = {}
+    req = parser.env
     get = "GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n"
+    parser.buf << get
     ok = false
     assert_nothing_raised do
-      ok = parser.headers(req, get)
+      ok = parser.parse
     end
     assert ok
     assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI']
     assert_equal 'posts-17408', req['FRAGMENT']
     assert_equal 'page=1', req['QUERY_STRING']
-    assert_equal '', get
+    assert_equal '', parser.buf
     assert parser.keepalive?
   end
 
@@ -503,8 +727,9 @@ class HttpParserTest < Test::Unit::TestCase
     10.times do |c|
       get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-#{rand_data(1024, 1024+(c*1024))}: Test\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.headers({}, get)
-        parser.reset
+        parser.buf << get
+        parser.parse
+        parser.clear
       end
     end
 
@@ -512,25 +737,28 @@ class HttpParserTest < Test::Unit::TestCase
     10.times do |c|
       get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-Test: #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.headers({}, get)
-        parser.reset
+        parser.buf << get
+        parser.parse
+        parser.clear
       end
     end
 
     # then large headers are rejected too
     get = "GET /#{rand_data(10,120)} HTTP/1.1\r\n"
     get << "X-Test: test\r\n" * (80 * 1024)
+    parser.buf << get
     assert_raises Unicorn::HttpParserError do
-      parser.headers({}, get)
-      parser.reset
+      parser.parse
     end
+    parser.clear
 
     # finally just that random garbage gets blocked all the time
     10.times do |c|
       get = "GET #{rand_data(1024, 1024+(c*1024), false)} #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.headers({}, get)
-        parser.reset
+        parser.buf << get
+        parser.parse
+        parser.clear
       end
     end
 
diff --git a/test/unit/test_http_parser_ng.rb b/test/unit/test_http_parser_ng.rb
index cb30f32..e57428c 100644
--- a/test/unit/test_http_parser_ng.rb
+++ b/test/unit/test_http_parser_ng.rb
@@ -8,57 +8,164 @@ include Unicorn
 class HttpParserNgTest < Test::Unit::TestCase
 
   def setup
+    HttpParser.keepalive_requests = HttpParser::KEEPALIVE_REQUESTS_DEFAULT
     @parser = HttpParser.new
   end
 
+  def test_keepalive_requests_default_constant
+    assert_kind_of Integer, HttpParser::KEEPALIVE_REQUESTS_DEFAULT
+    assert HttpParser::KEEPALIVE_REQUESTS_DEFAULT >= 0
+  end
+
+  def test_keepalive_requests_setting
+    HttpParser.keepalive_requests = 0
+    assert_equal 0, HttpParser.keepalive_requests
+    HttpParser.keepalive_requests = nil
+    assert HttpParser.keepalive_requests >= 0xffffffff
+    HttpParser.keepalive_requests = 1
+    assert_equal 1, HttpParser.keepalive_requests
+    HttpParser.keepalive_requests = 666
+    assert_equal 666, HttpParser.keepalive_requests
+
+    assert_raises(TypeError) { HttpParser.keepalive_requests = "666" }
+    assert_raises(TypeError) { HttpParser.keepalive_requests = [] }
+  end
+
+  def test_keepalive_requests_with_next?
+    req = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n".freeze
+    expect = {
+      "SERVER_NAME" => "example.com",
+      "HTTP_HOST" => "example.com",
+      "rack.url_scheme" => "http",
+      "REQUEST_PATH" => "/",
+      "SERVER_PROTOCOL" => "HTTP/1.1",
+      "PATH_INFO" => "/",
+      "HTTP_VERSION" => "HTTP/1.1",
+      "REQUEST_URI" => "/",
+      "SERVER_PORT" => "80",
+      "REQUEST_METHOD" => "GET",
+      "QUERY_STRING" => ""
+    }.freeze
+    HttpParser::KEEPALIVE_REQUESTS_DEFAULT.times do |nr|
+      @parser.buf << req
+      assert_equal expect, @parser.parse
+      assert @parser.next?
+    end
+    @parser.buf << req
+    assert_equal expect, @parser.parse
+    assert ! @parser.next?
+  end
+
+  def test_fewer_keepalive_requests_with_next?
+    HttpParser.keepalive_requests = 5
+    @parser = HttpParser.new
+    req = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n".freeze
+    expect = {
+      "SERVER_NAME" => "example.com",
+      "HTTP_HOST" => "example.com",
+      "rack.url_scheme" => "http",
+      "REQUEST_PATH" => "/",
+      "SERVER_PROTOCOL" => "HTTP/1.1",
+      "PATH_INFO" => "/",
+      "HTTP_VERSION" => "HTTP/1.1",
+      "REQUEST_URI" => "/",
+      "SERVER_PORT" => "80",
+      "REQUEST_METHOD" => "GET",
+      "QUERY_STRING" => ""
+    }.freeze
+    5.times do |nr|
+      @parser.buf << req
+      assert_equal expect, @parser.parse
+      assert @parser.next?
+    end
+    @parser.buf << req
+    assert_equal expect, @parser.parse
+    assert ! @parser.next?
+  end
+
+  def test_default_keepalive_is_off
+    assert ! @parser.keepalive?
+    assert ! @parser.next?
+    assert_nothing_raised do
+      @parser.buf << "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
+      @parser.parse
+    end
+    assert @parser.keepalive?
+    @parser.clear
+    assert ! @parser.keepalive?
+    assert ! @parser.next?
+  end
+
   def test_identity_byte_headers
-    req = {}
+    req = @parser.env
     str = "PUT / HTTP/1.1\r\n"
     str << "Content-Length: 123\r\n"
     str << "\r"
-    hdr = ""
+    hdr = @parser.buf
     str.each_byte { |byte|
-      assert_nil @parser.headers(req, hdr << byte.chr)
+      hdr << byte.chr
+      assert_nil @parser.parse
     }
     hdr << "\n"
-    assert_equal req.object_id, @parser.headers(req, hdr).object_id
+    assert_equal req.object_id, @parser.parse.object_id
     assert_equal '123', req['CONTENT_LENGTH']
     assert_equal 0, hdr.size
     assert ! @parser.keepalive?
     assert @parser.headers?
     assert_equal 123, @parser.content_length
+    dst = ""
+    buf = '.' * 123
+    @parser.filter_body(dst, buf)
+    assert_equal '.' * 123, dst
+    assert_equal "", buf
+    assert @parser.keepalive?
   end
 
   def test_identity_step_headers
-    req = {}
-    str = "PUT / HTTP/1.1\r\n"
-    assert ! @parser.headers(req, str)
+    req = @parser.env
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\n"
+    assert ! @parser.parse
     str << "Content-Length: 123\r\n"
-    assert ! @parser.headers(req, str)
+    assert ! @parser.parse
     str << "\r\n"
-    assert_equal req.object_id, @parser.headers(req, str).object_id
+    assert_equal req.object_id, @parser.parse.object_id
     assert_equal '123', req['CONTENT_LENGTH']
     assert_equal 0, str.size
     assert ! @parser.keepalive?
     assert @parser.headers?
+    dst = ""
+    buf = '.' * 123
+    @parser.filter_body(dst, buf)
+    assert_equal '.' * 123, dst
+    assert_equal "", buf
+    assert @parser.keepalive?
   end
 
   def test_identity_oneshot_header
-    req = {}
-    str = "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\n"
-    assert_equal req.object_id, @parser.headers(req, str).object_id
+    req = @parser.env
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\n"
+    assert_equal req.object_id, @parser.parse.object_id
     assert_equal '123', req['CONTENT_LENGTH']
     assert_equal 0, str.size
     assert ! @parser.keepalive?
+    assert @parser.headers?
+    dst = ""
+    buf = '.' * 123
+    @parser.filter_body(dst, buf)
+    assert_equal '.' * 123, dst
+    assert_equal "", buf
   end
 
   def test_identity_oneshot_header_with_body
     body = ('a' * 123).freeze
-    req = {}
-    str = "PUT / HTTP/1.1\r\n" \
-          "Content-Length: #{body.length}\r\n" \
-          "\r\n#{body}"
-    assert_equal req.object_id, @parser.headers(req, str).object_id
+    req = @parser.env
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\n" \
+           "Content-Length: #{body.length}\r\n" \
+           "\r\n#{body}"
+    assert_equal req.object_id, @parser.parse.object_id
     assert_equal '123', req['CONTENT_LENGTH']
     assert_equal 123, str.size
     assert_equal body, str
@@ -67,12 +174,13 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_equal 0, str.size
     assert_equal tmp, body
     assert_equal "", @parser.filter_body(tmp, str)
-    assert ! @parser.keepalive?
+    assert @parser.keepalive?
   end
 
   def test_identity_oneshot_header_with_body_partial
-    str = "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\na"
-    assert_equal Hash, @parser.headers({}, str).class
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\na"
+    assert_equal Hash, @parser.parse.class
     assert_equal 1, str.size
     assert_equal 'a', str
     tmp = ''
@@ -85,12 +193,13 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_nil rv
     assert_equal "", str
     assert_equal str.object_id, @parser.filter_body(tmp, str).object_id
-    assert ! @parser.keepalive?
+    assert @parser.keepalive?
   end
 
   def test_identity_oneshot_header_with_body_slop
-    str = "PUT / HTTP/1.1\r\nContent-Length: 1\r\n\r\naG"
-    assert_equal Hash, @parser.headers({}, str).class
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\nContent-Length: 1\r\n\r\naG"
+    assert_equal Hash, @parser.parse.class
     assert_equal 2, str.size
     assert_equal 'aG', str
     tmp = ''
@@ -99,92 +208,100 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_equal "G", @parser.filter_body(tmp, str)
     assert_equal 1, tmp.size
     assert_equal "a", tmp
-    assert ! @parser.keepalive?
+    assert @parser.keepalive?
   end
 
   def test_chunked
-    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    req = @parser.env
+    str << "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
+    assert_equal req, @parser.parse, "msg=#{str}"
     assert_equal 0, str.size
     tmp = ""
-    assert_nil @parser.filter_body(tmp, "6")
+    assert_nil @parser.filter_body(tmp, str << "6")
     assert_equal 0, tmp.size
-    assert_nil @parser.filter_body(tmp, rv = "\r\n")
-    assert_equal 0, rv.size
+    assert_nil @parser.filter_body(tmp, str << "\r\n")
+    assert_equal 0, str.size
     assert_equal 0, tmp.size
     tmp = ""
-    assert_nil @parser.filter_body(tmp, "..")
+    assert_nil @parser.filter_body(tmp, str << "..")
     assert_equal "..", tmp
-    assert_nil @parser.filter_body(tmp, "abcd\r\n0\r\n")
+    assert_nil @parser.filter_body(tmp, str << "abcd\r\n0\r\n")
     assert_equal "abcd", tmp
-    rv = "PUT"
-    assert_equal rv.object_id, @parser.filter_body(tmp, rv).object_id
-    assert_equal "PUT", rv
+    assert_equal str.object_id, @parser.filter_body(tmp, str << "PUT").object_id
+    assert_equal "PUT", str
     assert ! @parser.keepalive?
+    str << "TY: FOO\r\n\r\n"
+    assert_equal req, @parser.parse
+    assert_equal "FOO", req["HTTP_PUTTY"]
+    assert @parser.keepalive?
   end
 
   def test_two_chunks
-    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
+    req = @parser.env
+    assert_equal req, @parser.parse
     assert_equal 0, str.size
     tmp = ""
-    assert_nil @parser.filter_body(tmp, "6")
+    assert_nil @parser.filter_body(tmp, str << "6")
     assert_equal 0, tmp.size
-    assert_nil @parser.filter_body(tmp, rv = "\r\n")
-    assert_equal "", rv
+    assert_nil @parser.filter_body(tmp, str << "\r\n")
+    assert_equal "", str
     assert_equal 0, tmp.size
     tmp = ""
-    assert_nil @parser.filter_body(tmp, "..")
+    assert_nil @parser.filter_body(tmp, str << "..")
     assert_equal 2, tmp.size
     assert_equal "..", tmp
-    assert_nil @parser.filter_body(tmp, "abcd\r\n1")
+    assert_nil @parser.filter_body(tmp, str << "abcd\r\n1")
     assert_equal "abcd", tmp
-    assert_nil @parser.filter_body(tmp, "\r")
+    assert_nil @parser.filter_body(tmp, str << "\r")
     assert_equal "", tmp
-    assert_nil @parser.filter_body(tmp, "\n")
+    assert_nil @parser.filter_body(tmp, str << "\n")
     assert_equal "", tmp
-    assert_nil @parser.filter_body(tmp, "z")
+    assert_nil @parser.filter_body(tmp, str << "z")
     assert_equal "z", tmp
-    assert_nil @parser.filter_body(tmp, "\r\n")
-    assert_nil @parser.filter_body(tmp, "0")
-    assert_nil @parser.filter_body(tmp, "\r")
-    rv = @parser.filter_body(tmp, buf = "\nGET")
+    assert_nil @parser.filter_body(tmp, str << "\r\n")
+    assert_nil @parser.filter_body(tmp, str << "0")
+    assert_nil @parser.filter_body(tmp, str << "\r")
+    rv = @parser.filter_body(tmp, str << "\nGET")
     assert_equal "GET", rv
-    assert_equal buf.object_id, rv.object_id
+    assert_equal str.object_id, rv.object_id
     assert ! @parser.keepalive?
   end
 
   def test_big_chunk
-    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
-          "4000\r\nabcd"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
+           "4000\r\nabcd"
+    req = @parser.env
+    assert_equal req, @parser.parse
     tmp = ''
     assert_nil @parser.filter_body(tmp, str)
     assert_equal '', str
-    str = ' ' * 16300
+    str << ' ' * 16300
     assert_nil @parser.filter_body(tmp, str)
     assert_equal '', str
-    str = ' ' * 80
+    str << ' ' * 80
     assert_nil @parser.filter_body(tmp, str)
     assert_equal '', str
     assert ! @parser.body_eof?
-    assert_equal "", @parser.filter_body(tmp, "\r\n0\r\n")
+    assert_equal "", @parser.filter_body(tmp, str << "\r\n0\r\n")
     assert_equal "", tmp
     assert @parser.body_eof?
-    assert_equal req, @parser.trailers(req, moo = "\r\n")
-    assert_equal "", moo
+    str << "\r\n"
+    assert_equal req, @parser.parse
+    assert_equal "", str
     assert @parser.body_eof?
-    assert ! @parser.keepalive?
+    assert @parser.keepalive?
   end
 
   def test_two_chunks_oneshot
-    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
-          "1\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    req = @parser.env
+    str << "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
+           "1\r\na\r\n2\r\n..\r\n0\r\n"
+    assert_equal req, @parser.parse
     tmp = ''
     assert_nil @parser.filter_body(tmp, str)
     assert_equal 'a..', tmp
@@ -195,31 +312,33 @@ class HttpParserNgTest < Test::Unit::TestCase
 
   def test_chunks_bytewise
     chunked = "10\r\nabcdefghijklmnop\r\n11\r\n0123456789abcdefg\r\n0\r\n"
-    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n#{chunked}"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
-    assert_equal chunked, str
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
+    buf = @parser.buf
+    buf << str
+    req = @parser.env
+    assert_equal req, @parser.parse
+    assert_equal "", buf
     tmp = ''
-    buf = ''
     body = ''
-    str = str[0..-2]
+    str = chunked[0..-2]
     str.each_byte { |byte|
       assert_nil @parser.filter_body(tmp, buf << byte.chr)
       body << tmp
     }
     assert_equal 'abcdefghijklmnop0123456789abcdefg', body
-    rv = @parser.filter_body(tmp, buf << "\n")
+    rv = @parser.filter_body(tmp, buf<< "\n")
     assert_equal rv.object_id, buf.object_id
     assert ! @parser.keepalive?
   end
 
   def test_trailers
-    str = "PUT / HTTP/1.1\r\n" \
-          "Trailer: Content-MD5\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "1\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    req = @parser.env
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\n" \
+           "Trailer: Content-MD5\r\n" \
+           "transfer-Encoding: chunked\r\n\r\n" \
+           "1\r\na\r\n2\r\n..\r\n0\r\n"
+    assert_equal req, @parser.parse
     assert_equal 'Content-MD5', req['HTTP_TRAILER']
     assert_nil req['HTTP_CONTENT_MD5']
     tmp = ''
@@ -234,19 +353,22 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_nil @parser.trailers(req, str)
     assert_equal md5_b64, req['HTTP_CONTENT_MD5']
     assert_equal "CONTENT_MD5: #{md5_b64}\r\n", str
-    assert_nil @parser.trailers(req, str << "\r")
-    assert_equal req, @parser.trailers(req, str << "\nGET / ")
+    str << "\r"
+    assert_nil @parser.parse
+    str << "\nGET / "
+    assert_equal req, @parser.parse
     assert_equal "GET / ", str
-    assert ! @parser.keepalive?
+    assert @parser.keepalive?
   end
 
   def test_trailers_slowly
-    str = "PUT / HTTP/1.1\r\n" \
-          "Trailer: Content-MD5\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "1\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\n" \
+           "Trailer: Content-MD5\r\n" \
+           "transfer-Encoding: chunked\r\n\r\n" \
+           "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = @parser.env
+    assert_equal req, @parser.parse
     assert_equal 'Content-MD5', req['HTTP_TRAILER']
     assert_nil req['HTTP_CONTENT_MD5']
     tmp = ''
@@ -264,16 +386,19 @@ class HttpParserNgTest < Test::Unit::TestCase
     }
     assert_equal md5_b64, req['HTTP_CONTENT_MD5']
     assert_equal "CONTENT_MD5: #{md5_b64}\r\n", str
-    assert_nil @parser.trailers(req, str << "\r")
-    assert_equal req, @parser.trailers(req, str << "\n")
+    str << "\r"
+    assert_nil @parser.parse
+    str << "\n"
+    assert_equal req, @parser.parse
   end
 
   def test_max_chunk
-    str = "PUT / HTTP/1.1\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "#{HttpParser::CHUNK_MAX.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    str << "PUT / HTTP/1.1\r\n" \
+           "transfer-Encoding: chunked\r\n\r\n" \
+           "#{HttpParser::CHUNK_MAX.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
+    req = @parser.env
+    assert_equal req, @parser.parse
     assert_nil @parser.content_length
     assert_nothing_raised { @parser.filter_body('', str) }
     assert ! @parser.keepalive?
@@ -281,64 +406,61 @@ class HttpParserNgTest < Test::Unit::TestCase
 
   def test_max_body
     n = HttpParser::LENGTH_MAX
-    str = "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
-    req = {}
-    assert_nothing_raised { @parser.headers(req, str) }
+    @parser.buf << "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
+    req = @parser.env
+    assert_nothing_raised { @parser.headers(req, @parser.buf) }
     assert_equal n, req['CONTENT_LENGTH'].to_i
     assert ! @parser.keepalive?
   end
 
   def test_overflow_chunk
     n = HttpParser::CHUNK_MAX + 1
-    str = "PUT / HTTP/1.1\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "#{n.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    req = @parser.env
+    str << "PUT / HTTP/1.1\r\n" \
+           "transfer-Encoding: chunked\r\n\r\n" \
+           "#{n.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
+    assert_equal req, @parser.parse
     assert_nil @parser.content_length
     assert_raise(HttpParserError) { @parser.filter_body('', str) }
-    assert ! @parser.keepalive?
   end
 
   def test_overflow_content_length
     n = HttpParser::LENGTH_MAX + 1
-    str = "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
-    assert_raise(HttpParserError) { @parser.headers({}, str) }
-    assert ! @parser.keepalive?
+    @parser.buf << "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
+    assert_raise(HttpParserError) { @parser.parse }
   end
 
   def test_bad_chunk
-    str = "PUT / HTTP/1.1\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "#zzz\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    @parser.buf << "PUT / HTTP/1.1\r\n" \
+                   "transfer-Encoding: chunked\r\n\r\n" \
+                   "#zzz\r\na\r\n2\r\n..\r\n0\r\n"
+    req = @parser.env
+    assert_equal req, @parser.parse
     assert_nil @parser.content_length
-    assert_raise(HttpParserError) { @parser.filter_body('', str) }
-    assert ! @parser.keepalive?
+    assert_raise(HttpParserError) { @parser.filter_body("", @parser.buf) }
   end
 
   def test_bad_content_length
-    str = "PUT / HTTP/1.1\r\nContent-Length: 7ff\r\n\r\n"
-    assert_raise(HttpParserError) { @parser.headers({}, str) }
-    assert ! @parser.keepalive?
+    @parser.buf << "PUT / HTTP/1.1\r\nContent-Length: 7ff\r\n\r\n"
+    assert_raise(HttpParserError) { @parser.parse }
   end
 
   def test_bad_trailers
-    str = "PUT / HTTP/1.1\r\n" \
-          "Trailer: Transfer-Encoding\r\n" \
-          "transfer-Encoding: chunked\r\n\r\n" \
-          "1\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    str = @parser.buf
+    req = @parser.env
+    str << "PUT / HTTP/1.1\r\n" \
+           "Trailer: Transfer-Encoding\r\n" \
+           "transfer-Encoding: chunked\r\n\r\n" \
+           "1\r\na\r\n2\r\n..\r\n0\r\n"
+    assert_equal req, @parser.parse
     assert_equal 'Transfer-Encoding', req['HTTP_TRAILER']
     tmp = ''
     assert_nil @parser.filter_body(tmp, str)
     assert_equal 'a..', tmp
     assert_equal '', str
     str << "Transfer-Encoding: identity\r\n\r\n"
-    assert_raise(HttpParserError) { @parser.trailers(req, str) }
-    assert ! @parser.keepalive?
+    assert_raise(HttpParserError) { @parser.parse }
   end
 
   def test_repeat_headers
@@ -347,18 +469,19 @@ class HttpParserNgTest < Test::Unit::TestCase
           "Trailer: Content-SHA1\r\n" \
           "transfer-Encoding: chunked\r\n\r\n" \
           "1\r\na\r\n2\r\n..\r\n0\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, str)
+    req = @parser.env
+    @parser.buf << str
+    assert_equal req, @parser.parse
     assert_equal 'Content-MD5,Content-SHA1', req['HTTP_TRAILER']
     assert ! @parser.keepalive?
   end
 
   def test_parse_simple_request
     parser = HttpParser.new
-    req = {}
-    http = "GET /read-rfc1945-if-you-dont-believe-me\r\n"
-    assert_equal req, parser.headers(req, http)
-    assert_equal '', http
+    req = parser.env
+    parser.buf << "GET /read-rfc1945-if-you-dont-believe-me\r\n"
+    assert_equal req, parser.parse
+    assert_equal '', parser.buf
     expect = {
       "SERVER_NAME"=>"localhost",
       "rack.url_scheme"=>"http",
@@ -388,7 +511,8 @@ class HttpParserNgTest < Test::Unit::TestCase
       "*" => { qs => "", pi => "" },
     }.each do |uri,expect|
       assert_equal req, @parser.headers(req.clear, str % [ uri ])
-      @parser.reset
+      req = req.dup
+      @parser.clear
       assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
       assert_equal expect[qs], req[qs], "#{qs} mismatch"
       assert_equal expect[pi], req[pi], "#{pi} mismatch"
@@ -412,7 +536,8 @@ class HttpParserNgTest < Test::Unit::TestCase
       "/1?a=b;c=d&e=f" => { qs => "a=b;c=d&e=f", pi => "/1" },
     }.each do |uri,expect|
       assert_equal req, @parser.headers(req.clear, str % [ uri ])
-      @parser.reset
+      req = req.dup
+      @parser.clear
       assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
       assert_equal "example.com", req["HTTP_HOST"], "Host: mismatch"
       assert_equal expect[qs], req[qs], "#{qs} mismatch"
@@ -440,11 +565,22 @@ class HttpParserNgTest < Test::Unit::TestCase
     end
   end
 
+  def test_backtrace_is_empty
+    begin
+      @parser.headers({}, "AAADFSFDSFD\r\n\r\n")
+      assert false, "should never get here line:#{__LINE__}"
+    rescue HttpParserError => e
+      assert_equal [], e.backtrace
+      return
+    end
+    assert false, "should never get here line:#{__LINE__}"
+  end
+
   def test_ignore_version_header
-    http = "GET / HTTP/1.1\r\nVersion: hello\r\n\r\n"
-    req = {}
-    assert_equal req, @parser.headers(req, http)
-    assert_equal '', http
+    @parser.buf << "GET / HTTP/1.1\r\nVersion: hello\r\n\r\n"
+    req = @parser.env
+    assert_equal req, @parser.parse
+    assert_equal '', @parser.buf
     expect = {
       "SERVER_NAME" => "localhost",
       "rack.url_scheme" => "http",
@@ -460,4 +596,59 @@ class HttpParserNgTest < Test::Unit::TestCase
     assert_equal expect, req
   end
 
+  def test_pipelined_requests
+    host = "example.com"
+    expect = {
+      "HTTP_HOST" => host,
+      "SERVER_NAME" => host,
+      "REQUEST_PATH" => "/",
+      "rack.url_scheme" => "http",
+      "SERVER_PROTOCOL" => "HTTP/1.1",
+      "PATH_INFO" => "/",
+      "HTTP_VERSION" => "HTTP/1.1",
+      "REQUEST_URI" => "/",
+      "SERVER_PORT" => "80",
+      "REQUEST_METHOD" => "GET",
+      "QUERY_STRING" => ""
+    }
+    req1 = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
+    req2 = "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n"
+    @parser.buf << (req1 + req2)
+    env1 = @parser.parse.dup
+    assert_equal expect, env1
+    assert_equal req2, @parser.buf
+    assert ! @parser.env.empty?
+    assert @parser.next?
+    assert @parser.keepalive?
+    assert @parser.headers?
+    assert_equal expect, @parser.env
+    env2 = @parser.parse.dup
+    host.replace "www.example.com"
+    assert_equal "www.example.com", expect["HTTP_HOST"]
+    assert_equal "www.example.com", expect["SERVER_NAME"]
+    assert_equal expect, env2
+    assert_equal "", @parser.buf
+  end
+
+  def test_keepalive_requests_disabled
+    req = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n".freeze
+    expect = {
+      "SERVER_NAME" => "example.com",
+      "HTTP_HOST" => "example.com",
+      "rack.url_scheme" => "http",
+      "REQUEST_PATH" => "/",
+      "SERVER_PROTOCOL" => "HTTP/1.1",
+      "PATH_INFO" => "/",
+      "HTTP_VERSION" => "HTTP/1.1",
+      "REQUEST_URI" => "/",
+      "SERVER_PORT" => "80",
+      "REQUEST_METHOD" => "GET",
+      "QUERY_STRING" => ""
+    }.freeze
+    HttpParser.keepalive_requests = 0
+    @parser = HttpParser.new
+    @parser.buf << req
+    assert_equal expect, @parser.parse
+    assert ! @parser.next?
+  end
 end
diff --git a/test/unit/test_http_parser_xftrust.rb b/test/unit/test_http_parser_xftrust.rb
new file mode 100644
index 0000000..db8cfa9
--- /dev/null
+++ b/test/unit/test_http_parser_xftrust.rb
@@ -0,0 +1,38 @@
+# -*- encoding: binary -*-
+require 'test/test_helper'
+
+include Unicorn
+
+class HttpParserXFTrustTest < Test::Unit::TestCase
+  def setup
+    assert HttpParser.trust_x_forwarded?
+  end
+
+  def test_xf_trust_false_xfp
+    HttpParser.trust_x_forwarded = false
+    parser = HttpParser.new
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo:\r\n" \
+                  "X-Forwarded-Proto: https\r\n\r\n"
+    env = parser.parse
+    assert_kind_of Hash, env
+    assert_equal 'foo', env['SERVER_NAME']
+    assert_equal '80', env['SERVER_PORT']
+    assert_equal 'http', env['rack.url_scheme']
+  end
+
+  def test_xf_trust_false_xfs
+    HttpParser.trust_x_forwarded = false
+    parser = HttpParser.new
+    parser.buf << "GET / HTTP/1.1\r\nHost: foo:\r\n" \
+                  "X-Forwarded-SSL: on\r\n\r\n"
+    env = parser.parse
+    assert_kind_of Hash, env
+    assert_equal 'foo', env['SERVER_NAME']
+    assert_equal '80', env['SERVER_PORT']
+    assert_equal 'http', env['rack.url_scheme']
+  end
+
+  def teardown
+    HttpParser.trust_x_forwarded = true
+  end
+end
diff --git a/test/unit/test_request.rb b/test/unit/test_request.rb
index 1896300..bd452a5 100644
--- a/test/unit/test_request.rb
+++ b/test/unit/test_request.rb
@@ -11,7 +11,11 @@ class RequestTest < Test::Unit::TestCase
 
   class MockRequest < StringIO
     alias_method :readpartial, :sysread
+    alias_method :kgio_read!, :sysread
     alias_method :read_nonblock, :sysread
+    def kgio_addr
+      '127.0.0.1'
+    end
   end
 
   def setup
@@ -117,7 +121,7 @@ class RequestTest < Test::Unit::TestCase
 
   def test_no_content_stringio
     client = MockRequest.new("GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
-    res = env = nil
+    env = nil
     assert_nothing_raised { env = @request.read(client) }
     assert_equal StringIO, env['rack.input'].class
   end
@@ -126,7 +130,7 @@ class RequestTest < Test::Unit::TestCase
     client = MockRequest.new("PUT / HTTP/1.1\r\n" \
                              "Content-Length: 0\r\n" \
                              "Host: foo\r\n\r\n")
-    res = env = nil
+    env = nil
     assert_nothing_raised { env = @request.read(client) }
     assert_equal StringIO, env['rack.input'].class
   end
@@ -135,7 +139,7 @@ class RequestTest < Test::Unit::TestCase
     client = MockRequest.new("PUT / HTTP/1.1\r\n" \
                              "Content-Length: 1\r\n" \
                              "Host: foo\r\n\r\n")
-    res = env = nil
+    env = nil
     assert_nothing_raised { env = @request.read(client) }
     assert_equal Unicorn::TeeInput, env['rack.input'].class
   end
@@ -159,6 +163,14 @@ class RequestTest < Test::Unit::TestCase
     buf = (' ' * bs).freeze
     length = bs * count
     client = Tempfile.new('big_put')
+    def client.kgio_addr; '127.0.0.1'; end
+    def client.kgio_read(*args)
+      readpartial(*args)
+    rescue EOFError
+    end
+    def client.kgio_read!(*args)
+      readpartial(*args)
+    end
     client.syswrite(
       "PUT / HTTP/1.1\r\n" \
       "Host: foo\r\n" \
diff --git a/test/unit/test_response.rb b/test/unit/test_response.rb
index b3bc3a2..fb6edef 100644
--- a/test/unit/test_response.rb
+++ b/test/unit/test_response.rb
@@ -7,41 +7,42 @@
 # for more information.
 
 require 'test/test_helper'
+require 'time'
 
 include Unicorn
 
 class ResponseTest < Test::Unit::TestCase
-  
+  include Unicorn::HttpResponse
+
+  def test_httpdate
+    before = Time.now.to_i
+    str = httpdate
+    assert_kind_of(String, str)
+    middle = Time.parse(str).to_i
+    after = Time.now.to_i
+    assert before <= middle
+    assert middle <= after
+  end
+
   def test_response_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]])
+    http_response_write(out, 200, {"X-Whatever" => "stuff"}, ["cool"])
     assert ! out.closed?
+
     assert out.length > 0, "output didn't have data"
   end
 
   def test_response_string_status
     out = StringIO.new
-    HttpResponse.write(out,['200', {}, []])
+    http_response_write(out,'200', {}, [])
     assert ! out.closed?
     assert out.length > 0, "output didn't have data"
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/).size
   end
 
-  def test_response_OFS_set
-    old_ofs = $,
-    $, = "\f\v"
-    out = StringIO.new
-    HttpResponse.write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]])
-    assert ! out.closed?
-    resp = out.string
-    assert ! resp.include?("\f\v"), "output didn't use $, ($OFS)"
-    ensure
-      $, = old_ofs
-  end
-
   def test_response_200
     io = StringIO.new
-    HttpResponse.write(io, [200, {}, []])
+    http_response_write(io, 200, {}, [])
     assert ! io.closed?
     assert io.length > 0, "output didn't have data"
   end
@@ -49,7 +50,7 @@ class ResponseTest < Test::Unit::TestCase
   def test_response_with_default_reason
     code = 400
     io = StringIO.new
-    HttpResponse.write(io, [code, {}, []])
+    http_response_write(io, code, {}, [])
     assert ! io.closed?
     lines = io.string.split(/\r\n/)
     assert_match(/.* Bad Request$/, lines.first,
@@ -58,7 +59,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_rack_multivalue_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []])
+    http_response_write(out,200, {"X-Whatever" => "stuff\nbleh"}, [])
     assert ! out.closed?
     assert_match(/^X-Whatever: stuff\r\nX-Whatever: bleh\r\n/, out.string)
   end
@@ -67,21 +68,9 @@ class ResponseTest < Test::Unit::TestCase
   # some broken clients still rely on it
   def test_status_header_added
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, []])
-    assert ! out.closed?
-    assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
-  end
-
-  # we always favor the code returned by the application, since "Status"
-  # in the header hash is not allowed by Rack (but not every app is
-  # fully Rack-compliant).
-  def test_status_header_ignores_app_hash
-    out = StringIO.new
-    header_hash = {"X-Whatever" => "stuff", 'StaTus' => "666" }
-    HttpResponse.write(out,[200, header_hash, []])
+    http_response_write(out,200, {"X-Whatever" => "stuff"}, [])
     assert ! out.closed?
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
-    assert_equal 1, out.string.split(/\r\n/).grep(/^Status:/i).size
   end
 
   def test_body_closed
@@ -89,7 +78,7 @@ class ResponseTest < Test::Unit::TestCase
     body = StringIO.new(expect_body)
     body.rewind
     out = StringIO.new
-    HttpResponse.write(out,[200, {}, body])
+    http_response_write(out,200, {}, body)
     assert ! out.closed?
     assert body.closed?
     assert_match(expect_body, out.string.split(/\r\n/).last)
@@ -97,7 +86,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_unknown_status_pass_through
     out = StringIO.new
-    HttpResponse.write(out,["666 I AM THE BEAST", {}, [] ])
+    http_response_write(out,"666 I AM THE BEAST", {}, [] )
     assert ! out.closed?
     headers = out.string.split(/\r\n\r\n/).first.split(/\r\n/)
     assert %r{\AHTTP/\d\.\d 666 I AM THE BEAST\z}.match(headers[0])
diff --git a/test/unit/test_server.rb b/test/unit/test_server.rb
index 41d3e02..88d7aba 100644
--- a/test/unit/test_server.rb
+++ b/test/unit/test_server.rb
@@ -10,7 +10,7 @@ require 'test/test_helper'
 
 include Unicorn
 
-class TestHandler
+class TestHandler
 
   def call(env)
     while env['rack.input'].read(4096)
@@ -19,7 +19,7 @@ class TestHandler
     rescue Unicorn::ClientShutdown, Unicorn::HttpParserError => e
       $stderr.syswrite("#{e.class}: #{e.message} #{e.backtrace.empty?}\n")
       raise e
-   end
+  end
 end
 
 
@@ -169,7 +169,6 @@ class WebServerTest < Test::Unit::TestCase
 
   def test_client_malformed_body
     sock = nil
-    buf = nil
     bs = 15653984
     assert_nothing_raised do
       sock = TCPSocket.new('127.0.0.1', @port)
@@ -271,7 +270,7 @@ class WebServerTest < Test::Unit::TestCase
   def test_file_streamed_request
     body = "a" * (Unicorn::Const::MAX_BODY * 2)
     long = "PUT /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
-    do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
+    do_test(long, Unicorn::Const::CHUNK_SIZE * 2 - 400)
   end
 
   def test_file_streamed_request_bad_body
@@ -279,13 +278,11 @@ class WebServerTest < Test::Unit::TestCase
     long = "GET /test HTTP/1.1\r\nContent-ength: #{body.length}\r\n\r\n" + body
     assert_raises(EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,
                   Errno::EBADF) {
-      do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
+      do_test(long, Unicorn::Const::CHUNK_SIZE * 2 - 400)
     }
   end
 
   def test_listener_names
     assert_equal [ "127.0.0.1:#@port" ], Unicorn.listener_names
   end
-
 end
-
diff --git a/test/unit/test_signals.rb b/test/unit/test_signals.rb
index 7c78b44..71cf8f4 100644
--- a/test/unit/test_signals.rb
+++ b/test/unit/test_signals.rb
@@ -166,7 +166,7 @@ class SignalsTest < Test::Unit::TestCase
     expect = @bs * @count
     assert_equal(expect, got, "expect=#{expect} got=#{got}")
     assert_nothing_raised { sock.close }
-  end unless ENV['RBX_SKIP']
+  end
 
   def test_request_read
     app = lambda { |env|
diff --git a/test/unit/test_stream_input.rb b/test/unit/test_stream_input.rb
new file mode 100644
index 0000000..f59157a
--- /dev/null
+++ b/test/unit/test_stream_input.rb
@@ -0,0 +1,204 @@
+# -*- encoding: binary -*-
+
+require 'test/unit'
+require 'digest/sha1'
+require 'unicorn'
+
+class TestStreamInput < Test::Unit::TestCase
+  def setup
+    @rs = $/
+    @env = {}
+    @rd, @wr = Kgio::UNIXSocket.pair
+    @rd.sync = @wr.sync = true
+    @start_pid = $$
+  end
+
+  def teardown
+    return if $$ != @start_pid
+    $/ = @rs
+    @rd.close rescue nil
+    @wr.close rescue nil
+    Process.waitall
+  end
+
+  def test_read_negative
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_raises(ArgumentError) { si.read(-1) }
+    assert_equal 'hello', si.read
+  end
+
+  def test_read_small
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal 'hello', si.read
+    assert_equal '', si.read
+    assert_nil si.read(5)
+    assert_nil si.gets
+  end
+
+  def test_gets_oneliner
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal 'hello', si.gets
+    assert_nil si.gets
+  end
+
+  def test_gets_multiline
+    r = init_request("a\nb\n\n")
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal "a\n", si.gets
+    assert_equal "b\n", si.gets
+    assert_equal "\n", si.gets
+    assert_nil si.gets
+  end
+
+  def test_gets_empty_rs
+    $/ = nil
+    r = init_request("a\nb\n\n")
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal "a\nb\n\n", si.gets
+    assert_nil si.gets
+  end
+
+  def test_read_with_equal_len
+    r = init_request("abcde")
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal "abcde", si.read(5)
+    assert_nil si.read(5)
+  end
+
+  def test_big_body_multi
+    r = init_request('.', Unicorn::Const::MAX_BODY + 1)
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal Unicorn::Const::MAX_BODY, @parser.content_length
+    assert ! @parser.body_eof?
+    nr = Unicorn::Const::MAX_BODY / 4
+    pid = fork {
+      @rd.close
+      nr.times { @wr.write('....') }
+      @wr.close
+    }
+    @wr.close
+    assert_equal '.', si.read(1)
+    nr.times { |x|
+      assert_equal '....', si.read(4), "nr=#{x}"
+    }
+    assert_nil si.read(1)
+    status = nil
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_gets_long
+    r = init_request("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
+    si = Unicorn::StreamInput.new(@rd, r)
+    status = line = nil
+    pid = fork {
+      @rd.close
+      3.times { @wr.write("ffff" * 4096) }
+      @wr.write "#$/foo#$/"
+      @wr.close
+    }
+    @wr.close
+    assert_nothing_raised { line = si.gets }
+    assert_equal(4096 * 4 * 3 + 5 + $/.size, line.size)
+    assert_equal("hello" << ("ffff" * 4096 * 3) << "#$/", line)
+    assert_nothing_raised { line = si.gets }
+    assert_equal "foo#$/", line
+    assert_nil si.gets
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_read_with_buffer
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    buf = ''
+    rv = si.read(4, buf)
+    assert_equal 'hell', rv
+    assert_equal 'hell', buf
+    assert_equal rv.object_id, buf.object_id
+    assert_equal 'o', si.read
+    assert_equal nil, si.read(5, buf)
+  end
+
+  def test_read_with_buffer_clobbers
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    buf = 'foo'
+    assert_equal 'hello', si.read(nil, buf)
+    assert_equal 'hello', buf
+    assert_equal '', si.read(nil, buf)
+    assert_equal '', buf
+    buf = 'asdf'
+    assert_nil si.read(5, buf)
+    assert_equal '', buf
+  end
+
+  def test_read_zero
+    r = init_request('hello')
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal '', si.read(0)
+    buf = 'asdf'
+    rv = si.read(0, buf)
+    assert_equal rv.object_id, buf.object_id
+    assert_equal '', buf
+    assert_equal 'hello', si.read
+    assert_nil si.read(5)
+    assert_equal '', si.read(0)
+    buf = 'hello'
+    rv = si.read(0, buf)
+    assert_equal rv.object_id, buf.object_id
+    assert_equal '', rv
+  end
+
+  def test_gets_read_mix
+    r = init_request("hello\nasdfasdf")
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal "hello\n", si.gets
+    assert_equal "asdfasdf", si.read(9)
+    assert_nil si.read(9)
+  end
+
+  def test_gets_read_mix_chunked
+    r = @parser = Unicorn::HttpParser.new
+    body = "6\r\nhello"
+    @buf = "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Transfer-Encoding: chunked\r\n" \
+           "\r\n#{body}"
+    assert_equal @env, @parser.headers(@env, @buf)
+    assert_equal body, @buf
+    si = Unicorn::StreamInput.new(@rd, r)
+    @wr.syswrite "\n\r\n"
+    assert_equal "hello\n", si.gets
+    @wr.syswrite "8\r\nasdfasdf\r\n"
+    assert_equal"asdfasdf", si.read(9) + si.read(9)
+    @wr.syswrite "0\r\n\r\n"
+    assert_nil si.read(9)
+  end
+
+  def test_gets_read_mix_big
+    r = init_request("hello\n#{'.' * 65536}")
+    si = Unicorn::StreamInput.new(@rd, r)
+    assert_equal "hello\n", si.gets
+    assert_equal '.' * 16384, si.read(16384)
+    assert_equal '.' * 16383, si.read(16383)
+    assert_equal '.' * 16384, si.read(16384)
+    assert_equal '.' * 16385, si.read(16385)
+    assert_nil si.gets
+  end
+
+  def init_request(body, size = nil)
+    @parser = Unicorn::HttpParser.new
+    body = body.to_s.freeze
+    @buf = "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Content-Length: #{size || body.size}\r\n" \
+           "\r\n#{body}"
+    assert_equal @env, @parser.headers(@env, @buf)
+    assert_equal body, @buf
+    @parser
+  end
+end
diff --git a/test/unit/test_tee_input.rb b/test/unit/test_tee_input.rb
index a127882..96eb268 100644
--- a/test/unit/test_tee_input.rb
+++ b/test/unit/test_tee_input.rb
@@ -4,12 +4,15 @@ require 'test/unit'
 require 'digest/sha1'
 require 'unicorn'
 
+class TeeInput < Unicorn::TeeInput
+  attr_accessor :tmp, :len
+end
+
 class TestTeeInput < Test::Unit::TestCase
 
   def setup
     @rs = $/
-    @env = {}
-    @rd, @wr = IO.pipe
+    @rd, @wr = Kgio::UNIXSocket.pair
     @rd.sync = @wr.sync = true
     @start_pid = $$
   end
@@ -27,8 +30,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_long
-    init_parser("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
+    ti = TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -48,8 +51,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_short
-    init_parser("hello", 5 + "#$/foo".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + "#$/foo".size)
+    ti = TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -67,8 +70,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_small_body
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_equal StringIO, ti.tmp.class
@@ -77,11 +80,12 @@ class TestTeeInput < Test::Unit::TestCase
     assert_equal 'hello', ti.read
     assert_equal '', ti.read
     assert_nil ti.read(4096)
+    assert_equal 5, ti.size
   end
 
   def test_read_with_buffer
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = TeeInput.new(@rd, r)
     buf = ''
     rv = ti.read(4, buf)
     assert_equal 'hell', rv
@@ -95,8 +99,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body
-    init_parser('.' * Unicorn::Const::MAX_BODY << 'a')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.' * Unicorn::Const::MAX_BODY << 'a')
+    ti = TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_kind_of File, ti.tmp
@@ -106,9 +110,9 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_read_in_full_if_content_length
     a, b = 300, 3
-    init_parser('.' * b, 300)
+    r = init_request('.' * b, 300)
     assert_equal 300, @parser.content_length
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = TeeInput.new(@rd, r)
     pid = fork {
       @wr.write('.' * 197)
       sleep 1 # still a *potential* race here that would make the test moot...
@@ -121,13 +125,12 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body_multi
-    init_parser('.', Unicorn::Const::MAX_BODY + 1)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.', Unicorn::Const::MAX_BODY + 1)
+    ti = TeeInput.new(@rd, r)
     assert_equal Unicorn::Const::MAX_BODY, @parser.content_length
     assert ! @parser.body_eof?
     assert_kind_of File, ti.tmp
     assert_equal 0, ti.tmp.pos
-    assert_equal 1, ti.tmp.size
     assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
     nr = Unicorn::Const::MAX_BODY / 4
     pid = fork {
@@ -138,8 +141,8 @@ class TestTeeInput < Test::Unit::TestCase
     @wr.close
     assert_equal '.', ti.read(1)
     assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
-    nr.times {
-      assert_equal '....', ti.read(4)
+    nr.times { |x|
+      assert_equal '....', ti.read(4), "nr=#{x}"
       assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
     }
     assert_nil ti.read(1)
@@ -150,12 +153,12 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_chunked
     @parser = Unicorn::HttpParser.new
-    @buf = "POST / HTTP/1.1\r\n" \
-           "Host: localhost\r\n" \
-           "Transfer-Encoding: chunked\r\n" \
-           "\r\n"
-    assert_equal @env, @parser.headers(@env, @buf)
-    assert_equal "", @buf
+    @parser.buf << "POST / HTTP/1.1\r\n" \
+                   "Host: localhost\r\n" \
+                   "Transfer-Encoding: chunked\r\n" \
+                   "\r\n"
+    assert @parser.parse
+    assert_equal "", @parser.buf
 
     pid = fork {
       @rd.close
@@ -163,7 +166,7 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("0\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -185,12 +188,13 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_chunked_ping_pong
     @parser = Unicorn::HttpParser.new
-    @buf = "POST / HTTP/1.1\r\n" \
+    buf = @parser.buf
+    buf << "POST / HTTP/1.1\r\n" \
            "Host: localhost\r\n" \
            "Transfer-Encoding: chunked\r\n" \
            "\r\n"
-    assert_equal @env, @parser.headers(@env, @buf)
-    assert_equal "", @buf
+    assert @parser.parse
+    assert_equal "", buf
     chunks = %w(aa bbb cccc dddd eeee)
     rd, wr = IO.pipe
 
@@ -201,7 +205,7 @@ class TestTeeInput < Test::Unit::TestCase
       end
       @wr.write("0\r\n\r\n")
     }
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -215,13 +219,14 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_chunked_with_trailer
     @parser = Unicorn::HttpParser.new
-    @buf = "POST / HTTP/1.1\r\n" \
+    buf = @parser.buf
+    buf << "POST / HTTP/1.1\r\n" \
            "Host: localhost\r\n" \
            "Trailer: Hello\r\n" \
            "Transfer-Encoding: chunked\r\n" \
            "\r\n"
-    assert_equal @env, @parser.headers(@env, @buf)
-    assert_equal "", @buf
+    assert @parser.parse
+    assert_equal "", buf
 
     pid = fork {
       @rd.close
@@ -230,28 +235,62 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("Hello: World\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
     assert_equal 25, ti.size
-    assert_equal "World", @env['HTTP_HELLO']
+    assert_equal "World", @parser.env['HTTP_HELLO']
     status = nil
     assert_nothing_raised { pid, status = Process.waitpid2(pid) }
     assert status.success?
   end
 
+  def test_chunked_and_size_slow
+    @parser = Unicorn::HttpParser.new
+    buf = @parser.buf
+    buf << "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Trailer: Hello\r\n" \
+           "Transfer-Encoding: chunked\r\n" \
+           "\r\n"
+    assert @parser.parse
+    assert_equal "", buf
+
+    @wr.write("9\r\nabcde")
+    ti = TeeInput.new(@rd, @parser)
+    assert_nil @parser.content_length
+    assert_equal "abcde", ti.read(9)
+    assert ! @parser.body_eof?
+    @wr.write("fghi\r\n0\r\nHello: World\r\n\r\n")
+    assert_equal 9, ti.size
+    assert_equal "fghi", ti.read(9)
+    assert_equal nil, ti.read(9)
+    assert_equal "World", @parser.env['HTTP_HELLO']
+  end
+
+  def test_gets_read_mix
+    r = init_request("hello\nasdfasdf")
+    ti = Unicorn::TeeInput.new(@rd, r)
+    assert_equal "hello\n", ti.gets
+    assert_equal "asdfasdf", ti.read(9)
+    assert_nil ti.read(9)
+  end
+
 private
 
-  def init_parser(body, size = nil)
+  def init_request(body, size = nil)
     @parser = Unicorn::HttpParser.new
     body = body.to_s.freeze
-    @buf = "POST / HTTP/1.1\r\n" \
+    buf = @parser.buf
+    buf << "POST / HTTP/1.1\r\n" \
            "Host: localhost\r\n" \
            "Content-Length: #{size || body.size}\r\n" \
            "\r\n#{body}"
-    assert_equal @env, @parser.headers(@env, @buf)
-    assert_equal body, @buf
+    assert @parser.parse
+    assert_equal body, buf
+    @buf = buf
+    @parser
   end
 
 end
diff --git a/test/unit/test_upload.rb b/test/unit/test_upload.rb
index dc0eb40..e2c103a 100644
--- a/test/unit/test_upload.rb
+++ b/test/unit/test_upload.rb
@@ -145,8 +145,14 @@ class UploadTest < Test::Unit::TestCase
   end
 
   def test_put_excessive_overwrite_closed
+    tmp = Tempfile.new('overwrite_check')
+    tmp.sync = true
     start_server(lambda { |env|
-      while env['rack.input'].read(65536); end
+      nr = 0
+      while buf = env['rack.input'].read(65536)
+        nr += buf.size
+      end
+      tmp.write(nr.to_s)
       [ 200, @hdr, [] ]
     })
     sock = TCPSocket.new(@addr, @port)
@@ -157,7 +163,9 @@ class UploadTest < Test::Unit::TestCase
     assert_raise(Errno::ECONNRESET, Errno::EPIPE) do
       ::Unicorn::Const::CHUNK_SIZE.times { sock.syswrite(buf) }
     end
-    assert_equal "HTTP/1.1 200 OK\r\n", sock.gets
+    assert_nothing_raised { sock.gets }
+    tmp.rewind
+    assert_equal length, tmp.read.to_i
   end
 
   # Despite reading numerous articles and inspecting the 1.9.1-p0 C
diff --git a/unicorn.gemspec b/unicorn.gemspec
index 973ca09..68e91ab 100644
--- a/unicorn.gemspec
+++ b/unicorn.gemspec
@@ -1,7 +1,9 @@
 # -*- encoding: binary -*-
-
 ENV["VERSION"] or abort "VERSION= must be specified"
 manifest = File.readlines('.manifest').map! { |x| x.chomp! }
+require 'wrongdoc'
+extend Wrongdoc::Gemspec
+name, summary, title = readme_metadata
 
 # don't bother with tests that fork, not worth our time to get working
 # with `gem check -t` ... (of course we care for them when testing with
@@ -12,35 +14,19 @@ end.compact
 
 Gem::Specification.new do |s|
   s.name = %q{unicorn}
-  s.version = ENV["VERSION"]
-
-  s.authors = ["Unicorn hackers"]
+  s.version = ENV["VERSION"].dup
+  s.authors = ["#{name} hackers"]
+  s.summary = summary
   s.date = Time.now.utc.strftime('%Y-%m-%d')
-  s.description = File.read("README").split(/\n\n/)[1].delete('\\')
+  s.description = readme_description
   s.email = %q{mongrel-unicorn@rubyforge.org}
   s.executables = %w(unicorn unicorn_rails)
   s.extensions = %w(ext/unicorn_http/extconf.rb)
-
-  s.extra_rdoc_files = File.readlines('.document').map! do |x|
-    x.chomp!
-    if File.directory?(x)
-      manifest.grep(%r{\A#{x}/})
-    elsif File.file?(x)
-      x
-    else
-      nil
-    end
-  end.flatten.compact
-
+  s.extra_rdoc_files = extra_rdoc_files(manifest)
   s.files = manifest
-  s.homepage = %q{http://unicorn.bogomips.org/}
-
-  summary = %q{Rack HTTP server for fast clients and Unix}
-  s.rdoc_options = [ "-t", "Unicorn: #{summary}" ]
-  s.require_paths = %w(lib ext)
+  s.homepage = Wrongdoc.config[:rdoc_url]
+  s.rdoc_options = rdoc_options
   s.rubyforge_project = %q{mongrel}
-  s.summary = summary
-
   s.test_files = test_files
 
   # for people that are absolutely stuck on Rails 2.3.2 and can't
@@ -48,8 +34,10 @@ Gem::Specification.new do |s|
   # commented out.  Nevertheless, upgrading to Rails 2.3.4 or later is
   # *strongly* recommended for security reasons.
   s.add_dependency(%q<rack>)
+  s.add_dependency(%q<kgio>, '~> 2.3')
 
   s.add_development_dependency('isolate', '~> 3.0.0')
+  s.add_development_dependency('wrongdoc', '~> 1.5')
 
   # s.licenses = %w(GPLv2 Ruby) # licenses= method is not in older RubyGems
 end