Diffstat (limited to 'lib')
-rw-r--r--  lib/unicorn.rb               | 887
-rw-r--r--  lib/unicorn/app/exec_cgi.rb  |   4
-rw-r--r--  lib/unicorn/configurator.rb  | 914
-rw-r--r--  lib/unicorn/const.rb         |  51
-rw-r--r--  lib/unicorn/http_request.rb  | 113
-rw-r--r--  lib/unicorn/http_response.rb | 107
-rw-r--r--  lib/unicorn/http_server.rb   | 695
-rw-r--r--  lib/unicorn/launcher.rb      |   7
-rw-r--r--  lib/unicorn/preread_input.rb |  30
-rw-r--r--  lib/unicorn/socket_helper.rb |  78
-rw-r--r--  lib/unicorn/tee_input.rb     | 392
-rw-r--r--  lib/unicorn/tmpio.rb         |  29
-rw-r--r--  lib/unicorn/util.rb          | 114
-rw-r--r--  lib/unicorn/worker.rb        |  40
14 files changed, 1757 insertions(+), 1704 deletions(-)
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index f454eb7..622dc6c 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,834 +1,83 @@
 # -*- encoding: binary -*-
-
 require 'fcntl'
 require 'etc'
+require 'stringio'
 require 'rack'
-require 'unicorn/socket_helper'
-require 'unicorn/const'
-require 'unicorn/http_request'
-require 'unicorn/configurator'
-require 'unicorn/util'
-require 'unicorn/tee_input'
-require 'unicorn/http_response'
+require 'kgio'
 
-# Unicorn module containing all of the classes (including C extensions) for running
-# a Unicorn web server.  It contains a minimalist HTTP server with just enough
-# functionality to service web application requests as fast as possible.
+# Unicorn module containing all of the classes (including C extensions) for
+# running a Unicorn web server.  It contains a minimalist HTTP server with just
+# enough functionality to service web application requests as fast as possible.
 module Unicorn
-
-  # raised inside TeeInput when a client closes the socket inside the
-  # application dispatch.  This is always raised with an empty backtrace
-  # since there is nothing in the application stack that is responsible
-  # for client shutdowns/disconnects.
-  class ClientShutdown < EOFError
-  end
-
-  class << self
-    def run(app, options = {})
-      HttpServer.new(app, options).start.join
-    end
-
-    # This returns a lambda to pass in as the app; it does not "build" the
-    # app (which we defer based on the outcome of "preload_app" in the
-    # Unicorn config).  The returned lambda will be called when it is
-    # time to build the app.
-    def builder(ru, opts)
-      # allow Configurator to parse cli switches embedded in the ru file
-      Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
-
-      # always called after config file parsing, may be called after forking
-      lambda do ||
-        inner_app = case ru
-        when /\.ru$/
-          raw = File.read(ru)
-          raw.sub!(/^__END__\n.*/, '')
-          eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
-        else
-          require ru
-          Object.const_get(File.basename(ru, '.rb').capitalize)
-        end
-
-        pp({ :inner_app => inner_app }) if $DEBUG
-
-        # return value, matches rackup defaults based on env
-        case ENV["RACK_ENV"]
-        when "development"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            use Rack::ShowExceptions
-            use Rack::Lint
-            run inner_app
-          end.to_app
-        when "deployment"
-          Rack::Builder.new do
-            use Rack::CommonLogger, $stderr
-            run inner_app
-          end.to_app
-        else
-          inner_app
-        end
-      end
-    end
-
-    # returns an array of strings representing TCP listen socket addresses
-    # and Unix domain socket paths.  This is useful with
-    # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
-    def listener_names
-      HttpServer::LISTENERS.map { |io| SocketHelper.sock_name(io) }
-    end
+  def self.run(app, options = {})
+    Unicorn::HttpServer.new(app, options).start.join
   end
 
-  # This is the process manager of Unicorn.  It manages worker
-  # processes, which in turn handle the I/O and run the application.
-  # Listener sockets are started in the master process and shared with
-  # forked worker children.
-
-  class HttpServer < Struct.new(:app, :timeout, :worker_processes,
-                                :before_fork, :after_fork, :before_exec,
-                                :logger, :pid, :listener_opts, :preload_app,
-                                :reexec_pid, :orig_app, :init_listeners,
-                                :master_pid, :config, :ready_pipe, :user)
-    include ::Unicorn::SocketHelper
-
-    # prevents IO objects in here from being GC-ed
-    IO_PURGATORY = []
-
-    # all bound listener sockets
-    LISTENERS = []
-
-    # This hash maps PIDs to Workers
-    WORKERS = {}
-
-    # We use SELF_PIPE differently in the master and worker processes:
-    #
-    # * The master process never closes or reinitializes this once
-    # initialized.  Signal handlers in the master process will write to
-    # it to wake up the master from IO.select in exactly the same manner
-    # djb describes in http://cr.yp.to/docs/selfpipe.html
-    #
-    # * The workers immediately close the pipe they inherit from the
-    # master and replace it with a new pipe after forking.  This new
-    # pipe is also used to wake up from IO.select from inside (worker)
-    # signal handlers.  However, workers *close* the pipe descriptors in
-    # the signal handlers to raise EBADF in IO.select instead of writing
-    # like we do in the master.  We cannot easily use the reader set for
-    # IO.select because LISTENERS is already that set, and it's extra
-    # work (and cycles) to distinguish the pipe FD from the reader set
-    # once IO.select returns.  So we're lazy and just close the pipe when
-    # a (rare) signal arrives in the worker and reinitialize the pipe later.
-    SELF_PIPE = []
-
-    # signal queue used for self-piping
-    SIG_QUEUE = []
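A minimal standalone sketch of the self-pipe pattern described above (an illustration only, not Unicorn's actual code):

    rd, wr = IO.pipe
    trap(:USR1) { wr.write_nonblock('.') rescue nil } # wake the select below
    loop do
      IO.select([ rd ], nil, nil, 1) or next # nil return means timeout
      rd.read_nonblock(16) rescue nil # drain the wakeup byte(s)
      # ... handle the deferred signal here ...
    end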
-
-    # constant lookups are faster and we're single-threaded/non-reentrant
-    REQUEST = HttpRequest.new
-
-    # We populate this at startup so we can figure out how to reexecute
-    # and upgrade the currently running instance of Unicorn
-    # This Hash is considered a stable interface and changing its contents
-    # will allow you to switch between different installations of Unicorn
-    # or even different installations of the same applications without
-    # downtime.  Keys of this constant Hash are described as follows:
-    #
-    # * 0 - the path to the unicorn/unicorn_rails executable
-    # * :argv - a deep copy of the ARGV array the executable originally saw
-    # * :cwd - the working directory of the application; this is where
-    # you originally started Unicorn.
-    #
-    # To change your unicorn executable to a different path without downtime,
-    # you can set the following in your Unicorn config file, HUP and then
-    # continue with the traditional USR2 + QUIT upgrade steps:
-    #
-    #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
-    START_CTX = {
-      :argv => ARGV.map { |arg| arg.dup },
-      :cwd => lambda {
-          # favor ENV['PWD'] since it is (usually) symlink aware for
-          # Capistrano and like systems
-          begin
-            a = File.stat(pwd = ENV['PWD'])
-            b = File.stat(Dir.pwd)
-            a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
-          rescue
-            Dir.pwd
-          end
-        }.call,
-      0 => $0.dup,
-    }
-
-    # This class and its members can be considered a stable interface
-    # and will not change in a backwards-incompatible fashion between
-    # releases of Unicorn.  You may need to access it in the
-    # before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
-    # for examples.
-    class Worker < Struct.new(:nr, :tmp, :switched)
-
-      # worker objects may be compared to just plain numbers
-      def ==(other_nr)
-        self.nr == other_nr
-      end
-
-      # Changes the worker process to the specified +user+ and +group+.
-      # This is only intended to be called from within the worker
-      # process from the +after_fork+ hook.  It should be called in
-      # the +after_fork+ hook after any privileged functions that need
-      # to run (e.g. to set per-worker CPU affinity, niceness, etc.)
-      #
-      # Any and all errors raised within this method will be propagated
-      # directly back to the caller (usually the +after_fork+ hook).
-      # These errors commonly include ArgumentError for specifying an
-      # invalid user/group and Errno::EPERM for insufficient privileges.
-      def user(user, group = nil)
-        # we do not protect the caller, checking Process.euid == 0 is
-        # insufficient because modern systems have fine-grained
-        # capabilities.  Let the caller handle any and all errors.
-        uid = Etc.getpwnam(user).uid
-        gid = Etc.getgrnam(group).gid if group
-        Unicorn::Util.chown_logs(uid, gid)
-        tmp.chown(uid, gid)
-        if gid && Process.egid != gid
-          Process.initgroups(user, gid)
-          Process::GID.change_privilege(gid)
-        end
-        Process.euid != uid and Process::UID.change_privilege(uid)
-        self.switched = true
-      end
-
-    end
-
-    # Creates a working server on host:port (strange things happen if
-    # port isn't a Number).  Use HttpServer#start to launch the server and
-    # HttpServer#start.join to join the thread that's processing
-    # incoming requests on the socket.
-    def initialize(app, options = {})
-      self.app = app
-      self.reexec_pid = 0
-      self.ready_pipe = options.delete(:ready_pipe)
-      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
-      self.config = Configurator.new(options.merge(:use_defaults => true))
-      self.listener_opts = {}
-
-      # we try inheriting listeners first, so we bind them later.
-      # we don't write the pid file until we've bound listeners in case
-      # unicorn was started twice by mistake.  Even though our #pid= method
-      # checks for stale/existing pid files, race conditions are still
-      # possible (and difficult/non-portable to avoid) and are likely
-      # to clobber the pid if the second start was in quick succession
-      # after the first, so we rely on the listener binding to fail in
-      # that case.  Some tests (in and outside of this source tree) and
-      # monitoring tools may also rely on pid files existing before we
-      # attempt to connect to the listener(s)
-      config.commit!(self, :skip => [:listeners, :pid])
-      self.orig_app = app
-    end
-
-    # Runs the thing.  Returns self so you can run join on it
-    def start
-      BasicSocket.do_not_reverse_lookup = true
-
-      # inherit sockets from parents, they need to be plain Socket objects
-      # before they become UNIXServer or TCPServer
-      inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
-        io = Socket.for_fd(fd.to_i)
-        set_server_sockopt(io, listener_opts[sock_name(io)])
-        IO_PURGATORY << io
-        logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
-        server_cast(io)
-      end
-
-      config_listeners = config[:listeners].dup
-      LISTENERS.replace(inherited)
-
-      # we start out with generic Socket objects that get cast to either
-      # TCPServer or UNIXServer objects; but since the Socket objects
-      # share the same OS-level file descriptor as the higher-level *Server
-      # objects; we need to prevent Socket objects from being garbage-collected
-      config_listeners -= listener_names
-      if config_listeners.empty? && LISTENERS.empty?
-        config_listeners << Unicorn::Const::DEFAULT_LISTEN
-        init_listeners << Unicorn::Const::DEFAULT_LISTEN
-        START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
-      end
-      config_listeners.each { |addr| listen(addr) }
-      raise ArgumentError, "no listeners" if LISTENERS.empty?
-
-      # this pipe is used to wake us up from select(2) in #join when signals
-      # are trapped.  See trap_deferred.
-      init_self_pipe!
-
-      # setup signal handlers before writing pid file in case people get
-      # trigger happy and send signals as soon as the pid file exists.
-      # Note that signals don't actually get handled until the #join method
-      QUEUE_SIGS.each { |sig| trap_deferred(sig) }
-      trap(:CHLD) { |_| awaken_master }
-      self.pid = config[:pid]
-
-      self.master_pid = $$
-      build_app! if preload_app
-      maintain_worker_count
-      self
-    end
-
-    # replaces the current listener set with +listeners+.  Sockets that
-    # do not exist in the new listener set will be closed.
-    def listeners=(listeners)
-      cur_names, dead_names = [], []
-      listener_names.each do |name|
-        if ?/ == name[0]
-          # mark unlinked sockets as dead so we can rebind them
-          (File.socket?(name) ? cur_names : dead_names) << name
-        else
-          cur_names << name
-        end
-      end
-      set_names = listener_names(listeners)
-      dead_names.concat(cur_names - set_names).uniq!
-
-      LISTENERS.delete_if do |io|
-        if dead_names.include?(sock_name(io))
-          IO_PURGATORY.delete_if do |pio|
-            pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
-          end
-          (io.close rescue nil).nil? # true
-        else
-          set_server_sockopt(io, listener_opts[sock_name(io)])
-          false
-        end
-      end
-
-      (set_names - cur_names).each { |addr| listen(addr) }
-    end
-
-    def stdout_path=(path); redirect_io($stdout, path); end
-    def stderr_path=(path); redirect_io($stderr, path); end
-
-    def logger=(obj)
-      HttpRequest::DEFAULTS["rack.logger"] = super
-    end
-
-    # sets the path for the PID file of the master process
-    def pid=(path)
-      if path
-        if x = valid_pid?(path)
-          return path if pid && path == pid && x == $$
-          if x == reexec_pid && pid =~ /\.oldbin\z/
-            logger.warn("will not set pid=#{path} while reexec-ed "\
-                        "child is running PID:#{x}")
-            return
-          end
-          raise ArgumentError, "Already running on PID:#{x} " \
-                               "(or pid=#{path} is stale)"
-        end
-      end
-      unlink_pid_safe(pid) if pid
-
-      if path
-        fp = begin
-          tmp = "#{File.dirname(path)}/#{rand}.#$$"
-          File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
-        rescue Errno::EEXIST
-          retry
-        end
-        fp.syswrite("#$$\n")
-        File.rename(fp.path, path)
-        fp.close
-      end
-      super(path)
-    end
-
-    # add a given address to the +listeners+ set, idempotently
-    # Allows workers to add a private, per-process listener via the
-    # after_fork hook.  Very useful for debugging and testing.
-    # +:tries+ may be specified as an option for the number of times
-    # to retry, and +:delay+ may be specified as the time in seconds
-    # to delay between retries.
-    # A negative value for +:tries+ indicates the listen will be
-    # retried indefinitely; this is useful when workers belonging to
-    # different masters are spawned during a transparent upgrade.
-    def listen(address, opt = {}.merge(listener_opts[address] || {}))
-      address = config.expand_addr(address)
-      return if String === address && listener_names.include?(address)
-
-      delay = opt[:delay] || 0.5
-      tries = opt[:tries] || 5
-      begin
-        io = bind_listen(address, opt)
-        unless TCPServer === io || UNIXServer === io
-          IO_PURGATORY << io
-          io = server_cast(io)
-        end
-        logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
-        LISTENERS << io
-        io
-      rescue Errno::EADDRINUSE => err
-        logger.error "adding listener failed addr=#{address} (in use)"
-        raise err if tries == 0
-        tries -= 1
-        logger.error "retrying in #{delay} seconds " \
-                     "(#{tries < 0 ? 'infinite' : tries} tries left)"
-        sleep(delay)
-        retry
-      rescue => err
-        logger.fatal "error adding listener addr=#{address}"
-        raise err
-      end
-    end
-
-    # monitors children and receives signals forever
-    # (or until a termination signal is sent).  This handles signals
-    # one at a time and we'll happily drop signals in case somebody
-    # is signalling us too often.
-    def join
-      respawn = true
-      last_check = Time.now
-
-      proc_name 'master'
-      logger.info "master process ready" # test_exec.rb relies on this message
-      if ready_pipe
-        ready_pipe.syswrite($$.to_s)
-        ready_pipe.close rescue nil
-        self.ready_pipe = nil
-      end
-      begin
-        loop do
-          reap_all_workers
-          case SIG_QUEUE.shift
-          when nil
-            # avoid murdering workers after our master process (or the
-            # machine) comes out of suspend/hibernation
-            if (last_check + timeout) >= (last_check = Time.now)
-              murder_lazy_workers
-            else
-            # wait for workers to wake up on suspend
-              master_sleep(timeout/2.0 + 1)
-            end
-            maintain_worker_count if respawn
-            master_sleep(1)
-          when :QUIT # graceful shutdown
-            break
-          when :TERM, :INT # immediate shutdown
-            stop(false)
-            break
-          when :USR1 # rotate logs
-            logger.info "master reopening logs..."
-            Unicorn::Util.reopen_logs
-            logger.info "master done reopening logs"
-            kill_each_worker(:USR1)
-          when :USR2 # exec binary, stay alive in case something went wrong
-            reexec
-          when :WINCH
-            if Process.ppid == 1 || Process.getpgrp != $$
-              respawn = false
-              logger.info "gracefully stopping all workers"
-              kill_each_worker(:QUIT)
-              self.worker_processes = 0
-            else
-              logger.info "SIGWINCH ignored because we're not daemonized"
-            end
-          when :TTIN
-            respawn = true
-            self.worker_processes += 1
-          when :TTOU
-            self.worker_processes -= 1 if self.worker_processes > 0
-          when :HUP
-            respawn = true
-            if config.config_file
-              load_config!
-              redo # immediate reaping since we may have QUIT workers
-            else # exec binary and exit if there's no config file
-              logger.info "config_file not present, reexecuting binary"
-              reexec
-              break
-            end
-          end
-        end
-      rescue Errno::EINTR
-        retry
-      rescue => e
-        logger.error "Unhandled master loop exception #{e.inspect}."
-        logger.error e.backtrace.join("\n")
-        retry
-      end
-      stop # gracefully shutdown all workers on our way out
-      logger.info "master complete"
-      unlink_pid_safe(pid) if pid
-    end
-
-    # Terminates all workers, but does not exit master process
-    def stop(graceful = true)
-      self.listeners = []
-      limit = Time.now + timeout
-      until WORKERS.empty? || Time.now > limit
-        kill_each_worker(graceful ? :QUIT : :TERM)
-        sleep(0.1)
-        reap_all_workers
-      end
-      kill_each_worker(:KILL)
-    end
-
-    private
-
-    # list of signals we care about and trap in master.
-    QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
-                   :TTIN, :TTOU ]
-
-    # defer a signal for later processing in #join (master process)
-    def trap_deferred(signal)
-      trap(signal) do |sig_nr|
-        if SIG_QUEUE.size < 5
-          SIG_QUEUE << signal
-          awaken_master
-        else
-          logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
-        end
-      end
-    end
-
-    # wait for a signal handler to wake us up and then consume the pipe.
-    # Wake up every second anyway to run murder_lazy_workers
-    def master_sleep(sec)
-      IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
-      SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
-      rescue Errno::EAGAIN, Errno::EINTR
-    end
-
-    def awaken_master
-      begin
-        SELF_PIPE.last.write_nonblock('.') # wakeup master process from select
-      rescue Errno::EAGAIN, Errno::EINTR
-        # pipe is full, master should wake up anyway
-        retry
-      end
-    end
-
-    # reaps all unreaped workers
-    def reap_all_workers
-      begin
-        loop do
-          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
-          wpid or break
-          if reexec_pid == wpid
-            logger.error "reaped #{status.inspect} exec()-ed"
-            self.reexec_pid = 0
-            self.pid = pid.chomp('.oldbin') if pid
-            proc_name 'master'
-          else
-            worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-            logger.info "reaped #{status.inspect} " \
-                        "worker=#{worker.nr rescue 'unknown'}"
-          end
-        end
-      rescue Errno::ECHILD
-      end
-    end
-
-    # reexecutes the START_CTX with a new binary
-    def reexec
-      if reexec_pid > 0
-        begin
-          Process.kill(0, reexec_pid)
-          logger.error "reexec-ed child already running PID:#{reexec_pid}"
-          return
-        rescue Errno::ESRCH
-          self.reexec_pid = 0
-        end
-      end
-
-      if pid
-        old_pid = "#{pid}.oldbin"
-        prev_pid = pid.dup
-        begin
-          self.pid = old_pid  # clear the path for a new pid file
-        rescue ArgumentError
-          logger.error "old PID:#{valid_pid?(old_pid)} running with " \
-                       "existing pid=#{old_pid}, refusing rexec"
-          return
-        rescue => e
-          logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
-          return
-        end
-      end
-
-      self.reexec_pid = fork do
-        listener_fds = LISTENERS.map { |sock| sock.fileno }
-        ENV['UNICORN_FD'] = listener_fds.join(',')
-        Dir.chdir(START_CTX[:cwd])
-        cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
-
-        # avoid leaking FDs we don't know about, but let before_exec
-        # unset FD_CLOEXEC, if anything else in the app eventually
-        # relies on FD inheritance.
-        (3..1024).each do |io|
-          next if listener_fds.include?(io)
-          io = IO.for_fd(io) rescue nil
-          io or next
-          IO_PURGATORY << io
-          io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-        end
-        logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
-        before_exec.call(self)
-        exec(*cmd)
-      end
-      proc_name 'master (old)'
-    end
-
-    # forcibly terminate all workers that haven't checked in within timeout
-    # seconds.  The timeout is implemented using an unlinked File
-    # shared between the parent process and each worker.  The worker
-    # runs File#chmod to modify the ctime of the File.  If the ctime
-    # is stale for >timeout seconds, then we'll kill the corresponding
-    # worker.
-    def murder_lazy_workers
-      WORKERS.dup.each_pair do |wpid, worker|
-        stat = worker.tmp.stat
-        # skip workers that disable fchmod or have never fchmod-ed
-        stat.mode == 0100600 and next
-        (diff = (Time.now - stat.ctime)) <= timeout and next
-        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
-                     "(#{diff}s > #{timeout}s), killing"
-        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
-      end
-    end
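A compact illustration of the chmod heartbeat described above, reusing the surrounding names; assume +tmp+ is the worker's unlinked file, +m+ its flip-flop bit, and +wpid+/+timeout+ as in the method:

    # worker side: any successful chmod updates the file's ctime
    tmp.chmod(m = 0 == m ? 1 : 0)
    # master side: a ctime staler than +timeout+ means the worker is stuck
    Time.now - tmp.stat.ctime > timeout and Process.kill(:KILL, wpid)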
-
-    def spawn_missing_workers
-      (0...worker_processes).each do |worker_nr|
-        WORKERS.values.include?(worker_nr) and next
-        worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
-        before_fork.call(self, worker)
-        WORKERS[fork {
-          ready_pipe.close if ready_pipe
-          self.ready_pipe = nil
-          worker_loop(worker)
-        }] = worker
-      end
-    end
-
-    def maintain_worker_count
-      (off = WORKERS.size - worker_processes) == 0 and return
-      off < 0 and return spawn_missing_workers
-      WORKERS.dup.each_pair { |wpid,w|
-        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
-      }
-    end
-
-    # if we get any error, try to write something back to the client
-    # assuming we haven't closed the socket, but don't get hung up
-    # if the socket is already closed or broken.  We'll always ensure
-    # the socket is closed at the end of this function
-    def handle_error(client, e)
-      msg = case e
-      when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
-        Const::ERROR_500_RESPONSE
-      when HttpParserError # try to tell the client they're bad
-        Const::ERROR_400_RESPONSE
+  # This returns a lambda to pass in as the app, this does not "build" the
+  # app (which we defer based on the outcome of "preload_app" in the
+  # Unicorn config).  The returned lambda will be called when it is
+  # time to build the app.
+  def self.builder(ru, opts)
+    # allow Configurator to parse cli switches embedded in the ru file
+    Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
+
+    # always called after config file parsing, may be called after forking
+    lambda do ||
+      inner_app = case ru
+      when /\.ru$/
+        raw = File.read(ru)
+        raw.sub!(/^__END__\n.*/, '')
+        eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
       else
-        logger.error "Read error: #{e.inspect}"
-        logger.error e.backtrace.join("\n")
-        Const::ERROR_500_RESPONSE
-      end
-      client.write_nonblock(msg)
-      client.close
-      rescue
-        nil
-    end
-
-    # once a client is accepted, it is processed in its entirety here
-    # in 3 easy steps: read request, call app, write app response
-    def process_client(client)
-      client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      response = app.call(env = REQUEST.read(client))
-
-      if 100 == response.first.to_i
-        client.write(Const::EXPECT_100_RESPONSE)
-        env.delete(Const::HTTP_EXPECT)
-        response = app.call(env)
-      end
-      HttpResponse.write(client, response, HttpRequest::PARSER.headers?)
-    rescue => e
-      handle_error(client, e)
-    end
-
-    # gets rid of stuff the worker has no business keeping track of
-    # to free some resources and drops all sig handlers.
-    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
-    # by the user.
-    def init_worker_process(worker)
-      QUEUE_SIGS.each { |sig| trap(sig, nil) }
-      trap(:CHLD, 'DEFAULT')
-      SIG_QUEUE.clear
-      proc_name "worker[#{worker.nr}]"
-      START_CTX.clear
-      init_self_pipe!
-      WORKERS.values.each { |other| other.tmp.close rescue nil }
-      WORKERS.clear
-      LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
-      worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      after_fork.call(self, worker) # can drop perms
-      worker.user(*user) if user.kind_of?(Array) && ! worker.switched
-      self.timeout /= 2.0 # halve it for select()
-      build_app! unless preload_app
-    end
-
-    def reopen_worker_logs(worker_nr)
-      logger.info "worker=#{worker_nr} reopening logs..."
-      Unicorn::Util.reopen_logs
-      logger.info "worker=#{worker_nr} done reopening logs"
-      init_self_pipe!
-    end
-
-    # runs inside each forked worker; this sits around and waits
-    # for connections and doesn't die until the parent dies (or is
-    # given an INT, QUIT, or TERM signal)
-    def worker_loop(worker)
-      ppid = master_pid
-      init_worker_process(worker)
-      nr = 0 # this becomes negative if we need to reopen logs
-      alive = worker.tmp # tmp is our lifeline to the master process
-      ready = LISTENERS
-
-      # closing anything we IO.select on will raise EBADF
-      trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
-      trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
-      [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
-      logger.info "worker=#{worker.nr} ready"
-      m = 0
-
-      begin
-        nr < 0 and reopen_worker_logs(worker.nr)
-        nr = 0
-
-        # we're a goner in timeout seconds anyways if alive.chmod
-        # breaks, so don't trap the exception.  Using fchmod() since
-        # futimes() is not available in base Ruby and I very strongly
-        # prefer temporary files to be unlinked for security,
-        # performance and reliability reasons, so utime is out.  No-op
-    # changes with chmod don't update ctime on all filesystems; so
-        # we change our counter each and every time (after process_client
-        # and before IO.select).
-        alive.chmod(m = 0 == m ? 1 : 0)
-
-        ready.each do |sock|
-          begin
-            process_client(sock.accept_nonblock)
-            nr += 1
-            alive.chmod(m = 0 == m ? 1 : 0)
-          rescue Errno::EAGAIN, Errno::ECONNABORTED
-          end
-          break if nr < 0
-        end
-
-        # make the following bet: if we accepted clients this round,
-        # we're probably reasonably busy, so avoid calling select()
-        # and do a speculative accept_nonblock on ready listeners
-        # before we sleep again in select().
-        redo unless nr == 0 # (nr < 0) => reopen logs
-
-        ppid == Process.ppid or return
-        alive.chmod(m = 0 == m ? 1 : 0)
-        begin
-          # timeout used so we can detect parent death:
-          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
-          ready = ret.first
-        rescue Errno::EINTR
-          ready = LISTENERS
-        rescue Errno::EBADF
-          nr < 0 or return
-        end
-      rescue => e
-        if alive
-          logger.error "Unhandled listen loop exception #{e.inspect}."
-          logger.error e.backtrace.join("\n")
-        end
-      end while alive
-    end
-
-    # delivers a signal to a worker and fails gracefully if the worker
-    # is no longer running.
-    def kill_worker(signal, wpid)
-      begin
-        Process.kill(signal, wpid)
-      rescue Errno::ESRCH
-        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
-      end
-    end
-
-    # delivers a signal to each worker
-    def kill_each_worker(signal)
-      WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
-    end
-
-    # unlinks a PID file at given +path+ if it contains the current PID
-    # still potentially racy without locking the directory (which is
-    # non-portable and may interact badly with other programs), but the
-    # window for hitting the race condition is small
-    def unlink_pid_safe(path)
-      (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
-    end
-
-    # returns a PID if a given path contains a non-stale PID file,
-    # nil otherwise.
-    def valid_pid?(path)
-      wpid = File.read(path).to_i
-      wpid <= 0 and return nil
-      begin
-        Process.kill(0, wpid)
-        wpid
-      rescue Errno::ESRCH
-        # don't unlink stale pid files, racy without non-portable locking...
-      end
-      rescue Errno::ENOENT
-    end
-
-    def load_config!
-      loaded_app = app
-      begin
-        logger.info "reloading config_file=#{config.config_file}"
-        config[:listeners].replace(init_listeners)
-        config.reload
-        config.commit!(self)
-        kill_each_worker(:QUIT)
-        Unicorn::Util.reopen_logs
-        self.app = orig_app
-        build_app! if preload_app
-        logger.info "done reloading config_file=#{config.config_file}"
-      rescue StandardError, LoadError, SyntaxError => e
-        logger.error "error reloading config_file=#{config.config_file}: " \
-                     "#{e.class} #{e.message} #{e.backtrace}"
-        self.app = loaded_app
-      end
-    end
-
-    # returns an array of string names for the given listener array
-    def listener_names(listeners = LISTENERS)
-      listeners.map { |io| sock_name(io) }
-    end
-
-    def build_app!
-      if app.respond_to?(:arity) && app.arity == 0
-        if defined?(Gem) && Gem.respond_to?(:refresh)
-          logger.info "Refreshing Gem list"
-          Gem.refresh
-        end
-        self.app = app.call
+        require ru
+        Object.const_get(File.basename(ru, '.rb').capitalize)
+      end
+
+      pp({ :inner_app => inner_app }) if $DEBUG
+
+      # return value, matches rackup defaults based on env
+      case ENV["RACK_ENV"]
+      when "development"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          use Rack::ShowExceptions
+          use Rack::Lint
+          run inner_app
+        end.to_app
+      when "deployment"
+        Rack::Builder.new do
+          use Rack::CommonLogger, $stderr
+          run inner_app
+        end.to_app
+      else
+        inner_app
       end
     end
+  end
 
-    def proc_name(tag)
-      $0 = ([ File.basename(START_CTX[0]), tag
-            ]).concat(START_CTX[:argv]).join(' ')
-    end
-
-    def redirect_io(io, path)
-      File.open(path, 'ab') { |fp| io.reopen(fp) } if path
-      io.sync = true
-    end
-
-    def init_self_pipe!
-      SELF_PIPE.each { |io| io.close rescue nil }
-      SELF_PIPE.replace(IO.pipe)
-      SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  # returns an array of strings representing TCP listen socket addresses
+  # and Unix domain socket paths.  This is useful with
+  # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
+  def self.listener_names
+    Unicorn::HttpServer::LISTENERS.map do |io|
+      Unicorn::SocketHelper.sock_name(io)
     end
-
   end
 end
+
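Following the Raindrops::Middleware pointer above, a hedged config.ru sketch; the raindrops gem and MyApp are assumptions, and the listener addresses are hypothetical:

    # config.ru
    require 'raindrops'
    use Raindrops::Middleware, :listeners => %w(0.0.0.0:8080 /tmp/unicorn.sock)
    run MyApp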
+# raised inside TeeInput when a client closes the socket inside the
+# application dispatch.  This is always raised with an empty backtrace
+# since there is nothing in the application stack that is responsible
+# for client shutdowns/disconnects.
+class Unicorn::ClientShutdown < EOFError; end
+
+require 'unicorn/const'
+require 'unicorn/socket_helper'
+require 'unicorn/tee_input'
+require 'unicorn/http_request'
+require 'unicorn/configurator'
+require 'unicorn/tmpio'
+require 'unicorn/util'
+require 'unicorn/http_response'
+require 'unicorn/worker'
+require 'unicorn/http_server'
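As context for the consolidated entry points above, a minimal boot sketch; normally the unicorn(1) executable drives this rather than user code, and the listener address is only an example:

    require 'unicorn'

    app = lambda { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ "hi\n" ] ] }
    Unicorn.run(app, :listeners => %w(127.0.0.1:8080)) # blocks in the master loop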
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
index 412c1d9..fea22f6 100644
--- a/lib/unicorn/app/exec_cgi.rb
+++ b/lib/unicorn/app/exec_cgi.rb
@@ -43,7 +43,7 @@ module Unicorn::App
 
     # Calls the app
     def call(env)
-      out, err = Unicorn::Util.tmpio, Unicorn::Util.tmpio
+      out, err = Unicorn::TmpIO.new, Unicorn::TmpIO.new
       inp = force_file_input(env)
       pid = fork { run_child(inp, out, err, env) }
       inp.close
@@ -124,7 +124,7 @@ module Unicorn::App
       if inp.respond_to?(:size) && inp.size == 0
         ::File.open('/dev/null', 'rb')
       else
-        tmp = Unicorn::Util.tmpio
+        tmp = Unicorn::TmpIO.new
 
         buf = inp.read(CHUNK_SIZE)
         begin
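The Unicorn::TmpIO.new calls above replace Unicorn::Util.tmpio, matching the new lib/unicorn/tmpio.rb split out in this change.  A rough usage sketch, assuming TmpIO behaves like an unlinked, binary-mode temporary file:

    tmp = Unicorn::TmpIO.new # anonymous temporary file, already unlinked
    tmp.write("buffered data")
    tmp.rewind
    data = tmp.read
    tmp.close # storage is reclaimed on close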
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 378a130..dd515a7 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -1,489 +1,521 @@
 # -*- encoding: binary -*-
-
-require 'socket'
 require 'logger'
 
-module Unicorn
-
-  # Implements a simple DSL for configuring a Unicorn server.
-  #
-  # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
-  # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
-  # example configuration files.  An example config file for use with
-  # nginx is also available at
-  # http://unicorn.bogomips.org/examples/nginx.conf
-  class Configurator < Struct.new(:set, :config_file, :after_reload)
-    # :stopdoc:
-    # used to stash stuff for deferred processing of cli options in
-    # config.ru after "working_directory" is bound.  Do not rely on
-    # this being around later on...
-    RACKUP = {}
-    # :startdoc:
-
-    # Default settings for Unicorn
-    DEFAULTS = {
-      :timeout => 60,
-      :logger => Logger.new($stderr),
-      :worker_processes => 1,
-      :after_fork => lambda { |server, worker|
-          server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
-        },
-      :before_fork => lambda { |server, worker|
-          server.logger.info("worker=#{worker.nr} spawning...")
-        },
-      :before_exec => lambda { |server|
-          server.logger.info("forked child re-executing...")
-        },
-      :pid => nil,
-      :preload_app => false,
-    }
-
-    def initialize(defaults = {}) #:nodoc:
-      self.set = Hash.new(:unset)
-      @use_defaults = defaults.delete(:use_defaults)
-      self.config_file = defaults.delete(:config_file)
-
-      # after_reload is only used by unicorn_rails, unsupported otherwise
-      self.after_reload = defaults.delete(:after_reload)
+# Implements a simple DSL for configuring a \Unicorn server.
+#
+# See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
+# http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
+# example configuration files.  An example config file for use with
+# nginx is also available at
+# http://unicorn.bogomips.org/examples/nginx.conf
+class Unicorn::Configurator
+  attr_accessor :set, :config_file, :after_reload
+
+  # :stopdoc:
+  # used to stash stuff for deferred processing of cli options in
+  # config.ru after "working_directory" is bound.  Do not rely on
+  # this being around later on...
+  RACKUP = {
+    :daemonize => false,
+    :host => Unicorn::Const::DEFAULT_HOST,
+    :port => Unicorn::Const::DEFAULT_PORT,
+    :set_listener => false,
+    :options => { :listeners => [] }
+  }
+
+  # Default settings for Unicorn
+  DEFAULTS = {
+    :timeout => 60,
+    :logger => Logger.new($stderr),
+    :worker_processes => 1,
+    :after_fork => lambda { |server, worker|
+        server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
+      },
+    :before_fork => lambda { |server, worker|
+        server.logger.info("worker=#{worker.nr} spawning...")
+      },
+    :before_exec => lambda { |server|
+        server.logger.info("forked child re-executing...")
+      },
+    :pid => nil,
+    :preload_app => false,
+  }
+  #:startdoc:
+
+  def initialize(defaults = {}) #:nodoc:
+    self.set = Hash.new(:unset)
+    @use_defaults = defaults.delete(:use_defaults)
+    self.config_file = defaults.delete(:config_file)
+
+    # after_reload is only used by unicorn_rails, unsupported otherwise
+    self.after_reload = defaults.delete(:after_reload)
+
+    set.merge!(DEFAULTS) if @use_defaults
+    defaults.each { |key, value| self.__send__(key, value) }
+    Hash === set[:listener_opts] or
+        set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
+    Array === set[:listeners] or set[:listeners] = []
+    reload(false)
+  end
 
+  def reload(merge_defaults = true) #:nodoc:
+    if merge_defaults && @use_defaults
       set.merge!(DEFAULTS) if @use_defaults
-      defaults.each { |key, value| self.__send__(key, value) }
-      Hash === set[:listener_opts] or
-          set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
-      Array === set[:listeners] or set[:listeners] = []
-      reload(false)
     end
+    instance_eval(File.read(config_file), config_file) if config_file
 
-    def reload(merge_defaults = true) #:nodoc:
-      if merge_defaults && @use_defaults
-        set.merge!(DEFAULTS) if @use_defaults
-      end
-      instance_eval(File.read(config_file), config_file) if config_file
+    parse_rackup_file
 
-      parse_rackup_file
+    RACKUP[:set_listener] and
+      set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}"
 
-      # unicorn_rails creates dirs here after working_directory is bound
-      after_reload.call if after_reload
+    # unicorn_rails creates dirs here after working_directory is bound
+    after_reload.call if after_reload
 
-      # working_directory binds immediately (easier error checking that way),
-      # now ensure any paths we changed are correctly set.
-      [ :pid, :stderr_path, :stdout_path ].each do |var|
-        String === (path = set[var]) or next
-        path = File.expand_path(path)
-        File.writable?(path) || File.writable?(File.dirname(path)) or \
-              raise ArgumentError, "directory for #{var}=#{path} not writable"
-      end
+    # working_directory binds immediately (easier error checking that way),
+    # now ensure any paths we changed are correctly set.
+    [ :pid, :stderr_path, :stdout_path ].each do |var|
+      String === (path = set[var]) or next
+      path = File.expand_path(path)
+      File.writable?(path) || File.writable?(File.dirname(path)) or \
+            raise ArgumentError, "directory for #{var}=#{path} not writable"
     end
+  end
 
-    def commit!(server, options = {}) #:nodoc:
-      skip = options[:skip] || []
-      if ready_pipe = RACKUP.delete(:ready_pipe)
-        server.ready_pipe = ready_pipe
-      end
-      set.each do |key, value|
-        value == :unset and next
-        skip.include?(key) and next
-        server.__send__("#{key}=", value)
-      end
+  def commit!(server, options = {}) #:nodoc:
+    skip = options[:skip] || []
+    if ready_pipe = RACKUP.delete(:ready_pipe)
+      server.ready_pipe = ready_pipe
     end
-
-    def [](key) # :nodoc:
-      set[key]
+    set.each do |key, value|
+      value == :unset and next
+      skip.include?(key) and next
+      server.__send__("#{key}=", value)
     end
+  end
 
-    # sets the logger to the +new+ Logger-like object.  The new logger-like
-    # object must respond to the following methods:
-    #  +debug+, +info+, +warn+, +error+, +fatal+
-    # The default Logger will log its output to the path specified
-    # by +stderr_path+.  If you're running Unicorn daemonized, then
-    # you must specify a path to prevent error messages from going
-    # to /dev/null.
-    def logger(new)
-      %w(debug info warn error fatal).each do |m|
-        new.respond_to?(m) and next
-        raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
-      end
-
-      set[:logger] = new
-    end
+  def [](key) # :nodoc:
+    set[key]
+  end
 
-    # sets after_fork hook to a given block.  This block will be called by
-    # the worker after forking.  The following is an example hook which adds
-    # a per-process listener to every worker:
-    #
-    #  after_fork do |server,worker|
-    #    # per-process listener ports for debugging/admin:
-    #    addr = "127.0.0.1:#{9293 + worker.nr}"
-    #
-    #    # the negative :tries parameter indicates we will retry forever
-    #    # waiting on the existing process to exit with a 5 second :delay
-    #    # Existing options for Unicorn::Configurator#listen such as
-    #    # :backlog, :rcvbuf, :sndbuf are available here as well.
-    #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
-    #
-    #    # drop permissions to "www-data" in the worker
-    #    # generally there's no reason to start Unicorn as a privileged user
-    #    # as it is not recommended to expose Unicorn to public clients.
-    #    worker.user('www-data', 'www-data') if Process.euid == 0
-    #  end
-    def after_fork(*args, &block)
-      set_hook(:after_fork, block_given? ? block : args[0])
+  # sets the logger to the +new+ Logger-like object.  The new logger-like
+  # object must respond to the following methods:
+  #  +debug+, +info+, +warn+, +error+, +fatal+
+  # The default Logger will log its output to the path specified
+  # by +stderr_path+.  If you're running Unicorn daemonized, then
+  # you must specify a path to prevent error messages from going
+  # to /dev/null.
+  def logger(new)
+    %w(debug info warn error fatal).each do |m|
+      new.respond_to?(m) and next
+      raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
     end
 
-    # sets before_fork to be a given Proc object.  This Proc
-    # object will be called by the master process before forking
-    # each worker.
-    def before_fork(*args, &block)
-      set_hook(:before_fork, block_given? ? block : args[0])
-    end
+    set[:logger] = new
+  end
 
-    # sets the before_exec hook to a given Proc object.  This
-    # Proc object will be called by the master process right
-    # before exec()-ing the new unicorn binary.  This is useful
-    # for freeing certain OS resources that you do NOT wish to
-    # share with the reexeced child process.
-    # There is no corresponding after_exec hook (for obvious reasons).
-    def before_exec(*args, &block)
-      set_hook(:before_exec, block_given? ? block : args[0], 1)
-    end
+  # sets after_fork hook to a given block.  This block will be called by
+  # the worker after forking.  The following is an example hook which adds
+  # a per-process listener to every worker:
+  #
+  #  after_fork do |server,worker|
+  #    # per-process listener ports for debugging/admin:
+  #    addr = "127.0.0.1:#{9293 + worker.nr}"
+  #
+  #    # the negative :tries parameter indicates we will retry forever
+  #    # waiting on the existing process to exit with a 5 second :delay
+  #    # Existing options for Unicorn::Configurator#listen such as
+  #    # :backlog, :rcvbuf, :sndbuf are available here as well.
+  #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
+  #
+  #    # drop permissions to "www-data" in the worker
+  #    # generally there's no reason to start Unicorn as a privileged user
+  #    # as it is not recommended to expose Unicorn to public clients.
+  #    worker.user('www-data', 'www-data') if Process.euid == 0
+  #  end
+  def after_fork(*args, &block)
+    set_hook(:after_fork, block_given? ? block : args[0])
+  end
 
-    # sets the timeout of worker processes to +seconds+.  Workers
-    # handling the request/app.call/response cycle taking longer than
-    # this time period will be forcibly killed (via SIGKILL).  This
-    # timeout is enforced by the master process itself and not subject
-    # to the scheduling limitations by the worker process.  Due to the
-    # low-complexity, low-overhead implementation, timeouts of less
-    # than 3.0 seconds can be considered inaccurate and unsafe.
-    #
-    # For running Unicorn behind nginx, it is recommended to set
-    # "fail_timeout=0" for in your nginx configuration like this
-    # to have nginx always retry backends that may have had workers
-    # SIGKILL-ed due to timeouts.
-    #
-    #    # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
-    #    # on nginx upstream configuration:
-    #    upstream unicorn_backend {
-    #      # for UNIX domain socket setups:
-    #      server unix:/path/to/unicorn.sock fail_timeout=0;
-    #
-    #      # for TCP setups
-    #      server 192.168.0.7:8080 fail_timeout=0;
-    #      server 192.168.0.8:8080 fail_timeout=0;
-    #      server 192.168.0.9:8080 fail_timeout=0;
-    #    }
-    def timeout(seconds)
-      Numeric === seconds or raise ArgumentError,
-                                  "not numeric: timeout=#{seconds.inspect}"
-      seconds >= 3 or raise ArgumentError,
-                                  "too low: timeout=#{seconds.inspect}"
-      set[:timeout] = seconds
-    end
+  # sets before_fork to be a given Proc object.  This Proc
+  # object will be called by the master process before forking
+  # each worker.
+  def before_fork(*args, &block)
+    set_hook(:before_fork, block_given? ? block : args[0])
+  end
 
-    # sets the current number of worker_processes to +nr+.  Each worker
-    # process will serve exactly one client at a time.  You can
-    # increment or decrement this value at runtime by sending SIGTTIN
-    # or SIGTTOU respectively to the master process without reloading
-    # the rest of your Unicorn configuration.  See the SIGNALS document
-    # for more information.
-    def worker_processes(nr)
-      Integer === nr or raise ArgumentError,
-                             "not an integer: worker_processes=#{nr.inspect}"
-      nr >= 0 or raise ArgumentError,
-                             "not non-negative: worker_processes=#{nr.inspect}"
-      set[:worker_processes] = nr
-    end
+  # sets the before_exec hook to a given Proc object.  This
+  # Proc object will be called by the master process right
+  # before exec()-ing the new unicorn binary.  This is useful
+  # for freeing certain OS resources that you do NOT wish to
+  # share with the reexeced child process.
+  # There is no corresponding after_exec hook (for obvious reasons).
+  def before_exec(*args, &block)
+    set_hook(:before_exec, block_given? ? block : args[0], 1)
+  end
 
-    # sets listeners to the given +addresses+, replacing or augmenting the
-    # current set.  This is for the global listener pool shared by all
-    # worker processes.  For per-worker listeners, see the after_fork example.
-    # This is for internal API use only; do not use it in your Unicorn
-    # config file.  Use listen instead.
-    def listeners(addresses) # :nodoc:
-      Array === addresses or addresses = Array(addresses)
-      addresses.map! { |addr| expand_addr(addr) }
-      set[:listeners] = addresses
-    end
+  # sets the timeout of worker processes to +seconds+.  Workers
+  # handling the request/app.call/response cycle taking longer than
+  # this time period will be forcibly killed (via SIGKILL).  This
+  # timeout is enforced by the master process itself and not subject
+  # to the scheduling limitations by the worker process.  Due to the
+  # low-complexity, low-overhead implementation, timeouts of less
+  # than 3.0 seconds can be considered inaccurate and unsafe.
+  #
+  # For running Unicorn behind nginx, it is recommended to set
+  # "fail_timeout=0" for in your nginx configuration like this
+  # to have nginx always retry backends that may have had workers
+  # SIGKILL-ed due to timeouts.
+  #
+  #    # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
+  #    # on nginx upstream configuration:
+  #    upstream unicorn_backend {
+  #      # for UNIX domain socket setups:
+  #      server unix:/path/to/unicorn.sock fail_timeout=0;
+  #
+  #      # for TCP setups
+  #      server 192.168.0.7:8080 fail_timeout=0;
+  #      server 192.168.0.8:8080 fail_timeout=0;
+  #      server 192.168.0.9:8080 fail_timeout=0;
+  #    }
+  def timeout(seconds)
+    Numeric === seconds or raise ArgumentError,
+                                "not numeric: timeout=#{seconds.inspect}"
+    seconds >= 3 or raise ArgumentError,
+                                "too low: timeout=#{seconds.inspect}"
+    set[:timeout] = seconds
+  end
 
-    # adds an +address+ to the existing listener set.
-    #
-    # The following options may be specified (but are generally not needed):
-    #
-    # +:backlog+: this is the backlog of the listen() syscall.
-    #
-    # Some operating systems allow negative values here to specify the
-    # maximum allowable value.  In most cases, this number is only a
-    # recommendation and there are other OS-specific tunables and
-    # variables that can affect this number.  See the listen(2)
-    # syscall documentation of your OS for the exact semantics of
-    # this.
-    #
-    # If you are running unicorn on multiple machines, lowering this number
-    # can help your load balancer detect when a machine is overloaded
-    # and give requests to a different machine.
-    #
-    # Default: 1024
-    #
-    # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
-    #
-    # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
-    # can be set via the setsockopt(2) syscall.  Some kernels
-    # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
-    # there is no need (and it is sometimes detrimental) to specify them.
-    #
-    # See the socket API documentation of your operating system
-    # to determine the exact semantics of these settings and
-    # other operating system-specific knobs where they can be
-    # specified.
-    #
-    # Defaults: operating system defaults
-    #
-    # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
-    #
-    # This has no effect on UNIX sockets.
-    #
-    # Default: operating system defaults (usually Nagle's algorithm enabled)
-    #
-    # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
-    #
-    # This will prevent partial TCP frames from being sent out.
-    # Enabling +tcp_nopush+ is generally not needed or recommended as
-    # controlling +tcp_nodelay+ already provides sufficient latency
-    # reduction, and Unicorn does not know when the best times are
-    # for flushing corked sockets.
-    #
-    # This has no effect on UNIX sockets.
-    #
-    # +:tries+: times to retry binding a socket if it is already in use
-    #
-    # A negative number indicates we will retry indefinitely; this is
-    # useful for migrations and upgrades when individual workers
-    # are binding to different ports.
-    #
-    # Default: 5
-    #
-    # +:delay+: seconds to wait between successive +tries+
-    #
-    # Default: 0.5 seconds
-    #
-    # +:umask+: sets the file mode creation mask for UNIX sockets
-    #
-    # Typically UNIX domain sockets are created with more liberal
-    # file permissions than the rest of the application.  By default,
-    # we create UNIX domain sockets to be readable and writable by
-    # all local users to give them the same accessibility as
-    # locally-bound TCP listeners.
-    #
-    # This has no effect on TCP listeners.
-    #
-    # Default: 0 (world read/writable)
-    def listen(address, opt = {})
-      address = expand_addr(address)
-      if String === address
-        [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
-          value = opt[key] or next
-          Integer === value or
-            raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
-        end
-        [ :tcp_nodelay, :tcp_nopush ].each do |key|
-          (value = opt[key]).nil? and next
-          TrueClass === value || FalseClass === value or
-            raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
-        end
-        unless (value = opt[:delay]).nil?
-          Numeric === value or
-            raise ArgumentError, "not numeric: delay=#{value.inspect}"
-        end
-        set[:listener_opts][address].merge!(opt)
-      end
+  # sets the current number of worker_processes to +nr+.  Each worker
+  # process will serve exactly one client at a time.  You can
+  # increment or decrement this value at runtime by sending SIGTTIN
+  # or SIGTTOU respectively to the master process without reloading
+  # the rest of your Unicorn configuration.  See the SIGNALS document
+  # for more information.
+  def worker_processes(nr)
+    Integer === nr or raise ArgumentError,
+                           "not an integer: worker_processes=#{nr.inspect}"
+    nr >= 0 or raise ArgumentError,
+                           "not non-negative: worker_processes=#{nr.inspect}"
+    set[:worker_processes] = nr
+  end
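Since the worker count responds to SIGTTIN/SIGTTOU at runtime as noted above, a hedged sketch of nudging a running master from Ruby; the pid file path is hypothetical:

    master_pid = File.read("/path/to/unicorn.pid").to_i
    Process.kill(:TTIN, master_pid) # spawn one additional worker
    Process.kill(:TTOU, master_pid) # retire one worker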
 
-      set[:listeners] << address
-    end
+  # sets listeners to the given +addresses+, replacing or augmenting the
+  # current set.  This is for the global listener pool shared by all
+  # worker processes.  For per-worker listeners, see the after_fork example.
+  # This is for internal API use only; do not use it in your Unicorn
+  # config file.  Use listen instead.
+  def listeners(addresses) # :nodoc:
+    Array === addresses or addresses = Array(addresses)
+    addresses.map! { |addr| expand_addr(addr) }
+    set[:listeners] = addresses
+  end
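For the user-facing listen method documented below, a short hedged example of typical config-file usage combining a few of the options:

    # in a unicorn config file:
    listen "/tmp/unicorn.sock", :backlog => 64
    listen 8080, :tcp_nodelay => true, :tries => -1, :delay => 0.5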
 
-    # sets the +path+ for the PID file of the unicorn master process
-    def pid(path); set_path(:pid, path); end
-
-    # Enabling this preloads an application before forking worker
-    # processes.  This allows memory savings when using a
-    # copy-on-write-friendly GC but can cause bad things to happen when
-    # resources like sockets are opened at load time by the master
-    # process and shared by multiple children.  People enabling this are
-    # highly encouraged to look at the before_fork/after_fork hooks to
-    # properly close/reopen sockets.  Files opened for logging do not
-    # have to be reopened as (unbuffered-in-userspace) files opened with
-    # the File::APPEND flag are written to atomically on UNIX.
-    #
-    # In addition to reloading the unicorn-specific config settings,
-    # SIGHUP will reload application code in the working
-    # directory/symlink when workers are gracefully restarted when
-    # preload_app=false (the default).  As reloading the application
-    # sometimes requires RubyGems updates, +Gem.refresh+ is always
-    # called before the application is loaded (for RubyGems users).
-    #
-    # During deployments, care should _always_ be taken to ensure your
-    # applications are properly deployed and running.  Using
-    # preload_app=false (the default) means you _must_ check if
-    # your application is responding properly after a deployment.
-    # Improperly deployed applications can go into a spawn loop
-    # if the application fails to load.  While your children are
-    # in a spawn loop, it is possible to fix an application
-    # by properly deploying all required code and dependencies.
-    # Using preload_app=true means any application load error will
-    # cause the master process to exit with an error.
-
-    def preload_app(bool)
-      case bool
-      when TrueClass, FalseClass
-        set[:preload_app] = bool
-      else
-        raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
+  # adds an +address+ to the existing listener set.
+  #
+  # The following options may be specified (but are generally not needed):
+  #
+  # +:backlog+: this is the backlog of the listen() syscall.
+  #
+  # Some operating systems allow negative values here to specify the
+  # maximum allowable value.  In most cases, this number is only a
+  # recommendation and there are other OS-specific tunables and
+  # variables that can affect this number.  See the listen(2)
+  # syscall documentation of your OS for the exact semantics of
+  # this.
+  #
+  # If you are running unicorn on multiple machines, lowering this number
+  # can help your load balancer detect when a machine is overloaded
+  # and give requests to a different machine.
+  #
+  # Default: 1024
+  #
+  # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
+  #
+  # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
+  # can be set via the setsockopt(2) syscall.  Some kernels
+  # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
+  # there is no need (and it is sometimes detrimental) to specify them.
+  #
+  # See the socket API documentation of your operating system
+  # to determine the exact semantics of these settings and
+  # other operating system-specific knobs where they can be
+  # specified.
+  #
+  # Defaults: operating system defaults
+  #
+  # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
+  #
+  # This has no effect on UNIX sockets.
+  #
+  # Default: operating system defaults (usually Nagle's algorithm enabled)
+  #
+  # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
+  #
+  # This will prevent partial TCP frames from being sent out.
+  # Enabling +tcp_nopush+ is generally not needed or recommended as
+  # controlling +tcp_nodelay+ already provides sufficient latency
+  # reduction whereas Unicorn does not know when the best times are
+  # for flushing corked sockets.
+  #
+  # This has no effect on UNIX sockets.
+  #
+  # +:tries+: times to retry binding a socket if it is already in use
+  #
+  # A negative number indicates we will retry indefinitely; this is
+  # useful for migrations and upgrades when individual workers
+  # are binding to different ports.
+  #
+  # Default: 5
+  #
+  # +:delay+: seconds to wait between successive +tries+
+  #
+  # Default: 0.5 seconds
+  #
+  # +:umask+: sets the file mode creation mask for UNIX sockets
+  #
+  # Typically UNIX domain sockets are created with more liberal
+  # file permissions than the rest of the application.  By default,
+  # we create UNIX domain sockets to be readable and writable by
+  # all local users to give them the same accessibility as
+  # locally-bound TCP listeners.
+  #
+  # This has no effect on TCP listeners.
+  #
+  # Default: 0 (world read/writable)
+  #
+  # +:tcp_defer_accept+: defer accept() until data is ready (Linux-only)
+  #
+  # For Linux 2.6.32 and later, this is the number of retransmits to
+  # defer an accept() for if no data arrives, but the client will
+  # eventually be accepted after the specified number of retransmits
+  # regardless of whether data is ready.
+  #
+  # For Linux before 2.6.32, this is a boolean option, and
+  # accepts are _always_ deferred indefinitely if no data arrives.
+  # This is similar to <code>:accept_filter => "dataready"</code>
+  # under FreeBSD.
+  #
+  # Specifying +true+ is synonymous with the default value(s) below,
+  # and +false+ or +nil+ is synonymous with a value of zero.
+  #
+  # A value of +1+ is a good optimization for local networks
+  # and trusted clients.  For Rainbows! and Zbatery users, a higher
+  # value (e.g. +60+) provides more protection against some
+  # denial-of-service attacks.  There is no good reason to ever
+  # disable this with a +zero+ value when serving HTTP.
+  #
+  # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+
+  #
+  # +:accept_filter+: defer accept() until data is ready (FreeBSD-only)
+  #
+  # This enables either the "dataready" or (default) "httpready"
+  # accept() filter under FreeBSD.  This is intended as an
+  # optimization to reduce context switches with common GET/HEAD
+  # requests.  For Rainbows! and Zbatery users, this provides
+  # some protection against certain denial-of-service attacks, too.
+  #
+  # There is no good reason to change from the default.
+  #
+  # Default: "httpready"
+  def listen(address, opt = {})
+    address = expand_addr(address)
+    if String === address
+      [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
+        value = opt[key] or next
+        Integer === value or
+          raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
       end
+      [ :tcp_nodelay, :tcp_nopush ].each do |key|
+        (value = opt[key]).nil? and next
+        TrueClass === value || FalseClass === value or
+          raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
+      end
+      unless (value = opt[:delay]).nil?
+        Numeric === value or
+          raise ArgumentError, "not numeric: delay=#{value.inspect}"
+      end
+      set[:listener_opts][address].merge!(opt)
     end
 
-    # Allow redirecting $stderr to a given path.  Unlike doing this from
-    # the shell, this allows the unicorn process to know the path it's
-    # writing to and rotate the file if it is used for logging.  The
-    # file will be opened with the File::APPEND flag and writes
-    # synchronized to the kernel (but not necessarily to _disk_) so
-    # multiple processes can safely append to it.
-    #
-    # If you are daemonizing and using the default +logger+, it is important
-    # to specify this as errors will otherwise be lost to /dev/null.
-    # Some applications/libraries may also trigger warnings that go to
-    # stderr, and they will end up here.
-    def stderr_path(path)
-      set_path(:stderr_path, path)
+    set[:listeners] << address
+  end
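
A short sketch of typical listen usage in a unicorn config file
(addresses and option values here are illustrative, not defaults):

    listen 8080, :backlog => 64
    listen "127.0.0.1:9292", :tcp_nodelay => true
    listen "/var/run/unicorn.sock", :umask => 0077 # owner-only socket
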
+
+  # sets the +path+ for the PID file of the unicorn master process
+  def pid(path); set_path(:pid, path); end
+
+  # Enabling this preloads an application before forking worker
+  # processes.  This allows memory savings when using a
+  # copy-on-write-friendly GC but can cause bad things to happen when
+  # resources like sockets are opened at load time by the master
+  # process and shared by multiple children.  People enabling this are
+  # highly encouraged to look at the before_fork/after_fork hooks to
+  # properly close/reopen sockets.  Files opened for logging do not
+  # have to be reopened as (unbuffered-in-userspace) files opened with
+  # the File::APPEND flag are written to atomically on UNIX.
+  #
+  # In addition to reloading the unicorn-specific config settings,
+  # SIGHUP will reload application code in the working
+  # directory/symlink when workers are gracefully restarted when
+  # preload_app=false (the default).  As reloading the application
+  # sometimes requires RubyGems updates, +Gem.refresh+ is always
+  # called before the application is loaded (for RubyGems users).
+  #
+  # During deployments, care should _always_ be taken to ensure your
+  # applications are properly deployed and running.  Using
+  # preload_app=false (the default) means you _must_ check if
+  # your application is responding properly after a deployment.
+  # Improperly deployed applications can go into a spawn loop
+  # if the application fails to load.  While your children are
+  # in a spawn loop, it is possible to fix an application
+  # by properly deploying all required code and dependencies.
+  # Using preload_app=true means any application load error will
+  # cause the master process to exit with an error.
+  def preload_app(bool)
+    case bool
+    when TrueClass, FalseClass
+      set[:preload_app] = bool
+    else
+      raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
     end
+  end
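
A sketch of preload_app together with the hooks mentioned above,
assuming an ActiveRecord-style connection that must be closed before
forking and reopened afterwards (adapt to whatever resources your
application opens at load time):

    preload_app true
    before_fork do |server, worker|
      defined?(ActiveRecord::Base) and
        ActiveRecord::Base.connection.disconnect!
    end
    after_fork do |server, worker|
      defined?(ActiveRecord::Base) and
        ActiveRecord::Base.establish_connection
    end
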
+
+  # Allow redirecting $stderr to a given path.  Unlike doing this from
+  # the shell, this allows the unicorn process to know the path it's
+  # writing to and rotate the file if it is used for logging.  The
+  # file will be opened with the File::APPEND flag and writes
+  # synchronized to the kernel (but not necessarily to _disk_) so
+  # multiple processes can safely append to it.
+  #
+  # If you are daemonizing and using the default +logger+, it is important
+  # to specify this as errors will otherwise be lost to /dev/null.
+  # Some applications/libraries may also trigger warnings that go to
+  # stderr, and they will end up here.
+  def stderr_path(path)
+    set_path(:stderr_path, path)
+  end
+
+  # Same as stderr_path, except for $stdout.  Not many Rack applications
+  # write to $stdout, but any that do will have their output written here.
+  # It is safe to point this to the same location as stderr_path.
+  # Like stderr_path, this defaults to /dev/null when daemonized.
+  def stdout_path(path)
+    set_path(:stdout_path, path)
+  end
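
For example (paths are illustrative):

    stderr_path "/var/log/unicorn/error.log"
    stdout_path "/var/log/unicorn/out.log"
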
 
-    # Same as stderr_path, except for $stdout.  Not many Rack applications
-    # write to $stdout, but any that do will have their output written here.
-    # It is safe to point this to the same location as stderr_path.
-    # Like stderr_path, this defaults to /dev/null when daemonized.
-    def stdout_path(path)
-      set_path(:stdout_path, path)
+  # sets the working directory for Unicorn.  This ensures SIGUSR2 will
+  # start a new instance of Unicorn in this directory.  This may be
+  # a symlink, a common scenario for Capistrano users.  Unlike
+  # all other Unicorn configuration directives, this binds immediately
+  # for error checking and cannot be undone by unsetting it in the
+  # configuration file and reloading.
+  def working_directory(path)
+    # just let chdir raise errors
+    path = File.expand_path(path)
+    if config_file &&
+       config_file[0] != ?/ &&
+       ! File.readable?("#{path}/#{config_file}")
+      raise ArgumentError,
+            "config_file=#{config_file} would not be accessible in" \
+            " working_directory=#{path}"
     end
+    Dir.chdir(path)
+    Unicorn::HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
+  end
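
For example, with a Capistrano-style symlinked release (path
illustrative):

    working_directory "/var/www/app/current"
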
 
-    # sets the working directory for Unicorn.  This ensures SIGUSR2 will
-    # start a new instance of Unicorn in this directory.  This may be
-    # a symlink, a common scenario for Capistrano users.  Unlike
-    # all other Unicorn configuration directives, this binds immediately
-    # for error checking and cannot be undone by unsetting it in the
-    # configuration file and reloading.
-    def working_directory(path)
-      # just let chdir raise errors
-      path = File.expand_path(path)
-      if config_file &&
-         config_file[0] != ?/ &&
-         ! File.readable?("#{path}/#{config_file}")
-        raise ArgumentError,
-              "config_file=#{config_file} would not be accessible in" \
-              " working_directory=#{path}"
-      end
-      Dir.chdir(path)
-      HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
+  # Runs worker processes as the specified +user+ and +group+.
+  # The master process always stays running as the user who started it.
+  # This switch will occur after calling the after_fork hook, and only
+  # if the Worker#user method is not called in the after_fork hook
+  def user(user, group = nil)
+    # raises ArgumentError on invalid user/group
+    Etc.getpwnam(user)
+    Etc.getgrnam(group) if group
+    set[:user] = [ user, group ]
+  end
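
For example (names illustrative; both must already exist, since
Etc.getpwnam/Etc.getgrnam raise ArgumentError otherwise):

    user "www-data", "www-data"
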
+
+  # expands "unix:path/to/foo" to a socket relative to the current path
+  # expands pathnames of sockets if relative to "~" or "~username"
+  # expands "*:port and ":port" to "0.0.0.0:port"
+  def expand_addr(address) #:nodoc
+    return "0.0.0.0:#{address}" if Integer === address
+    return address unless String === address
+
+    case address
+    when %r{\Aunix:(.*)\z}
+      File.expand_path($1)
+    when %r{\A~}
+      File.expand_path(address)
+    when %r{\A(?:\*:)?(\d+)\z}
+      "0.0.0.0:#$1"
+    when %r{\A(.*):(\d+)\z}
+      # canonicalize the name
+      packed = Socket.pack_sockaddr_in($2.to_i, $1)
+      Socket.unpack_sockaddr_in(packed).reverse!.join(':')
+    else
+      address
     end
+  end
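
Illustrative expansions performed by this internal method (shown only
to clarify the rules above; the unix: form expands relative to the
current working directory):

    expand_addr("unix:tmp/app.sock") # => "/path/to/cwd/tmp/app.sock"
    expand_addr(8080)                # => "0.0.0.0:8080"
    expand_addr("*:8080")            # => "0.0.0.0:8080"
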
+
+private
 
-    # Runs worker processes as the specified +user+ and +group+.
-    # The master process always stays running as the user who started it.
-    # This switch will occur after calling the after_fork hook, and only
-    # if the Worker#user method is not called in the after_fork hook
-    def user(user, group = nil)
-      # raises ArgumentError on invalid user/group
-      Etc.getpwnam(user)
-      Etc.getgrnam(group) if group
-      set[:user] = [ user, group ]
+  def set_path(var, path) #:nodoc:
+    case path
+    when NilClass, String
+      set[var] = path
+    else
+      raise ArgumentError
     end
+  end
 
-    # expands "unix:path/to/foo" to a socket relative to the current path
-    # expands pathnames of sockets if relative to "~" or "~username"
-    # expands "*:port and ":port" to "0.0.0.0:port"
-    def expand_addr(address) #:nodoc
-      return "0.0.0.0:#{address}" if Integer === address
-      return address unless String === address
-
-      case address
-      when %r{\Aunix:(.*)\z}
-        File.expand_path($1)
-      when %r{\A~}
-        File.expand_path(address)
-      when %r{\A(?:\*:)?(\d+)\z}
-        "0.0.0.0:#$1"
-      when %r{\A(.*):(\d+)\z}
-        # canonicalize the name
-        packed = Socket.pack_sockaddr_in($2.to_i, $1)
-        Socket.unpack_sockaddr_in(packed).reverse!.join(':')
-      else
-        address
-      end
+  def set_hook(var, my_proc, req_arity = 2) #:nodoc:
+    case my_proc
+    when Proc
+      arity = my_proc.arity
+      (arity == req_arity) or \
+        raise ArgumentError,
+              "#{var}=#{my_proc.inspect} has invalid arity: " \
+              "#{arity} (need #{req_arity})"
+    when NilClass
+      my_proc = DEFAULTS[var]
+    else
+      raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
     end
+    set[var] = my_proc
+  end
 
-  private
+  # this is called _after_ working_directory is bound.  This only
+  # parses the embedded switches in .ru files
+  # (for "rackup" compatibility)
+  def parse_rackup_file # :nodoc:
+    ru = RACKUP[:file] or return # we only return here in unit tests
 
-    def set_path(var, path) #:nodoc:
-      case path
-      when NilClass, String
-        set[var] = path
-      else
-        raise ArgumentError
-      end
+    # :rails means use (old) Rails autodetect
+    if ru == :rails
+      File.readable?('config.ru') or return
+      ru = 'config.ru'
     end
 
-    def set_hook(var, my_proc, req_arity = 2) #:nodoc:
-      case my_proc
-      when Proc
-        arity = my_proc.arity
-        (arity == req_arity) or \
-          raise ArgumentError,
-                "#{var}=#{my_proc.inspect} has invalid arity: " \
-                "#{arity} (need #{req_arity})"
-      when NilClass
-        my_proc = DEFAULTS[var]
-      else
-        raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
-      end
-      set[var] = my_proc
-    end
+    File.readable?(ru) or
+      raise ArgumentError, "rackup file (#{ru}) not readable"
 
-    # this is called _after_ working_directory is bound.  This only
-    # parses the embedded switches in .ru files
-    # (for "rackup" compatibility)
-    def parse_rackup_file # :nodoc:
-      ru = RACKUP[:file] or return # we only return here in unit tests
+    # it could be a .rb file, too, we don't parse those manually
+    ru =~ /\.ru\z/ or return
 
-      # :rails means use (old) Rails autodetect
-      if ru == :rails
-        File.readable?('config.ru') or return
-        ru = 'config.ru'
-      end
+    /^#\\(.*)/ =~ File.read(ru) or return
+    RACKUP[:optparse].parse!($1.split(/\s+/))
 
-      File.readable?(ru) or
-        raise ArgumentError, "rackup file (#{ru}) not readable"
-
-      # it could be a .rb file, too, we don't parse those manually
-      ru =~ /\.ru\z/ or return
-
-      /^#\\(.*)/ =~ File.read(ru) or return
-      RACKUP[:optparse].parse!($1.split(/\s+/))
-
-      # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
-      host, port, set_listener, options, daemonize =
-                      eval("[ host, port, set_listener, options, daemonize ]",
-                           TOPLEVEL_BINDING)
-
-      # XXX duplicate code from bin/unicorn{,_rails}
-      set[:listeners] << "#{host}:#{port}" if set_listener
-
-      if daemonize
-        # unicorn_rails wants a default pid path, (not plain 'unicorn')
-        if after_reload
-          spid = set[:pid]
-          pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
-        end
-        unless RACKUP[:daemonized]
-          Unicorn::Launcher.daemonize!(options)
-          RACKUP[:ready_pipe] = options.delete(:ready_pipe)
-        end
+    if RACKUP[:daemonize]
+      # unicorn_rails wants a default pid path (not plain 'unicorn')
+      if after_reload
+        spid = set[:pid]
+        pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
+      end
+      unless RACKUP[:daemonized]
+        Unicorn::Launcher.daemonize!(RACKUP[:options])
+        RACKUP[:ready_pipe] = RACKUP[:options].delete(:ready_pipe)
       end
     end
-
   end
 end
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 907e9eb..e7908d6 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -1,36 +1,37 @@
 # -*- encoding: binary -*-
 
-module Unicorn
+# Frequently used constants when constructing requests or responses.
+# Many times the constant just refers to a string with the same
+# contents.  Using these constants gave about a 3% to 10% performance
+# improvement over using the strings directly.  Symbols did not really
+# improve things much compared to constants.
+module Unicorn::Const
 
-  # Frequently used constants when constructing requests or responses.  Many times
-  # the constant just refers to a string with the same contents.  Using these constants
-  # gave about a 3% to 10% performance improvement over using the strings directly.
-  # Symbols did not really improve things much compared to constants.
-  module Const
+  # The current version of Unicorn, currently 2.0.0pre3
+  UNICORN_VERSION = "2.0.0pre3"
 
-    # The current version of Unicorn, currently 1.0.2
-    UNICORN_VERSION="1.0.2"
+  # default TCP listen host address (0.0.0.0, all interfaces)
+  DEFAULT_HOST = "0.0.0.0"
 
-    DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
-    DEFAULT_PORT = 8080      # default TCP listen port
-    DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
+  # default TCP listen port (8080)
+  DEFAULT_PORT = 8080
 
-    # The basic max request size we'll try to read.
-    CHUNK_SIZE=(16 * 1024)
+  # default TCP listen address and port (0.0.0.0:8080)
+  DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
 
-    # Maximum request body size before it is moved out of memory and into a
-    # temporary file for reading (112 kilobytes).
-    MAX_BODY=1024 * 112
+  # The basic request body size we'll try to read at once (16 kilobytes).
+  CHUNK_SIZE = 16 * 1024
 
-    # common errors we'll send back
-    ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
-    ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
-    EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
+  # Maximum request body size before it is moved out of memory and into a
+  # temporary file for reading (112 kilobytes).
+  MAX_BODY = 1024 * 112
 
-    # A frozen format for this is about 15% faster
-    REMOTE_ADDR="REMOTE_ADDR".freeze
-    RACK_INPUT="rack.input".freeze
-    HTTP_EXPECT="HTTP_EXPECT"
-  end
+  # :stopdoc:
+  # common errors we'll send back
+  ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
+  ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
+  EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
 
+  HTTP_EXPECT = "HTTP_EXPECT"
+  # :startdoc:
 end
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index 65b09fa..2dcd839 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -1,72 +1,69 @@
 # -*- encoding: binary -*-
 
-require 'stringio'
 require 'unicorn_http'
 
-module Unicorn
-  class HttpRequest
+# TODO: remove redundant names
+Unicorn.const_set(:HttpRequest, Unicorn::HttpParser)
+class Unicorn::HttpParser
 
-    # default parameters we merge into the request env for Rack handlers
-    DEFAULTS = {
-      "rack.errors" => $stderr,
-      "rack.multiprocess" => true,
-      "rack.multithread" => false,
-      "rack.run_once" => false,
-      "rack.version" => [1, 1],
-      "SCRIPT_NAME" => "",
+  # default parameters we merge into the request env for Rack handlers
+  DEFAULTS = {
+    "rack.errors" => $stderr,
+    "rack.multiprocess" => true,
+    "rack.multithread" => false,
+    "rack.run_once" => false,
+    "rack.version" => [1, 1],
+    "SCRIPT_NAME" => "",
 
-      # this is not in the Rack spec, but some apps may rely on it
-      "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}"
-    }
+    # this is not in the Rack spec, but some apps may rely on it
+    "SERVER_SOFTWARE" => "Unicorn #{Unicorn::Const::UNICORN_VERSION}"
+  }
 
-    NULL_IO = StringIO.new("")
-    LOCALHOST = '127.0.0.1'
+  NULL_IO = StringIO.new("")
 
-    # Being explicitly single-threaded, we have certain advantages in
-    # not having to worry about variables being clobbered :)
-    BUF = ""
-    PARSER = HttpParser.new
-    REQ = {}
+  # :stopdoc:
+  # A frozen format for this is about 15% faster
+  REMOTE_ADDR = 'REMOTE_ADDR'.freeze
+  RACK_INPUT = 'rack.input'.freeze
+  TeeInput = Unicorn::TeeInput
+  # :startdoc:
 
-    # Does the majority of the IO processing.  It has been written in
-    # Ruby using about 8 different IO processing strategies.
-    #
-    # It is currently carefully constructed to make sure that it gets
-    # the best possible performance for the common case: GET requests
-    # that are fully complete after a single read(2)
-    #
-    # Anyone who thinks they can make it faster is more than welcome to
-    # take a crack at it.
-    #
-    # returns an environment hash suitable for Rack if successful
-    # This does minimal exception trapping and it is up to the caller
-    # to handle any socket errors (e.g. user aborted upload).
-    def read(socket)
-      REQ.clear
-      PARSER.reset
+  # Does the majority of the IO processing.  It has been written in
+  # Ruby using about 8 different IO processing strategies.
+  #
+  # It is currently carefully constructed to make sure that it gets
+  # the best possible performance for the common case: GET requests
+  # that are fully complete after a single read(2)
+  #
+  # Anyone who thinks they can make it faster is more than welcome to
+  # take a crack at it.
+  #
+  # returns an environment hash suitable for Rack if successful
+  # This does minimal exception trapping and it is up to the caller
+  # to handle any socket errors (e.g. user aborted upload).
+  def read(socket)
+    reset
+    e = env
 
-      # From http://www.ietf.org/rfc/rfc3875:
-      # "Script authors should be aware that the REMOTE_ADDR and
-      #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
-      #  may not identify the ultimate source of the request.  They
-      #  identify the client for the immediate request to the server;
-      #  that client may be a proxy, gateway, or other intermediary
-      #  acting on behalf of the actual source client."
-      REQ[Const::REMOTE_ADDR] =
-                    TCPSocket === socket ? socket.peeraddr.last : LOCALHOST
+    # From http://www.ietf.org/rfc/rfc3875:
+    # "Script authors should be aware that the REMOTE_ADDR and
+    #  REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
+    #  may not identify the ultimate source of the request.  They
+    #  identify the client for the immediate request to the server;
+    #  that client may be a proxy, gateway, or other intermediary
+    #  acting on behalf of the actual source client."
+    e[REMOTE_ADDR] = socket.kgio_addr
 
-      # short circuit the common case with small GET requests first
-      if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
-        # Parser is not done, queue up more data to read and continue parsing
-        # an Exception thrown from the PARSER will throw us out of the loop
-        begin
-          BUF << socket.readpartial(Const::CHUNK_SIZE)
-        end while PARSER.headers(REQ, BUF).nil?
-      end
-      REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ?
-                   NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF)
-      REQ.update(DEFAULTS)
+    # short circuit the common case with small GET requests first
+    socket.kgio_read!(16384, buf)
+    if parse.nil?
+      # Parser is not done, queue up more data to read and continue parsing
+      # an Exception thrown from the parser will throw us out of the loop
+      begin
+        buf << socket.kgio_read!(16384)
+      end while parse.nil?
     end
-
+    e[RACK_INPUT] = 0 == content_length ? NULL_IO : TeeInput.new(socket, self)
+    e.merge!(DEFAULTS)
   end
 end
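
A minimal sketch of how the server drives this internal API, assuming
client is a kgio-enabled socket returned by kgio_tryaccept (not
something applications call directly):

    request = Unicorn::HttpRequest.new
    env = request.read(client) # => Rack env Hash with "rack.input" set
    status, headers, body = app.call(env)
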
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 96e484b..5725e25 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -1,75 +1,50 @@
 # -*- encoding: binary -*-
-
 require 'time'
 
-module Unicorn
-  # Writes a Rack response to your client using the HTTP/1.1 specification.
-  # You use it by simply doing:
-  #
-  #   status, headers, body = rack_app.call(env)
-  #   HttpResponse.write(socket, [ status, headers, body ])
-  #
-  # Most header correctness (including Content-Length and Content-Type)
-  # is the job of Rack, with the exception of the "Connection: close"
-  # and "Date" headers.
-  #
-  # A design decision was made to force the client to not pipeline or
-  # keepalive requests.  HTTP/1.1 pipelining really kills the
-  # performance due to how it has to be handled and how unclear the
-  # standard is.  To fix this the HttpResponse always gives a
-  # "Connection: close" header which forces the client to close right
-  # away.  The bonus for this is that it gives a pretty nice speed boost
-  # to most clients since they can close their connection immediately.
-
-  class HttpResponse
-
-    # Every standard HTTP code mapped to the appropriate message.
-    CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
-      hash[code] = "#{code} #{msg}"
-      hash
-    }
-
-    # Rack does not set/require a Date: header.  We always override the
-    # Connection: and Date: headers no matter what (if anything) our
-    # Rack application sent us.
-    SKIP = { 'connection' => true, 'date' => true, 'status' => true }
-
-    # writes the rack_response to socket as an HTTP response
-    def self.write(socket, rack_response, have_header = true)
-      status, headers, body = rack_response
-
-      if have_header
-        status = CODES[status.to_i] || status
-        out = []
-
-        # Don't bother enforcing duplicate suppression, it's a Hash most of
-        # the time anyways so just hope our app knows what it's doing
-        headers.each do |key, value|
-          next if SKIP.include?(key.downcase)
-          if value =~ /\n/
-            # avoiding blank, key-only cookies with /\n+/
-            out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
-          else
-            out << "#{key}: #{value}\r\n"
-          end
+# Writes a Rack response to your client using the HTTP/1.1 specification.
+# You use it by simply doing:
+#
+#   status, headers, body = rack_app.call(env)
+#   http_response_write(socket, [ status, headers, body ])
+#
+# Most header correctness (including Content-Length and Content-Type)
+# is the job of Rack, with the exception of the "Date" and "Status" headers.
+#
+# TODO: allow keepalive
+module Unicorn::HttpResponse
+
+  # Every standard HTTP code mapped to the appropriate message.
+  CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
+    hash[code] = "#{code} #{msg}"
+    hash
+  }
+  CRLF = "\r\n"
+
+  # writes the rack_response to socket as an HTTP response
+  def http_response_write(socket, rack_response)
+    status, headers, body = rack_response
+    status = CODES[status.to_i] || status
+
+    if headers
+      buf = "HTTP/1.1 #{status}\r\n" \
+            "Date: #{Time.now.httpdate}\r\n" \
+            "Status: #{status}\r\n" \
+            "Connection: close\r\n"
+      headers.each do |key, value|
+        next if %r{\A(?:Date\z|Status\z|Connection\z)}i =~ key
+        if value =~ /\n/
+          # avoiding blank, key-only cookies with /\n+/
+          buf << value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }.join('')
+        else
+          buf << "#{key}: #{value}\r\n"
         end
-
-        # Rack should enforce Content-Length or chunked transfer encoding,
-        # so don't worry or care about them.
-        # Date is required by HTTP/1.1 as long as our clock can be trusted.
-        # Some broken clients require a "Status" header so we accommodate them
-        socket.write("HTTP/1.1 #{status}\r\n" \
-                     "Date: #{Time.now.httpdate}\r\n" \
-                     "Status: #{status}\r\n" \
-                     "Connection: close\r\n" \
-                     "#{out.join('')}\r\n")
       end
-
-      body.each { |chunk| socket.write(chunk) }
-      socket.close # flushes and uncorks the socket immediately
-      ensure
-        body.respond_to?(:close) and body.close
+      socket.write(buf << CRLF)
     end
 
+    body.each { |chunk| socket.write(chunk) }
+    socket.close # flushes and uncorks the socket immediately
+    ensure
+      body.respond_to?(:close) and body.close
   end
 end
diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb
new file mode 100644
index 0000000..69b7cc8
--- /dev/null
+++ b/lib/unicorn/http_server.rb
@@ -0,0 +1,695 @@
+# -*- encoding: binary -*-
+
+# This is the process manager of Unicorn.  It manages worker
+# processes, which in turn handle client I/O and run the application.
+# Listener sockets are started in the master process and shared with
+# forked worker children.
+class Unicorn::HttpServer
+  attr_accessor :app, :request, :timeout, :worker_processes,
+                :before_fork, :after_fork, :before_exec,
+                :listener_opts, :preload_app,
+                :reexec_pid, :orig_app, :init_listeners,
+                :master_pid, :config, :ready_pipe, :user
+  attr_reader :pid, :logger
+
+  # :stopdoc:
+  include Unicorn::SocketHelper
+  include Unicorn::HttpResponse
+
+  # backwards compatibility with 1.x
+  Worker = Unicorn::Worker
+
+  # prevents IO objects in here from being GC-ed
+  IO_PURGATORY = []
+
+  # all bound listener sockets
+  LISTENERS = []
+
+  # This hash maps PIDs to Workers
+  WORKERS = {}
+
+  # We use SELF_PIPE differently in the master and worker processes:
+  #
+  # * The master process never closes or reinitializes this once
+  # initialized.  Signal handlers in the master process will write to
+  # it to wake up the master from IO.select in exactly the same manner
+  # djb describes in http://cr.yp.to/docs/selfpipe.html
+  #
+  # * The workers immediately close the pipe they inherit from the
+  # master and replace it with a new pipe after forking.  This new
+  # pipe is also used to wakeup from IO.select from inside (worker)
+  # signal handlers.  However, workers *close* the pipe descriptors in
+  # the signal handlers to raise EBADF in IO.select instead of writing
+  # like we do in the master.  We cannot easily use the reader set for
+  # IO.select because LISTENERS is already that set, and it's extra
+  # work (and cycles) to distinguish the pipe FD from the reader set
+  # once IO.select returns.  So we're lazy and just close the pipe when
+  # a (rare) signal arrives in the worker and reinitialize the pipe later.
+  SELF_PIPE = []
+
+  # signal queue used for self-piping
+  SIG_QUEUE = []
+
+  # list of signals we care about and trap in master.
+  QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
+
+  # :startdoc:
+  # We populate this at startup so we can figure out how to reexecute
+  # and upgrade the currently running instance of Unicorn.
+  # This Hash is considered a stable interface and changing its contents
+  # will allow you to switch between different installations of Unicorn
+  # or even different installations of the same applications without
+  # downtime.  Keys of this constant Hash are described as follows:
+  #
+  # * 0 - the path to the unicorn/unicorn_rails executable
+  # * :argv - a deep copy of the ARGV array the executable originally saw
+  # * :cwd - the working directory of the application, this is where
+  # you originally started Unicorn.
+  #
+  # To change your unicorn executable to a different path without downtime,
+  # you can set the following in your Unicorn config file, HUP and then
+  # continue with the traditional USR2 + QUIT upgrade steps:
+  #
+  #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
+  START_CTX = {
+    :argv => ARGV.map { |arg| arg.dup },
+    :cwd => lambda {
+        # favor ENV['PWD'] since it is (usually) symlink aware for
+        # Capistrano and like systems
+        begin
+          a = File.stat(pwd = ENV['PWD'])
+          b = File.stat(Dir.pwd)
+          a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
+        rescue
+          Dir.pwd
+        end
+      }.call,
+    0 => $0.dup,
+  }
+
+  # Creates a working server on host:port (strange things happen if
+  # port isn't a Number).  Use HttpServer#start to start the server and
+  # HttpServer#join to join the thread that's processing
+  # incoming requests on the socket.
+  def initialize(app, options = {})
+    @app = app
+    @request = Unicorn::HttpRequest.new
+    self.reexec_pid = 0
+    options = options.dup
+    self.ready_pipe = options.delete(:ready_pipe)
+    self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+    options[:use_defaults] = true
+    self.config = Unicorn::Configurator.new(options)
+    self.listener_opts = {}
+
+    # we try inheriting listeners first, so we bind them later.
+    # we don't write the pid file until we've bound listeners in case
+    # unicorn was started twice by mistake.  Even though our #pid= method
+    # checks for stale/existing pid files, race conditions are still
+    # possible (and difficult/non-portable to avoid) and can be likely
+    # to clobber the pid if the second start was in quick succession
+    # after the first, so we rely on the listener binding to fail in
+    # that case.  Some tests (in and outside of this source tree) and
+    # monitoring tools may also rely on pid files existing before we
+    # attempt to connect to the listener(s)
+    config.commit!(self, :skip => [:listeners, :pid])
+    self.orig_app = app
+  end
+
+  # Runs the thing.  Returns self so you can run join on it
+  def start
+    BasicSocket.do_not_reverse_lookup = true
+
+    # inherit sockets from parents, they need to be plain Socket objects
+    # before they become Kgio::UNIXServer or Kgio::TCPServer
+    inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
+      io = Socket.for_fd(fd.to_i)
+      set_server_sockopt(io, listener_opts[sock_name(io)])
+      IO_PURGATORY << io
+      logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
+      server_cast(io)
+    end
+
+    config_listeners = config[:listeners].dup
+    LISTENERS.replace(inherited)
+
+    # we start out with generic Socket objects that get cast to either
+    # Kgio::TCPServer or Kgio::UNIXServer objects; but since the Socket
+    # objects share the same OS-level file descriptor as the higher-level
+    # *Server objects; we need to prevent Socket objects from being
+    # garbage-collected
+    config_listeners -= listener_names
+    if config_listeners.empty? && LISTENERS.empty?
+      config_listeners << Unicorn::Const::DEFAULT_LISTEN
+      init_listeners << Unicorn::Const::DEFAULT_LISTEN
+      START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
+    end
+    config_listeners.each { |addr| listen(addr) }
+    raise ArgumentError, "no listeners" if LISTENERS.empty?
+
+    # this pipe is used to wake us up from select(2) in #join when signals
+    # are trapped.  See the signal handlers installed below.
+    init_self_pipe!
+
+    # set up signal handlers before writing pid file in case people get
+    # trigger happy and send signals as soon as the pid file exists.
+    # Note that signals don't actually get handled until the #join method
+    QUEUE_SIGS.each { |sig| trap(sig) { SIG_QUEUE << sig; awaken_master } }
+    trap(:CHLD) { awaken_master }
+    self.pid = config[:pid]
+
+    self.master_pid = $$
+    build_app! if preload_app
+    maintain_worker_count
+    self
+  end
+
+  # replaces current listener set with +listeners+.  This will
+  # close the socket if it will not exist in the new listener set
+  def listeners=(listeners)
+    cur_names, dead_names = [], []
+    listener_names.each do |name|
+      if ?/ == name[0]
+        # mark unlinked sockets as dead so we can rebind them
+        (File.socket?(name) ? cur_names : dead_names) << name
+      else
+        cur_names << name
+      end
+    end
+    set_names = listener_names(listeners)
+    dead_names.concat(cur_names - set_names).uniq!
+
+    LISTENERS.delete_if do |io|
+      if dead_names.include?(sock_name(io))
+        IO_PURGATORY.delete_if do |pio|
+          pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
+        end
+        (io.close rescue nil).nil? # true
+      else
+        set_server_sockopt(io, listener_opts[sock_name(io)])
+        false
+      end
+    end
+
+    (set_names - cur_names).each { |addr| listen(addr) }
+  end
+
+  def stdout_path=(path); redirect_io($stdout, path); end
+  def stderr_path=(path); redirect_io($stderr, path); end
+
+  def logger=(obj)
+    Unicorn::HttpRequest::DEFAULTS["rack.logger"] = @logger = obj
+  end
+
+  # sets the path for the PID file of the master process
+  def pid=(path)
+    if path
+      if x = valid_pid?(path)
+        return path if pid && path == pid && x == $$
+        if x == reexec_pid && pid =~ /\.oldbin\z/
+          logger.warn("will not set pid=#{path} while reexec-ed "\
+                      "child is running PID:#{x}")
+          return
+        end
+        raise ArgumentError, "Already running on PID:#{x} " \
+                             "(or pid=#{path} is stale)"
+      end
+    end
+    unlink_pid_safe(pid) if pid
+
+    if path
+      fp = begin
+        tmp = "#{File.dirname(path)}/#{rand}.#$$"
+        File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
+      rescue Errno::EEXIST
+        retry
+      end
+      fp.syswrite("#$$\n")
+      File.rename(fp.path, path)
+      fp.close
+    end
+    @pid = path
+  end
+
+  # add a given address to the +listeners+ set, idempotently
+  # Allows workers to add a private, per-process listener via the
+  # after_fork hook.  Very useful for debugging and testing.
+  # +:tries+ may be specified as an option for the number of times
+  # to retry, and +:delay+ may be specified as the time in seconds
+  # to delay between retries.
+  # A negative value for +:tries+ indicates the listen will be
+  # retried indefinitely; this is useful when workers belonging to
+  # different masters are spawned during a transparent upgrade.
+  def listen(address, opt = {}.merge(listener_opts[address] || {}))
+    address = config.expand_addr(address)
+    return if String === address && listener_names.include?(address)
+
+    delay = opt[:delay] || 0.5
+    tries = opt[:tries] || 5
+    begin
+      io = bind_listen(address, opt)
+      unless Kgio::TCPServer === io || Kgio::UNIXServer === io
+        IO_PURGATORY << io
+        io = server_cast(io)
+      end
+      logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
+      LISTENERS << io
+      io
+    rescue Errno::EADDRINUSE => err
+      logger.error "adding listener failed addr=#{address} (in use)"
+      raise err if tries == 0
+      tries -= 1
+      logger.error "retrying in #{delay} seconds " \
+                   "(#{tries < 0 ? 'infinite' : tries} tries left)"
+      sleep(delay)
+      retry
+    rescue => err
+      logger.fatal "error adding listener addr=#{address}"
+      raise err
+    end
+  end
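
For example, the per-worker listener pattern mentioned above, from an
after_fork hook in a unicorn config file (the port scheme is
illustrative):

    after_fork do |server, worker|
      # give each worker a private port; retry forever since workers
      # of an old master may still hold the port during an upgrade
      server.listen("127.0.0.1:#{9293 + worker.nr}", :tries => -1)
    end
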
+
+  # monitors children and receives signals forever
+  # (or until a termination signal is sent).  This handles signals
+  # one at a time and we'll happily drop signals in case somebody
+  # is signalling us too often.
+  def join
+    respawn = true
+    last_check = Time.now
+
+    proc_name 'master'
+    logger.info "master process ready" # test_exec.rb relies on this message
+    if ready_pipe
+      ready_pipe.syswrite($$.to_s)
+      ready_pipe.close rescue nil
+      self.ready_pipe = nil
+    end
+    begin
+      reap_all_workers
+      case SIG_QUEUE.shift
+      when nil
+        # avoid murdering workers after our master process (or the
+        # machine) comes out of suspend/hibernation
+        if (last_check + @timeout) >= (last_check = Time.now)
+          sleep_time = murder_lazy_workers
+        else
+          # wait for workers to wakeup on suspend
+          sleep_time = @timeout/2.0 + 1
+        end
+        maintain_worker_count if respawn
+        master_sleep(sleep_time)
+      when :QUIT # graceful shutdown
+        break
+      when :TERM, :INT # immediate shutdown
+        stop(false)
+        break
+      when :USR1 # rotate logs
+        logger.info "master reopening logs..."
+        Unicorn::Util.reopen_logs
+        logger.info "master done reopening logs"
+        kill_each_worker(:USR1)
+      when :USR2 # exec binary, stay alive in case something went wrong
+        reexec
+      when :WINCH
+        if Process.ppid == 1 || Process.getpgrp != $$
+          respawn = false
+          logger.info "gracefully stopping all workers"
+          kill_each_worker(:QUIT)
+          self.worker_processes = 0
+        else
+          logger.info "SIGWINCH ignored because we're not daemonized"
+        end
+      when :TTIN
+        respawn = true
+        self.worker_processes += 1
+      when :TTOU
+        self.worker_processes -= 1 if self.worker_processes > 0
+      when :HUP
+        respawn = true
+        if config.config_file
+          load_config!
+        else # exec binary and exit if there's no config file
+          logger.info "config_file not present, reexecuting binary"
+          reexec
+        end
+      end
+    rescue Errno::EINTR
+    rescue => e
+      logger.error "Unhandled master loop exception #{e.inspect}."
+      logger.error e.backtrace.join("\n")
+    end while true
+    stop # gracefully shutdown all workers on our way out
+    logger.info "master complete"
+    unlink_pid_safe(pid) if pid
+  end
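
A sketch of driving the signal handling above from another process,
using the PID file path from your configuration (illustrative):

    pid = File.read("/var/run/unicorn.pid").to_i
    Process.kill(:TTIN, pid) # add a worker
    Process.kill(:TTOU, pid) # remove a worker
    Process.kill(:USR1, pid) # reopen logs
    Process.kill(:QUIT, pid) # graceful shutdown
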
+
+  # Terminates all workers, but does not exit master process
+  def stop(graceful = true)
+    self.listeners = []
+    limit = Time.now + timeout
+    until WORKERS.empty? || Time.now > limit
+      kill_each_worker(graceful ? :QUIT : :TERM)
+      sleep(0.1)
+      reap_all_workers
+    end
+    kill_each_worker(:KILL)
+  end
+
+  private
+
+  # wait for a signal handler to wake us up and then consume the pipe
+  def master_sleep(sec)
+    IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
+    SELF_PIPE[0].kgio_tryread(11)
+  end
+
+  def awaken_master
+    SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
+  end
+
+  # reaps all unreaped workers
+  def reap_all_workers
+    begin
+      wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+      wpid or return
+      if reexec_pid == wpid
+        logger.error "reaped #{status.inspect} exec()-ed"
+        self.reexec_pid = 0
+        self.pid = pid.chomp('.oldbin') if pid
+        proc_name 'master'
+      else
+        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+        m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
+        status.success? ? logger.info(m) : logger.error(m)
+      end
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  # reexecutes the START_CTX with a new binary
+  def reexec
+    if reexec_pid > 0
+      begin
+        Process.kill(0, reexec_pid)
+        logger.error "reexec-ed child already running PID:#{reexec_pid}"
+        return
+      rescue Errno::ESRCH
+        self.reexec_pid = 0
+      end
+    end
+
+    if pid
+      old_pid = "#{pid}.oldbin"
+      prev_pid = pid.dup
+      begin
+        self.pid = old_pid  # clear the path for a new pid file
+      rescue ArgumentError
+        logger.error "old PID:#{valid_pid?(old_pid)} running with " \
+                     "existing pid=#{old_pid}, refusing reexec"
+        return
+      rescue => e
+        logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
+        return
+      end
+    end
+
+    self.reexec_pid = fork do
+      listener_fds = LISTENERS.map { |sock| sock.fileno }
+      ENV['UNICORN_FD'] = listener_fds.join(',')
+      Dir.chdir(START_CTX[:cwd])
+      cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
+
+      # avoid leaking FDs we don't know about, but let before_exec
+      # unset FD_CLOEXEC, if anything else in the app eventually
+      # relies on FD inheritance.
+      (3..1024).each do |io|
+        next if listener_fds.include?(io)
+        io = IO.for_fd(io) rescue next
+        IO_PURGATORY << io
+        io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+      end
+      logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
+      before_exec.call(self)
+      exec(*cmd)
+    end
+    proc_name 'master (old)'
+  end
+
+  # forcibly terminate all workers that haven't checked in within timeout
+  # seconds.  The timeout is implemented using an unlinked File
+  # shared between the parent process and each worker.  The worker
+  # runs File#chmod to modify the ctime of the File.  If the ctime
+  # is stale for >timeout seconds, then we'll kill the corresponding
+  # worker.
+  def murder_lazy_workers
+    t = @timeout
+    next_sleep = 1
+    WORKERS.dup.each_pair do |wpid, worker|
+      stat = worker.tmp.stat
+      # skip workers that disable fchmod or have never fchmod-ed
+      stat.mode == 0100600 and next
+      diff = Time.now - stat.ctime
+      if diff <= t
+        tmp = t - diff
+        next_sleep < tmp and next_sleep = tmp
+        next
+      end
+      logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
+                   "(#{diff}s > #{t}s), killing"
+      kill_worker(:KILL, wpid) # take no prisoners for timeout violations
+    end
+    next_sleep
+  end
+
+  def spawn_missing_workers
+    (0...worker_processes).each do |worker_nr|
+      WORKERS.values.include?(worker_nr) and next
+      worker = Worker.new(worker_nr, Unicorn::TmpIO.new)
+      before_fork.call(self, worker)
+      WORKERS[fork {
+        ready_pipe.close if ready_pipe
+        self.ready_pipe = nil
+        worker_loop(worker)
+      }] = worker
+    end
+  end
+
+  def maintain_worker_count
+    (off = WORKERS.size - worker_processes) == 0 and return
+    off < 0 and return spawn_missing_workers
+    WORKERS.dup.each_pair { |wpid,w|
+      w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
+    }
+  end
+
+  # if we get any error, try to write something back to the client
+  # assuming we haven't closed the socket, but don't get hung up
+  # if the socket is already closed or broken.  We'll always ensure
+  # the socket is closed at the end of this function
+  def handle_error(client, e)
+    msg = case e
+    when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+      Unicorn::Const::ERROR_500_RESPONSE
+    when HttpParserError # try to tell the client they're bad
+      Unicorn::Const::ERROR_400_RESPONSE
+    else
+      logger.error "Read error: #{e.inspect}"
+      logger.error e.backtrace.join("\n")
+      Unicorn::Const::ERROR_500_RESPONSE
+    end
+    client.kgio_trywrite(msg)
+    client.close
+    rescue
+      nil
+  end
+
+  # once a client is accepted, it is processed in its entirety here
+  # in 3 easy steps: read request, call app, write app response
+  def process_client(client)
+    r = @app.call(env = @request.read(client))
+
+    if 100 == r[0].to_i
+      client.write(Unicorn::Const::EXPECT_100_RESPONSE)
+      env.delete(Unicorn::Const::HTTP_EXPECT)
+      r = @app.call(env)
+    end
+    # r may be frozen or const, so don't modify it
+    @request.headers? or r = [ r[0], nil, r[2] ]
+    http_response_write(client, r)
+  rescue => e
+    handle_error(client, e)
+  end
+
+  # gets rid of stuff the worker has no business keeping track of
+  # to free some resources and drops all sig handlers.
+  # traps for USR1, USR2, and HUP may be set in the after_fork Proc
+  # by the user.
+  def init_worker_process(worker)
+    QUEUE_SIGS.each { |sig| trap(sig, nil) }
+    trap(:CHLD, 'DEFAULT')
+    SIG_QUEUE.clear
+    proc_name "worker[#{worker.nr}]"
+    START_CTX.clear
+    init_self_pipe!
+    WORKERS.values.each { |other| other.tmp.close rescue nil }
+    WORKERS.clear
+    LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    after_fork.call(self, worker) # can drop perms
+    worker.user(*user) if user.kind_of?(Array) && ! worker.switched
+    self.timeout /= 2.0 # halve it for select()
+    build_app! unless preload_app
+  end
+
+  def reopen_worker_logs(worker_nr)
+    logger.info "worker=#{worker_nr} reopening logs..."
+    Unicorn::Util.reopen_logs
+    logger.info "worker=#{worker_nr} done reopening logs"
+    init_self_pipe!
+  end
+
+  # runs inside each forked worker, this sits around and waits
+  # for connections and doesn't die until the parent dies (or is
+  # given an INT, QUIT, or TERM signal)
+  def worker_loop(worker)
+    ppid = master_pid
+    init_worker_process(worker)
+    nr = 0 # this becomes negative if we need to reopen logs
+    alive = worker.tmp # tmp is our lifeline to the master process
+    ready = LISTENERS
+
+    # closing anything we IO.select on will raise EBADF
+    trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
+    trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
+    [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+    logger.info "worker=#{worker.nr} ready"
+    m = 0
+
+    begin
+      nr < 0 and reopen_worker_logs(worker.nr)
+      nr = 0
+
+      # we're a goner in timeout seconds anyways if alive.chmod
+      # breaks, so don't trap the exception.  Using fchmod() since
+      # futimes() is not available in base Ruby and I very strongly
+      # prefer temporary files to be unlinked for security,
+      # performance and reliability reasons, so utime is out.  No-op
+      # changes with chmod doesn't update ctime on all filesystems; so
+      # we change our counter each and every time (after process_client
+      # and before IO.select).
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      ready.each do |sock|
+        if client = sock.kgio_tryaccept
+          process_client(client)
+          nr += 1
+          alive.chmod(m = 0 == m ? 1 : 0)
+        end
+        break if nr < 0
+      end
+
+      # make the following bet: if we accepted clients this round,
+      # we're probably reasonably busy, so avoid calling select()
+      # and do a speculative non-blocking accept() on ready listeners
+      # before we sleep again in select().
+      redo unless nr == 0 # (nr < 0) => reopen logs
+
+      ppid == Process.ppid or return
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      # timeout used so we can detect parent death:
+      ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) and ready = ret[0]
+    rescue Errno::EINTR
+      ready = LISTENERS
+    rescue Errno::EBADF
+      nr < 0 or return
+    rescue => e
+      if alive
+        logger.error "Unhandled listen loop exception #{e.inspect}."
+        logger.error e.backtrace.join("\n")
+      end
+    end while alive
+  end
+
+  # delivers a signal to a worker and fails gracefully if the worker
+  # is no longer running.
+  def kill_worker(signal, wpid)
+    Process.kill(signal, wpid)
+    rescue Errno::ESRCH
+      worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+  end
+
+  # delivers a signal to each worker
+  def kill_each_worker(signal)
+    WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
+  end
+
+  # unlinks a PID file at given +path+ if it contains the current PID.
+  # Still potentially racy without locking the directory (which is
+  # non-portable and may interact badly with other programs), but the
+  # window for hitting the race condition is small
+  def unlink_pid_safe(path)
+    (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
+  end
+
+  # returns a PID if a given path contains a non-stale PID file,
+  # nil otherwise.
+  def valid_pid?(path)
+    wpid = File.read(path).to_i
+    wpid <= 0 and return
+    Process.kill(0, wpid)
+    wpid
+    rescue Errno::ESRCH, Errno::ENOENT
+      # don't unlink stale pid files, racy without non-portable locking...
+  end
+
+  def load_config!
+    loaded_app = app
+    logger.info "reloading config_file=#{config.config_file}"
+    config[:listeners].replace(init_listeners)
+    config.reload
+    config.commit!(self)
+    kill_each_worker(:QUIT)
+    Unicorn::Util.reopen_logs
+    self.app = orig_app
+    build_app! if preload_app
+    logger.info "done reloading config_file=#{config.config_file}"
+  rescue StandardError, LoadError, SyntaxError => e
+    logger.error "error reloading config_file=#{config.config_file}: " \
+                 "#{e.class} #{e.message} #{e.backtrace}"
+    self.app = loaded_app
+  end
+
+  # returns an array of string names for the given listener array
+  def listener_names(listeners = LISTENERS)
+    listeners.map { |io| sock_name(io) }
+  end
+
+  def build_app!
+    if app.respond_to?(:arity) && app.arity == 0
+      if defined?(Gem) && Gem.respond_to?(:refresh)
+        logger.info "Refreshing Gem list"
+        Gem.refresh
+      end
+      self.app = app.call
+    end
+  end
+
+  def proc_name(tag)
+    $0 = ([ File.basename(START_CTX[0]), tag
+          ]).concat(START_CTX[:argv]).join(' ')
+  end
+
+  def redirect_io(io, path)
+    File.open(path, 'ab') { |fp| io.reopen(fp) } if path
+    io.sync = true
+  end
+
+  def init_self_pipe!
+    SELF_PIPE.each { |io| io.close rescue nil }
+    SELF_PIPE.replace(Kgio::Pipe.new)
+    SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+  end
+end
+
diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb
index 0d415dd..662b603 100644
--- a/lib/unicorn/launcher.rb
+++ b/lib/unicorn/launcher.rb
@@ -20,6 +20,7 @@ module Unicorn::Launcher
   #     to pickup code changes if the original deployment directory
   #     is a symlink or otherwise got replaced.
   def self.daemonize!(options)
+    cfg = Unicorn::Configurator
     $stdin.reopen("/dev/null")
 
     # We only start a new process group if we're not being reexecuted
@@ -52,9 +53,9 @@ module Unicorn::Launcher
       end
     end
     # $stdout/$stderr can/will be redirected separately in the Unicorn config
-    Unicorn::Configurator::DEFAULTS[:stderr_path] ||= "/dev/null"
-    Unicorn::Configurator::DEFAULTS[:stdout_path] ||= "/dev/null"
-    Unicorn::Configurator::RACKUP[:daemonized] = true
+    cfg::DEFAULTS[:stderr_path] ||= "/dev/null"
+    cfg::DEFAULTS[:stdout_path] ||= "/dev/null"
+    cfg::RACKUP[:daemonized] = true
   end
 
 end
diff --git a/lib/unicorn/preread_input.rb b/lib/unicorn/preread_input.rb
new file mode 100644
index 0000000..ec83cb2
--- /dev/null
+++ b/lib/unicorn/preread_input.rb
@@ -0,0 +1,30 @@
+# -*- encoding: binary -*-
+
+module Unicorn
+# This middleware is used to ensure input is buffered to memory
+# or disk (depending on size) before the application is dispatched
+# by entirely consuming it (from TeeInput) beforehand.
+#
+# Usage (in config.ru):
+#
+#     require 'unicorn/preread_input'
+#     if defined?(Unicorn)
+#       use Unicorn::PrereadInput
+#     end
+#     run YourApp.new
+class PrereadInput
+  def initialize(app)
+    @app = app
+  end
+
+  def call(env)
+    buf = ""
+    input = env["rack.input"]
+    if buf = input.read(16384)
+      true while input.read(16384, buf)
+      input.rewind
+    end
+    @app.call(env)
+  end
+end
+end
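The read loop in PrereadInput#call is what forces TeeInput to drain the socket: read(16384) grabs the first chunk, then read(16384, buf) reuses the same buffer until nil signals EOF. The same loop can be exercised against a plain StringIO, a sketch only (StringIO stands in for env["rack.input"]):

    require 'stringio'
    input = StringIO.new("x" * 40000)
    if buf = input.read(16384)
      true while input.read(16384, buf) # reuse buf to avoid garbage
      input.rewind
    end
    input.read(4) # => "xxxx"; the body is fully buffered and rewound
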
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index 9a4266d..7364937 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -1,11 +1,28 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
 require 'socket'
 
 module Unicorn
   module SocketHelper
     include Socket::Constants
 
+    # :stopdoc:
+    # internal interface, only used by Rainbows!/Zbatery
+    DEFAULTS = {
+      # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+
+      # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9
+      # This change shouldn't affect Unicorn users behind nginx (a
+      # value of 1 remains an optimization), but Rainbows! users may
+      # want to use a higher value on Linux 2.6.32+ to protect against
+      # denial-of-service attacks
+      :tcp_defer_accept => 1,
+
+      # On FreeBSD, we need to override this to 'dataready' when we
+      # eventually get HTTPS support
+      :accept_filter => 'httpready',
+    }
+    #:startdoc:
+
     # configure platform-specific options (only tested on Linux 2.6 so far)
     case RUBY_PLATFORM
     when /linux/
@@ -14,22 +31,13 @@ module Unicorn
 
       # do not send out partial frames (Linux)
       TCP_CORK = 3 unless defined?(TCP_CORK)
-    when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
-      # Do nothing for httpready, just closing a bug when freebsd <= 5.4
-      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc:
     when /freebsd/
       # do not send out partial frames (FreeBSD)
       TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
 
-      # Use the HTTP accept filter if available.
-      # The struct made by pack() is defined in /usr/include/sys/socket.h
-      # as accept_filter_arg
-      unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
-        # set set the "httpready" accept filter in FreeBSD if available
-        # if other protocols are to be supported, this may be
-        # String#replace-d with "dataready" arguments instead
-        FILTER_ARG = ['httpready', nil].pack('a16a240')
-      end
+      def accf_arg(af_name)
+        [ af_name, nil ].pack('a16a240')
+      end if defined?(SO_ACCEPTFILTER)
     end
 
     def set_tcp_sockopt(sock, opt)
@@ -49,10 +57,25 @@ module Unicorn
       end
 
       # No good reason to ever have deferred accepts off
+      # (except maybe benchmarking)
       if defined?(TCP_DEFER_ACCEPT)
-        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1)
-      elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG)
-        sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG)
+        # this differs from nginx, since nginx doesn't allow us to
+        # configure the timeout...
+        tmp = DEFAULTS.merge(opt)
+        seconds = tmp[:tcp_defer_accept]
+        seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
+        seconds = 0 unless seconds # nil/false means disable this
+        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
+      elsif respond_to?(:accf_arg)
+        tmp = DEFAULTS.merge(opt)
+        if name = tmp[:accept_filter]
+          begin
+            sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+          rescue => e
+            logger.error("#{sock_name(sock)} " \
+                         "failed to set accept_filter=#{name} (#{e.inspect})")
+          end
+        end
       end
     end
 
@@ -69,14 +92,11 @@ module Unicorn
       end
       sock.listen(opt[:backlog] || 1024)
       rescue => e
-        if respond_to?(:logger)
-          logger.error "error setting socket options: #{e.inspect}"
-          logger.error e.backtrace.join("\n")
-        end
+        logger.error "error setting socket options: #{e.inspect}"
+        logger.error e.backtrace.join("\n")
     end
 
     def log_buffer_sizes(sock, pfx = '')
-      respond_to?(:logger) or return
       rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
       sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
       logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
@@ -91,10 +111,14 @@ module Unicorn
       sock = if address[0] == ?/
         if File.exist?(address)
           if File.socket?(address)
-            if self.respond_to?(:logger)
+            begin
+              UNIXSocket.new(address).close
+              # fall through, try to bind(2) and fail with EADDRINUSE
+              # (or succeed from a small race condition we can't sanely avoid).
+            rescue Errno::ECONNREFUSED
               logger.info "unlinking existing socket=#{address}"
+              File.unlink(address)
             end
-            File.unlink(address)
           else
             raise ArgumentError,
                   "socket=#{address} specified but it is not a socket!"
@@ -102,12 +126,12 @@ module Unicorn
         end
         old_umask = File.umask(opt[:umask] || 0)
         begin
-          UNIXServer.new(address)
+          Kgio::UNIXServer.new(address)
         ensure
           File.umask(old_umask)
         end
       elsif address =~ /^(\d+\.\d+\.\d+\.\d+):(\d+)$/
-        TCPServer.new($1, $2.to_i)
+        Kgio::TCPServer.new($1, $2.to_i)
       else
         raise ArgumentError, "Don't know how to bind: #{address}"
       end
@@ -142,9 +166,9 @@ module Unicorn
     def server_cast(sock)
       begin
         Socket.unpack_sockaddr_in(sock.getsockname)
-        TCPServer.for_fd(sock.fileno)
+        Kgio::TCPServer.for_fd(sock.fileno)
       rescue ArgumentError
-        UNIXServer.for_fd(sock.fileno)
+        Kgio::UNIXServer.for_fd(sock.fileno)
       end
     end
 
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
index 563747c..a3e01d2 100644
--- a/lib/unicorn/tee_input.rb
+++ b/lib/unicorn/tee_input.rb
@@ -1,224 +1,224 @@
 # -*- encoding: binary -*-
 
-module Unicorn
-
-  # acts like tee(1) on an input input to provide a input-like stream
-  # while providing rewindable semantics through a File/StringIO backing
-  # store.  On the first pass, the input is only read on demand so your
-  # Rack application can use input notification (upload progress and
-  # like).  This should fully conform to the Rack::Lint::InputWrapper
-  # specification on the public API.  This class is intended to be a
-  # strict interpretation of Rack::Lint::InputWrapper functionality and
-  # will not support any deviations from it.
-  #
-  # When processing uploads, Unicorn exposes a TeeInput object under
-  # "rack.input" of the Rack environment.
-  class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2)
-
-    # Initializes a new TeeInput object.  You normally do not have to call
-    # this unless you are writing an HTTP server.
-    def initialize(*args)
-      super(*args)
-      self.len = parser.content_length
-      self.tmp = len && len < Const::MAX_BODY ? StringIO.new("") : Util.tmpio
-      self.buf2 = ""
-      if buf.size > 0
-        parser.filter_body(buf2, buf) and finalize_input
-        tmp.write(buf2)
-        tmp.seek(0)
-      end
+# acts like tee(1) on an input to provide an input-like stream
+# while providing rewindable semantics through a File/StringIO backing
+# store.  On the first pass, the input is only read on demand so your
+# Rack application can use input notification (upload progress and
+# the like).  This should fully conform to the Rack::Lint::InputWrapper
+# specification on the public API.  This class is intended to be a
+# strict interpretation of Rack::Lint::InputWrapper functionality and
+# will not support any deviations from it.
+#
+# When processing uploads, Unicorn exposes a TeeInput object under
+# "rack.input" of the Rack environment.
+class Unicorn::TeeInput
+  attr_accessor :tmp, :socket, :parser, :env, :buf, :len, :buf2
+
+  # The maximum size (in +bytes+) to buffer in memory before
+  # resorting to a temporary file.  Default is 112 kilobytes.
+  @@client_body_buffer_size = Unicorn::Const::MAX_BODY
+
+  # The I/O chunk size (in +bytes+) for I/O operations where
+  # the size cannot be user-specified when a method is called.
+  # The default is 16 kilobytes.
+  @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+
+  # Initializes a new TeeInput object.  You normally do not have to call
+  # this unless you are writing an HTTP server.
+  def initialize(socket, request)
+    @socket = socket
+    @parser = request
+    @buf = request.buf
+    @env = request.env
+    @len = request.content_length
+    @tmp = @len && @len < @@client_body_buffer_size ?
+           StringIO.new("") : Unicorn::TmpIO.new
+    @buf2 = ""
+    if @buf.size > 0
+      @parser.filter_body(@buf2, @buf) and finalize_input
+      @tmp.write(@buf2)
+      @tmp.rewind
     end
+  end
 
-    # :call-seq:
-    #   ios.size  => Integer
-    #
-    # Returns the size of the input.  For requests with a Content-Length
-    # header value, this will not read data off the socket and just return
-    # the value of the Content-Length header as an Integer.
-    #
-    # For Transfer-Encoding:chunked requests, this requires consuming
-    # all of the input stream before returning since there's no other
-    # way to determine the size of the request body beforehand.
-    #
-    # This method is no longer part of the Rack specification as of
-    # Rack 1.2, so its use is not recommended.  This method only exists
-    # for compatibility with Rack applications designed for Rack 1.1 and
-    # earlier.  Most applications should only need to call +read+ with a
-    # specified +length+ in a loop until it returns +nil+.
-    def size
-      len and return len
-
-      if socket
-        pos = tmp.pos
-        while tee(Const::CHUNK_SIZE, buf2)
-        end
-        tmp.seek(pos)
+  # :call-seq:
+  #   ios.size  => Integer
+  #
+  # Returns the size of the input.  For requests with a Content-Length
+  # header value, this will not read data off the socket and just return
+  # the value of the Content-Length header as an Integer.
+  #
+  # For Transfer-Encoding:chunked requests, this requires consuming
+  # all of the input stream before returning since there's no other
+  # way to determine the size of the request body beforehand.
+  #
+  # This method is no longer part of the Rack specification as of
+  # Rack 1.2, so its use is not recommended.  This method only exists
+  # for compatibility with Rack applications designed for Rack 1.1 and
+  # earlier.  Most applications should only need to call +read+ with a
+  # specified +length+ in a loop until it returns +nil+.
+  def size
+    @len and return @len
+
+    if @socket
+      pos = @tmp.pos
+      while tee(@@io_chunk_size, @buf2)
       end
-
-      self.len = tmp.size
+      @tmp.seek(pos)
     end
 
-    # :call-seq:
-    #   ios.read([length [, buffer ]]) => string, buffer, or nil
-    #
-    # Reads at most length bytes from the I/O stream, or to the end of
-    # file if length is omitted or is nil. length must be a non-negative
-    # integer or nil. If the optional buffer argument is present, it
-    # must reference a String, which will receive the data.
-    #
-    # At end of file, it returns nil or "" depend on length.
-    # ios.read() and ios.read(nil) returns "".
-    # ios.read(length [, buffer]) returns nil.
-    #
-    # If the Content-Length of the HTTP request is known (as is the common
-    # case for POST requests), then ios.read(length [, buffer]) will block
-    # until the specified length is read (or it is the last chunk).
-    # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
-    # ios.read(length [, buffer]) will return immediately if there is
-    # any data and only block when nothing is available (providing
-    # IO#readpartial semantics).
-    def read(*args)
-      socket or return tmp.read(*args)
-
-      length = args.shift
-      if nil == length
-        rv = tmp.read || ""
-        while tee(Const::CHUNK_SIZE, buf2)
-          rv << buf2
-        end
-        rv
+    @len = @tmp.size
+  end
+
+  # :call-seq:
+  #   ios.read([length [, buffer ]]) => string, buffer, or nil
+  #
+  # Reads at most length bytes from the I/O stream, or to the end of
+  # file if length is omitted or is nil. length must be a non-negative
+  # integer or nil. If the optional buffer argument is present, it
+  # must reference a String, which will receive the data.
+  #
+  # At end of file, it returns nil or "" depending on length.
+  # ios.read() and ios.read(nil) return "".
+  # ios.read(length [, buffer]) returns nil.
+  #
+  # If the Content-Length of the HTTP request is known (as is the common
+  # case for POST requests), then ios.read(length [, buffer]) will block
+  # until the specified length is read (or it is the last chunk).
+  # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+  # ios.read(length [, buffer]) will return immediately if there is
+  # any data and only block when nothing is available (providing
+  # IO#readpartial semantics).
+  def read(*args)
+    @socket or return @tmp.read(*args)
+
+    length = args.shift
+    if nil == length
+      rv = @tmp.read || ""
+      while tee(@@io_chunk_size, @buf2)
+        rv << @buf2
+      end
+      rv
+    else
+      rv = args.shift || ""
+      diff = @tmp.size - @tmp.pos
+      if 0 == diff
+        ensure_length(tee(length, rv), length)
       else
-        rv = args.shift || ""
-        diff = tmp.size - tmp.pos
-        if 0 == diff
-          ensure_length(tee(length, rv), length)
-        else
-          ensure_length(tmp.read(diff > length ? length : diff, rv), length)
-        end
+        ensure_length(@tmp.read(diff > length ? length : diff, rv), length)
       end
     end
+  end
 
-    # :call-seq:
-    #   ios.gets   => string or nil
-    #
-    # Reads the next ``line'' from the I/O stream; lines are separated
-    # by the global record separator ($/, typically "\n"). A global
-    # record separator of nil reads the entire unread contents of ios.
-    # Returns nil if called at the end of file.
-    # This takes zero arguments for strict Rack::Lint compatibility,
-    # unlike IO#gets.
-    def gets
-      socket or return tmp.gets
-      sep = $/ or return read
-
-      orig_size = tmp.size
-      if tmp.pos == orig_size
-        tee(Const::CHUNK_SIZE, buf2) or return nil
-        tmp.seek(orig_size)
-      end
+  # :call-seq:
+  #   ios.gets   => string or nil
+  #
+  # Reads the next ``line'' from the I/O stream; lines are separated
+  # by the global record separator ($/, typically "\n"). A global
+  # record separator of nil reads the entire unread contents of ios.
+  # Returns nil if called at the end of file.
+  # This takes zero arguments for strict Rack::Lint compatibility,
+  # unlike IO#gets.
+  def gets
+    @socket or return @tmp.gets
+    sep = $/ or return read
+
+    orig_size = @tmp.size
+    if @tmp.pos == orig_size
+      tee(@@io_chunk_size, @buf2) or return nil
+      @tmp.seek(orig_size)
+    end
 
-      sep_size = Rack::Utils.bytesize(sep)
-      line = tmp.gets # cannot be nil here since size > pos
-      sep == line[-sep_size, sep_size] and return line
+    sep_size = Rack::Utils.bytesize(sep)
+    line = @tmp.gets # cannot be nil here since size > pos
+    sep == line[-sep_size, sep_size] and return line
 
-      # unlikely, if we got here, then tmp is at EOF
-      begin
-        orig_size = tmp.pos
-        tee(Const::CHUNK_SIZE, buf2) or break
-        tmp.seek(orig_size)
-        line << tmp.gets
-        sep == line[-sep_size, sep_size] and return line
-        # tmp is at EOF again here, retry the loop
-      end while true
-
-      line
-    end
+    # unlikely, if we got here, then @tmp is at EOF
+    begin
+      orig_size = @tmp.pos
+      tee(@@io_chunk_size, @buf2) or break
+      @tmp.seek(orig_size)
+      line << @tmp.gets
+      sep == line[-sep_size, sep_size] and return line
+      # @tmp is at EOF again here, retry the loop
+    end while true
 
-    # :call-seq:
-    #   ios.each { |line| block }  => ios
-    #
-    # Executes the block for every ``line'' in *ios*, where lines are
-    # separated by the global record separator ($/, typically "\n").
-    def each(&block)
-      while line = gets
-        yield line
-      end
+    line
+  end
 
-      self # Rack does not specify what the return value is here
+  # :call-seq:
+  #   ios.each { |line| block }  => ios
+  #
+  # Executes the block for every ``line'' in *ios*, where lines are
+  # separated by the global record separator ($/, typically "\n").
+  def each(&block)
+    while line = gets
+      yield line
     end
 
-    # :call-seq:
-    #   ios.rewind    => 0
-    #
-    # Positions the *ios* pointer to the beginning of input, returns
-    # the offset (zero) of the +ios+ pointer.  Subsequent reads will
-    # start from the beginning of the previously-buffered input.
-    def rewind
-      tmp.rewind # Rack does not specify what the return value is here
-    end
+    self # Rack does not specify what the return value is here
+  end
 
-  private
-
-    def client_error(e)
-      case e
-      when EOFError
-        # in case client only did a premature shutdown(SHUT_WR)
-        # we do support clients that shutdown(SHUT_WR) after the
-        # _entire_ request has been sent, and those will not have
-        # raised EOFError on us.
-        socket.close if socket
-        raise ClientShutdown, "bytes_read=#{tmp.size}", []
-      when HttpParserError
-        e.set_backtrace([])
-      end
-      raise e
-    end
+  # :call-seq:
+  #   ios.rewind    => 0
+  #
+  # Positions the *ios* pointer to the beginning of input, returns
+  # the offset (zero) of the +ios+ pointer.  Subsequent reads will
+  # start from the beginning of the previously-buffered input.
+  def rewind
+    @tmp.rewind # Rack does not specify what the return value is here
+  end
 
-    # tees off a +length+ chunk of data from the input into the IO
-    # backing store as well as returning it.  +dst+ must be specified.
-    # returns nil if reading from the input returns nil
-    def tee(length, dst)
-      unless parser.body_eof?
-        if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
-          tmp.write(dst)
-          tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
-          return dst
-        end
+private
+
+  # tees off a +length+ chunk of data from the input into the IO
+  # backing store as well as returning it.  +dst+ must be specified.
+  # returns nil if reading from the input returns nil
+  def tee(length, dst)
+    unless @parser.body_eof?
+      @socket.kgio_read(length, @buf) or eof!
+      unless @parser.filter_body(dst, @buf)
+        @tmp.write(dst)
+        @tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+        return dst
       end
-      finalize_input
-      rescue => e
-        client_error(e)
     end
+    finalize_input
+  end
 
-    def finalize_input
-      while parser.trailers(req, buf).nil?
-        # Don't worry about raising ClientShutdown here on EOFError, tee()
-        # will catch EOFError when app is processing it, otherwise in
-        # initialize we never get any chance to enter the app so the
-        # EOFError will just get trapped by Unicorn and not the Rack app
-        buf << socket.readpartial(Const::CHUNK_SIZE)
-      end
-      self.socket = nil
+  def finalize_input
+    while @parser.trailers(@env, @buf).nil?
+      r = @socket.kgio_read(@@io_chunk_size) or eof!
+      @buf << r
     end
+    @socket = nil
+  end
 
-    # tee()s into +dst+ until it is of +length+ bytes (or until
-    # we've reached the Content-Length of the request body).
-    # Returns +dst+ (the exact object, not a duplicate)
-    # To continue supporting applications that need near-real-time
-    # streaming input bodies, this is a no-op for
-    # "Transfer-Encoding: chunked" requests.
-    def ensure_length(dst, length)
-      # len is nil for chunked bodies, so we can't ensure length for those
-      # since they could be streaming bidirectionally and we don't want to
-      # block the caller in that case.
-      return dst if dst.nil? || len.nil?
-
-      while dst.size < length && tee(length - dst.size, buf2)
-        dst << buf2
-      end
-
-      dst
+  # tee()s into +dst+ until it is of +length+ bytes (or until
+  # we've reached the Content-Length of the request body).
+  # Returns +dst+ (the exact object, not a duplicate)
+  # To continue supporting applications that need near-real-time
+  # streaming input bodies, this is a no-op for
+  # "Transfer-Encoding: chunked" requests.
+  def ensure_length(dst, length)
+    # len is nil for chunked bodies, so we can't ensure length for those
+    # since they could be streaming bidirectionally and we don't want to
+    # block the caller in that case.
+    return dst if dst.nil? || @len.nil?
+
+    while dst.size < length && tee(length - dst.size, @buf2)
+      dst << @buf2
     end
 
+    dst
+  end
+
+  def eof!
+    # in case client only did a premature shutdown(SHUT_WR)
+    # we do support clients that shutdown(SHUT_WR) after the
+    # _entire_ request has been sent, and those will not have
+    # raised EOFError on us.
+    @socket.close if @socket
+    raise Unicorn::ClientShutdown, "bytes_read=#{@tmp.size}", []
   end
 end
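The RDoc above recommends read(length, buffer) in a loop; under unicorn, env["rack.input"] is the TeeInput instance, so each iteration also tees data into the backing store for later rewinds. A sketch of that pattern in a Rack endpoint (the class name and chunk size are arbitrary):

    class UploadCounter
      def call(env)
        input = env["rack.input"] # a Unicorn::TeeInput under unicorn
        bytes = 0
        buf = ""
        while input.read(16384, buf)
          bytes += buf.size # a real app could report progress here
        end
        input.rewind # downstream consumers may re-read the body
        [ 200, { "Content-Type" => "text/plain" },
          [ "read #{bytes} bytes\n" ] ]
      end
    end
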
diff --git a/lib/unicorn/tmpio.rb b/lib/unicorn/tmpio.rb
new file mode 100644
index 0000000..a3c530d
--- /dev/null
+++ b/lib/unicorn/tmpio.rb
@@ -0,0 +1,29 @@
+# -*- encoding: binary -*-
+# :stopdoc:
+require 'tmpdir'
+
+# some versions of Ruby had a broken Tempfile which didn't work
+# well with unlinked files.  This one is much shorter, easier
+# to understand, and slightly faster.
+class Unicorn::TmpIO < File
+
+  # creates and returns a new File object.  The File is unlinked
+  # immediately, switched to binary mode, and userspace output
+  # buffering is disabled
+  def self.new
+    fp = begin
+      super("#{Dir::tmpdir}/#{rand}", RDWR|CREAT|EXCL, 0600)
+    rescue Errno::EEXIST
+      retry
+    end
+    unlink(fp.path)
+    fp.binmode
+    fp.sync = true
+    fp
+  end
+
+  # for easier env["rack.input"] compatibility with Rack <= 1.1
+  def size
+    stat.size
+  end
+end
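Since TmpIO.new unlinks the file before returning, the handle is the only reference to the storage and closing it reclaims the space. A short usage sketch, assuming unicorn/tmpio has been required:

    tio = Unicorn::TmpIO.new
    tio.write "hello"
    tio.size    # => 5, via stat(2); writes are unbuffered (sync=true)
    tio.rewind
    tio.read    # => "hello"
    tio.close   # storage reclaimed, nothing left to unlink
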
diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb
index e8c09d0..82329eb 100644
--- a/lib/unicorn/util.rb
+++ b/lib/unicorn/util.rb
@@ -1,87 +1,67 @@
 # -*- encoding: binary -*-
 
-require 'fcntl'
-require 'tmpdir'
+module Unicorn::Util
 
-module Unicorn
+# :stopdoc:
+  def self.is_log?(fp)
+    append_flags = File::WRONLY | File::APPEND
 
-  class TmpIO < ::File
+    ! fp.closed? &&
+      fp.sync &&
+      fp.path[0] == ?/ &&
+      (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+    rescue IOError, Errno::EBADF
+      false
+  end
 
-    # for easier env["rack.input"] compatibility
-    def size
-      # flush if sync
-      stat.size
+  def self.chown_logs(uid, gid)
+    ObjectSpace.each_object(File) do |fp|
+      fp.chown(uid, gid) if is_log?(fp)
     end
   end
+# :startdoc:
 
-  module Util
-    class << self
-
-      def is_log?(fp)
-        append_flags = File::WRONLY | File::APPEND
+  # This reopens ALL logfiles in the process that have been rotated
+  # using logrotate(8) (without copytruncate) or similar tools.
+  # A +File+ object is considered for reopening if it is:
+  #   1) opened with the O_APPEND and O_WRONLY flags
+  #   2) opened with an absolute path (starts with "/")
+  #   3) the current open file handle does not match its original open path
+  #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
+  # Returns the number of files reopened
+  def self.reopen_logs
+    to_reopen = []
+    nr = 0
+    ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
 
-        ! fp.closed? &&
-          fp.sync &&
-          fp.path[0] == ?/ &&
-          (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+    to_reopen.each do |fp|
+      orig_st = begin
+        fp.stat
+      rescue IOError, Errno::EBADF
+        next
       end
 
-      def chown_logs(uid, gid)
-        ObjectSpace.each_object(File) do |fp|
-          fp.chown(uid, gid) if is_log?(fp)
-        end
+      begin
+        b = File.stat(fp.path)
+        next if orig_st.ino == b.ino && orig_st.dev == b.dev
+      rescue Errno::ENOENT
       end
 
-      # This reopens ALL logfiles in the process that have been rotated
-      # using logrotate(8) (without copytruncate) or similar tools.
-      # A +File+ object is considered for reopening if it is:
-      #   1) opened with the O_APPEND and O_WRONLY flags
-      #   2) opened with an absolute path (starts with "/")
-      #   3) the current open file handle does not match its original open path
-      #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
-      # Returns the number of files reopened
-      def reopen_logs
-        to_reopen = []
-        nr = 0
-        ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp }
-
-        to_reopen.each do |fp|
-          orig_st = fp.stat
-          begin
-            b = File.stat(fp.path)
-            next if orig_st.ino == b.ino && orig_st.dev == b.dev
-          rescue Errno::ENOENT
-          end
+      begin
+        File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
+        fp.sync = true
+        new_st = fp.stat
 
-          File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
-          fp.sync = true
-          new_st = fp.stat
-          if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
-            fp.chown(orig_st.uid, orig_st.gid)
-          end
-          nr += 1
+        # this should only happen in the master:
+        if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
+          fp.chown(orig_st.uid, orig_st.gid)
         end
 
-        nr
+        nr += 1
+      rescue IOError, Errno::EBADF
+        # not much we can do...
       end
-
-      # creates and returns a new File object.  The File is unlinked
-      # immediately, switched to binary mode, and userspace output
-      # buffering is disabled
-      def tmpio
-        fp = begin
-          TmpIO.open("#{Dir::tmpdir}/#{rand}",
-                     File::RDWR|File::CREAT|File::EXCL, 0600)
-        rescue Errno::EEXIST
-          retry
-        end
-        File.unlink(fp.path)
-        fp.binmode
-        fp.sync = true
-        fp
-      end
-
     end
-
+    nr
   end
 end
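A File object must satisfy all four conditions above to be considered for reopening. The sketch below shows a handle that qualifies (the log path is hypothetical, and unicorn must already be loaded so Fcntl is available):

    log = File.open("/var/log/app.log", "a") # O_WRONLY|O_APPEND
    log.sync = true                          # no userspace buffering
    Unicorn::Util.is_log?(log)               # => true
    # after logrotate(8) renames /var/log/app.log away:
    #   Unicorn::Util.reopen_logs            # => 1; log then appends
    #   to the freshly-created file at the original path
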
diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb
new file mode 100644
index 0000000..fd8d20e
--- /dev/null
+++ b/lib/unicorn/worker.rb
@@ -0,0 +1,40 @@
+# -*- encoding: binary -*-
+
+# This class and its members can be considered a stable interface
+# and will not change in a backwards-incompatible fashion between
+# releases of Unicorn.  You may need to access it in the
+# before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
+# for examples.
+class Unicorn::Worker < Struct.new(:nr, :tmp, :switched)
+
+  # worker objects may be compared to just plain numbers
+  def ==(other_nr)
+    self.nr == other_nr
+  end
+
+  # Changes the worker process to the specified +user+ and +group+.
+  # This is only intended to be called from within the worker
+  # process from the +after_fork+ hook.  This should be called in
+  # the +after_fork+ hook after any privileged operations have been
+  # performed (e.g. setting per-worker CPU affinity, niceness, etc.)
+  #
+  # Any and all errors raised within this method will be propagated
+  # directly back to the caller (usually the +after_fork+ hook).
+  # These errors commonly include ArgumentError for specifying an
+  # invalid user/group and Errno::EPERM for insufficient privileges.
+  def user(user, group = nil)
+    # we do not protect the caller; checking Process.euid == 0 is
+    # insufficient because modern systems have fine-grained
+    # capabilities.  Let the caller handle any and all errors.
+    uid = Etc.getpwnam(user).uid
+    gid = Etc.getgrnam(group).gid if group
+    Unicorn::Util.chown_logs(uid, gid)
+    tmp.chown(uid, gid)
+    if gid && Process.egid != gid
+      Process.initgroups(user, gid)
+      Process::GID.change_privilege(gid)
+    end
+    Process.euid != uid and Process::UID.change_privilege(uid)
+    self.switched = true
+  end
+end
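A hedged configuration sketch of calling Worker#user from the after_fork hook; the user/group names are hypothetical, and dropping privileges only works when the master was started as root:

    after_fork do |server, worker|
      worker.user("www-data", "www-data") if Process.euid == 0
    end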