about summary refs log tree commit homepage
diff options
context:
space:
mode:
-rw-r--r--Documentation/unicorn.1.txt3
-rw-r--r--Documentation/unicorn_rails.1.txt7
-rwxr-xr-xGIT-VERSION-GEN2
-rw-r--r--GNUmakefile2
-rw-r--r--ext/unicorn_http/unicorn_http.rl2
-rw-r--r--lib/unicorn.rb13
-rw-r--r--lib/unicorn/configurator.rb903
-rw-r--r--lib/unicorn/const.rb4
-rw-r--r--lib/unicorn/http_request.rb3
-rw-r--r--lib/unicorn/http_response.rb115
-rw-r--r--lib/unicorn/socket_helper.rb66
-rw-r--r--lib/unicorn/tee_input.rb398
-rw-r--r--test/test_helper.rb1
-rw-r--r--test/unit/test_socket_helper.rb24
14 files changed, 811 insertions, 732 deletions
diff --git a/Documentation/unicorn.1.txt b/Documentation/unicorn.1.txt
index 24df7ab..c20a570 100644
--- a/Documentation/unicorn.1.txt
+++ b/Documentation/unicorn.1.txt
@@ -36,6 +36,9 @@ with rackup(1) but strongly discouraged.
     implemented as a Ruby DSL, so Ruby code may executed.
     See the RDoc/ri for the *Unicorn::Configurator* class for the full
     list of directives available from the DSL.
+    Using an absolute path for CONFIG_FILE is recommended as it
+    makes multiple instances of Unicorn easily distinguishable when
+    viewing ps(1) output.
 
 -D, \--daemonize
 :   Run daemonized in the background.  The process is detached from
diff --git a/Documentation/unicorn_rails.1.txt b/Documentation/unicorn_rails.1.txt
index 267e425..f426b07 100644
--- a/Documentation/unicorn_rails.1.txt
+++ b/Documentation/unicorn_rails.1.txt
@@ -34,8 +34,11 @@ as much as possible.
 -c, \--config-file CONFIG_FILE
 :   Path to the Unicorn-specific config file.  The config file is
     implemented as a Ruby DSL, so Ruby code may executed.
-    See the RDoc/ri for the *Unicorn::Configurator* class for the
-    full list of directives available from the DSL.
+    See the RDoc/ri for the *Unicorn::Configurator* class for the full
+    list of directives available from the DSL.
+    Using an absolute path for CONFIG_FILE is recommended as it
+    makes multiple instances of Unicorn easily distinguishable when
+    viewing ps(1) output.
 
 -D, \--daemonize
 :   Run daemonized in the background.  The process is detached from
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 7d0c7ed..432f3c0 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v1.0.1.GIT
+DEF_VER=v1.1.1.GIT
 
 LF='
 '
diff --git a/GNUmakefile b/GNUmakefile
index b5fe9fd..3354ff1 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -169,7 +169,7 @@ NEWS: GIT-VERSION-FILE .manifest
         $(RAKE) -s news_rdoc > $@+
         mv $@+ $@
 
-SINCE = 0.991.0
+SINCE = 1.0.0
 ChangeLog: LOG_VERSION = \
   $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
           echo $(GIT_VERSION) || git describe)
diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl
index f6c632f..1ad2a5d 100644
--- a/ext/unicorn_http/unicorn_http.rl
+++ b/ext/unicorn_http/unicorn_http.rl
@@ -684,7 +684,7 @@ void Init_unicorn_http(void)
 {
   VALUE mUnicorn, cHttpParser;
 
-  mUnicorn = rb_define_module("Unicorn");
+  mUnicorn = rb_const_get(rb_cObject, rb_intern("Unicorn"));
   cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject);
   eHttpParserError =
          rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError);
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index cbb5520..c231a4d 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -2,6 +2,7 @@
 
 require 'fcntl'
 require 'etc'
+require 'stringio'
 require 'rack'
 require 'unicorn/socket_helper'
 require 'unicorn/const'
@@ -487,8 +488,8 @@ module Unicorn
     # Wake up every second anyways to run murder_lazy_workers
     def master_sleep(sec)
       begin
-        IO.select([ SELF_PIPE.first ], nil, nil, sec) or return
-        SELF_PIPE.first.read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
+        IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
+        SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
       rescue Errno::EAGAIN, Errno::EINTR
         break
       end while true
@@ -496,7 +497,7 @@ module Unicorn
 
     def awaken_master
       begin
-        SELF_PIPE.last.write_nonblock('.') # wakeup master process from select
+        SELF_PIPE[1].write_nonblock('.') # wakeup master process from select
       rescue Errno::EAGAIN, Errno::EINTR
         # pipe is full, master should wake up anyways
         retry
@@ -640,7 +641,7 @@ module Unicorn
       client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
       response = app.call(env = REQUEST.read(client))
 
-      if 100 == response.first.to_i
+      if 100 == response[0].to_i
         client.write(Const::EXPECT_100_RESPONSE)
         env.delete(Const::HTTP_EXPECT)
         response = app.call(env)
@@ -689,7 +690,7 @@ module Unicorn
       ready = LISTENERS
 
       # closing anything we IO.select on will raise EBADF
-      trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
+      trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
       trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
       [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
       logger.info "worker=#{worker.nr} ready"
@@ -730,7 +731,7 @@ module Unicorn
         begin
           # timeout used so we can detect parent death:
           ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
-          ready = ret.first
+          ready = ret[0]
         rescue Errno::EINTR
           ready = LISTENERS
         rescue Errno::EBADF
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 533e0ed..6be6fbd 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -1,483 +1,512 @@
 # -*- encoding: binary -*-
-
-require 'socket'
 require 'logger'
 
-module Unicorn
-
-  # Implements a simple DSL for configuring a Unicorn server.
-  #
-  # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
-  # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
-  # example configuration files.  An example config file for use with
-  # nginx is also available at
-  # http://unicorn.bogomips.org/examples/nginx.conf
-  class Configurator < Struct.new(:set, :config_file, :after_reload)
-    # :stopdoc:
-    # used to stash stuff for deferred processing of cli options in
-    # config.ru after "working_directory" is bound.  Do not rely on
-    # this being around later on...
-    RACKUP = {}
-    # :startdoc:
-
-    # Default settings for Unicorn
-    DEFAULTS = {
-      :timeout => 60,
-      :logger => Logger.new($stderr),
-      :worker_processes => 1,
-      :after_fork => lambda { |server, worker|
-          server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
-        },
-      :before_fork => lambda { |server, worker|
-          server.logger.info("worker=#{worker.nr} spawning...")
-        },
-      :before_exec => lambda { |server|
-          server.logger.info("forked child re-executing...")
-        },
-      :pid => nil,
-      :preload_app => false,
-    }
-
-    def initialize(defaults = {}) #:nodoc:
-      self.set = Hash.new(:unset)
-      use_defaults = defaults.delete(:use_defaults)
-      self.config_file = defaults.delete(:config_file)
-
-      # after_reload is only used by unicorn_rails, unsupported otherwise
-      self.after_reload = defaults.delete(:after_reload)
-
-      set.merge!(DEFAULTS) if use_defaults
-      defaults.each { |key, value| self.send(key, value) }
-      Hash === set[:listener_opts] or
-          set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
-      Array === set[:listeners] or set[:listeners] = []
-      reload
-    end
+# Implements a simple DSL for configuring a \Unicorn server.
+#
+# See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
+# http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
+# example configuration files.  An example config file for use with
+# nginx is also available at
+# http://unicorn.bogomips.org/examples/nginx.conf
+class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
+  # used to stash stuff for deferred processing of cli options in
+  # config.ru after "working_directory" is bound.  Do not rely on
+  # this being around later on...
+  RACKUP = {} # :nodoc:
+
+  # Default settings for Unicorn
+  # :stopdoc:
+  DEFAULTS = {
+    :timeout => 60,
+    :logger => Logger.new($stderr),
+    :worker_processes => 1,
+    :after_fork => lambda { |server, worker|
+        server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
+      },
+    :before_fork => lambda { |server, worker|
+        server.logger.info("worker=#{worker.nr} spawning...")
+      },
+    :before_exec => lambda { |server|
+        server.logger.info("forked child re-executing...")
+      },
+    :pid => nil,
+    :preload_app => false,
+  }
+  #:startdoc:
+
+  def initialize(defaults = {}) #:nodoc:
+    self.set = Hash.new(:unset)
+    use_defaults = defaults.delete(:use_defaults)
+    self.config_file = defaults.delete(:config_file)
+
+    # after_reload is only used by unicorn_rails, unsupported otherwise
+    self.after_reload = defaults.delete(:after_reload)
+
+    set.merge!(DEFAULTS) if use_defaults
+    defaults.each { |key, value| self.send(key, value) }
+    Hash === set[:listener_opts] or
+        set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
+    Array === set[:listeners] or set[:listeners] = []
+    reload
+  end
 
-    def reload #:nodoc:
-      instance_eval(File.read(config_file), config_file) if config_file
+  def reload #:nodoc:
+    instance_eval(File.read(config_file), config_file) if config_file
 
-      parse_rackup_file
+    parse_rackup_file
 
-      # unicorn_rails creates dirs here after working_directory is bound
-      after_reload.call if after_reload
+    # unicorn_rails creates dirs here after working_directory is bound
+    after_reload.call if after_reload
 
-      # working_directory binds immediately (easier error checking that way),
-      # now ensure any paths we changed are correctly set.
-      [ :pid, :stderr_path, :stdout_path ].each do |var|
-        String === (path = set[var]) or next
-        path = File.expand_path(path)
-        File.writable?(path) || File.writable?(File.dirname(path)) or \
-              raise ArgumentError, "directory for #{var}=#{path} not writable"
-      end
+    # working_directory binds immediately (easier error checking that way),
+    # now ensure any paths we changed are correctly set.
+    [ :pid, :stderr_path, :stdout_path ].each do |var|
+      String === (path = set[var]) or next
+      path = File.expand_path(path)
+      File.writable?(path) || File.writable?(File.dirname(path)) or \
+            raise ArgumentError, "directory for #{var}=#{path} not writable"
     end
+  end
 
-    def commit!(server, options = {}) #:nodoc:
-      skip = options[:skip] || []
-      if ready_pipe = RACKUP.delete(:ready_pipe)
-        server.ready_pipe = ready_pipe
-      end
-      set.each do |key, value|
-        value == :unset and next
-        skip.include?(key) and next
-        server.__send__("#{key}=", value)
-      end
+  def commit!(server, options = {}) #:nodoc:
+    skip = options[:skip] || []
+    if ready_pipe = RACKUP.delete(:ready_pipe)
+      server.ready_pipe = ready_pipe
     end
-
-    def [](key) # :nodoc:
-      set[key]
+    set.each do |key, value|
+      value == :unset and next
+      skip.include?(key) and next
+      server.__send__("#{key}=", value)
     end
+  end
 
-    # sets object to the +new+ Logger-like object.  The new logger-like
-    # object must respond to the following methods:
-    #  +debug+, +info+, +warn+, +error+, +fatal+
-    # The default Logger will log its output to the path specified
-    # by +stderr_path+.  If you're running Unicorn daemonized, then
-    # you must specify a path to prevent error messages from going
-    # to /dev/null.
-    def logger(new)
-      %w(debug info warn error fatal).each do |m|
-        new.respond_to?(m) and next
-        raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
-      end
+  def [](key) # :nodoc:
+    set[key]
+  end
 
-      set[:logger] = new
+  # sets object to the +new+ Logger-like object.  The new logger-like
+  # object must respond to the following methods:
+  #  +debug+, +info+, +warn+, +error+, +fatal+
+  # The default Logger will log its output to the path specified
+  # by +stderr_path+.  If you're running Unicorn daemonized, then
+  # you must specify a path to prevent error messages from going
+  # to /dev/null.
+  def logger(new)
+    %w(debug info warn error fatal).each do |m|
+      new.respond_to?(m) and next
+      raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
     end
 
-    # sets after_fork hook to a given block.  This block will be called by
-    # the worker after forking.  The following is an example hook which adds
-    # a per-process listener to every worker:
-    #
-    #  after_fork do |server,worker|
-    #    # per-process listener ports for debugging/admin:
-    #    addr = "127.0.0.1:#{9293 + worker.nr}"
-    #
-    #    # the negative :tries parameter indicates we will retry forever
-    #    # waiting on the existing process to exit with a 5 second :delay
-    #    # Existing options for Unicorn::Configurator#listen such as
-    #    # :backlog, :rcvbuf, :sndbuf are available here as well.
-    #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
-    #
-    #    # drop permissions to "www-data" in the worker
-    #    # generally there's no reason to start Unicorn as a priviledged user
-    #    # as it is not recommended to expose Unicorn to public clients.
-    #    worker.user('www-data', 'www-data') if Process.euid == 0
-    #  end
-    def after_fork(*args, &block)
-      set_hook(:after_fork, block_given? ? block : args[0])
-    end
+    set[:logger] = new
+  end
 
-    # sets before_fork got be a given Proc object.  This Proc
-    # object will be called by the master process before forking
-    # each worker.
-    def before_fork(*args, &block)
-      set_hook(:before_fork, block_given? ? block : args[0])
-    end
+  # sets after_fork hook to a given block.  This block will be called by
+  # the worker after forking.  The following is an example hook which adds
+  # a per-process listener to every worker:
+  #
+  #  after_fork do |server,worker|
+  #    # per-process listener ports for debugging/admin:
+  #    addr = "127.0.0.1:#{9293 + worker.nr}"
+  #
+  #    # the negative :tries parameter indicates we will retry forever
+  #    # waiting on the existing process to exit with a 5 second :delay
+  #    # Existing options for Unicorn::Configurator#listen such as
+  #    # :backlog, :rcvbuf, :sndbuf are available here as well.
+  #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
+  #
+  #    # drop permissions to "www-data" in the worker
+    # generally there's no reason to start Unicorn as a privileged user
+  #    # as it is not recommended to expose Unicorn to public clients.
+  #    worker.user('www-data', 'www-data') if Process.euid == 0
+  #  end
+  def after_fork(*args, &block)
+    set_hook(:after_fork, block_given? ? block : args[0])
+  end
 
-    # sets the before_exec hook to a given Proc object.  This
-    # Proc object will be called by the master process right
-    # before exec()-ing the new unicorn binary.  This is useful
-    # for freeing certain OS resources that you do NOT wish to
-    # share with the reexeced child process.
-    # There is no corresponding after_exec hook (for obvious reasons).
-    def before_exec(*args, &block)
-      set_hook(:before_exec, block_given? ? block : args[0], 1)
-    end
+  # sets before_fork got be a given Proc object.  This Proc
+  # object will be called by the master process before forking
+  # each worker.
+  def before_fork(*args, &block)
+    set_hook(:before_fork, block_given? ? block : args[0])
+  end
 
-    # sets the timeout of worker processes to +seconds+.  Workers
-    # handling the request/app.call/response cycle taking longer than
-    # this time period will be forcibly killed (via SIGKILL).  This
-    # timeout is enforced by the master process itself and not subject
-    # to the scheduling limitations by the worker process.  Due the
-    # low-complexity, low-overhead implementation, timeouts of less
-    # than 3.0 seconds can be considered inaccurate and unsafe.
-    #
-    # For running Unicorn behind nginx, it is recommended to set
-    # "fail_timeout=0" for in your nginx configuration like this
-    # to have nginx always retry backends that may have had workers
-    # SIGKILL-ed due to timeouts.
-    #
-    #    # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
-    #    # on nginx upstream configuration:
-    #    upstream unicorn_backend {
-    #      # for UNIX domain socket setups:
-    #      server unix:/path/to/unicorn.sock fail_timeout=0;
-    #
-    #      # for TCP setups
-    #      server 192.168.0.7:8080 fail_timeout=0;
-    #      server 192.168.0.8:8080 fail_timeout=0;
-    #      server 192.168.0.9:8080 fail_timeout=0;
-    #    }
-    def timeout(seconds)
-      Numeric === seconds or raise ArgumentError,
-                                  "not numeric: timeout=#{seconds.inspect}"
-      seconds >= 3 or raise ArgumentError,
-                                  "too low: timeout=#{seconds.inspect}"
-      set[:timeout] = seconds
-    end
+  # sets the before_exec hook to a given Proc object.  This
+  # Proc object will be called by the master process right
+  # before exec()-ing the new unicorn binary.  This is useful
+  # for freeing certain OS resources that you do NOT wish to
+  # share with the reexeced child process.
+  # There is no corresponding after_exec hook (for obvious reasons).
+  def before_exec(*args, &block)
+    set_hook(:before_exec, block_given? ? block : args[0], 1)
+  end
 
-    # sets the current number of worker_processes to +nr+.  Each worker
-    # process will serve exactly one client at a time.  You can
-    # increment or decrement this value at runtime by sending SIGTTIN
-    # or SIGTTOU respectively to the master process without reloading
-    # the rest of your Unicorn configuration.  See the SIGNALS document
-    # for more information.
-    def worker_processes(nr)
-      Integer === nr or raise ArgumentError,
-                             "not an integer: worker_processes=#{nr.inspect}"
-      nr >= 0 or raise ArgumentError,
-                             "not non-negative: worker_processes=#{nr.inspect}"
-      set[:worker_processes] = nr
-    end
+  # sets the timeout of worker processes to +seconds+.  Workers
+  # handling the request/app.call/response cycle taking longer than
+  # this time period will be forcibly killed (via SIGKILL).  This
+  # timeout is enforced by the master process itself and not subject
+  # to the scheduling limitations by the worker process.  Due to the
+  # low-complexity, low-overhead implementation, timeouts of less
+  # than 3.0 seconds can be considered inaccurate and unsafe.
+  #
+  # For running Unicorn behind nginx, it is recommended to set
+  # "fail_timeout=0" in your nginx configuration like this
+  # to have nginx always retry backends that may have had workers
+  # SIGKILL-ed due to timeouts.
+  #
+  #    # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
+  #    # on nginx upstream configuration:
+  #    upstream unicorn_backend {
+  #      # for UNIX domain socket setups:
+  #      server unix:/path/to/unicorn.sock fail_timeout=0;
+  #
+  #      # for TCP setups
+  #      server 192.168.0.7:8080 fail_timeout=0;
+  #      server 192.168.0.8:8080 fail_timeout=0;
+  #      server 192.168.0.9:8080 fail_timeout=0;
+  #    }
+  def timeout(seconds)
+    Numeric === seconds or raise ArgumentError,
+                                "not numeric: timeout=#{seconds.inspect}"
+    seconds >= 3 or raise ArgumentError,
+                                "too low: timeout=#{seconds.inspect}"
+    set[:timeout] = seconds
+  end
 
-    # sets listeners to the given +addresses+, replacing or augmenting the
-    # current set.  This is for the global listener pool shared by all
-    # worker processes.  For per-worker listeners, see the after_fork example
-    # This is for internal API use only, do not use it in your Unicorn
-    # config file.  Use listen instead.
-    def listeners(addresses) # :nodoc:
-      Array === addresses or addresses = Array(addresses)
-      addresses.map! { |addr| expand_addr(addr) }
-      set[:listeners] = addresses
-    end
+  # sets the current number of worker_processes to +nr+.  Each worker
+  # process will serve exactly one client at a time.  You can
+  # increment or decrement this value at runtime by sending SIGTTIN
+  # or SIGTTOU respectively to the master process without reloading
+  # the rest of your Unicorn configuration.  See the SIGNALS document
+  # for more information.
+  def worker_processes(nr)
+    Integer === nr or raise ArgumentError,
+                           "not an integer: worker_processes=#{nr.inspect}"
+    nr >= 0 or raise ArgumentError,
+                           "not non-negative: worker_processes=#{nr.inspect}"
+    set[:worker_processes] = nr
+  end
+
+  # sets listeners to the given +addresses+, replacing or augmenting the
+  # current set.  This is for the global listener pool shared by all
+  # worker processes.  For per-worker listeners, see the after_fork example
+  # This is for internal API use only, do not use it in your Unicorn
+  # config file.  Use listen instead.
+  def listeners(addresses) # :nodoc:
+    Array === addresses or addresses = Array(addresses)
+    addresses.map! { |addr| expand_addr(addr) }
+    set[:listeners] = addresses
+  end
 
-    # adds an +address+ to the existing listener set.
-    #
-    # The following options may be specified (but are generally not needed):
-    #
-    # +:backlog+: this is the backlog of the listen() syscall.
-    #
-    # Some operating systems allow negative values here to specify the
-    # maximum allowable value.  In most cases, this number is only
-    # recommendation and there are other OS-specific tunables and
-    # variables that can affect this number.  See the listen(2)
-    # syscall documentation of your OS for the exact semantics of
-    # this.
-    #
-    # If you are running unicorn on multiple machines, lowering this number
-    # can help your load balancer detect when a machine is overloaded
-    # and give requests to a different machine.
-    #
-    # Default: 1024
-    #
-    # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
-    #
-    # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
-    # can be set via the setsockopt(2) syscall.  Some kernels
-    # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
-    # there is no need (and it is sometimes detrimental) to specify them.
-    #
-    # See the socket API documentation of your operating system
-    # to determine the exact semantics of these settings and
-    # other operating system-specific knobs where they can be
-    # specified.
-    #
-    # Defaults: operating system defaults
-    #
-    # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
-    #
-    # This has no effect on UNIX sockets.
-    #
-    # Default: operating system defaults (usually Nagle's algorithm enabled)
-    #
-    # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
-    #
-    # This will prevent partial TCP frames from being sent out.
-    # Enabling +tcp_nopush+ is generally not needed or recommended as
-    # controlling +tcp_nodelay+ already provides sufficient latency
-    # reduction whereas Unicorn does not know when the best times are
-    # for flushing corked sockets.
-    #
-    # This has no effect on UNIX sockets.
-    #
-    # +:tries+: times to retry binding a socket if it is already in use
-    #
-    # A negative number indicates we will retry indefinitely, this is
-    # useful for migrations and upgrades when individual workers
-    # are binding to different ports.
-    #
-    # Default: 5
-    #
-    # +:delay+: seconds to wait between successive +tries+
-    #
-    # Default: 0.5 seconds
-    #
-    # +:umask+: sets the file mode creation mask for UNIX sockets
-    #
-    # Typically UNIX domain sockets are created with more liberal
-    # file permissions than the rest of the application.  By default,
-    # we create UNIX domain sockets to be readable and writable by
-    # all local users to give them the same accessibility as
-    # locally-bound TCP listeners.
-    #
-    # This has no effect on TCP listeners.
-    #
-    # Default: 0 (world read/writable)
-    def listen(address, opt = {})
-      address = expand_addr(address)
-      if String === address
-        [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
-          value = opt[key] or next
-          Integer === value or
-            raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
-        end
-        [ :tcp_nodelay, :tcp_nopush ].each do |key|
-          (value = opt[key]).nil? and next
-          TrueClass === value || FalseClass === value or
-            raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
-        end
-        unless (value = opt[:delay]).nil?
-          Numeric === value or
-            raise ArgumentError, "not numeric: delay=#{value.inspect}"
-        end
-        set[:listener_opts][address].merge!(opt)
+  # adds an +address+ to the existing listener set.
+  #
+  # The following options may be specified (but are generally not needed):
+  #
+  # +:backlog+: this is the backlog of the listen() syscall.
+  #
+  # Some operating systems allow negative values here to specify the
+  # maximum allowable value.  In most cases, this number is only
+  # recommendation and there are other OS-specific tunables and
+  # variables that can affect this number.  See the listen(2)
+  # syscall documentation of your OS for the exact semantics of
+  # this.
+  #
+  # If you are running unicorn on multiple machines, lowering this number
+  # can help your load balancer detect when a machine is overloaded
+  # and give requests to a different machine.
+  #
+  # Default: 1024
+  #
+  # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
+  #
+  # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
+  # can be set via the setsockopt(2) syscall.  Some kernels
+  # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
+  # there is no need (and it is sometimes detrimental) to specify them.
+  #
+  # See the socket API documentation of your operating system
+  # to determine the exact semantics of these settings and
+  # other operating system-specific knobs where they can be
+  # specified.
+  #
+  # Defaults: operating system defaults
+  #
+  # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
+  #
+  # This has no effect on UNIX sockets.
+  #
+  # Default: operating system defaults (usually Nagle's algorithm enabled)
+  #
+  # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
+  #
+  # This will prevent partial TCP frames from being sent out.
+  # Enabling +tcp_nopush+ is generally not needed or recommended as
+  # controlling +tcp_nodelay+ already provides sufficient latency
+  # reduction whereas Unicorn does not know when the best times are
+  # for flushing corked sockets.
+  #
+  # This has no effect on UNIX sockets.
+  #
+  # +:tries+: times to retry binding a socket if it is already in use
+  #
+  # A negative number indicates we will retry indefinitely, this is
+  # useful for migrations and upgrades when individual workers
+  # are binding to different ports.
+  #
+  # Default: 5
+  #
+  # +:delay+: seconds to wait between successive +tries+
+  #
+  # Default: 0.5 seconds
+  #
+  # +:umask+: sets the file mode creation mask for UNIX sockets
+  #
+  # Typically UNIX domain sockets are created with more liberal
+  # file permissions than the rest of the application.  By default,
+  # we create UNIX domain sockets to be readable and writable by
+  # all local users to give them the same accessibility as
+  # locally-bound TCP listeners.
+  #
+  # This has no effect on TCP listeners.
+  #
+  # Default: 0 (world read/writable)
+  #
+  # +:tcp_defer_accept+: defer accept() until data is ready (Linux-only)
+  #
+  # For Linux 2.6.32 and later, this is the number of retransmits to
+  # defer an accept() for if no data arrives, but the client will
+  # eventually be accepted after the specified number of retransmits
+  # regardless of whether data is ready.
+  #
+  # For Linux before 2.6.32, this is a boolean option, and
+  # accepts are _always_ deferred indefinitely if no data arrives.
+  # This is similar to <code>:accept_filter => "dataready"</code>
+  # under FreeBSD.
+  #
+  # Specifying +true+ is synonymous for the default value(s) below,
+  # and +false+ or +nil+ is synonymous for a value of zero.
+  #
+  # A value of +1+ is a good optimization for local networks
+  # and trusted clients.  For Rainbows! and Zbatery users, a higher
+  # value (e.g. +60+) provides more protection against some
+  # denial-of-service attacks.  There is no good reason to ever
+  # disable this with a +zero+ value when serving HTTP.
+  #
+  # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+
+  #
+  # +:accept_filter+: defer accept() until data is ready (FreeBSD-only)
+  #
+  # This enables either the "dataready" or (default) "httpready"
+  # accept() filter under FreeBSD.  This is intended as an
+  # optimization to reduce context switches with common GET/HEAD
+  # requests.  For Rainbows! and Zbatery users, this provides
+  # some protection against certain denial-of-service attacks, too.
+  #
+  # There is no good reason to change from the default.
+  #
+  # Default: "httpready"
+  def listen(address, opt = {})
+    address = expand_addr(address)
+    if String === address
+      [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
+        value = opt[key] or next
+        Integer === value or
+          raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
+      end
+      [ :tcp_nodelay, :tcp_nopush ].each do |key|
+        (value = opt[key]).nil? and next
+        TrueClass === value || FalseClass === value or
+          raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
+      end
+      unless (value = opt[:delay]).nil?
+        Numeric === value or
+          raise ArgumentError, "not numeric: delay=#{value.inspect}"
       end
+      set[:listener_opts][address].merge!(opt)
+    end
+
+    set[:listeners] << address
+  end
 
-      set[:listeners] << address
+  # sets the +path+ for the PID file of the unicorn master process
+  def pid(path); set_path(:pid, path); end
+
+  # Enabling this preloads an application before forking worker
+  # processes.  This allows memory savings when using a
+  # copy-on-write-friendly GC but can cause bad things to happen when
+  # resources like sockets are opened at load time by the master
+  # process and shared by multiple children.  People enabling this are
+  # highly encouraged to look at the before_fork/after_fork hooks to
+  # properly close/reopen sockets.  Files opened for logging do not
+  # have to be reopened as (unbuffered-in-userspace) files opened with
+  # the File::APPEND flag are written to atomically on UNIX.
+  #
+  # In addition to reloading the unicorn-specific config settings,
+  # SIGHUP will reload application code in the working
+  # directory/symlink when workers are gracefully restarted when
+  # preload_app=false (the default).  As reloading the application
+  # sometimes requires RubyGems updates, +Gem.refresh+ is always
+  # called before the application is loaded (for RubyGems users).
+  #
+  # During deployments, care should _always_ be taken to ensure your
+  # applications are properly deployed and running.  Using
+  # preload_app=false (the default) means you _must_ check if
+  # your application is responding properly after a deployment.
+  # Improperly deployed applications can go into a spawn loop
+  # if the application fails to load.  While your children are
+  # in a spawn loop, it is possible to fix an application
+  # by properly deploying all required code and dependencies.
+  # Using preload_app=true means any application load error will
+  # cause the master process to exit with an error.
+
+  def preload_app(bool)
+    case bool
+    when TrueClass, FalseClass
+      set[:preload_app] = bool
+    else
+      raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
     end
+  end
 
-    # sets the +path+ for the PID file of the unicorn master process
-    def pid(path); set_path(:pid, path); end
-
-    # Enabling this preloads an application before forking worker
-    # processes.  This allows memory savings when using a
-    # copy-on-write-friendly GC but can cause bad things to happen when
-    # resources like sockets are opened at load time by the master
-    # process and shared by multiple children.  People enabling this are
-    # highly encouraged to look at the before_fork/after_fork hooks to
-    # properly close/reopen sockets.  Files opened for logging do not
-    # have to be reopened as (unbuffered-in-userspace) files opened with
-    # the File::APPEND flag are written to atomically on UNIX.
-    #
-    # In addition to reloading the unicorn-specific config settings,
-    # SIGHUP will reload application code in the working
-    # directory/symlink when workers are gracefully restarted when
-    # preload_app=false (the default).  As reloading the application
-    # sometimes requires RubyGems updates, +Gem.refresh+ is always
-    # called before the application is loaded (for RubyGems users).
-    #
-    # During deployments, care should _always_ be taken to ensure your
-    # applications are properly deployed and running.  Using
-    # preload_app=false (the default) means you _must_ check if
-    # your application is responding properly after a deployment.
-    # Improperly deployed applications can go into a spawn loop
-    # if the application fails to load.  While your children are
-    # in a spawn loop, it is is possible to fix an application
-    # by properly deploying all required code and dependencies.
-    # Using preload_app=true means any application load error will
-    # cause the master process to exit with an error.
-
-    def preload_app(bool)
-      case bool
-      when TrueClass, FalseClass
-        set[:preload_app] = bool
-      else
-        raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
-      end
+  # Allow redirecting $stderr to a given path.  Unlike doing this from
+  # the shell, this allows the unicorn process to know the path it's
+  # writing to and rotate the file if it is used for logging.  The
+  # file will be opened with the File::APPEND flag and writes
+  # synchronized to the kernel (but not necessarily to _disk_) so
+  # multiple processes can safely append to it.
+  #
+  # If you are daemonizing and using the default +logger+, it is important
+  # to specify this as errors will otherwise be lost to /dev/null.
+  # Some applications/libraries may also trigger warnings that go to
+  # stderr, and they will end up here.
+  def stderr_path(path)
+    set_path(:stderr_path, path)
+  end
+
+  # Same as stderr_path, except for $stdout.  Not many Rack applications
+  # write to $stdout, but any that do will have their output written here.
+  # It is safe to point this to the same location as stderr_path.
+  # Like stderr_path, this defaults to /dev/null when daemonized.
+  def stdout_path(path)
+    set_path(:stdout_path, path)
+  end
+
+  # sets the working directory for Unicorn.  This ensures SIGUSR2 will
+  # start a new instance of Unicorn in this directory.  This may be
+  # a symlink, a common scenario for Capistrano users.
+  def working_directory(path)
+    # just let chdir raise errors
+    path = File.expand_path(path)
+    if config_file &&
+       config_file[0] != ?/ &&
+       ! File.readable?("#{path}/#{config_file}")
+      raise ArgumentError,
+            "config_file=#{config_file} would not be accessible in" \
+            " working_directory=#{path}"
     end
+    Dir.chdir(path)
+    Unicorn::HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
+  end
+
+  # Runs worker processes as the specified +user+ and +group+.
+  # The master process always stays running as the user who started it.
+  # This switch will occur after calling the after_fork hook, and only
+  # if the Worker#user method is not called in the after_fork hook
+  def user(user, group = nil)
+    # raises ArgumentError on invalid user/group
+    Etc.getpwnam(user)
+    Etc.getgrnam(group) if group
+    set[:user] = [ user, group ]
+  end
 
-    # Allow redirecting $stderr to a given path.  Unlike doing this from
-    # the shell, this allows the unicorn process to know the path its
-    # writing to and rotate the file if it is used for logging.  The
-    # file will be opened with the File::APPEND flag and writes
-    # synchronized to the kernel (but not necessarily to _disk_) so
-    # multiple processes can safely append to it.
-    #
-    # If you are daemonizing and using the default +logger+, it is important
-    # to specify this as errors will otherwise be lost to /dev/null.
-    # Some applications/libraries may also triggering warnings that go to
-    # stderr, and they will end up here.
-    def stderr_path(path)
-      set_path(:stderr_path, path)
+  # expands "unix:path/to/foo" to a socket relative to the current path
+  # expands pathnames of sockets if relative to "~" or "~username"
+  # expands "*:port" and ":port" to "0.0.0.0:port"
+  def expand_addr(address) #:nodoc:
+    return "0.0.0.0:#{address}" if Integer === address
+    return address unless String === address
+
+    case address
+    when %r{\Aunix:(.*)\z}
+      File.expand_path($1)
+    when %r{\A~}
+      File.expand_path(address)
+    when %r{\A(?:\*:)?(\d+)\z}
+      "0.0.0.0:#$1"
+    when %r{\A(.*):(\d+)\z}
+      # canonicalize the name
+      packed = Socket.pack_sockaddr_in($2.to_i, $1)
+      Socket.unpack_sockaddr_in(packed).reverse!.join(':')
+    else
+      address
     end
+  end
+
+private
 
-    # Same as stderr_path, except for $stdout.  Not many Rack applications
-    # write to $stdout, but any that do will have their output written here.
-    # It is safe to point this to the same location a stderr_path.
-    # Like stderr_path, this defaults to /dev/null when daemonized.
-    def stdout_path(path)
-      set_path(:stdout_path, path)
+  def set_path(var, path) #:nodoc:
+    case path
+    when NilClass, String
+      set[var] = path
+    else
+      raise ArgumentError
     end
+  end
 
-    # sets the working directory for Unicorn.  This ensures SIGUSR2 will
-    # start a new instance of Unicorn in this directory.  This may be
-    # a symlink, a common scenario for Capistrano users.
-    def working_directory(path)
-      # just let chdir raise errors
-      path = File.expand_path(path)
-      if config_file &&
-         config_file[0] != ?/ &&
-         ! File.readable?("#{path}/#{config_file}")
+  def set_hook(var, my_proc, req_arity = 2) #:nodoc:
+    case my_proc
+    when Proc
+      arity = my_proc.arity
+      (arity == req_arity) or \
         raise ArgumentError,
-              "config_file=#{config_file} would not be accessible in" \
-              " working_directory=#{path}"
-      end
-      Dir.chdir(path)
-      HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
+              "#{var}=#{my_proc.inspect} has invalid arity: " \
+              "#{arity} (need #{req_arity})"
+    when NilClass
+      my_proc = DEFAULTS[var]
+    else
+      raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
     end
+    set[var] = my_proc
+  end
 
-    # Runs worker processes as the specified +user+ and +group+.
-    # The master process always stays running as the user who started it.
-    # This switch will occur after calling the after_fork hook, and only
-    # if the Worker#user method is not called in the after_fork hook
-    def user(user, group = nil)
-      # raises ArgumentError on invalid user/group
-      Etc.getpwnam(user)
-      Etc.getgrnam(group) if group
-      set[:user] = [ user, group ]
-    end
+  # this is called _after_ working_directory is bound.  This only
+  # parses the embedded switches in .ru files
+  # (for "rackup" compatibility)
+  def parse_rackup_file # :nodoc:
+    ru = RACKUP[:file] or return # we only return here in unit tests
 
-    # expands "unix:path/to/foo" to a socket relative to the current path
-    # expands pathnames of sockets if relative to "~" or "~username"
-    # expands "*:port and ":port" to "0.0.0.0:port"
-    def expand_addr(address) #:nodoc
-      return "0.0.0.0:#{address}" if Integer === address
-      return address unless String === address
-
-      case address
-      when %r{\Aunix:(.*)\z}
-        File.expand_path($1)
-      when %r{\A~}
-        File.expand_path(address)
-      when %r{\A(?:\*:)?(\d+)\z}
-        "0.0.0.0:#$1"
-      when %r{\A(.*):(\d+)\z}
-        # canonicalize the name
-        packed = Socket.pack_sockaddr_in($2.to_i, $1)
-        Socket.unpack_sockaddr_in(packed).reverse!.join(':')
-      else
-        address
-      end
+    # :rails means use (old) Rails autodetect
+    if ru == :rails
+      File.readable?('config.ru') or return
+      ru = 'config.ru'
     end
 
-  private
+    File.readable?(ru) or
+      raise ArgumentError, "rackup file (#{ru}) not readable"
 
-    def set_path(var, path) #:nodoc:
-      case path
-      when NilClass, String
-        set[var] = path
-      else
-        raise ArgumentError
-      end
-    end
+    # it could be a .rb file, too, we don't parse those manually
+    ru =~ /\.ru\z/ or return
 
-    def set_hook(var, my_proc, req_arity = 2) #:nodoc:
-      case my_proc
-      when Proc
-        arity = my_proc.arity
-        (arity == req_arity) or \
-          raise ArgumentError,
-                "#{var}=#{my_proc.inspect} has invalid arity: " \
-                "#{arity} (need #{req_arity})"
-      when NilClass
-        my_proc = DEFAULTS[var]
-      else
-        raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
-      end
-      set[var] = my_proc
-    end
+    /^#\\(.*)/ =~ File.read(ru) or return
+    RACKUP[:optparse].parse!($1.split(/\s+/))
 
-    # this is called _after_ working_directory is bound.  This only
-    # parses the embedded switches in .ru files
-    # (for "rackup" compatibility)
-    def parse_rackup_file # :nodoc:
-      ru = RACKUP[:file] or return # we only return here in unit tests
+    # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
+    host, port, set_listener, options, daemonize =
+                    eval("[ host, port, set_listener, options, daemonize ]",
+                         TOPLEVEL_BINDING)
 
-      # :rails means use (old) Rails autodetect
-      if ru == :rails
-        File.readable?('config.ru') or return
-        ru = 'config.ru'
-      end
+    # XXX duplicate code from bin/unicorn{,_rails}
+    set[:listeners] << "#{host}:#{port}" if set_listener
 
-      File.readable?(ru) or
-        raise ArgumentError, "rackup file (#{ru}) not readable"
-
-      # it could be a .rb file, too, we don't parse those manually
-      ru =~ /\.ru\z/ or return
-
-      /^#\\(.*)/ =~ File.read(ru) or return
-      RACKUP[:optparse].parse!($1.split(/\s+/))
-
-      # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
-      host, port, set_listener, options, daemonize =
-                      eval("[ host, port, set_listener, options, daemonize ]",
-                           TOPLEVEL_BINDING)
-
-      # XXX duplicate code from bin/unicorn{,_rails}
-      set[:listeners] << "#{host}:#{port}" if set_listener
-
-      if daemonize
-        # unicorn_rails wants a default pid path, (not plain 'unicorn')
-        if after_reload
-          spid = set[:pid]
-          pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
-        end
-        unless RACKUP[:daemonized]
-          Unicorn::Launcher.daemonize!(options)
-          RACKUP[:ready_pipe] = options.delete(:ready_pipe)
-        end
+    if daemonize
+      # unicorn_rails wants a default pid path, (not plain 'unicorn')
+      if after_reload
+        spid = set[:pid]
+        pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
+      end
+      unless RACKUP[:daemonized]
+        Unicorn::Launcher.daemonize!(options)
+        RACKUP[:ready_pipe] = options.delete(:ready_pipe)
       end
     end
-
   end
 end
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 51a8a3b..52fe201 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -8,8 +8,8 @@ module Unicorn
   # Symbols did not really improve things much compared to constants.
   module Const
 
-    # The current version of Unicorn, currently 1.0.1
-    UNICORN_VERSION="1.0.1"
+    # The current version of Unicorn, currently 1.1.1
+    UNICORN_VERSION="1.1.1"
 
     DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
     DEFAULT_PORT = 8080      # default TCP listen port
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index 65b09fa..65870ed 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -1,6 +1,5 @@
 # -*- encoding: binary -*-
 
-require 'stringio'
 require 'unicorn_http'
 
 module Unicorn
@@ -53,7 +52,7 @@ module Unicorn
       #  that client may be a proxy, gateway, or other intermediary
       #  acting on behalf of the actual source client."
       REQ[Const::REMOTE_ADDR] =
-                    TCPSocket === socket ? socket.peeraddr.last : LOCALHOST
+                    TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
 
       # short circuit the common case with small GET requests first
       if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 96e484b..6f1cd48 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -1,75 +1,70 @@
 # -*- encoding: binary -*-
-
 require 'time'
 
-module Unicorn
-  # Writes a Rack response to your client using the HTTP/1.1 specification.
-  # You use it by simply doing:
-  #
-  #   status, headers, body = rack_app.call(env)
-  #   HttpResponse.write(socket, [ status, headers, body ])
-  #
-  # Most header correctness (including Content-Length and Content-Type)
-  # is the job of Rack, with the exception of the "Connection: close"
-  # and "Date" headers.
-  #
-  # A design decision was made to force the client to not pipeline or
-  # keepalive requests.  HTTP/1.1 pipelining really kills the
-  # performance due to how it has to be handled and how unclear the
-  # standard is.  To fix this the HttpResponse always gives a
-  # "Connection: close" header which forces the client to close right
-  # away.  The bonus for this is that it gives a pretty nice speed boost
-  # to most clients since they can close their connection immediately.
-
-  class HttpResponse
+# Writes a Rack response to your client using the HTTP/1.1 specification.
+# You use it by simply doing:
+#
+#   status, headers, body = rack_app.call(env)
+#   HttpResponse.write(socket, [ status, headers, body ])
+#
+# Most header correctness (including Content-Length and Content-Type)
+# is the job of Rack, with the exception of the "Connection: close"
+# and "Date" headers.
+#
+# A design decision was made to force the client to not pipeline or
+# keepalive requests.  HTTP/1.1 pipelining really kills the
+# performance due to how it has to be handled and how unclear the
+# standard is.  To fix this the HttpResponse always gives a
+# "Connection: close" header which forces the client to close right
+# away.  The bonus for this is that it gives a pretty nice speed boost
+# to most clients since they can close their connection immediately.
+module Unicorn::HttpResponse
 
-    # Every standard HTTP code mapped to the appropriate message.
-    CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
-      hash[code] = "#{code} #{msg}"
-      hash
-    }
+  # Every standard HTTP code mapped to the appropriate message.
+  CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
+    hash[code] = "#{code} #{msg}"
+    hash
+  }
 
-    # Rack does not set/require a Date: header.  We always override the
-    # Connection: and Date: headers no matter what (if anything) our
-    # Rack application sent us.
-    SKIP = { 'connection' => true, 'date' => true, 'status' => true }
+  # Rack does not set/require a Date: header.  We always override the
+  # Connection: and Date: headers no matter what (if anything) our
+  # Rack application sent us.
+  SKIP = { 'connection' => true, 'date' => true, 'status' => true }
 
-    # writes the rack_response to socket as an HTTP response
-    def self.write(socket, rack_response, have_header = true)
-      status, headers, body = rack_response
+  # writes the rack_response to socket as an HTTP response
+  def self.write(socket, rack_response, have_header = true)
+    status, headers, body = rack_response
 
-      if have_header
-        status = CODES[status.to_i] || status
-        out = []
+    if have_header
+      status = CODES[status.to_i] || status
+      out = []
 
-        # Don't bother enforcing duplicate supression, it's a Hash most of
-        # the time anyways so just hope our app knows what it's doing
-        headers.each do |key, value|
-          next if SKIP.include?(key.downcase)
-          if value =~ /\n/
-            # avoiding blank, key-only cookies with /\n+/
-            out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
-          else
-            out << "#{key}: #{value}\r\n"
-          end
+      # Don't bother enforcing duplicate suppression, it's a Hash most of
+      # the time anyways so just hope our app knows what it's doing
+      headers.each do |key, value|
+        next if SKIP.include?(key.downcase)
+        if value =~ /\n/
+          # avoiding blank, key-only cookies with /\n+/
+          out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+        else
+          out << "#{key}: #{value}\r\n"
         end
-
-        # Rack should enforce Content-Length or chunked transfer encoding,
-        # so don't worry or care about them.
-        # Date is required by HTTP/1.1 as long as our clock can be trusted.
-        # Some broken clients require a "Status" header so we accomodate them
-        socket.write("HTTP/1.1 #{status}\r\n" \
-                     "Date: #{Time.now.httpdate}\r\n" \
-                     "Status: #{status}\r\n" \
-                     "Connection: close\r\n" \
-                     "#{out.join('')}\r\n")
       end
 
-      body.each { |chunk| socket.write(chunk) }
-      socket.close # flushes and uncorks the socket immediately
-      ensure
-        body.respond_to?(:close) and body.close
+      # Rack should enforce Content-Length or chunked transfer encoding,
+      # so don't worry or care about them.
+      # Date is required by HTTP/1.1 as long as our clock can be trusted.
+      # Some broken clients require a "Status" header so we accommodate them
+      socket.write("HTTP/1.1 #{status}\r\n" \
+                   "Date: #{Time.now.httpdate}\r\n" \
+                   "Status: #{status}\r\n" \
+                   "Connection: close\r\n" \
+                   "#{out.join('')}\r\n")
     end
 
+    body.each { |chunk| socket.write(chunk) }
+    socket.close # flushes and uncorks the socket immediately
+    ensure
+      body.respond_to?(:close) and body.close
   end
 end
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index 9a4266d..9a155e1 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -1,11 +1,28 @@
 # -*- encoding: binary -*-
-
+# :enddoc:
 require 'socket'
 
 module Unicorn
   module SocketHelper
     include Socket::Constants
 
+    # :stopdoc:
+    # internal interface, only used by Rainbows!/Zbatery
+    DEFAULTS = {
+      # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+
+      # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9
+      # This change shouldn't affect Unicorn users behind nginx (a
+      # value of 1 remains an optimization), but Rainbows! users may
+      # want to use a higher value on Linux 2.6.32+ to protect against
+      # denial-of-service attacks
+      :tcp_defer_accept => 1,
+
+      # FreeBSD, we need to override this to 'dataready' when we
+      # eventually get HTTPS support
+      :accept_filter => 'httpready',
+    }
+    #:startdoc:
+
     # configure platform-specific options (only tested on Linux 2.6 so far)
     case RUBY_PLATFORM
     when /linux/
@@ -14,22 +31,13 @@ module Unicorn
 
       # do not send out partial frames (Linux)
       TCP_CORK = 3 unless defined?(TCP_CORK)
-    when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
-      # Do nothing for httpready, just closing a bug when freebsd <= 5.4
-      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc:
     when /freebsd/
       # do not send out partial frames (FreeBSD)
       TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
 
-      # Use the HTTP accept filter if available.
-      # The struct made by pack() is defined in /usr/include/sys/socket.h
-      # as accept_filter_arg
-      unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
-        # set set the "httpready" accept filter in FreeBSD if available
-        # if other protocols are to be supported, this may be
-        # String#replace-d with "dataready" arguments instead
-        FILTER_ARG = ['httpready', nil].pack('a16a240')
-      end
+      def accf_arg(af_name)
+        [ af_name, nil ].pack('a16a240')
+      end if defined?(SO_ACCEPTFILTER)
     end
 
     def set_tcp_sockopt(sock, opt)
@@ -49,10 +57,25 @@ module Unicorn
       end
 
       # No good reason to ever have deferred accepts off
+      # (except maybe benchmarking)
       if defined?(TCP_DEFER_ACCEPT)
-        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1)
-      elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG)
-        sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG)
+        # this differs from nginx, since nginx doesn't allow us to
+        # configure the timeout...
+        tmp = DEFAULTS.merge(opt)
+        seconds = tmp[:tcp_defer_accept]
+        seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
+        seconds = 0 unless seconds # nil/false means disable this
+        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
+      elsif respond_to?(:accf_arg)
+        tmp = DEFAULTS.merge(opt)
+        if name = tmp[:accept_filter]
+          begin
+            sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+          rescue => e
+            logger.error("#{sock_name(sock)} " \
+                         "failed to set accept_filter=#{name} (#{e.inspect})")
+          end
+        end
       end
     end
 
@@ -69,14 +92,11 @@ module Unicorn
       end
       sock.listen(opt[:backlog] || 1024)
       rescue => e
-        if respond_to?(:logger)
-          logger.error "error setting socket options: #{e.inspect}"
-          logger.error e.backtrace.join("\n")
-        end
+        logger.error "error setting socket options: #{e.inspect}"
+        logger.error e.backtrace.join("\n")
     end
 
     def log_buffer_sizes(sock, pfx = '')
-      respond_to?(:logger) or return
       rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
       sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
       logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
@@ -91,9 +111,7 @@ module Unicorn
       sock = if address[0] == ?/
         if File.exist?(address)
           if File.socket?(address)
-            if self.respond_to?(:logger)
-              logger.info "unlinking existing socket=#{address}"
-            end
+            logger.info "unlinking existing socket=#{address}"
             File.unlink(address)
           else
             raise ArgumentError,
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
index 563747c..540cfe0 100644
--- a/lib/unicorn/tee_input.rb
+++ b/lib/unicorn/tee_input.rb
@@ -1,224 +1,232 @@
 # -*- encoding: binary -*-
 
-module Unicorn
-
-  # acts like tee(1) on an input input to provide a input-like stream
-  # while providing rewindable semantics through a File/StringIO backing
-  # store.  On the first pass, the input is only read on demand so your
-  # Rack application can use input notification (upload progress and
-  # like).  This should fully conform to the Rack::Lint::InputWrapper
-  # specification on the public API.  This class is intended to be a
-  # strict interpretation of Rack::Lint::InputWrapper functionality and
-  # will not support any deviations from it.
-  #
-  # When processing uploads, Unicorn exposes a TeeInput object under
-  # "rack.input" of the Rack environment.
-  class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2)
-
-    # Initializes a new TeeInput object.  You normally do not have to call
-    # this unless you are writing an HTTP server.
-    def initialize(*args)
-      super(*args)
-      self.len = parser.content_length
-      self.tmp = len && len < Const::MAX_BODY ? StringIO.new("") : Util.tmpio
-      self.buf2 = ""
-      if buf.size > 0
-        parser.filter_body(buf2, buf) and finalize_input
-        tmp.write(buf2)
-        tmp.seek(0)
-      end
+# acts like tee(1) on an input to provide an input-like stream
+# while providing rewindable semantics through a File/StringIO backing
+# store.  On the first pass, the input is only read on demand so your
+# Rack application can use input notification (upload progress and
+# like).  This should fully conform to the Rack::Lint::InputWrapper
+# specification on the public API.  This class is intended to be a
+# strict interpretation of Rack::Lint::InputWrapper functionality and
+# will not support any deviations from it.
+#
+# When processing uploads, Unicorn exposes a TeeInput object under
+# "rack.input" of the Rack environment.
+class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
+                                     :buf, :len, :tmp, :buf2)
+
+  # The maximum size (in +bytes+) to buffer in memory before
+  # resorting to a temporary file.  Default is 112 kilobytes.
+  @@client_body_buffer_size = Unicorn::Const::MAX_BODY
+
+  # The I/O chunk size (in +bytes+) for I/O operations where
+  # the size cannot be user-specified when a method is called.
+  # The default is 16 kilobytes.
+  @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+
+  # Initializes a new TeeInput object.  You normally do not have to call
+  # this unless you are writing an HTTP server.
+  def initialize(*args)
+    super(*args)
+    self.len = parser.content_length
+    self.tmp = len && len < @@client_body_buffer_size ?
+               StringIO.new("") : Unicorn::Util.tmpio
+    self.buf2 = ""
+    if buf.size > 0
+      parser.filter_body(buf2, buf) and finalize_input
+      tmp.write(buf2)
+      tmp.rewind
     end
+  end
 
-    # :call-seq:
-    #   ios.size  => Integer
-    #
-    # Returns the size of the input.  For requests with a Content-Length
-    # header value, this will not read data off the socket and just return
-    # the value of the Content-Length header as an Integer.
-    #
-    # For Transfer-Encoding:chunked requests, this requires consuming
-    # all of the input stream before returning since there's no other
-    # way to determine the size of the request body beforehand.
-    #
-    # This method is no longer part of the Rack specification as of
-    # Rack 1.2, so its use is not recommended.  This method only exists
-    # for compatibility with Rack applications designed for Rack 1.1 and
-    # earlier.  Most applications should only need to call +read+ with a
-    # specified +length+ in a loop until it returns +nil+.
-    def size
-      len and return len
-
-      if socket
-        pos = tmp.pos
-        while tee(Const::CHUNK_SIZE, buf2)
-        end
-        tmp.seek(pos)
+  # :call-seq:
+  #   ios.size  => Integer
+  #
+  # Returns the size of the input.  For requests with a Content-Length
+  # header value, this will not read data off the socket and just return
+  # the value of the Content-Length header as an Integer.
+  #
+  # For Transfer-Encoding:chunked requests, this requires consuming
+  # all of the input stream before returning since there's no other
+  # way to determine the size of the request body beforehand.
+  #
+  # This method is no longer part of the Rack specification as of
+  # Rack 1.2, so its use is not recommended.  This method only exists
+  # for compatibility with Rack applications designed for Rack 1.1 and
+  # earlier.  Most applications should only need to call +read+ with a
+  # specified +length+ in a loop until it returns +nil+.
+  def size
+    len and return len
+
+    if socket
+      pos = tmp.pos
+      while tee(@@io_chunk_size, buf2)
       end
-
-      self.len = tmp.size
+      tmp.seek(pos)
     end
 
-    # :call-seq:
-    #   ios.read([length [, buffer ]]) => string, buffer, or nil
-    #
-    # Reads at most length bytes from the I/O stream, or to the end of
-    # file if length is omitted or is nil. length must be a non-negative
-    # integer or nil. If the optional buffer argument is present, it
-    # must reference a String, which will receive the data.
-    #
-    # At end of file, it returns nil or "" depend on length.
-    # ios.read() and ios.read(nil) returns "".
-    # ios.read(length [, buffer]) returns nil.
-    #
-    # If the Content-Length of the HTTP request is known (as is the common
-    # case for POST requests), then ios.read(length [, buffer]) will block
-    # until the specified length is read (or it is the last chunk).
-    # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
-    # ios.read(length [, buffer]) will return immediately if there is
-    # any data and only block when nothing is available (providing
-    # IO#readpartial semantics).
-    def read(*args)
-      socket or return tmp.read(*args)
-
-      length = args.shift
-      if nil == length
-        rv = tmp.read || ""
-        while tee(Const::CHUNK_SIZE, buf2)
-          rv << buf2
-        end
-        rv
+    self.len = tmp.size
+  end
+
+  # :call-seq:
+  #   ios.read([length [, buffer ]]) => string, buffer, or nil
+  #
+  # Reads at most length bytes from the I/O stream, or to the end of
+  # file if length is omitted or is nil. length must be a non-negative
+  # integer or nil. If the optional buffer argument is present, it
+  # must reference a String, which will receive the data.
+  #
+  # At end of file, it returns nil or "" depending on length.
+  # ios.read() and ios.read(nil) return "".
+  # ios.read(length [, buffer]) returns nil.
+  #
+  # If the Content-Length of the HTTP request is known (as is the common
+  # case for POST requests), then ios.read(length [, buffer]) will block
+  # until the specified length is read (or it is the last chunk).
+  # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+  # ios.read(length [, buffer]) will return immediately if there is
+  # any data and only block when nothing is available (providing
+  # IO#readpartial semantics).
+  def read(*args)
+    socket or return tmp.read(*args)
+
+    length = args.shift
+    if nil == length
+      rv = tmp.read || ""
+      while tee(@@io_chunk_size, buf2)
+        rv << buf2
+      end
+      rv
+    else
+      rv = args.shift || ""
+      diff = tmp.size - tmp.pos
+      if 0 == diff
+        ensure_length(tee(length, rv), length)
       else
-        rv = args.shift || ""
-        diff = tmp.size - tmp.pos
-        if 0 == diff
-          ensure_length(tee(length, rv), length)
-        else
-          ensure_length(tmp.read(diff > length ? length : diff, rv), length)
-        end
+        ensure_length(tmp.read(diff > length ? length : diff, rv), length)
       end
     end
+  end
 
-    # :call-seq:
-    #   ios.gets   => string or nil
-    #
-    # Reads the next ``line'' from the I/O stream; lines are separated
-    # by the global record separator ($/, typically "\n"). A global
-    # record separator of nil reads the entire unread contents of ios.
-    # Returns nil if called at the end of file.
-    # This takes zero arguments for strict Rack::Lint compatibility,
-    # unlike IO#gets.
-    def gets
-      socket or return tmp.gets
-      sep = $/ or return read
-
-      orig_size = tmp.size
-      if tmp.pos == orig_size
-        tee(Const::CHUNK_SIZE, buf2) or return nil
-        tmp.seek(orig_size)
-      end
+  # :call-seq:
+  #   ios.gets   => string or nil
+  #
+  # Reads the next ``line'' from the I/O stream; lines are separated
+  # by the global record separator ($/, typically "\n"). A global
+  # record separator of nil reads the entire unread contents of ios.
+  # Returns nil if called at the end of file.
+  # This takes zero arguments for strict Rack::Lint compatibility,
+  # unlike IO#gets.
+  def gets
+    socket or return tmp.gets
+    sep = $/ or return read
+
+    orig_size = tmp.size
+    if tmp.pos == orig_size
+      tee(@@io_chunk_size, buf2) or return nil
+      tmp.seek(orig_size)
+    end
 
-      sep_size = Rack::Utils.bytesize(sep)
-      line = tmp.gets # cannot be nil here since size > pos
-      sep == line[-sep_size, sep_size] and return line
+    sep_size = Rack::Utils.bytesize(sep)
+    line = tmp.gets # cannot be nil here since size > pos
+    sep == line[-sep_size, sep_size] and return line
 
-      # unlikely, if we got here, then tmp is at EOF
-      begin
-        orig_size = tmp.pos
-        tee(Const::CHUNK_SIZE, buf2) or break
-        tmp.seek(orig_size)
-        line << tmp.gets
-        sep == line[-sep_size, sep_size] and return line
-        # tmp is at EOF again here, retry the loop
-      end while true
-
-      line
-    end
+    # unlikely, if we got here, then tmp is at EOF
+    begin
+      orig_size = tmp.pos
+      tee(@@io_chunk_size, buf2) or break
+      tmp.seek(orig_size)
+      line << tmp.gets
+      sep == line[-sep_size, sep_size] and return line
+      # tmp is at EOF again here, retry the loop
+    end while true
 
-    # :call-seq:
-    #   ios.each { |line| block }  => ios
-    #
-    # Executes the block for every ``line'' in *ios*, where lines are
-    # separated by the global record separator ($/, typically "\n").
-    def each(&block)
-      while line = gets
-        yield line
-      end
+    line
+  end
 
-      self # Rack does not specify what the return value is here
+  # :call-seq:
+  #   ios.each { |line| block }  => ios
+  #
+  # Executes the block for every ``line'' in *ios*, where lines are
+  # separated by the global record separator ($/, typically "\n").
+  def each(&block)
+    while line = gets
+      yield line
     end
 
-    # :call-seq:
-    #   ios.rewind    => 0
-    #
-    # Positions the *ios* pointer to the beginning of input, returns
-    # the offset (zero) of the +ios+ pointer.  Subsequent reads will
-    # start from the beginning of the previously-buffered input.
-    def rewind
-      tmp.rewind # Rack does not specify what the return value is here
-    end
+    self # Rack does not specify what the return value is here
+  end
 
-  private
-
-    def client_error(e)
-      case e
-      when EOFError
-        # in case client only did a premature shutdown(SHUT_WR)
-        # we do support clients that shutdown(SHUT_WR) after the
-        # _entire_ request has been sent, and those will not have
-        # raised EOFError on us.
-        socket.close if socket
-        raise ClientShutdown, "bytes_read=#{tmp.size}", []
-      when HttpParserError
-        e.set_backtrace([])
-      end
-      raise e
-    end
+  # :call-seq:
+  #   ios.rewind    => 0
+  #
+  # Positions the *ios* pointer to the beginning of input, returns
+  # the offset (zero) of the +ios+ pointer.  Subsequent reads will
+  # start from the beginning of the previously-buffered input.
+  def rewind
+    tmp.rewind # Rack does not specify what the return value is here
+  end
 
-    # tees off a +length+ chunk of data from the input into the IO
-    # backing store as well as returning it.  +dst+ must be specified.
-    # returns nil if reading from the input returns nil
-    def tee(length, dst)
-      unless parser.body_eof?
-        if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
-          tmp.write(dst)
-          tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
-          return dst
-        end
-      end
-      finalize_input
-      rescue => e
-        client_error(e)
+private
+
+  def client_error(e)
+    case e
+    when EOFError
+      # in case client only did a premature shutdown(SHUT_WR)
+      # we do support clients that shutdown(SHUT_WR) after the
+      # _entire_ request has been sent, and those will not have
+      # raised EOFError on us.
+      socket.close if socket
+      raise Unicorn::ClientShutdown, "bytes_read=#{tmp.size}", []
+    when Unicorn::HttpParserError
+      e.set_backtrace([])
     end
+    raise e
+  end
 
-    def finalize_input
-      while parser.trailers(req, buf).nil?
-        # Don't worry about raising ClientShutdown here on EOFError, tee()
-        # will catch EOFError when app is processing it, otherwise in
-        # initialize we never get any chance to enter the app so the
-        # EOFError will just get trapped by Unicorn and not the Rack app
-        buf << socket.readpartial(Const::CHUNK_SIZE)
+  # tees off a +length+ chunk of data from the input into the IO
+  # backing store as well as returning it.  +dst+ must be specified.
+  # returns nil if reading from the input returns nil
+  def tee(length, dst)
+    unless parser.body_eof?
+      if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
+        tmp.write(dst)
+        tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+        return dst
       end
-      self.socket = nil
     end
+    finalize_input
+    rescue => e
+      client_error(e)
+  end
 
-    # tee()s into +dst+ until it is of +length+ bytes (or until
-    # we've reached the Content-Length of the request body).
-    # Returns +dst+ (the exact object, not a duplicate)
-    # To continue supporting applications that need near-real-time
-    # streaming input bodies, this is a no-op for
-    # "Transfer-Encoding: chunked" requests.
-    def ensure_length(dst, length)
-      # len is nil for chunked bodies, so we can't ensure length for those
-      # since they could be streaming bidirectionally and we don't want to
-      # block the caller in that case.
-      return dst if dst.nil? || len.nil?
-
-      while dst.size < length && tee(length - dst.size, buf2)
-        dst << buf2
-      end
+  def finalize_input
+    while parser.trailers(req, buf).nil?
+      # Don't worry about raising ClientShutdown here on EOFError, tee()
+      # will catch EOFError when app is processing it, otherwise in
+      # initialize we never get any chance to enter the app so the
+      # EOFError will just get trapped by Unicorn and not the Rack app
+      buf << socket.readpartial(@@io_chunk_size)
+    end
+    self.socket = nil
+  end
 
-      dst
+  # tee()s into +dst+ until it is of +length+ bytes (or until
+  # we've reached the Content-Length of the request body).
+  # Returns +dst+ (the exact object, not a duplicate)
+  # To continue supporting applications that need near-real-time
+  # streaming input bodies, this is a no-op for
+  # "Transfer-Encoding: chunked" requests.
+  def ensure_length(dst, length)
+    # len is nil for chunked bodies, so we can't ensure length for those
+    # since they could be streaming bidirectionally and we don't want to
+    # block the caller in that case.
+    return dst if dst.nil? || len.nil?
+
+    while dst.size < length && tee(length - dst.size, buf2)
+      dst << buf2
     end
 
+    dst
   end
+
 end
diff --git a/test/test_helper.rb b/test/test_helper.rb
index f0da9c1..c4e56a2 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -32,7 +32,6 @@ require 'tempfile'
 require 'fileutils'
 require 'logger'
 require 'unicorn'
-require 'unicorn_http'
 
 if ENV['DEBUG']
   require 'ruby-debug'
diff --git a/test/unit/test_socket_helper.rb b/test/unit/test_socket_helper.rb
index 36b2dc2..bbce359 100644
--- a/test/unit/test_socket_helper.rb
+++ b/test/unit/test_socket_helper.rb
@@ -146,4 +146,28 @@ class TestSocketHelper < Test::Unit::TestCase
     sock_name(@unix_server)
   end
 
+  def test_tcp_defer_accept_default
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert cur >= 1
+  end if defined?(TCP_DEFER_ACCEPT)
+
+  def test_tcp_defer_accept_disable
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name, :tcp_defer_accept => false)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert_equal 0, cur
+  end if defined?(TCP_DEFER_ACCEPT)
+
+  def test_tcp_defer_accept_nr
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name, :tcp_defer_accept => 60)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert cur > 1
+  end if defined?(TCP_DEFER_ACCEPT)
+
 end