Diffstat (limited to 'lib')
-rw-r--r--  lib/unicorn.rb                    22
-rw-r--r--  lib/unicorn/app/exec_cgi.rb      154
-rw-r--r--  lib/unicorn/app/inetd.rb         109
-rw-r--r--  lib/unicorn/configurator.rb       19
-rw-r--r--  lib/unicorn/const.rb              27
-rw-r--r--  lib/unicorn/http_request.rb        5
-rw-r--r--  lib/unicorn/http_response.rb       4
-rw-r--r--  lib/unicorn/http_server.rb       159
-rw-r--r--  lib/unicorn/socket_helper.rb     100
-rw-r--r--  lib/unicorn/ssl_client.rb         11
-rw-r--r--  lib/unicorn/ssl_configurator.rb  104
-rw-r--r--  lib/unicorn/ssl_server.rb         42
-rw-r--r--  lib/unicorn/tmpio.rb               5
-rw-r--r--  lib/unicorn/util.rb                1
-rw-r--r--  lib/unicorn/worker.rb             14
15 files changed, 126 insertions, 650 deletions
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index 358748f..9fdcb8e 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,5 +1,4 @@
 # -*- encoding: binary -*-
-require 'fcntl'
 require 'etc'
 require 'stringio'
 require 'rack'
@@ -22,8 +21,7 @@ module Unicorn
   # since there is nothing in the application stack that is responsible
   # for client shutdowns/disconnects.  This exception is visible to Rack
   # applications unless PrereadInput middleware is loaded.
-  class ClientShutdown < EOFError
-  end
+  ClientShutdown = Class.new(EOFError)
 
   # :stopdoc:
 
@@ -102,19 +100,13 @@ module Unicorn
 
   # remove this when we only support Ruby >= 2.0
   def self.pipe # :nodoc:
-    Kgio::Pipe.new.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    Kgio::Pipe.new.each { |io| io.close_on_exec = true }
   end
   # :startdoc:
 end
 # :enddoc:
-require 'unicorn/const'
-require 'unicorn/socket_helper'
-require 'unicorn/stream_input'
-require 'unicorn/tee_input'
-require 'unicorn/http_request'
-require 'unicorn/configurator'
-require 'unicorn/tmpio'
-require 'unicorn/util'
-require 'unicorn/http_response'
-require 'unicorn/worker'
-require 'unicorn/http_server'
+
+%w(const socket_helper stream_input tee_input http_request configurator
+   tmpio util http_response worker http_server).each do |s|
+  require_relative "unicorn/#{s}"
+end
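
The change above swaps the fcntl(2)-based FD_CLOEXEC call for IO#close_on_exec=, which does the same thing without requiring 'fcntl', and turns the flat require list into a require_relative loop. A minimal sketch of the old and new close-on-exec idioms (standalone, not part of the patch):

    require 'fcntl'
    r, w = IO.pipe

    # old idiom: set the close-on-exec flag through fcntl(2)
    r.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)

    # new idiom: same effect, no Fcntl constants needed
    w.close_on_exec = true

    r.close_on_exec?   # => true
    w.close_on_exec?   # => true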
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
deleted file mode 100644
index 232b681..0000000
--- a/lib/unicorn/app/exec_cgi.rb
+++ /dev/null
@@ -1,154 +0,0 @@
-# -*- encoding: binary -*-
-# :enddoc:
-require 'unicorn'
-
-module Unicorn::App
-
-  # This class is highly experimental (even more so than the rest of Unicorn)
-  # and has never run anything other than cgit.
-  class ExecCgi < Struct.new(:args)
-
-    CHUNK_SIZE = 16384
-    PASS_VARS = %w(
-      CONTENT_LENGTH
-      CONTENT_TYPE
-      GATEWAY_INTERFACE
-      AUTH_TYPE
-      PATH_INFO
-      PATH_TRANSLATED
-      QUERY_STRING
-      REMOTE_ADDR
-      REMOTE_HOST
-      REMOTE_IDENT
-      REMOTE_USER
-      REQUEST_METHOD
-      SERVER_NAME
-      SERVER_PORT
-      SERVER_PROTOCOL
-      SERVER_SOFTWARE
-    ).map { |x| x.freeze } # frozen strings are faster for Hash assignments
-
-    class Body < Unicorn::TmpIO
-      def body_offset=(n)
-        sysseek(@body_offset = n)
-      end
-
-      def each
-        sysseek @body_offset
-        # don't use a preallocated buffer for sysread since we can't
-        # guarantee an actual socket is consuming the yielded string
-        # (or if somebody is pushing to an array for eventual concatenation
-        begin
-          yield sysread(CHUNK_SIZE)
-        rescue EOFError
-          break
-        end while true
-      end
-    end
-
-    # Intializes the app, example of usage in a config.ru
-    #   map "/cgit" do
-    #     run Unicorn::App::ExecCgi.new("/path/to/cgit.cgi")
-    #   end
-    def initialize(*args)
-      self.args = args
-      first = args[0] or
-        raise ArgumentError, "need path to executable"
-      first[0] == ?/ or args[0] = ::File.expand_path(first)
-      File.executable?(args[0]) or
-        raise ArgumentError, "#{args[0]} is not executable"
-    end
-
-    # Calls the app
-    def call(env)
-      out, err = Body.new, Unicorn::TmpIO.new
-      inp = force_file_input(env)
-      pid = fork { run_child(inp, out, err, env) }
-      inp.close
-      pid, status = Process.waitpid2(pid)
-      write_errors(env, err, status) if err.stat.size > 0
-      err.close
-
-      return parse_output!(out) if status.success?
-      out.close
-      [ 500, { 'Content-Length' => '0', 'Content-Type' => 'text/plain' }, [] ]
-    end
-
-    private
-
-    def run_child(inp, out, err, env)
-      PASS_VARS.each do |key|
-        val = env[key] or next
-        ENV[key] = val
-      end
-      ENV['SCRIPT_NAME'] = args[0]
-      ENV['GATEWAY_INTERFACE'] = 'CGI/1.1'
-      env.keys.grep(/^HTTP_/) { |key| ENV[key] = env[key] }
-
-      $stdin.reopen(inp)
-      $stdout.reopen(out)
-      $stderr.reopen(err)
-      exec(*args)
-    end
-
-    # Extracts headers from CGI out, will change the offset of out.
-    # This returns a standard Rack-compatible return value:
-    #   [ 200, HeadersHash, body ]
-    def parse_output!(out)
-      size = out.stat.size
-      out.sysseek(0)
-      head = out.sysread(CHUNK_SIZE)
-      offset = 2
-      head, body = head.split(/\n\n/, 2)
-      if body.nil?
-        head, body = head.split(/\r\n\r\n/, 2)
-        offset = 4
-      end
-      offset += head.length
-      out.body_offset = offset
-      size -= offset
-      prev = nil
-      headers = Rack::Utils::HeaderHash.new
-      head.split(/\r?\n/).each do |line|
-        case line
-        when /^([A-Za-z0-9-]+):\s*(.*)$/ then headers[prev = $1] = $2
-        when /^[ \t]/ then headers[prev] << "\n#{line}" if prev
-        end
-      end
-      status = headers.delete("Status") || 200
-      headers['Content-Length'] = size.to_s
-      [ status, headers, out ]
-    end
-
-    # ensures rack.input is a file handle that we can redirect stdin to
-    def force_file_input(env)
-      inp = env['rack.input']
-      # inp could be a StringIO or StringIO-like object
-      if inp.respond_to?(:size) && inp.size == 0
-        ::File.open('/dev/null', 'rb')
-      else
-        tmp = Unicorn::TmpIO.new
-
-        buf = inp.read(CHUNK_SIZE)
-        begin
-          tmp.syswrite(buf)
-        end while inp.read(CHUNK_SIZE, buf)
-        tmp.sysseek(0)
-        tmp
-      end
-    end
-
-    # rack.errors this may not be an IO object, so we couldn't
-    # just redirect the CGI executable to that earlier.
-    def write_errors(env, err, status)
-      err.seek(0)
-      dst = env['rack.errors']
-      pid = status.pid
-      dst.write("#{pid}: #{args.inspect} status=#{status} stderr:\n")
-      err.each_line { |line| dst.write("#{pid}: #{line}") }
-      dst.flush
-    end
-
-  end
-
-end
diff --git a/lib/unicorn/app/inetd.rb b/lib/unicorn/app/inetd.rb
deleted file mode 100644
index 13b6624..0000000
--- a/lib/unicorn/app/inetd.rb
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- encoding: binary -*-
-# :enddoc:
-# Copyright (c) 2009 Eric Wong
-# You can redistribute it and/or modify it under the same terms as Ruby 1.8 or
-# the GPLv2+ (GPLv3+ preferred)
-
-# this class *must* be used with Rack::Chunked
-module Unicorn::App
-  class Inetd < Struct.new(:cmd)
-
-    class CatBody < Struct.new(:errors, :err_rd, :out_rd, :pid_map)
-      def initialize(env, cmd)
-        self.errors = env['rack.errors']
-        in_rd, in_wr = IO.pipe
-        self.err_rd, err_wr = IO.pipe
-        self.out_rd, out_wr = IO.pipe
-
-        cmd_pid = fork {
-          inp, out, err = (0..2).map { |i| IO.new(i) }
-          inp.reopen(in_rd)
-          out.reopen(out_wr)
-          err.reopen(err_wr)
-          [ in_rd, in_wr, err_rd, err_wr, out_rd, out_wr ].each { |i| i.close }
-          exec(*cmd)
-        }
-        [ in_rd, err_wr, out_wr ].each { |io| io.close }
-        [ in_wr, err_rd, out_rd ].each { |io| io.binmode }
-        in_wr.sync = true
-
-        # Unfortunately, input here must be processed inside a seperate
-        # thread/process using blocking I/O since env['rack.input'] is not
-        # IO.select-able and attempting to make it so would trip Rack::Lint
-        inp_pid = fork {
-          input = env['rack.input']
-          [ err_rd, out_rd ].each { |io| io.close }
-
-          # this is dependent on input.read having readpartial semantics:
-          buf = input.read(16384)
-          begin
-            in_wr.write(buf)
-          end while input.read(16384, buf)
-        }
-        in_wr.close
-        self.pid_map = {
-          inp_pid => 'input streamer',
-          cmd_pid => cmd.inspect,
-        }
-      end
-
-      def each
-        begin
-          rd, = IO.select([err_rd, out_rd])
-          rd && rd.first or next
-
-          if rd.include?(err_rd)
-            begin
-              errors.write(err_rd.read_nonblock(16384))
-            rescue Errno::EINTR
-            rescue Errno::EAGAIN
-              break
-            end while true
-          end
-
-          rd.include?(out_rd) or next
-
-          begin
-            yield out_rd.read_nonblock(16384)
-          rescue Errno::EINTR
-          rescue Errno::EAGAIN
-            break
-          end while true
-        rescue EOFError,Errno::EPIPE,Errno::EBADF,Errno::EINVAL
-          break
-        end while true
-
-        self
-      end
-
-      def close
-        pid_map.each { |pid, str|
-          begin
-            pid, status = Process.waitpid2(pid)
-            status.success? or
-              errors.write("#{str}: #{status.inspect} (PID:#{pid})\n")
-          rescue Errno::ECHILD
-            errors.write("Failed to reap #{str} (PID:#{pid})\n")
-          end
-        }
-        out_rd.close
-        err_rd.close
-      end
-
-    end
-
-    def initialize(*cmd)
-      self.cmd = cmd
-    end
-
-    def call(env)
-      /\A100-continue\z/i =~ env[Unicorn::Const::HTTP_EXPECT] and
-          return [ 100, {} , [] ]
-
-      [ 200, { 'Content-Type' => 'application/octet-stream' },
-       CatBody.new(env, cmd) ]
-    end
-
-  end
-
-end
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 0658c81..32e49c1 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -1,6 +1,5 @@
 # -*- encoding: binary -*-
 require 'logger'
-require 'unicorn/ssl_configurator'
 
 # Implements a simple DSL for configuring a \Unicorn server.
 #
@@ -13,7 +12,6 @@ require 'unicorn/ssl_configurator'
 # See the link:/TUNING.html document for more information on tuning unicorn.
 class Unicorn::Configurator
   include Unicorn
-  include Unicorn::SSLConfigurator
 
   # :stopdoc:
   attr_accessor :set, :config_file, :after_reload
@@ -48,7 +46,6 @@ class Unicorn::Configurator
     :check_client_connection => false,
     :rewindable_input => true, # for Rack 2.x: (Rack::VERSION[0] <= 1),
     :client_body_buffer_size => Unicorn::Const::MAX_BODY,
-    :trust_x_forwarded => true,
   }
   #:startdoc:
 
@@ -556,18 +553,6 @@ class Unicorn::Configurator
     set[:user] = [ user, group ]
   end
 
-  # Sets whether or not the parser will trust X-Forwarded-Proto and
-  # X-Forwarded-SSL headers and set "rack.url_scheme" to "https" accordingly.
-  # Rainbows!/Zbatery installations facing untrusted clients directly
-  # should set this to +false+.  This is +true+ by default as Unicorn
-  # is designed to only sit behind trusted nginx proxies.
-  #
-  # This has never been publically documented and is subject to removal
-  # in future releases.
-  def trust_x_forwarded(bool) # :nodoc:
-    set_bool(:trust_x_forwarded, bool)
-  end
-
   # expands "unix:path/to/foo" to a socket relative to the current path
   # expands pathnames of sockets if relative to "~" or "~username"
   # expands "*:port and ":port" to "0.0.0.0:port"
@@ -601,7 +586,7 @@ private
   def canonicalize_tcp(addr, port)
     packed = Socket.pack_sockaddr_in(port, addr)
     port, addr = Socket.unpack_sockaddr_in(packed)
-    /:/ =~ addr ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
+    addr.include?(':') ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
   end
 
   def set_path(var, path) #:nodoc:
@@ -657,7 +642,7 @@ private
       raise ArgumentError, "rackup file (#{ru}) not readable"
 
     # it could be a .rb file, too, we don't parse those manually
-    ru =~ /\.ru\z/ or return
+    ru.end_with?('.ru') or return
 
     /^#\\(.*)/ =~ File.read(ru) or return
     RACKUP[:optparse].parse!($1.split(/\s+/))
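
canonicalize_tcp above now detects IPv6 addresses with String#include? instead of a regexp before adding RFC 2732 brackets. A small illustration of the round-trip it performs (address and port are arbitrary examples):

    require 'socket'

    packed = Socket.pack_sockaddr_in(8080, '::1')
    port, addr = Socket.unpack_sockaddr_in(packed)    # => 8080, "::1"

    # IPv6 addresses contain ':' and must be bracketed; IPv4 passes through
    addr.include?(':') ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
    # => "[::1]:8080"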
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 51d7394..33ab4ac 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -1,12 +1,6 @@
 # -*- encoding: binary -*-
 
-# :enddoc:
-# Frequently used constants when constructing requests or responses.
-# Many times the constant just refers to a string with the same
-# contents.  Using these constants gave about a 3% to 10% performance
-# improvement over using the strings directly.  Symbols did not really
-# improve things much compared to constants.
-module Unicorn::Const
+module Unicorn::Const # :nodoc:
   # default TCP listen host address (0.0.0.0, all interfaces)
   DEFAULT_HOST = "0.0.0.0"
 
@@ -23,22 +17,5 @@ module Unicorn::Const
   # temporary file for reading (112 kilobytes).  This is the default
   # value of client_body_buffer_size.
   MAX_BODY = 1024 * 112
-
-  # :stopdoc:
-  # common errors we'll send back
-  # (N.B. these are not used by unicorn, but we won't drop them until
-  #  unicorn 5.x to avoid breaking Rainbows!).
-  ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
-  ERROR_414_RESPONSE = "HTTP/1.1 414 Request-URI Too Long\r\n\r\n"
-  ERROR_413_RESPONSE = "HTTP/1.1 413 Request Entity Too Large\r\n\r\n"
-  ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
-
-  EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
-  EXPECT_100_RESPONSE_SUFFIXED = "100 Continue\r\n\r\nHTTP/1.1 "
-
-  HTTP_RESPONSE_START = ['HTTP', '/1.1 ']
-  HTTP_EXPECT = "HTTP_EXPECT"
-
-  # :startdoc:
 end
-require 'unicorn/version'
+require_relative 'version'
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index 6b20431..9888430 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -26,8 +26,11 @@ class Unicorn::HttpParser
 
   # :stopdoc:
   # A frozen format for this is about 15% faster
+  # Drop these frozen strings when Ruby 2.2 becomes more prevalent,
+  # 2.2+ optimizes hash assignments when used with literal string keys
   REMOTE_ADDR = 'REMOTE_ADDR'.freeze
   RACK_INPUT = 'rack.input'.freeze
+  HTTP_RESPONSE_START = [ 'HTTP', '/1.1 ']
   @@input_class = Unicorn::TeeInput
   @@check_client_connection = false
 
@@ -86,7 +89,7 @@ class Unicorn::HttpParser
     # detect if the socket is valid by writing a partial response:
     if @@check_client_connection && headers?
       @response_start_sent = true
-      Unicorn::Const::HTTP_RESPONSE_START.each { |c| socket.write(c) }
+      HTTP_RESPONSE_START.each { |c| socket.write(c) }
     end
 
     e[RACK_INPUT] = 0 == content_length ?
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 083951c..cc027c5 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -24,14 +24,12 @@ module Unicorn::HttpResponse
   # writes the rack_response to socket as an HTTP response
   def http_response_write(socket, status, headers, body,
                           response_start_sent=false)
-    status = CODES[status.to_i] || status
     hijack = nil
 
     http_response_start = response_start_sent ? '' : 'HTTP/1.1 '
     if headers
-      buf = "#{http_response_start}#{status}\r\n" \
+      buf = "#{http_response_start}#{CODES[status.to_i] || status}\r\n" \
             "Date: #{httpdate}\r\n" \
-            "Status: #{status}\r\n" \
             "Connection: close\r\n"
       headers.each do |key, value|
         case key
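
With the status lookup inlined and the redundant "Status:" header gone, the response head is built in a single interpolation. A rough sketch of the buffer shape, assuming CODES maps integer codes to status-line strings such as "200 OK" (as the lookup and fallback suggest):

    require 'time'

    codes  = { 200 => '200 OK' }    # stand-in for Unicorn's CODES table
    status = 200
    buf = "HTTP/1.1 #{codes[status.to_i] || status}\r\n" \
          "Date: #{Time.now.httpdate}\r\n" \
          "Connection: close\r\n"
    # header key/value pairs and the terminating "\r\n" follow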
diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb
index 329c5bf..82747b8 100644
--- a/lib/unicorn/http_server.rb
+++ b/lib/unicorn/http_server.rb
@@ -1,5 +1,4 @@
 # -*- encoding: binary -*-
-require "unicorn/ssl_server"
 
 # This is the process manager of Unicorn. This manages worker
 # processes which in turn handle the I/O and application process.
@@ -21,10 +20,6 @@ class Unicorn::HttpServer
   attr_reader :pid, :logger
   include Unicorn::SocketHelper
   include Unicorn::HttpResponse
-  include Unicorn::SSLServer
-
-  # backwards compatibility with 1.x
-  Worker = Unicorn::Worker
 
   # all bound listener sockets
   LISTENERS = []
@@ -32,23 +27,6 @@ class Unicorn::HttpServer
   # listeners we have yet to bind
   NEW_LISTENERS = []
 
-  # This hash maps PIDs to Workers
-  WORKERS = {}
-
-  # We use SELF_PIPE differently in the master and worker processes:
-  #
-  # * The master process never closes or reinitializes this once
-  # initialized.  Signal handlers in the master process will write to
-  # it to wake up the master from IO.select in exactly the same manner
-  # djb describes in http://cr.yp.to/docs/selfpipe.html
-  #
-  # * The workers immediately close the pipe they inherit.  See the
-  # Unicorn::Worker class for the pipe workers use.
-  SELF_PIPE = []
-
-  # signal queue used for self-piping
-  SIG_QUEUE = []
-
   # list of signals we care about and trap in master.
   QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
 
@@ -71,7 +49,7 @@ class Unicorn::HttpServer
   #
   #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/2.2.0/bin/unicorn"
   START_CTX = {
-    :argv => ARGV.map { |arg| arg.dup },
+    :argv => ARGV.map(&:dup),
     0 => $0.dup,
   }
   # We favor ENV['PWD'] since it is (usually) symlink aware for Capistrano
@@ -100,6 +78,19 @@ class Unicorn::HttpServer
     self.config = Unicorn::Configurator.new(options)
     self.listener_opts = {}
 
+    # We use @self_pipe differently in the master and worker processes:
+    #
+    # * The master process never closes or reinitializes this once
+    # initialized.  Signal handlers in the master process will write to
+    # it to wake up the master from IO.select in exactly the same manner
+    # djb describes in http://cr.yp.to/docs/selfpipe.html
+    #
+    # * The workers immediately close the pipe they inherit.  See the
+    # Unicorn::Worker class for the pipe workers use.
+    @self_pipe = []
+    @workers = {} # hash maps PIDs to Workers
+    @sig_queue = [] # signal queue used for self-piping
+
     # we try inheriting listeners first, so we bind them later.
     # we don't write the pid file until we've bound listeners in case
     # unicorn was started twice by mistake.  Even though our #pid= method
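
The SELF_PIPE constant becomes the per-instance @self_pipe, but the mechanism is still the djb self-pipe trick referenced in the comment: signal handlers only queue the signal name and write one byte, and that byte wakes the master out of IO.select. A stripped-down sketch of the pattern, independent of unicorn's internals:

    self_pipe = IO.pipe
    sig_queue = []

    # handlers stay tiny: record the signal, write a byte to wake the main loop
    trap(:USR1) do
      sig_queue << :USR1
      self_pipe[1].write('.')
    end

    Process.kill(:USR1, $$)                       # simulate an external signal

    if IO.select([self_pipe[0]], nil, nil, 1.0)   # master_sleep equivalent
      self_pipe[0].read_nonblock(11) rescue nil   # drain the wakeup byte(s)
      sig_queue.shift                             # => :USR1, handle it here
    end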
@@ -119,13 +110,13 @@ class Unicorn::HttpServer
     inherit_listeners!
     # this pipe is used to wake us up from select(2) in #join when signals
     # are trapped.  See trap_deferred.
-    SELF_PIPE.replace(Unicorn.pipe)
+    @self_pipe.replace(Unicorn.pipe)
     @master_pid = $$
 
     # setup signal handlers before writing pid file in case people get
     # trigger happy and send signals as soon as the pid file exists.
     # Note that signals don't actually get handled until the #join method
-    QUEUE_SIGS.each { |sig| trap(sig) { SIG_QUEUE << sig; awaken_master } }
+    QUEUE_SIGS.each { |sig| trap(sig) { @sig_queue << sig; awaken_master } }
     trap(:CHLD) { awaken_master }
 
     # write pid early for Mongrel compatibility if we're not inheriting sockets
@@ -158,9 +149,6 @@ class Unicorn::HttpServer
 
     LISTENERS.delete_if do |io|
       if dead_names.include?(sock_name(io))
-        IO_PURGATORY.delete_if do |pio|
-          pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
-        end
         (io.close rescue nil).nil? # true
       else
         set_server_sockopt(io, listener_opts[sock_name(io)])
@@ -198,7 +186,7 @@ class Unicorn::HttpServer
     if path
       if x = valid_pid?(path)
         return path if pid && path == pid && x == $$
-        if x == reexec_pid && pid =~ /\.oldbin\z/
+        if x == reexec_pid && pid.end_with?('.oldbin')
           logger.warn("will not set pid=#{path} while reexec-ed "\
                       "child is running PID:#{x}")
           return
@@ -241,7 +229,7 @@ class Unicorn::HttpServer
     begin
       io = bind_listen(address, opt)
       unless Kgio::TCPServer === io || Kgio::UNIXServer === io
-        prevent_autoclose(io)
+        io.autoclose = false
         io = server_cast(io)
       end
       logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
@@ -267,7 +255,7 @@ class Unicorn::HttpServer
   # is signalling us too often.
   def join
     respawn = true
-    last_check = Time.now
+    last_check = time_now
 
     proc_name 'master'
     logger.info "master process ready" # test_exec.rb relies on this message
@@ -281,11 +269,11 @@ class Unicorn::HttpServer
     end
     begin
       reap_all_workers
-      case SIG_QUEUE.shift
+      case @sig_queue.shift
       when nil
         # avoid murdering workers after our master process (or the
         # machine) comes out of suspend/hibernation
-        if (last_check + @timeout) >= (last_check = Time.now)
+        if (last_check + @timeout) >= (last_check = time_now)
           sleep_time = murder_lazy_workers
         else
           sleep_time = @timeout/2.0 + 1
@@ -339,8 +327,8 @@ class Unicorn::HttpServer
   # Terminates all workers, but does not exit master process
   def stop(graceful = true)
     self.listeners = []
-    limit = Time.now + timeout
-    until WORKERS.empty? || Time.now > limit
+    limit = time_now + timeout
+    until @workers.empty? || time_now > limit
       if graceful
         soft_kill_each_worker(:QUIT)
       else
@@ -369,14 +357,6 @@ class Unicorn::HttpServer
     Unicorn::TeeInput.client_body_buffer_size = bytes
   end
 
-  def trust_x_forwarded
-    Unicorn::HttpParser.trust_x_forwarded?
-  end
-
-  def trust_x_forwarded=(bool)
-    Unicorn::HttpParser.trust_x_forwarded = bool
-  end
-
   def check_client_connection
     Unicorn::HttpRequest.check_client_connection
   end
@@ -389,17 +369,17 @@ class Unicorn::HttpServer
 
   # wait for a signal handler to wake us up and then consume the pipe
   def master_sleep(sec)
+    @self_pipe[0].kgio_wait_readable(sec) or return
     # 11 bytes is the maximum string length which can be embedded within
     # the Ruby itself and not require a separate malloc (on 32-bit MRI 1.9+).
     # Most reads are only one byte here and uncommon, so it's not worth a
     # persistent buffer, either:
-    IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
-    SELF_PIPE[0].kgio_tryread(11)
+    @self_pipe[0].kgio_tryread(11)
   end
 
   def awaken_master
     return if $$ != @master_pid
-    SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
+    @self_pipe[1].kgio_trywrite('.') # wakeup master process from select
   end
 
   # reaps all unreaped workers
@@ -413,7 +393,7 @@ class Unicorn::HttpServer
         self.pid = pid.chomp('.oldbin') if pid
         proc_name 'master'
       else
-        worker = WORKERS.delete(wpid) and worker.close rescue nil
+        worker = @workers.delete(wpid) and worker.close rescue nil
         m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
         status.success? ? logger.info(m) : logger.error(m)
       end
@@ -451,10 +431,7 @@ class Unicorn::HttpServer
     self.reexec_pid = fork do
       listener_fds = {}
       LISTENERS.each do |sock|
-        # IO#close_on_exec= will be available on any future version of
-        # Ruby that sets FD_CLOEXEC by default on new file descriptors
-        # ref: http://redmine.ruby-lang.org/issues/5041
-        sock.close_on_exec = false if sock.respond_to?(:close_on_exec=)
+        sock.close_on_exec = false
         listener_fds[sock.fileno] = sock
       end
       ENV['UNICORN_FD'] = listener_fds.keys.join(',')
@@ -467,13 +444,13 @@ class Unicorn::HttpServer
       (3..1024).each do |io|
         next if listener_fds.include?(io)
         io = IO.for_fd(io) rescue next
-        prevent_autoclose(io)
-        io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+        io.autoclose = false
+        io.close_on_exec = true
       end
 
       # exec(command, hash) works in at least 1.9.1+, but will only be
       # required in 1.9.4/2.0.0 at earliest.
-      cmd << listener_fds if RUBY_VERSION >= "1.9.1"
+      cmd << listener_fds
       logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
       before_exec.call(self)
       exec(*cmd)
@@ -484,8 +461,8 @@ class Unicorn::HttpServer
   # forcibly terminate all workers that haven't checked in in timeout seconds.  The timeout is implemented using an unlinked File
   def murder_lazy_workers
     next_sleep = @timeout - 1
-    now = Time.now.to_i
-    WORKERS.dup.each_pair do |wpid, worker|
+    now = time_now.to_i
+    @workers.dup.each_pair do |wpid, worker|
       tick = worker.tick
       0 == tick and next # skip workers that haven't processed any clients
       diff = now - tick
@@ -503,7 +480,7 @@ class Unicorn::HttpServer
   end
 
   def after_fork_internal
-    SELF_PIPE.each { |io| io.close }.clear # this is master-only, now
+    @self_pipe.each(&:close).clear # this is master-only, now
     @ready_pipe.close if @ready_pipe
     Unicorn::Configurator::RACKUP.clear
     @ready_pipe = @init_listeners = @before_exec = @before_fork = nil
@@ -518,11 +495,11 @@ class Unicorn::HttpServer
   def spawn_missing_workers
     worker_nr = -1
     until (worker_nr += 1) == @worker_processes
-      WORKERS.value?(worker_nr) and next
-      worker = Worker.new(worker_nr)
+      @workers.value?(worker_nr) and next
+      worker = Unicorn::Worker.new(worker_nr)
       before_fork.call(self, worker)
       if pid = fork
-        WORKERS[pid] = worker
+        @workers[pid] = worker
         worker.atfork_parent
       else
         after_fork_internal
@@ -536,9 +513,9 @@ class Unicorn::HttpServer
   end
 
   def maintain_worker_count
-    (off = WORKERS.size - worker_processes) == 0 and return
+    (off = @workers.size - worker_processes) == 0 and return
     off < 0 and return spawn_missing_workers
-    WORKERS.each_value { |w| w.nr >= worker_processes and w.soft_kill(:QUIT) }
+    @workers.each_value { |w| w.nr >= worker_processes and w.soft_kill(:QUIT) }
   end
 
   # if we get any error, try to write something back to the client
@@ -566,12 +543,15 @@ class Unicorn::HttpServer
     rescue
   end
 
-  def expect_100_response
-    if @request.response_start_sent
-      Unicorn::Const::EXPECT_100_RESPONSE_SUFFIXED
-    else
-      Unicorn::Const::EXPECT_100_RESPONSE
-    end
+  def e100_response_write(client, env)
+    # We use String#freeze to avoid allocations under Ruby 2.1+
+    # Not many users hit this code path, so it's better to reduce the
+    # constant table sizes even for 1.9.3-2.0 users who'll hit extra
+    # allocations here.
+    client.write(@request.response_start_sent ?
+                 "100 Continue\r\n\r\nHTTP/1.1 ".freeze :
+                 "HTTP/1.1 100 Continue\r\n\r\n".freeze)
+    env.delete('HTTP_EXPECT'.freeze)
   end
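
The removed EXPECT_100_* constants become literal strings with String#freeze, matching the comment above: on Ruby 2.1+ the VM reuses one frozen object for a frozen string literal, so the rarely-taken 100-continue path costs neither a constant-table entry nor a per-call allocation. A quick way to see the difference (illustrative only):

    def a; "HTTP/1.1 100 Continue\r\n\r\n";        end   # new string every call
    def b; "HTTP/1.1 100 Continue\r\n\r\n".freeze; end   # one shared object on 2.1+

    a.equal?(a)   # => false
    b.equal?(b)   # => true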
 
   # once a client is accepted, it is processed in its entirety here
@@ -581,8 +561,7 @@ class Unicorn::HttpServer
     return if @request.hijacked?
 
     if 100 == status.to_i
-      client.write(expect_100_response)
-      env.delete(Unicorn::Const::HTTP_EXPECT)
+      e100_response_write(client, env)
       status, headers, body = @app.call(env)
       return if @request.hijacked?
     end
@@ -615,22 +594,21 @@ class Unicorn::HttpServer
     worker.atfork_child
     # we'll re-trap :QUIT later for graceful shutdown iff we accept clients
     EXIT_SIGS.each { |sig| trap(sig) { exit!(0) } }
-    exit!(0) if (SIG_QUEUE & EXIT_SIGS)[0]
+    exit!(0) if (@sig_queue & EXIT_SIGS)[0]
     WORKER_QUEUE_SIGS.each { |sig| trap(sig, nil) }
     trap(:CHLD, 'DEFAULT')
-    SIG_QUEUE.clear
+    @sig_queue.clear
     proc_name "worker[#{worker.nr}]"
     START_CTX.clear
-    WORKERS.clear
+    @workers.clear
 
     after_fork.call(self, worker) # can drop perms and create listeners
-    LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    LISTENERS.each { |sock| sock.close_on_exec = true }
 
     worker.user(*user) if user.kind_of?(Array) && ! worker.switched
     self.timeout /= 2.0 # halve it for select()
     @config = nil
     build_app! unless preload_app
-    ssl_enable!
     @after_fork = @listener_opts = @orig_app = nil
     readers = LISTENERS.dup
     readers << worker
@@ -665,7 +643,7 @@ class Unicorn::HttpServer
     begin
       nr < 0 and reopen_worker_logs(worker.nr)
       nr = 0
-      worker.tick = Time.now.to_i
+      worker.tick = time_now.to_i
       tmp = ready.dup
       while sock = tmp.shift
         # Unicorn::Worker#kgio_tryaccept is not like accept(2) at all,
@@ -673,7 +651,7 @@ class Unicorn::HttpServer
         if client = sock.kgio_tryaccept
           process_client(client)
           nr += 1
-          worker.tick = Time.now.to_i
+          worker.tick = time_now.to_i
         end
         break if nr < 0
       end
@@ -690,7 +668,7 @@ class Unicorn::HttpServer
       ppid == Process.ppid or return
 
       # timeout used so we can detect parent death:
-      worker.tick = Time.now.to_i
+      worker.tick = time_now.to_i
       ret = IO.select(readers, nil, nil, @timeout) and ready = ret[0]
     rescue => e
       redo if nr < 0 && readers[0]
@@ -702,17 +680,17 @@ class Unicorn::HttpServer
   # is no longer running.
   def kill_worker(signal, wpid)
     Process.kill(signal, wpid)
-    rescue Errno::ESRCH
-      worker = WORKERS.delete(wpid) and worker.close rescue nil
+  rescue Errno::ESRCH
+    worker = @workers.delete(wpid) and worker.close rescue nil
   end
 
   # delivers a signal to each worker
   def kill_each_worker(signal)
-    WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
+    @workers.keys.each { |wpid| kill_worker(signal, wpid) }
   end
 
   def soft_kill_each_worker(signal)
-    WORKERS.each_value { |worker| worker.soft_kill(signal) }
+    @workers.each_value { |worker| worker.soft_kill(signal) }
   end
 
   # unlinks a PID file at given +path+ if it contains the current PID
@@ -782,10 +760,10 @@ class Unicorn::HttpServer
   def inherit_listeners!
     # inherit sockets from parents, they need to be plain Socket objects
     # before they become Kgio::UNIXServer or Kgio::TCPServer
-    inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
+    inherited = ENV['UNICORN_FD'].to_s.split(',').map do |fd|
       io = Socket.for_fd(fd.to_i)
       set_server_sockopt(io, listener_opts[sock_name(io)])
-      prevent_autoclose(io)
+      io.autoclose = false
       logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
       server_cast(io)
     end
@@ -814,4 +792,17 @@ class Unicorn::HttpServer
     raise ArgumentError, "no listeners" if LISTENERS.empty?
     NEW_LISTENERS.clear
   end
+
+  # try to use the monotonic clock in Ruby >= 2.1, it is immune to clock
+  # offset adjustments and generates less garbage (Float vs Time object)
+  begin
+    Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    def time_now
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
+  rescue NameError, NoMethodError
+    def time_now # Ruby <= 2.0
+      Time.now
+    end
+  end
 end
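
The new time_now helper prefers the monotonic clock when Process.clock_gettime exists (Ruby 2.1+); the value is a Float that cannot jump when the wall clock is adjusted, which keeps the timeout arithmetic in join and murder_lazy_workers sane. A small sketch of the pattern:

    # monotonic clock: immune to NTP/clock adjustments, returns a Float
    start   = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    sleep 0.1
    elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start   # ~0.1

    # fallback used on Ruby <= 2.0 (the rescue branch above): Time.now,
    # which allocates a Time object and moves with the system clock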
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index 820b778..812ac53 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -4,12 +4,6 @@ require 'socket'
 
 module Unicorn
   module SocketHelper
-    # :stopdoc:
-    include Socket::Constants
-
-    # prevents IO objects in here from being GC-ed
-    # kill this when we drop 1.8 support
-    IO_PURGATORY = []
 
     # internal interface, only used by Rainbows!/Zbatery
     DEFAULTS = {
@@ -22,7 +16,7 @@ module Unicorn
       :tcp_defer_accept => 1,
 
       # FreeBSD, we need to override this to 'dataready' if we
-      # eventually get HTTPS support
+      # eventually support non-HTTP/1.x
       :accept_filter => 'httpready',
 
       # same default value as Mongrel
@@ -32,76 +26,47 @@ module Unicorn
       :tcp_nopush => nil,
       :tcp_nodelay => true,
     }
-    #:startdoc:
 
     # configure platform-specific options (only tested on Linux 2.6 so far)
-    case RUBY_PLATFORM
-    when /linux/
-      # from /usr/include/linux/tcp.h
-      TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT)
-
-      # do not send out partial frames (Linux)
-      TCP_CORK = 3 unless defined?(TCP_CORK)
-
-      # Linux got SO_REUSEPORT in 3.9, BSDs have had it for ages
-      unless defined?(SO_REUSEPORT)
-        if RUBY_PLATFORM =~ /(?:alpha|mips|parisc|sparc)/
-          SO_REUSEPORT = 0x0200 # untested
-        else
-          SO_REUSEPORT = 15 # only tested on x86_64 and i686
-        end
-      end
-    when /freebsd/
-      # do not send out partial frames (FreeBSD)
-      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
-
-      def accf_arg(af_name)
-        [ af_name, nil ].pack('a16a240')
-      end if defined?(SO_ACCEPTFILTER)
-    end
-
-    def prevent_autoclose(io)
-      if io.respond_to?(:autoclose=)
-        io.autoclose = false
-      else
-        IO_PURGATORY << io
-      end
-    end
+    def accf_arg(af_name)
+      [ af_name, nil ].pack('a16a240')
+    end if RUBY_PLATFORM =~ /freebsd/ && Socket.const_defined?(:SO_ACCEPTFILTER)
 
     def set_tcp_sockopt(sock, opt)
       # just in case, even LANs can break sometimes.  Linux sysadmins
       # can lower net.ipv4.tcp_keepalive_* sysctl knobs to very low values.
-      sock.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1) if defined?(SO_KEEPALIVE)
+      Socket.const_defined?(:SO_KEEPALIVE) and
+        sock.setsockopt(:SOL_SOCKET, :SO_KEEPALIVE, 1)
 
-      if defined?(TCP_NODELAY)
+      if Socket.const_defined?(:TCP_NODELAY)
         val = opt[:tcp_nodelay]
-        val = DEFAULTS[:tcp_nodelay] if nil == val
-        sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, val ? 1 : 0)
+        val = DEFAULTS[:tcp_nodelay] if val.nil?
+        sock.setsockopt(:IPPROTO_TCP, :TCP_NODELAY, val ? 1 : 0)
       end
 
       val = opt[:tcp_nopush]
       unless val.nil?
-        if defined?(TCP_CORK) # Linux
-          sock.setsockopt(IPPROTO_TCP, TCP_CORK, val)
-        elsif defined?(TCP_NOPUSH) # TCP_NOPUSH is lightly tested (FreeBSD)
-          sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val)
+        if Socket.const_defined?(:TCP_CORK) # Linux
+          sock.setsockopt(:IPPROTO_TCP, :TCP_CORK, val)
+        elsif Socket.const_defined?(:TCP_NOPUSH) # FreeBSD
+          sock.setsockopt(:IPPROTO_TCP, :TCP_NOPUSH, val)
         end
       end
 
-      # No good reason to ever have deferred accepts off
-      # (except maybe benchmarking)
-      if defined?(TCP_DEFER_ACCEPT)
+      # No good reason to ever have deferred accepts off in single-threaded
+      # servers (except maybe benchmarking)
+      if Socket.const_defined?(:TCP_DEFER_ACCEPT)
         # this differs from nginx, since nginx doesn't allow us to
         # configure the the timeout...
         seconds = opt[:tcp_defer_accept]
         seconds = DEFAULTS[:tcp_defer_accept] if [true,nil].include?(seconds)
         seconds = 0 unless seconds # nil/false means disable this
-        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
+        sock.setsockopt(:IPPROTO_TCP, :TCP_DEFER_ACCEPT, seconds)
       elsif respond_to?(:accf_arg)
         name = opt[:accept_filter]
-        name = DEFAULTS[:accept_filter] if nil == name
+        name = DEFAULTS[:accept_filter] if name.nil?
         begin
-          sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+          sock.setsockopt(:SOL_SOCKET, :SO_ACCEPTFILTER, accf_arg(name))
         rescue => e
           logger.error("#{sock_name(sock)} " \
                        "failed to set accept_filter=#{name} (#{e.inspect})")
@@ -114,10 +79,11 @@ module Unicorn
 
       TCPSocket === sock and set_tcp_sockopt(sock, opt)
 
-      if opt[:rcvbuf] || opt[:sndbuf]
+      rcvbuf, sndbuf = opt.values_at(:rcvbuf, :sndbuf)
+      if rcvbuf || sndbuf
         log_buffer_sizes(sock, "before: ")
-        sock.setsockopt(SOL_SOCKET, SO_RCVBUF, opt[:rcvbuf]) if opt[:rcvbuf]
-        sock.setsockopt(SOL_SOCKET, SO_SNDBUF, opt[:sndbuf]) if opt[:sndbuf]
+        sock.setsockopt(:SOL_SOCKET, :SO_RCVBUF, rcvbuf) if rcvbuf
+        sock.setsockopt(:SOL_SOCKET, :SO_SNDBUF, sndbuf) if sndbuf
         log_buffer_sizes(sock, " after: ")
       end
       sock.listen(opt[:backlog])
@@ -126,8 +92,8 @@ module Unicorn
     end
 
     def log_buffer_sizes(sock, pfx = '')
-      rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
-      sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
+      rcvbuf = sock.getsockopt(:SOL_SOCKET, :SO_RCVBUF).int
+      sndbuf = sock.getsockopt(:SOL_SOCKET, :SO_SNDBUF).int
       logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
     end
 
@@ -172,25 +138,25 @@ module Unicorn
 
     def new_tcp_server(addr, port, opt)
       # n.b. we set FD_CLOEXEC in the workers
-      sock = Socket.new(opt[:ipv6] ? AF_INET6 : AF_INET, SOCK_STREAM, 0)
+      sock = Socket.new(opt[:ipv6] ? :AF_INET6 : :AF_INET, :SOCK_STREAM)
       if opt.key?(:ipv6only)
-        defined?(IPV6_V6ONLY) or
+        Socket.const_defined?(:IPV6_V6ONLY) or
           abort "Socket::IPV6_V6ONLY not defined, upgrade Ruby and/or your OS"
-        sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, opt[:ipv6only] ? 1 : 0)
+        sock.setsockopt(:IPPROTO_IPV6, :IPV6_V6ONLY, opt[:ipv6only] ? 1 : 0)
       end
-      sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
-      if defined?(SO_REUSEPORT) && opt[:reuseport]
-        sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
+      sock.setsockopt(:SOL_SOCKET, :SO_REUSEADDR, 1)
+      if Socket.const_defined?(:SO_REUSEPORT) && opt[:reuseport]
+        sock.setsockopt(:SOL_SOCKET, :SO_REUSEPORT, 1)
       end
       sock.bind(Socket.pack_sockaddr_in(port, addr))
-      prevent_autoclose(sock)
+      sock.autoclose = false
       Kgio::TCPServer.for_fd(sock.fileno)
     end
 
     # returns rfc2732-style (e.g. "[::1]:666") addresses for IPv6
     def tcp_name(sock)
       port, addr = Socket.unpack_sockaddr_in(sock.getsockname)
-      /:/ =~ addr ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
+      addr.include?(':') ? "[#{addr}]:#{port}" : "#{addr}:#{port}"
     end
     module_function :tcp_name
 
diff --git a/lib/unicorn/ssl_client.rb b/lib/unicorn/ssl_client.rb
deleted file mode 100644
index a8c79e3..0000000
--- a/lib/unicorn/ssl_client.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# -*- encoding: binary -*-
-# :stopdoc:
-class Unicorn::SSLClient < Kgio::SSL
-  alias write kgio_write
-  alias close kgio_close
-
-  # this is no-op for now, to be fixed in kgio-monkey if people care
-  # about SSL support...
-  def shutdown(how = nil)
-  end
-end
diff --git a/lib/unicorn/ssl_configurator.rb b/lib/unicorn/ssl_configurator.rb
deleted file mode 100644
index 34f09ec..0000000
--- a/lib/unicorn/ssl_configurator.rb
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- encoding: binary -*-
-# :stopdoc:
-# This module is included in Unicorn::Configurator
-# :startdoc:
-#
-module Unicorn::SSLConfigurator
-  def ssl(&block)
-    ssl_require!
-    before = @set[:listeners].dup
-    opts = @set[:ssl_opts] = {}
-    yield
-    (@set[:listeners] - before).each do |address|
-      (@set[:listener_opts][address] ||= {})[:ssl_opts] = opts
-    end
-    ensure
-      @set.delete(:ssl_opts)
-  end
-
-  def ssl_certificate(file)
-    ssl_set(:ssl_certificate, file)
-  end
-
-  def ssl_certificate_key(file)
-    ssl_set(:ssl_certificate_key, file)
-  end
-
-  def ssl_client_certificate(file)
-    ssl_set(:ssl_client_certificate, file)
-  end
-
-  def ssl_dhparam(file)
-    ssl_set(:ssl_dhparam, file)
-  end
-
-  def ssl_ciphers(openssl_cipherlist_spec)
-    ssl_set(:ssl_ciphers, openssl_cipherlist_spec)
-  end
-
-  def ssl_crl(file)
-    ssl_set(:ssl_crl, file)
-  end
-
-  def ssl_prefer_server_ciphers(bool)
-    ssl_set(:ssl_prefer_server_ciphers, check_bool(bool))
-  end
-
-  def ssl_protocols(list)
-    ssl_set(:ssl_protocols, list)
-  end
-
-  def ssl_verify_client(on_off_optional)
-    ssl_set(:ssl_verify_client, on_off_optional)
-  end
-
-  def ssl_session_timeout(seconds)
-    ssl_set(:ssl_session_timeout, seconds)
-  end
-
-  def ssl_verify_depth(depth)
-    ssl_set(:ssl_verify_depth, depth)
-  end
-
-  # Allows specifying an engine for OpenSSL to use.  We have not been
-  # able to successfully test this feature due to a lack of hardware,
-  # Reports of success or patches to unicorn-public@bogomips.org is
-  # greatly appreciated.
-  def ssl_engine(engine)
-    ssl_warn_global(:ssl_engine)
-    ssl_require!
-    OpenSSL::Engine.load
-    OpenSSL::Engine.by_id(engine)
-    @set[:ssl_engine] = engine
-  end
-
-  def ssl_compression(bool)
-    # OpenSSL uses the SSL_OP_NO_COMPRESSION flag, Flipper follows suit
-    # with :ssl_no_compression, but we negate it to avoid exposing double
-    # negatives to the user.
-    ssl_set(:ssl_no_compression, check_bool(:ssl_compression, ! bool))
-  end
-
-private
-
-  def ssl_warn_global(func) # :nodoc:
-    Hash === @set[:ssl_opts] or return
-    warn("`#{func}' affects all SSL contexts in this process, " \
-         "not just this block")
-  end
-
-  def ssl_set(key, value) # :nodoc:
-    cur = @set[:ssl_opts]
-    Hash === cur or
-             raise ArgumentError, "#{key} must be called inside an `ssl' block"
-    cur[key] = value
-  end
-
-  def ssl_require! # :nodoc:
-    require "flipper"
-    require "unicorn/ssl_client"
-    rescue LoadError
-      warn "install 'kgio-monkey' for SSL support"
-      raise
-  end
-end
diff --git a/lib/unicorn/ssl_server.rb b/lib/unicorn/ssl_server.rb
deleted file mode 100644
index c00c3ae..0000000
--- a/lib/unicorn/ssl_server.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- encoding: binary -*-
-# :stopdoc:
-# this module is meant to be included in Unicorn::HttpServer
-# It is an implementation detail and NOT meant for users.
-module Unicorn::SSLServer
-  attr_accessor :ssl_engine
-
-  def ssl_enable!
-    sni_hostnames = rack_sni_hostnames(@app)
-    seen = {} # we map a single SSLContext to multiple listeners
-    listener_ctx = {}
-    @listener_opts.each do |address, address_opts|
-      ssl_opts = address_opts[:ssl_opts] or next
-      listener_ctx[address] = seen[ssl_opts.object_id] ||= begin
-        unless sni_hostnames.empty?
-          ssl_opts = ssl_opts.dup
-          ssl_opts[:sni_hostnames] = sni_hostnames
-        end
-        ctx = Flipper.ssl_context(ssl_opts)
-        # FIXME: make configurable
-        ctx.session_cache_mode = OpenSSL::SSL::SSLContext::SESSION_CACHE_OFF
-        ctx
-      end
-    end
-    Unicorn::HttpServer::LISTENERS.each do |listener|
-      ctx = listener_ctx[sock_name(listener)] or next
-      listener.extend(Kgio::SSLServer)
-      listener.ssl_ctx = ctx
-      listener.kgio_ssl_class = Unicorn::SSLClient
-    end
-  end
-
-  # ugh, this depends on Rack internals...
-  def rack_sni_hostnames(rack_app) # :nodoc:
-    hostnames = {}
-    if Rack::URLMap === rack_app
-      mapping = rack_app.instance_variable_get(:@mapping)
-      mapping.each { |hostname,_,_,_| hostnames[hostname] = true }
-    end
-    hostnames.keys
-  end
-end
diff --git a/lib/unicorn/tmpio.rb b/lib/unicorn/tmpio.rb
index dcdf9da..db88ed3 100644
--- a/lib/unicorn/tmpio.rb
+++ b/lib/unicorn/tmpio.rb
@@ -22,11 +22,6 @@ class Unicorn::TmpIO < File
     fp
   end
 
-  # for easier env["rack.input"] compatibility with Rack <= 1.1
-  def size
-    stat.size
-  end unless File.method_defined?(:size)
-
   # pretend we're Tempfile for Rack::TempfileReaper
   alias close! close
 end
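
The removed #size shim only mattered on Rubies where File did not define an instance-level #size; on the versions unicorn now supports the guard never fired, and stat.size remains reachable directly:

    File.method_defined?(:size)   # => true on supported Rubies
    f = File.open(__FILE__)
    f.size == f.stat.size         # => true
    f.close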
diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb
index 94c4e37..c7784bd 100644
--- a/lib/unicorn/util.rb
+++ b/lib/unicorn/util.rb
@@ -1,5 +1,6 @@
 # -*- encoding: binary -*-
 
+require 'fcntl'
 module Unicorn::Util
 
 # :stopdoc:
diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb
index e74a1c9..b3f8afe 100644
--- a/lib/unicorn/worker.rb
+++ b/lib/unicorn/worker.rb
@@ -11,7 +11,6 @@ require "raindrops"
 class Unicorn::Worker
   # :stopdoc:
   attr_accessor :nr, :switched
-  attr_writer :tmp
   attr_reader :to_io # IO.select-compatible
 
   PER_DROP = Raindrops::PAGE_SIZE / Raindrops::SIZE
@@ -23,7 +22,7 @@ class Unicorn::Worker
     @offset = nr % PER_DROP
     @raindrop[@offset] = 0
     @nr = nr
-    @tmp = @switched = false
+    @switched = false
     @to_io, @master = Unicorn.pipe
   end
 
@@ -101,18 +100,8 @@ class Unicorn::Worker
     @raindrop[@offset]
   end
 
-  # only exists for compatibility
-  def tmp # :nodoc:
-    @tmp ||= begin
-      tmp = Unicorn::TmpIO.new
-      tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      tmp
-    end
-  end
-
   # called in both the master (reaping worker) and worker (SIGQUIT handler)
   def close # :nodoc:
-    @tmp.close if @tmp
     @master.close if @master
     @to_io.close if @to_io
   end
@@ -141,7 +130,6 @@ class Unicorn::Worker
     uid = Etc.getpwnam(user).uid
     gid = Etc.getgrnam(group).gid if group
     Unicorn::Util.chown_logs(uid, gid)
-    @tmp.chown(uid, gid) if @tmp
     if gid && Process.egid != gid
       Process.initgroups(user, gid)
       Process::GID.change_privilege(gid)