author     Jeremy Evans <code@jeremyevans.net>   2017-03-08 10:19:02 -0800
committer  Eric Wong <e@80x24.org>               2017-03-10 20:27:41 +0000
commit     ea1a4360d66a833d75fbd887388d8cd4fe4ae299 (patch)
tree       c2f631143f2acb5fcfd092097ee6db42ad55f762
parent     59e39cbc68c49623949b3e5c2d7e113e96e90a27 (diff)
download   unicorn-ea1a4360d66a833d75fbd887388d8cd4fe4ae299.tar.gz
The worker_exec configuration option makes all worker processes
exec after forking.  This initializes the worker processes with
separate memory layouts, defeating address space discovery
attacks on operating systems supporting address space layout
randomization, such as Linux, MacOS X, NetBSD, OpenBSD, and
Solaris.
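
For illustration, a minimal configuration sketch (file name and worker
count are arbitrary) enabling the new option:

  # config/unicorn.rb
  worker_processes 4
  preload_app false   # worker_exec is only useful without app preloading
  worker_exec true    # each worker execs after fork for a fresh ASLR layout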

Support for execing workers is very similar to support for reexecing
the master process.  The main difference is that the worker's to_io and
master pipes also need to be inherited after worker exec, just as the
listening sockets need to be inherited after reexec.
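
For example, a simplified sketch of the mechanism (standalone Ruby, not
the patch's exact code; the file name and worker number are made up):

  # parent: create the pipe, advertise its fd numbers to the child, and
  # list the fds in the spawn options so they stay open across exec
  r, w = IO.pipe
  env = { 'UNICORN_WORKER' => "0,#{r.fileno},#{w.fileno}" }
  pid = Process.spawn(env, 'ruby', 'worker.rb', r.fileno => r, w.fileno => w)

  # child (worker.rb): rebuild IO objects from the inherited fd numbers
  require 'kgio'
  nr, *fds = ENV['UNICORN_WORKER'].split(',').map(&:to_i)
  to_io, master = fds.map { |fd| Kgio::Pipe.for_fd(fd) }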

Because execing workers is similar to reexecing the master, this
extracts a couple of methods from reexec (listener_sockets and
close_sockets_on_exec) so they can be reused in worker_spawn.
-rw-r--r--  lib/unicorn/configurator.rb  10
-rw-r--r--  lib/unicorn/http_server.rb   83
-rw-r--r--  lib/unicorn/worker.rb         5
3 files changed, 77 insertions(+), 21 deletions(-)
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 7ed5ffa..f69f220 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -53,6 +53,7 @@ class Unicorn::Configurator
         server.logger.info("worker=#{worker.nr} ready")
       },
     :pid => nil,
+    :worker_exec => false,
     :preload_app => false,
     :check_client_connection => false,
     :rewindable_input => true, # for Rack 2.x: (Rack::VERSION[0] <= 1),
@@ -239,6 +240,15 @@ class Unicorn::Configurator
     set[:timeout] = seconds > max ? max : seconds
   end
 
+  # Whether to exec in each worker process after forking.  This changes the
+  # memory layout of each worker process, which is a security feature designed
+  # to defeat possible address space discovery attacks.  Note that using
+  # worker_exec only makes sense if you are not preloading the application,
+  # and will result in higher memory usage.
+  def worker_exec(bool)
+    set_bool(:worker_exec, bool)
+  end
+
   # sets the current number of worker_processes to +nr+.  Each worker
   # process will serve exactly one client at a time.  You can
   # increment or decrement this value at runtime by sending SIGTTIN
diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb
index ef897ad..a5bd2c4 100644
--- a/lib/unicorn/http_server.rb
+++ b/lib/unicorn/http_server.rb
@@ -15,7 +15,7 @@ class Unicorn::HttpServer
                 :before_fork, :after_fork, :before_exec,
                 :listener_opts, :preload_app,
                 :orig_app, :config, :ready_pipe, :user
-  attr_writer   :after_worker_exit, :after_worker_ready
+  attr_writer   :after_worker_exit, :after_worker_ready, :worker_exec
 
   attr_reader :pid, :logger
   include Unicorn::SocketHelper
@@ -105,6 +105,14 @@ class Unicorn::HttpServer
     # list of signals we care about and trap in master.
     @queue_sigs = [
       :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
+
+    @worker_data = if worker_data = ENV['UNICORN_WORKER']
+      worker_data = worker_data.split(',').map!(&:to_i)
+      worker_data[1] = worker_data.slice!(1..2).map do |i|
+        Kgio::Pipe.for_fd(i)
+      end
+      worker_data
+    end
   end
 
   # Runs the thing.  Returns self so you can run join on it
@@ -113,7 +121,7 @@ class Unicorn::HttpServer
     # this pipe is used to wake us up from select(2) in #join when signals
     # are trapped.  See trap_deferred.
     @self_pipe.replace(Unicorn.pipe)
-    @master_pid = $$
+    @master_pid = @worker_data ? Process.ppid : $$
 
     # setup signal handlers before writing pid file in case people get
     # trigger happy and send signals as soon as the pid file exists.
@@ -430,11 +438,7 @@ class Unicorn::HttpServer
     end
 
     @reexec_pid = fork do
-      listener_fds = {}
-      LISTENERS.each do |sock|
-        sock.close_on_exec = false
-        listener_fds[sock.fileno] = sock
-      end
+      listener_fds = listener_sockets
       ENV['UNICORN_FD'] = listener_fds.keys.join(',')
       Dir.chdir(START_CTX[:cwd])
       cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
@@ -442,12 +446,7 @@ class Unicorn::HttpServer
       # avoid leaking FDs we don't know about, but let before_exec
       # unset FD_CLOEXEC, if anything else in the app eventually
       # relies on FD inheritence.
-      (3..1024).each do |io|
-        next if listener_fds.include?(io)
-        io = IO.for_fd(io) rescue next
-        io.autoclose = false
-        io.close_on_exec = true
-      end
+      close_sockets_on_exec(listener_fds)
 
       # exec(command, hash) works in at least 1.9.1+, but will only be
       # required in 1.9.4/2.0.0 at earliest.
@@ -459,6 +458,40 @@ class Unicorn::HttpServer
     proc_name 'master (old)'
   end
 
+  def worker_spawn(worker)
+    listener_fds = listener_sockets
+    env = {}
+    env['UNICORN_FD'] = listener_fds.keys.join(',')
+
+    listener_fds[worker.to_io.fileno] = worker.to_io
+    listener_fds[worker.master.fileno] = worker.master
+
+    worker_info = [worker.nr, worker.to_io.fileno, worker.master.fileno]
+    env['UNICORN_WORKER'] = worker_info.join(',')
+
+    close_sockets_on_exec(listener_fds)
+
+    Process.spawn(env, START_CTX[0], *START_CTX[:argv], listener_fds)
+  end
+
+  def listener_sockets
+    listener_fds = {}
+    LISTENERS.each do |sock|
+      sock.close_on_exec = false
+      listener_fds[sock.fileno] = sock
+    end
+    listener_fds
+  end
+
+  def close_sockets_on_exec(sockets)
+    (3..1024).each do |io|
+      next if sockets.include?(io)
+      io = IO.for_fd(io) rescue next
+      io.autoclose = false
+      io.close_on_exec = true
+    end
+  end
+
  # forcibly terminate all workers that haven't checked in in timeout seconds.
  # The timeout is implemented using an unlinked File
   def murder_lazy_workers
     next_sleep = @timeout - 1
@@ -495,19 +528,31 @@ class Unicorn::HttpServer
   end
 
   def spawn_missing_workers
+    if @worker_data
+      worker = Unicorn::Worker.new(*@worker_data)
+      after_fork_internal
+      worker_loop(worker)
+      exit
+    end
+
     worker_nr = -1
     until (worker_nr += 1) == @worker_processes
       @workers.value?(worker_nr) and next
       worker = Unicorn::Worker.new(worker_nr)
       before_fork.call(self, worker)
-      if pid = fork
-        @workers[pid] = worker
-        worker.atfork_parent
+
+      pid = if @worker_exec
+        worker_spawn(worker)
       else
-        after_fork_internal
-        worker_loop(worker)
-        exit
+        fork do
+          after_fork_internal
+          worker_loop(worker)
+          exit
+        end
       end
+
+      @workers[pid] = worker
+      worker.atfork_parent
     end
     rescue => e
       @logger.error(e) rescue nil
diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb
index e22c1bf..8bbac5e 100644
--- a/lib/unicorn/worker.rb
+++ b/lib/unicorn/worker.rb
@@ -12,18 +12,19 @@ class Unicorn::Worker
   # :stopdoc:
   attr_accessor :nr, :switched
   attr_reader :to_io # IO.select-compatible
+  attr_reader :master
 
   PER_DROP = Raindrops::PAGE_SIZE / Raindrops::SIZE
   DROPS = []
 
-  def initialize(nr)
+  def initialize(nr, pipe=nil)
     drop_index = nr / PER_DROP
     @raindrop = DROPS[drop_index] ||= Raindrops.new(PER_DROP)
     @offset = nr % PER_DROP
     @raindrop[@offset] = 0
     @nr = nr
     @switched = false
-    @to_io, @master = Unicorn.pipe
+    @to_io, @master = pipe || Unicorn.pipe
   end
 
   def atfork_child # :nodoc: