49 files changed, 2336 insertions, 1907 deletions
diff --git a/Documentation/unicorn.1.txt b/Documentation/unicorn.1.txt
index 24df7ab..c20a570 100644
--- a/Documentation/unicorn.1.txt
+++ b/Documentation/unicorn.1.txt
@@ -36,6 +36,9 @@ with rackup(1) but strongly discouraged.
     implemented as a Ruby DSL, so Ruby code may executed.  See the
     RDoc/ri for the *Unicorn::Configurator* class for the full list of
     directives available from the DSL.
+    Using an absolute path for CONFIG_FILE is recommended as it
+    makes multiple instances of Unicorn easily distinguishable when
+    viewing ps(1) output.
 
 -D, \--daemonize
 :   Run daemonized in the background.  The process is detached from
diff --git a/Documentation/unicorn_rails.1.txt b/Documentation/unicorn_rails.1.txt
index 267e425..f426b07 100644
--- a/Documentation/unicorn_rails.1.txt
+++ b/Documentation/unicorn_rails.1.txt
@@ -34,8 +34,11 @@ as much as possible.
 -c, \--config-file CONFIG_FILE
 :   Path to the Unicorn-specific config file.  The config file is
     implemented as a Ruby DSL, so Ruby code may executed.
-    See the RDoc/ri for the *Unicorn::Configurator* class for the
-    full list of directives available from the DSL.
+    See the RDoc/ri for the *Unicorn::Configurator* class for the full
+    list of directives available from the DSL.
+    Using an absolute path for CONFIG_FILE is recommended as it
+    makes multiple instances of Unicorn easily distinguishable when
+    viewing ps(1) output.
 
 -D, \--daemonize
 :   Run daemonized in the background.  The process is detached from
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index dd2cc51..ae8f539 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v1.0.2.GIT
+DEF_VER=v2.0.0pre3.GIT
 
 LF='
 '
diff --git a/GNUmakefile b/GNUmakefile
index a3b761a..b28bded 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -24,6 +24,12 @@ endif
 
 RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 
+isolate_libs := tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk
+$(isolate_libs): script/isolate_for_tests
+	@$(RUBY) script/isolate_for_tests
+-include $(isolate_libs)
+MYLIBS = $(RUBYLIB):$(ISOLATE_LIBS)
+
 # dunno how to implement this as concisely in Ruby, and hell, I love awk
 awk_slow := awk '/def test_/{print FILENAME"--"$$2".n"}' 2>/dev/null
@@ -117,14 +123,14 @@ run_test = $(quiet_pre) \
 %.n: arg = $(subst .n,,$(subst --, -n ,$@))
 %.n: t = $(subst .n,$(log_suffix),$@)
 %.n: export PATH := $(test_prefix)/bin:$(PATH)
-%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+%.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 %.n: $(test_prefix)/.stamp
 	$(run_test)
 
 $(T): arg = $@
 $(T): t = $(subst .rb,$(log_suffix),$@)
 $(T): export PATH := $(test_prefix)/bin:$(PATH)
-$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T): $(test_prefix)/.stamp
 	$(run_test)
@@ -169,7 +175,7 @@ NEWS: GIT-VERSION-FILE .manifest
 	$(RAKE) -s news_rdoc > $@+
 	mv $@+ $@
 
-SINCE = 1.0.0
+SINCE = 1.1.4
 ChangeLog: LOG_VERSION = \
   $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
     echo $(GIT_VERSION) || git describe)
@@ -251,7 +257,7 @@ $(T_r).%.r: rv = $(subst .r,,$(subst $(T_r).,,$@))
 $(T_r).%.r: extra = ' 'v$(rv)
 $(T_r).%.r: arg = $(T_r)
 $(T_r).%.r: export PATH := $(test_prefix)/bin:$(PATH)
-$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+$(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
 $(T_r).%.r: export UNICORN_RAILS_TEST_VERSION = $(rv)
 $(T_r).%.r: export RAILS_GIT_REPO = $(CURDIR)/$(rails_git)
 $(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/v2.3.8-stamp
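The isolate_libs hook above defers to script/isolate_for_tests, which is not
shown in this diff.  Judging from the :isolate Rake task removed further
down, it presumably installs test-only gems via the Isolate gem and writes a
makefile fragment defining ISOLATE_LIBS for the -include above.  A rough
Ruby sketch of that idea (hypothetical, not the real script; the gem list
and paths are illustrative):

    # script/isolate_for_tests -- hypothetical sketch, not the actual script.
    # Installs test-only gems into a per-Ruby directory (C extensions are not
    # binary-compatible across Ruby versions) and emits the makefile fragment
    # that GNUmakefile -includes, defining ISOLATE_LIBS.
    require 'isolate'

    ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
    path = "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}"
    Isolate.now!(:system => false, :path => path, :multiruby => false) do
      gem 'sqlite3-ruby', '1.2.5' # illustrative; taken from the old Rake task
    end

    # expose the isolated lib paths to make via tmp/isolate/<ruby>.mk
    libs = Dir["#{path}/gems/*/lib"].map { |d| File.expand_path(d) }
    File.open("#{path}.mk", "w") do |fp|
      fp.write("ISOLATE_LIBS := #{libs.join(':')}\n")
    end

With ISOLATE_LIBS defined, MYLIBS expands to RUBYLIB plus the isolated gem
paths, and the test targets above export that as RUBYLIB.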
@@ -16,8 +16,8 @@
 Tests are good, but slow tests make development slow, so we make
 tests faster (in parallel) with GNU make (instead of Rake) and
 avoiding RubyGems.
 
-Users of GNU-based systems (such as GNU/Linux) usually have GNU make installed
-as "make" instead of "gmake".
+Users of GNU-based systems (such as GNU/Linux) usually have GNU make
+installed as "make" instead of "gmake".
 
 Since we don't load RubyGems by default, loading Rack properly requires
 setting up RUBYLIB to point to where Rack is located.  Not loading
@@ -57,11 +57,18 @@ programming experience will come in handy (or be learned) here.
 
 === Documentation
 
-We use RDoc 2.4.x with Darkfish for documentation as much as possible,
+We use RDoc 2.5.x with Darkfish for documentation as much as possible,
 if you're on Ruby 1.8 you want to install the latest "rdoc" gem.  Due
 to the lack of RDoc-to-manpage converters we know about, we're writing
 manpages in Markdown and converting to troff/HTML with Pandoc.
 
+Please wrap documentation at 72 characters-per-line or less (long URLs
+are exempt) so it is comfortably readable from terminals.
+
+When referencing mailing list posts, use
+"http://mid.gmane.org/$MESSAGE_ID" if possible since the Message-ID
+remains searchable even if Gmane becomes unavailable.
+
 === Ruby/C Compatibility
 
 We target Ruby 1.8.6+, 1.9 and will target Rubinius as it becomes
@@ -163,11 +163,12 @@ task :fm_update do
   req = {
     "auth_code" => api_token,
     "release" => {
-      "tag_list" => "Stable",
+      "tag_list" => "Experimental",
       "version" => version,
       "changelog" => changelog,
     },
   }.to_json
+  if ! changelog.strip.empty? && version =~ %r{\A[\d\.]+\d+\z}
   Net::HTTP.start(uri.host, uri.port) do |http|
     p http.post(uri.path, req, {'Content-Type'=>'application/json'})
@@ -193,29 +194,3 @@ begin
   end
 rescue LoadError
 end
-
-task :isolate do
-  require 'isolate'
-  ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
-  opts = {
-    :system => false,
-    :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
-    :multiruby => false, # we want "1.8.7" instead of "1.8"
-  }
-  fp = File.open(__FILE__, "rb")
-  fp.flock(File::LOCK_EX)
-
-  # C extensions aren't binary-compatible across Ruby versions
-  pid = fork { Isolate.now!(opts) { gem 'sqlite3-ruby', '1.2.5' } }
-  _, status = Process.waitpid2(pid)
-  status.success? or abort status.inspect
-
-  # pure Ruby gems can be shared across all Rubies
-  %w(3.0.0.beta4).each do |rails_ver|
-    opts[:path] = "tmp/isolate/rails-#{rails_ver}"
-    pid = fork { Isolate.now!(opts) { gem 'rails', rails_ver } }
-    _, status = Process.waitpid2(pid)
-    status.success? or abort status.inspect
-  end
-  fp.flock(File::LOCK_UN)
-end
@@ -24,6 +24,9 @@ this:
 
 Then use HUP to reload, and then continue with the USR2+QUIT upgrade
 sequence.
 
+Environment variable pollution when exec-ing a new process (with USR2)
+is the primary issue with sandboxing tools such as Bundler and Isolate.
+
 == Bundler
 
 === Running
@@ -42,6 +45,12 @@
 This is no longer be an issue as of bundler 0.9.17
 ref: http://mid.gmane.org/8FC34B23-5994-41CC-B5AF-7198EF06909E@tramchase.com
 
+=== Other ENV pollution issues
+
+You may need to set or reset BUNDLE_GEMFILE, GEM_HOME, GEM_PATH and PATH
+environment variables in the before_exec hook as illustrated by
+http://gist.github.com/534668
+
 == Isolate
 
 === Running
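The before_exec advice in the Sandbox hunk above is easy to act on from a
unicorn config file; a minimal sketch, assuming a Bundler-managed app (the
path is illustrative, and the gist linked above covers more variations):

    # unicorn.conf.rb -- reset sandbox-related ENV before the USR2 re-exec
    # so the new master does not inherit stale Bundler/Isolate settings.
    before_exec do |server|
      ENV["BUNDLE_GEMFILE"] = "/srv/app/current/Gemfile" # path illustrative
      ENV.delete("GEM_HOME")
      ENV.delete("GEM_PATH")
    end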
@@ -3,3 +3,10 @@
 * performance validation (esp. TeeInput)
 
 * improve test suite
+
+* scalability to >= 1024 worker processes for crazy NUMA systems
+
+* Rack 2.x support (when Rack 2.x exists)
+
+* allow disabling "rack.input" rewindability for performance
+  (but violate the Rack 1.x SPEC)
@@ -22,7 +22,13 @@ See Unicorn::Configurator for details on the config file format.
   listeners under Linux 2.6 because auto-tuning is enabled.  UNIX
   domain sockets do not have auto-tuning buffer sizes; so increasing
   those will allow syscalls and task switches to be saved for larger requests
-  and responses.
+  and responses.  If your app only generates small responses or expects
+  small requests, you may shrink the buffer sizes to save memory, too.
+
+* Having socket buffers too large can also be detrimental or have
+  little effect.  Huge buffers can put more pressure on the allocator
+  and may also thrash CPU caches, cancelling out performance gains
+  one would normally expect.
 
 * Setting "preload_app true" can allow copy-on-write-friendly GC to
   be used to save memory.  It will probably not work out of the box with
diff --git a/bin/unicorn b/bin/unicorn
index 2cc10b1..86c938b 100755
--- a/bin/unicorn
+++ b/bin/unicorn
@@ -4,15 +4,13 @@ require 'unicorn/launcher'
 require 'optparse'
 
 ENV["RACK_ENV"] ||= "development"
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
 opts = OptionParser.new("", 24, '  ') do |opts|
-  opts.banner = "Usage: #{File.basename($0)} " \
-                "[ruby options] [unicorn options] [rackup config file]"
-
+  cmd = File.basename($0)
+  opts.banner = "Usage: #{cmd} " \
+                "[ruby options] [#{cmd} options] [rackup config file]"
   opts.separator "Ruby options:"
 
   lineno = 1
@@ -39,29 +37,29 @@ opts = OptionParser.new("", 24, '  ') do |opts|
     require library
   end
 
-  opts.separator "Unicorn options:"
+  opts.separator "#{cmd} options:"
 
   # some of these switches exist for rackup command-line compatibility,
   opts.on("-o", "--host HOST",
          "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-p", "--port PORT",
          "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
-    port = p.to_i
-    set_listener = true
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
  end
 
-  opts.on("-E", "--env ENVIRONMENT",
-          "use ENVIRONMENT for defaults (default: development)") do |e|
+  opts.on("-E", "--env RACK_ENV",
+          "use RACK_ENV for defaults (default: development)") do |e|
    ENV["RACK_ENV"] = e
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
@@ -100,7 +98,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on_tail("-v", "--version", "Show version") do
-    puts "unicorn v#{Unicorn::Const::UNICORN_VERSION}"
+    puts "#{cmd} v#{Unicorn::Const::UNICORN_VERSION}"
     exit
   end
 
@@ -108,16 +106,15 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 end
 
 app = Unicorn.builder(ARGV[0] || 'config.ru', opts)
-options[:listeners] << "#{host}:#{port}" if set_listener
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
  })
 end
 
-Unicorn::Launcher.daemonize!(options) if daemonize
+Unicorn::Launcher.daemonize!(options) if rackup_opts[:daemonize]
 Unicorn.run(app, options)
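The point of routing -o/-p through Unicorn::Configurator::RACKUP instead of
local variables is that the config-file parser can now see (and override)
CLI state.  A rough walkthrough with illustrative values:

    # After OptionParser processes "-o 127.0.0.1 -p 8080" (values illustrative):
    rackup_opts = Unicorn::Configurator::RACKUP
    rackup_opts[:host]         # => "127.0.0.1"
    rackup_opts[:port]         # => 8080
    rackup_opts[:set_listener] # => true

    # Configurator#reload (see the lib/unicorn/configurator.rb hunk below)
    # then turns that state into a listener:
    #   RACKUP[:set_listener] and
    #     set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}"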
diff --git a/bin/unicorn_rails b/bin/unicorn_rails
index 3614fd4..0294b59 100755
--- a/bin/unicorn_rails
+++ b/bin/unicorn_rails
@@ -4,11 +4,9 @@ require 'unicorn/launcher'
 require 'optparse'
 require 'fileutils'
 
-daemonize = false
-options = { :listeners => [] }
-host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
-set_listener = false
 ENV['RAILS_ENV'] ||= "development"
+rackup_opts = Unicorn::Configurator::RACKUP
+options = rackup_opts[:options]
 
 opts = OptionParser.new("", 24, '  ') do |opts|
   cmd = File.basename($0)
@@ -46,13 +44,14 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-o", "--host HOST",
          "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
-    host = h
-    set_listener = true
+    rackup_opts[:host] = h
+    rackup_opts[:set_listener] = true
   end
 
-  opts.on("-p", "--port PORT", "use PORT (default: #{port})") do |p|
-    port = p.to_i
-    set_listener = true
+  opts.on("-p", "--port PORT",
+          "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
+    rackup_opts[:port] = p.to_i
+    rackup_opts[:set_listener] = true
   end
 
   opts.on("-E", "--env RAILS_ENV",
@@ -61,7 +60,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
-    daemonize = d ? true : false
+    rackup_opts[:daemonize] = !!d
   end
 
   # Unicorn-specific stuff
@@ -100,7 +99,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on_tail("-v", "--version", "Show version") do
-    puts " v#{Unicorn::Const::UNICORN_VERSION}"
+    puts "#{cmd} v#{Unicorn::Const::UNICORN_VERSION}"
     exit
   end
 
@@ -186,15 +185,14 @@ def rails_builder(ru, opts, daemonize)
   end
 end
 
-app = rails_builder(ARGV[0], opts, daemonize)
-options[:listeners] << "#{host}:#{port}" if set_listener
+app = rails_builder(ARGV[0], opts, rackup_opts[:daemonize])
 
 if $DEBUG
   require 'pp'
   pp({
     :unicorn_options => options,
     :app => app,
-    :daemonize => daemonize,
+    :daemonize => rackup_opts[:daemonize],
   })
 end
 
@@ -203,7 +201,7 @@ options[:after_reload] = lambda do
   FileUtils.mkdir_p(%w(cache pids sessions sockets).map! { |d| "tmp/#{d}" })
 end
 
-if daemonize
+if rackup_opts[:daemonize]
   options[:pid] = "tmp/pids/unicorn.pid"
   Unicorn::Launcher.daemonize!(options)
 end
diff --git a/examples/unicorn.conf.rb b/examples/unicorn.conf.rb
index 37c3e81..28a9e65 100644
--- a/examples/unicorn.conf.rb
+++ b/examples/unicorn.conf.rb
@@ -63,7 +63,10 @@ before_fork do |server, worker|
   #   end
   # end
   #
-  # # *optionally* throttle the master from forking too quickly by sleeping
+  # Throttle the master from forking too quickly by sleeping.
Due + # to the implementation of standard Unix signal handlers, this + # helps (but does not completely) prevent identical, repeated signals + # from being lost when the receiving process is busy. # sleep 1 end diff --git a/ext/unicorn_http/ext_help.h b/ext/unicorn_http/ext_help.h index 3aa24a8..1f76f54 100644 --- a/ext/unicorn_http/ext_help.h +++ b/ext/unicorn_http/ext_help.h @@ -8,10 +8,6 @@ #define RSTRING_LEN(s) (RSTRING(s)->len) #endif /* !defined(RSTRING_LEN) */ -#ifndef RUBINIUS -# define rb_str_update(x) do {} while (0) -#endif /* !RUBINIUS */ - #ifndef HAVE_RB_STR_SET_LEN # ifdef RUBINIUS # error we should never get here with current Rubinius (1.x) diff --git a/ext/unicorn_http/global_variables.h b/ext/unicorn_http/global_variables.h index 7319bcd..8377704 100644 --- a/ext/unicorn_http/global_variables.h +++ b/ext/unicorn_http/global_variables.h @@ -35,13 +35,15 @@ static VALUE g_HEAD; static const char * const MAX_##N##_LENGTH_ERR = \ "HTTP element " # N " is longer than the " # length " allowed length." +NORETURN(static void parser_error(const char *)); + /** * Validates the max length of given input and throws an HttpParserError * exception if over. */ #define VALIDATE_MAX_LENGTH(len, N) do { \ if (len > MAX_##N##_LENGTH) \ - rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \ + parser_error(MAX_##N##_LENGTH_ERR); \ } while (0) /** Defines global strings in the init method. */ diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl index f6c632f..236fbaa 100644 --- a/ext/unicorn_http/unicorn_http.rl +++ b/ext/unicorn_http/unicorn_http.rl @@ -39,6 +39,8 @@ struct http_parser { size_t field_len; /* only used during header processing */ size_t dest_offset; /* only used during body processing */ } s; + VALUE buf; + VALUE env; VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */ union { off_t content; @@ -46,7 +48,18 @@ struct http_parser { } len; }; -static void finalize_header(struct http_parser *hp, VALUE req); +static ID id_clear; + +static void finalize_header(struct http_parser *hp); + +static void parser_error(const char *msg) +{ + VALUE exc = rb_exc_new2(eHttpParserError, msg); + VALUE bt = rb_ary_new(); + + rb_funcall(exc, rb_intern("set_backtrace"), 1, bt); + rb_exc_raise(exc); +} #define REMAINING (unsigned long)(pe - p) #define LEN(AT, FPC) (FPC - buffer - hp->AT) @@ -88,7 +101,7 @@ static void hp_keepalive_connection(struct http_parser *hp, VALUE val) } static void -request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len) +request_method(struct http_parser *hp, const char *ptr, size_t len) { VALUE v; @@ -106,11 +119,11 @@ request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len) } else { v = rb_str_new(ptr, len); } - rb_hash_aset(req, g_request_method, v); + rb_hash_aset(hp->env, g_request_method, v); } static void -http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len) +http_version(struct http_parser *hp, const char *ptr, size_t len) { VALUE v; @@ -125,14 +138,14 @@ http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len) } else { v = rb_str_new(ptr, len); } - rb_hash_aset(req, g_server_protocol, v); - rb_hash_aset(req, g_http_version, v); + rb_hash_aset(hp->env, g_server_protocol, v); + rb_hash_aset(hp->env, g_http_version, v); } static inline void hp_invalid_if_trailer(struct http_parser *hp) { if (HP_FL_TEST(hp, INTRAILER)) - rb_raise(eHttpParserError, "invalid Trailer"); + parser_error("invalid Trailer"); } static void 
write_cont_value(struct http_parser *hp, @@ -141,7 +154,7 @@ static void write_cont_value(struct http_parser *hp, char *vptr; if (hp->cont == Qfalse) - rb_raise(eHttpParserError, "invalid continuation line"); + parser_error("invalid continuation line"); if (NIL_P(hp->cont)) return; /* we're ignoring this header (probably Host:) */ @@ -163,7 +176,7 @@ static void write_cont_value(struct http_parser *hp, rb_str_buf_cat(hp->cont, vptr, LEN(mark, p)); } -static void write_value(VALUE req, struct http_parser *hp, +static void write_value(struct http_parser *hp, const char *buffer, const char *p) { VALUE f = find_common_field(PTR_TO(start.field), hp->s.field_len); @@ -192,7 +205,7 @@ static void write_value(VALUE req, struct http_parser *hp, } else if (f == g_content_length) { hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v)); if (hp->len.content < 0) - rb_raise(eHttpParserError, "invalid Content-Length"); + parser_error("invalid Content-Length"); HP_FL_SET(hp, HASBODY); hp_invalid_if_trailer(hp); } else if (f == g_http_transfer_encoding) { @@ -209,9 +222,9 @@ static void write_value(VALUE req, struct http_parser *hp, assert_frozen(f); } - e = rb_hash_aref(req, f); + e = rb_hash_aref(hp->env, f); if (NIL_P(e)) { - hp->cont = rb_hash_aset(req, f, v); + hp->cont = rb_hash_aset(hp->env, f, v); } else if (f == g_http_host) { /* * ignored, absolute URLs in REQUEST_URI take precedence over @@ -236,59 +249,55 @@ static void write_value(VALUE req, struct http_parser *hp, action downcase_char { downcase_char(deconst(fpc)); } action write_field { hp->s.field_len = LEN(start.field, fpc); } action start_value { MARK(mark, fpc); } - action write_value { write_value(req, hp, buffer, fpc); } + action write_value { write_value(hp, buffer, fpc); } action write_cont_value { write_cont_value(hp, buffer, fpc); } - action request_method { - request_method(hp, req, PTR_TO(mark), LEN(mark, fpc)); - } + action request_method { request_method(hp, PTR_TO(mark), LEN(mark, fpc)); } action scheme { - rb_hash_aset(req, g_rack_url_scheme, STR_NEW(mark, fpc)); - } - action host { - rb_hash_aset(req, g_http_host, STR_NEW(mark, fpc)); + rb_hash_aset(hp->env, g_rack_url_scheme, STR_NEW(mark, fpc)); } + action host { rb_hash_aset(hp->env, g_http_host, STR_NEW(mark, fpc)); } action request_uri { VALUE str; VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_URI); - str = rb_hash_aset(req, g_request_uri, STR_NEW(mark, fpc)); + str = rb_hash_aset(hp->env, g_request_uri, STR_NEW(mark, fpc)); /* * "OPTIONS * HTTP/1.1\r\n" is a valid request, but we can't have '*' * in REQUEST_PATH or PATH_INFO or else Rack::Lint will complain */ if (STR_CSTR_EQ(str, "*")) { str = rb_str_new(NULL, 0); - rb_hash_aset(req, g_path_info, str); - rb_hash_aset(req, g_request_path, str); + rb_hash_aset(hp->env, g_path_info, str); + rb_hash_aset(hp->env, g_request_path, str); } } action fragment { VALIDATE_MAX_LENGTH(LEN(mark, fpc), FRAGMENT); - rb_hash_aset(req, g_fragment, STR_NEW(mark, fpc)); + rb_hash_aset(hp->env, g_fragment, STR_NEW(mark, fpc)); } action start_query {MARK(start.query, fpc); } action query_string { VALIDATE_MAX_LENGTH(LEN(start.query, fpc), QUERY_STRING); - rb_hash_aset(req, g_query_string, STR_NEW(start.query, fpc)); + rb_hash_aset(hp->env, g_query_string, STR_NEW(start.query, fpc)); } - action http_version { http_version(hp, req, PTR_TO(mark), LEN(mark, fpc)); } + action http_version { http_version(hp, PTR_TO(mark), LEN(mark, fpc)); } action request_path { VALUE val; VALIDATE_MAX_LENGTH(LEN(mark, fpc), REQUEST_PATH); - val = 
rb_hash_aset(req, g_request_path, STR_NEW(mark, fpc)); + val = rb_hash_aset(hp->env, g_request_path, STR_NEW(mark, fpc)); /* rack says PATH_INFO must start with "/" or be empty */ if (!STR_CSTR_EQ(val, "*")) - rb_hash_aset(req, g_path_info, val); + rb_hash_aset(hp->env, g_path_info, val); } action add_to_chunk_size { hp->len.chunk = step_incr(hp->len.chunk, fc, 16); if (hp->len.chunk < 0) - rb_raise(eHttpParserError, "invalid chunk size"); + parser_error("invalid chunk size"); } action header_done { - finalize_header(hp, req); + finalize_header(hp); cs = http_parser_first_final; if (HP_FL_TEST(hp, HASBODY)) { @@ -321,7 +330,7 @@ static void write_value(VALUE req, struct http_parser *hp, action skip_chunk_data { skip_chunk_data_hack: { size_t nr = MIN((size_t)hp->len.chunk, REMAINING); - memcpy(RSTRING_PTR(req) + hp->s.dest_offset, fpc, nr); + memcpy(RSTRING_PTR(hp->cont) + hp->s.dest_offset, fpc, nr); hp->s.dest_offset += nr; hp->len.chunk -= nr; p += nr; @@ -344,15 +353,20 @@ static void write_value(VALUE req, struct http_parser *hp, static void http_parser_init(struct http_parser *hp) { int cs = 0; - memset(hp, 0, sizeof(struct http_parser)); + hp->flags = 0; + hp->mark = 0; + hp->offset = 0; + hp->start.field = 0; + hp->s.field_len = 0; + hp->len.content = 0; hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */ %% write init; hp->cs = cs; } /** exec **/ -static void http_parser_execute(struct http_parser *hp, - VALUE req, char *buffer, size_t len) +static void +http_parser_execute(struct http_parser *hp, char *buffer, size_t len) { const char *p, *pe; int cs = hp->cs; @@ -392,20 +406,20 @@ static struct http_parser *data_get(VALUE self) return hp; } -static void finalize_header(struct http_parser *hp, VALUE req) +static void finalize_header(struct http_parser *hp) { - VALUE temp = rb_hash_aref(req, g_rack_url_scheme); + VALUE temp = rb_hash_aref(hp->env, g_rack_url_scheme); VALUE server_name = g_localhost; VALUE server_port = g_port_80; /* set rack.url_scheme to "https" or "http", no others are allowed by Rack */ if (NIL_P(temp)) { - temp = rb_hash_aref(req, g_http_x_forwarded_proto); + temp = rb_hash_aref(hp->env, g_http_x_forwarded_proto); if (!NIL_P(temp) && STR_CSTR_EQ(temp, "https")) server_port = g_port_443; else temp = g_http; - rb_hash_aset(req, g_rack_url_scheme, temp); + rb_hash_aset(hp->env, g_rack_url_scheme, temp); } else if (STR_CSTR_EQ(temp, "https")) { server_port = g_port_443; } else { @@ -413,7 +427,7 @@ static void finalize_header(struct http_parser *hp, VALUE req) } /* parse and set the SERVER_NAME and SERVER_PORT variables */ - temp = rb_hash_aref(req, g_http_host); + temp = rb_hash_aref(hp->env, g_http_host); if (!NIL_P(temp)) { char *colon = memchr(RSTRING_PTR(temp), ':', RSTRING_LEN(temp)); if (colon) { @@ -426,20 +440,22 @@ static void finalize_header(struct http_parser *hp, VALUE req) server_name = temp; } } - rb_hash_aset(req, g_server_name, server_name); - rb_hash_aset(req, g_server_port, server_port); + rb_hash_aset(hp->env, g_server_name, server_name); + rb_hash_aset(hp->env, g_server_port, server_port); if (!HP_FL_TEST(hp, HASHEADER)) - rb_hash_aset(req, g_server_protocol, g_http_09); + rb_hash_aset(hp->env, g_server_protocol, g_http_09); /* rack requires QUERY_STRING */ - if (NIL_P(rb_hash_aref(req, g_query_string))) - rb_hash_aset(req, g_query_string, rb_str_new(NULL, 0)); + if (NIL_P(rb_hash_aref(hp->env, g_query_string))) + rb_hash_aset(hp->env, g_query_string, rb_str_new(NULL, 0)); } static void hp_mark(void *ptr) { struct 
http_parser *hp = ptr; + rb_gc_mark(hp->buf); + rb_gc_mark(hp->env); rb_gc_mark(hp->cont); } @@ -458,7 +474,11 @@ static VALUE HttpParser_alloc(VALUE klass) */ static VALUE HttpParser_init(VALUE self) { - http_parser_init(data_get(self)); + struct http_parser *hp = data_get(self); + + http_parser_init(hp); + hp->buf = rb_str_new(NULL, 0); + hp->env = rb_hash_new(); return self; } @@ -472,7 +492,10 @@ static VALUE HttpParser_init(VALUE self) */ static VALUE HttpParser_reset(VALUE self) { - http_parser_init(data_get(self)); + struct http_parser *hp = data_get(self); + + http_parser_init(hp); + rb_funcall(hp->env, id_clear, 0); return Qnil; } @@ -513,32 +536,23 @@ static VALUE HttpParser_content_length(VALUE self) } /** - * Document-method: trailers - * call-seq: - * parser.trailers(req, data) => req or nil - * - * This is an alias for HttpParser#headers - */ - -/** - * Document-method: headers + * Document-method: parse * call-seq: - * parser.headers(req, data) => req or nil + * parser.parse => env or nil * * Takes a Hash and a String of data, parses the String of data filling * in the Hash returning the Hash if parsing is finished, nil otherwise - * When returning the req Hash, it may modify data to point to where + * When returning the env Hash, it may modify data to point to where * body processing should begin. * * Raises HttpParserError if there are parsing errors. */ -static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data) +static VALUE HttpParser_parse(VALUE self) { struct http_parser *hp = data_get(self); + VALUE data = hp->buf; - rb_str_update(data); - - http_parser_execute(hp, req, RSTRING_PTR(data), RSTRING_LEN(data)); + http_parser_execute(hp, RSTRING_PTR(data), RSTRING_LEN(data)); VALIDATE_MAX_LENGTH(hp->offset, HEADER); if (hp->cs == http_parser_first_final || @@ -546,15 +560,36 @@ static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data) advance_str(data, hp->offset + 1); hp->offset = 0; - return req; + return hp->env; } if (hp->cs == http_parser_error) - rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails."); + parser_error("Invalid HTTP format, parsing fails."); return Qnil; } +/** + * Document-method: trailers + * call-seq: + * parser.trailers(req, data) => req or nil + * + * This is an alias for HttpParser#headers + */ + +/** + * Document-method: headers + */ +static VALUE HttpParser_headers(VALUE self, VALUE env, VALUE buf) +{ + struct http_parser *hp = data_get(self); + + hp->env = env; + hp->buf = buf; + + return HttpParser_parse(self); +} + static int chunked_eof(struct http_parser *hp) { return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER)); @@ -610,6 +645,16 @@ static VALUE HttpParser_has_headers(VALUE self) return HP_FL_TEST(hp, HASHEADER) ? 
Qtrue : Qfalse; } +static VALUE HttpParser_buf(VALUE self) +{ + return data_get(self)->buf; +} + +static VALUE HttpParser_env(VALUE self) +{ + return data_get(self)->env; +} + /** * call-seq: * parser.filter_body(buf, data) => nil/data @@ -630,7 +675,6 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data) char *dptr; long dlen; - rb_str_update(data); dptr = RSTRING_PTR(data); dlen = RSTRING_LEN(data); @@ -641,9 +685,11 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data) if (HP_FL_TEST(hp, CHUNKED)) { if (!chunked_eof(hp)) { hp->s.dest_offset = 0; - http_parser_execute(hp, buf, dptr, dlen); + hp->cont = buf; + hp->buf = data; + http_parser_execute(hp, dptr, dlen); if (hp->cs == http_parser_error) - rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails."); + parser_error("Invalid HTTP format, parsing fails."); assert(hp->s.dest_offset <= hp->offset && "destination buffer overflow"); @@ -684,7 +730,7 @@ void Init_unicorn_http(void) { VALUE mUnicorn, cHttpParser; - mUnicorn = rb_define_module("Unicorn"); + mUnicorn = rb_const_get(rb_cObject, rb_intern("Unicorn")); cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject); eHttpParserError = rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError); @@ -693,13 +739,16 @@ void Init_unicorn_http(void) rb_define_alloc_func(cHttpParser, HttpParser_alloc); rb_define_method(cHttpParser, "initialize", HttpParser_init,0); rb_define_method(cHttpParser, "reset", HttpParser_reset,0); + rb_define_method(cHttpParser, "parse", HttpParser_parse, 0); rb_define_method(cHttpParser, "headers", HttpParser_headers, 2); - rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2); rb_define_method(cHttpParser, "trailers", HttpParser_headers, 2); + rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2); rb_define_method(cHttpParser, "content_length", HttpParser_content_length, 0); rb_define_method(cHttpParser, "body_eof?", HttpParser_body_eof, 0); rb_define_method(cHttpParser, "keepalive?", HttpParser_keepalive, 0); rb_define_method(cHttpParser, "headers?", HttpParser_has_headers, 0); + rb_define_method(cHttpParser, "buf", HttpParser_buf, 0); + rb_define_method(cHttpParser, "env", HttpParser_env, 0); /* * The maximum size a single chunk when using chunked transfer encoding. @@ -722,5 +771,6 @@ void Init_unicorn_http(void) SET_GLOBAL(g_http_transfer_encoding, "TRANSFER_ENCODING"); SET_GLOBAL(g_content_length, "CONTENT_LENGTH"); SET_GLOBAL(g_http_connection, "CONNECTION"); + id_clear = rb_intern("clear"); } #undef SET_GLOBAL diff --git a/lib/unicorn.rb b/lib/unicorn.rb index f454eb7..622dc6c 100644 --- a/lib/unicorn.rb +++ b/lib/unicorn.rb @@ -1,834 +1,83 @@ # -*- encoding: binary -*- - require 'fcntl' require 'etc' +require 'stringio' require 'rack' -require 'unicorn/socket_helper' -require 'unicorn/const' -require 'unicorn/http_request' -require 'unicorn/configurator' -require 'unicorn/util' -require 'unicorn/tee_input' -require 'unicorn/http_response' +require 'kgio' -# Unicorn module containing all of the classes (include C extensions) for running -# a Unicorn web server. It contains a minimalist HTTP server with just enough -# functionality to service web application requests fast as possible. +# Unicorn module containing all of the classes (include C extensions) for +# running a Unicorn web server. It contains a minimalist HTTP server with just +# enough functionality to service web application requests fast as possible. 
module Unicorn - - # raised inside TeeInput when a client closes the socket inside the - # application dispatch. This is always raised with an empty backtrace - # since there is nothing in the application stack that is responsible - # for client shutdowns/disconnects. - class ClientShutdown < EOFError - end - - class << self - def run(app, options = {}) - HttpServer.new(app, options).start.join - end - - # This returns a lambda to pass in as the app, this does not "build" the - # app (which we defer based on the outcome of "preload_app" in the - # Unicorn config). The returned lambda will be called when it is - # time to build the app. - def builder(ru, opts) - # allow Configurator to parse cli switches embedded in the ru file - Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts) - - # always called after config file parsing, may be called after forking - lambda do || - inner_app = case ru - when /\.ru$/ - raw = File.read(ru) - raw.sub!(/^__END__\n.*/, '') - eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru) - else - require ru - Object.const_get(File.basename(ru, '.rb').capitalize) - end - - pp({ :inner_app => inner_app }) if $DEBUG - - # return value, matches rackup defaults based on env - case ENV["RACK_ENV"] - when "development" - Rack::Builder.new do - use Rack::CommonLogger, $stderr - use Rack::ShowExceptions - use Rack::Lint - run inner_app - end.to_app - when "deployment" - Rack::Builder.new do - use Rack::CommonLogger, $stderr - run inner_app - end.to_app - else - inner_app - end - end - end - - # returns an array of strings representing TCP listen socket addresses - # and Unix domain socket paths. This is useful for use with - # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/ - def listener_names - HttpServer::LISTENERS.map { |io| SocketHelper.sock_name(io) } - end + def self.run(app, options = {}) + Unicorn::HttpServer.new(app, options).start.join end - # This is the process manager of Unicorn. This manages worker - # processes which in turn handle the I/O and application process. - # Listener sockets are started in the master process and shared with - # forked worker children. - - class HttpServer < Struct.new(:app, :timeout, :worker_processes, - :before_fork, :after_fork, :before_exec, - :logger, :pid, :listener_opts, :preload_app, - :reexec_pid, :orig_app, :init_listeners, - :master_pid, :config, :ready_pipe, :user) - include ::Unicorn::SocketHelper - - # prevents IO objects in here from being GC-ed - IO_PURGATORY = [] - - # all bound listener sockets - LISTENERS = [] - - # This hash maps PIDs to Workers - WORKERS = {} - - # We use SELF_PIPE differently in the master and worker processes: - # - # * The master process never closes or reinitializes this once - # initialized. Signal handlers in the master process will write to - # it to wake up the master from IO.select in exactly the same manner - # djb describes in http://cr.yp.to/docs/selfpipe.html - # - # * The workers immediately close the pipe they inherit from the - # master and replace it with a new pipe after forking. This new - # pipe is also used to wakeup from IO.select from inside (worker) - # signal handlers. However, workers *close* the pipe descriptors in - # the signal handlers to raise EBADF in IO.select instead of writing - # like we do in the master. We cannot easily use the reader set for - # IO.select because LISTENERS is already that set, and it's extra - # work (and cycles) to distinguish the pipe FD from the reader set - # once IO.select returns. 
So we're lazy and just close the pipe when - # a (rare) signal arrives in the worker and reinitialize the pipe later. - SELF_PIPE = [] - - # signal queue used for self-piping - SIG_QUEUE = [] - - # constant lookups are faster and we're single-threaded/non-reentrant - REQUEST = HttpRequest.new - - # We populate this at startup so we can figure out how to reexecute - # and upgrade the currently running instance of Unicorn - # This Hash is considered a stable interface and changing its contents - # will allow you to switch between different installations of Unicorn - # or even different installations of the same applications without - # downtime. Keys of this constant Hash are described as follows: - # - # * 0 - the path to the unicorn/unicorn_rails executable - # * :argv - a deep copy of the ARGV array the executable originally saw - # * :cwd - the working directory of the application, this is where - # you originally started Unicorn. - # - # To change your unicorn executable to a different path without downtime, - # you can set the following in your Unicorn config file, HUP and then - # continue with the traditional USR2 + QUIT upgrade steps: - # - # Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn" - START_CTX = { - :argv => ARGV.map { |arg| arg.dup }, - :cwd => lambda { - # favor ENV['PWD'] since it is (usually) symlink aware for - # Capistrano and like systems - begin - a = File.stat(pwd = ENV['PWD']) - b = File.stat(Dir.pwd) - a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd - rescue - Dir.pwd - end - }.call, - 0 => $0.dup, - } - - # This class and its members can be considered a stable interface - # and will not change in a backwards-incompatible fashion between - # releases of Unicorn. You may need to access it in the - # before_fork/after_fork hooks. See the Unicorn::Configurator RDoc - # for examples. - class Worker < Struct.new(:nr, :tmp, :switched) - - # worker objects may be compared to just plain numbers - def ==(other_nr) - self.nr == other_nr - end - - # Changes the worker process to the specified +user+ and +group+ - # This is only intended to be called from within the worker - # process from the +after_fork+ hook. This should be called in - # the +after_fork+ hook after any priviledged functions need to be - # run (e.g. to set per-worker CPU affinity, niceness, etc) - # - # Any and all errors raised within this method will be propagated - # directly back to the caller (usually the +after_fork+ hook. - # These errors commonly include ArgumentError for specifying an - # invalid user/group and Errno::EPERM for insufficient priviledges - def user(user, group = nil) - # we do not protect the caller, checking Process.euid == 0 is - # insufficient because modern systems have fine-grained - # capabilities. Let the caller handle any and all errors. - uid = Etc.getpwnam(user).uid - gid = Etc.getgrnam(group).gid if group - Unicorn::Util.chown_logs(uid, gid) - tmp.chown(uid, gid) - if gid && Process.egid != gid - Process.initgroups(user, gid) - Process::GID.change_privilege(gid) - end - Process.euid != uid and Process::UID.change_privilege(uid) - self.switched = true - end - - end - - # Creates a working server on host:port (strange things happen if - # port isn't a Number). Use HttpServer::run to start the server and - # HttpServer.run.join to join the thread that's processing - # incoming requests on the socket. 
- def initialize(app, options = {}) - self.app = app - self.reexec_pid = 0 - self.ready_pipe = options.delete(:ready_pipe) - self.init_listeners = options[:listeners] ? options[:listeners].dup : [] - self.config = Configurator.new(options.merge(:use_defaults => true)) - self.listener_opts = {} - - # we try inheriting listeners first, so we bind them later. - # we don't write the pid file until we've bound listeners in case - # unicorn was started twice by mistake. Even though our #pid= method - # checks for stale/existing pid files, race conditions are still - # possible (and difficult/non-portable to avoid) and can be likely - # to clobber the pid if the second start was in quick succession - # after the first, so we rely on the listener binding to fail in - # that case. Some tests (in and outside of this source tree) and - # monitoring tools may also rely on pid files existing before we - # attempt to connect to the listener(s) - config.commit!(self, :skip => [:listeners, :pid]) - self.orig_app = app - end - - # Runs the thing. Returns self so you can run join on it - def start - BasicSocket.do_not_reverse_lookup = true - - # inherit sockets from parents, they need to be plain Socket objects - # before they become UNIXServer or TCPServer - inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd| - io = Socket.for_fd(fd.to_i) - set_server_sockopt(io, listener_opts[sock_name(io)]) - IO_PURGATORY << io - logger.info "inherited addr=#{sock_name(io)} fd=#{fd}" - server_cast(io) - end - - config_listeners = config[:listeners].dup - LISTENERS.replace(inherited) - - # we start out with generic Socket objects that get cast to either - # TCPServer or UNIXServer objects; but since the Socket objects - # share the same OS-level file descriptor as the higher-level *Server - # objects; we need to prevent Socket objects from being garbage-collected - config_listeners -= listener_names - if config_listeners.empty? && LISTENERS.empty? - config_listeners << Unicorn::Const::DEFAULT_LISTEN - init_listeners << Unicorn::Const::DEFAULT_LISTEN - START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}" - end - config_listeners.each { |addr| listen(addr) } - raise ArgumentError, "no listeners" if LISTENERS.empty? - - # this pipe is used to wake us up from select(2) in #join when signals - # are trapped. See trap_deferred. - init_self_pipe! - - # setup signal handlers before writing pid file in case people get - # trigger happy and send signals as soon as the pid file exists. - # Note that signals don't actually get handled until the #join method - QUEUE_SIGS.each { |sig| trap_deferred(sig) } - trap(:CHLD) { |_| awaken_master } - self.pid = config[:pid] - - self.master_pid = $$ - build_app! if preload_app - maintain_worker_count - self - end - - # replaces current listener set with +listeners+. This will - # close the socket if it will not exist in the new listener set - def listeners=(listeners) - cur_names, dead_names = [], [] - listener_names.each do |name| - if ?/ == name[0] - # mark unlinked sockets as dead so we can rebind them - (File.socket?(name) ? cur_names : dead_names) << name - else - cur_names << name - end - end - set_names = listener_names(listeners) - dead_names.concat(cur_names - set_names).uniq! - - LISTENERS.delete_if do |io| - if dead_names.include?(sock_name(io)) - IO_PURGATORY.delete_if do |pio| - pio.fileno == io.fileno && (pio.close rescue nil).nil? # true - end - (io.close rescue nil).nil? 
# true - else - set_server_sockopt(io, listener_opts[sock_name(io)]) - false - end - end - - (set_names - cur_names).each { |addr| listen(addr) } - end - - def stdout_path=(path); redirect_io($stdout, path); end - def stderr_path=(path); redirect_io($stderr, path); end - - def logger=(obj) - HttpRequest::DEFAULTS["rack.logger"] = super - end - - # sets the path for the PID file of the master process - def pid=(path) - if path - if x = valid_pid?(path) - return path if pid && path == pid && x == $$ - if x == reexec_pid && pid =~ /\.oldbin\z/ - logger.warn("will not set pid=#{path} while reexec-ed "\ - "child is running PID:#{x}") - return - end - raise ArgumentError, "Already running on PID:#{x} " \ - "(or pid=#{path} is stale)" - end - end - unlink_pid_safe(pid) if pid - - if path - fp = begin - tmp = "#{File.dirname(path)}/#{rand}.#$$" - File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644) - rescue Errno::EEXIST - retry - end - fp.syswrite("#$$\n") - File.rename(fp.path, path) - fp.close - end - super(path) - end - - # add a given address to the +listeners+ set, idempotently - # Allows workers to add a private, per-process listener via the - # after_fork hook. Very useful for debugging and testing. - # +:tries+ may be specified as an option for the number of times - # to retry, and +:delay+ may be specified as the time in seconds - # to delay between retries. - # A negative value for +:tries+ indicates the listen will be - # retried indefinitely, this is useful when workers belonging to - # different masters are spawned during a transparent upgrade. - def listen(address, opt = {}.merge(listener_opts[address] || {})) - address = config.expand_addr(address) - return if String === address && listener_names.include?(address) - - delay = opt[:delay] || 0.5 - tries = opt[:tries] || 5 - begin - io = bind_listen(address, opt) - unless TCPServer === io || UNIXServer === io - IO_PURGATORY << io - io = server_cast(io) - end - logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}" - LISTENERS << io - io - rescue Errno::EADDRINUSE => err - logger.error "adding listener failed addr=#{address} (in use)" - raise err if tries == 0 - tries -= 1 - logger.error "retrying in #{delay} seconds " \ - "(#{tries < 0 ? 'infinite' : tries} tries left)" - sleep(delay) - retry - rescue => err - logger.fatal "error adding listener addr=#{address}" - raise err - end - end - - # monitors children and receives signals forever - # (or until a termination signal is sent). This handles signals - # one-at-a-time time and we'll happily drop signals in case somebody - # is signalling us too often. - def join - respawn = true - last_check = Time.now - - proc_name 'master' - logger.info "master process ready" # test_exec.rb relies on this message - if ready_pipe - ready_pipe.syswrite($$.to_s) - ready_pipe.close rescue nil - self.ready_pipe = nil - end - begin - loop do - reap_all_workers - case SIG_QUEUE.shift - when nil - # avoid murdering workers after our master process (or the - # machine) comes out of suspend/hibernation - if (last_check + timeout) >= (last_check = Time.now) - murder_lazy_workers - else - # wait for workers to wakeup on suspend - master_sleep(timeout/2.0 + 1) - end - maintain_worker_count if respawn - master_sleep(1) - when :QUIT # graceful shutdown - break - when :TERM, :INT # immediate shutdown - stop(false) - break - when :USR1 # rotate logs - logger.info "master reopening logs..." 
- Unicorn::Util.reopen_logs - logger.info "master done reopening logs" - kill_each_worker(:USR1) - when :USR2 # exec binary, stay alive in case something went wrong - reexec - when :WINCH - if Process.ppid == 1 || Process.getpgrp != $$ - respawn = false - logger.info "gracefully stopping all workers" - kill_each_worker(:QUIT) - self.worker_processes = 0 - else - logger.info "SIGWINCH ignored because we're not daemonized" - end - when :TTIN - respawn = true - self.worker_processes += 1 - when :TTOU - self.worker_processes -= 1 if self.worker_processes > 0 - when :HUP - respawn = true - if config.config_file - load_config! - redo # immediate reaping since we may have QUIT workers - else # exec binary and exit if there's no config file - logger.info "config_file not present, reexecuting binary" - reexec - break - end - end - end - rescue Errno::EINTR - retry - rescue => e - logger.error "Unhandled master loop exception #{e.inspect}." - logger.error e.backtrace.join("\n") - retry - end - stop # gracefully shutdown all workers on our way out - logger.info "master complete" - unlink_pid_safe(pid) if pid - end - - # Terminates all workers, but does not exit master process - def stop(graceful = true) - self.listeners = [] - limit = Time.now + timeout - until WORKERS.empty? || Time.now > limit - kill_each_worker(graceful ? :QUIT : :TERM) - sleep(0.1) - reap_all_workers - end - kill_each_worker(:KILL) - end - - private - - # list of signals we care about and trap in master. - QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, - :TTIN, :TTOU ] - - # defer a signal for later processing in #join (master process) - def trap_deferred(signal) - trap(signal) do |sig_nr| - if SIG_QUEUE.size < 5 - SIG_QUEUE << signal - awaken_master - else - logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}" - end - end - end - - # wait for a signal hander to wake us up and then consume the pipe - # Wake up every second anyways to run murder_lazy_workers - def master_sleep(sec) - IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return - SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF) - rescue Errno::EAGAIN, Errno::EINTR - end - - def awaken_master - begin - SELF_PIPE.last.write_nonblock('.') # wakeup master process from select - rescue Errno::EAGAIN, Errno::EINTR - # pipe is full, master should wake up anyways - retry - end - end - - # reaps all unreaped workers - def reap_all_workers - begin - loop do - wpid, status = Process.waitpid2(-1, Process::WNOHANG) - wpid or break - if reexec_pid == wpid - logger.error "reaped #{status.inspect} exec()-ed" - self.reexec_pid = 0 - self.pid = pid.chomp('.oldbin') if pid - proc_name 'master' - else - worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil - logger.info "reaped #{status.inspect} " \ - "worker=#{worker.nr rescue 'unknown'}" - end - end - rescue Errno::ECHILD - end - end - - # reexecutes the START_CTX with a new binary - def reexec - if reexec_pid > 0 - begin - Process.kill(0, reexec_pid) - logger.error "reexec-ed child already running PID:#{reexec_pid}" - return - rescue Errno::ESRCH - self.reexec_pid = 0 - end - end - - if pid - old_pid = "#{pid}.oldbin" - prev_pid = pid.dup - begin - self.pid = old_pid # clear the path for a new pid file - rescue ArgumentError - logger.error "old PID:#{valid_pid?(old_pid)} running with " \ - "existing pid=#{old_pid}, refusing rexec" - return - rescue => e - logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}" - return - end - end - - self.reexec_pid = fork do - listener_fds = 
LISTENERS.map { |sock| sock.fileno } - ENV['UNICORN_FD'] = listener_fds.join(',') - Dir.chdir(START_CTX[:cwd]) - cmd = [ START_CTX[0] ].concat(START_CTX[:argv]) - - # avoid leaking FDs we don't know about, but let before_exec - # unset FD_CLOEXEC, if anything else in the app eventually - # relies on FD inheritence. - (3..1024).each do |io| - next if listener_fds.include?(io) - io = IO.for_fd(io) rescue nil - io or next - IO_PURGATORY << io - io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) - end - logger.info "executing #{cmd.inspect} (in #{Dir.pwd})" - before_exec.call(self) - exec(*cmd) - end - proc_name 'master (old)' - end - - # forcibly terminate all workers that haven't checked in in timeout - # seconds. The timeout is implemented using an unlinked File - # shared between the parent process and each worker. The worker - # runs File#chmod to modify the ctime of the File. If the ctime - # is stale for >timeout seconds, then we'll kill the corresponding - # worker. - def murder_lazy_workers - WORKERS.dup.each_pair do |wpid, worker| - stat = worker.tmp.stat - # skip workers that disable fchmod or have never fchmod-ed - stat.mode == 0100600 and next - (diff = (Time.now - stat.ctime)) <= timeout and next - logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \ - "(#{diff}s > #{timeout}s), killing" - kill_worker(:KILL, wpid) # take no prisoners for timeout violations - end - end - - def spawn_missing_workers - (0...worker_processes).each do |worker_nr| - WORKERS.values.include?(worker_nr) and next - worker = Worker.new(worker_nr, Unicorn::Util.tmpio) - before_fork.call(self, worker) - WORKERS[fork { - ready_pipe.close if ready_pipe - self.ready_pipe = nil - worker_loop(worker) - }] = worker - end - end - - def maintain_worker_count - (off = WORKERS.size - worker_processes) == 0 and return - off < 0 and return spawn_missing_workers - WORKERS.dup.each_pair { |wpid,w| - w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil - } - end - - # if we get any error, try to write something back to the client - # assuming we haven't closed the socket, but don't get hung up - # if the socket is already closed or broken. We'll always ensure - # the socket is closed at the end of this function - def handle_error(client, e) - msg = case e - when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF - Const::ERROR_500_RESPONSE - when HttpParserError # try to tell the client they're bad - Const::ERROR_400_RESPONSE + # This returns a lambda to pass in as the app, this does not "build" the + # app (which we defer based on the outcome of "preload_app" in the + # Unicorn config). The returned lambda will be called when it is + # time to build the app. 
+ def self.builder(ru, opts) + # allow Configurator to parse cli switches embedded in the ru file + Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts) + + # always called after config file parsing, may be called after forking + lambda do || + inner_app = case ru + when /\.ru$/ + raw = File.read(ru) + raw.sub!(/^__END__\n.*/, '') + eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru) else - logger.error "Read error: #{e.inspect}" - logger.error e.backtrace.join("\n") - Const::ERROR_500_RESPONSE - end - client.write_nonblock(msg) - client.close - rescue - nil - end - - # once a client is accepted, it is processed in its entirety here - # in 3 easy steps: read request, call app, write app response - def process_client(client) - client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) - response = app.call(env = REQUEST.read(client)) - - if 100 == response.first.to_i - client.write(Const::EXPECT_100_RESPONSE) - env.delete(Const::HTTP_EXPECT) - response = app.call(env) - end - HttpResponse.write(client, response, HttpRequest::PARSER.headers?) - rescue => e - handle_error(client, e) - end - - # gets rid of stuff the worker has no business keeping track of - # to free some resources and drops all sig handlers. - # traps for USR1, USR2, and HUP may be set in the after_fork Proc - # by the user. - def init_worker_process(worker) - QUEUE_SIGS.each { |sig| trap(sig, nil) } - trap(:CHLD, 'DEFAULT') - SIG_QUEUE.clear - proc_name "worker[#{worker.nr}]" - START_CTX.clear - init_self_pipe! - WORKERS.values.each { |other| other.tmp.close rescue nil } - WORKERS.clear - LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) } - worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) - after_fork.call(self, worker) # can drop perms - worker.user(*user) if user.kind_of?(Array) && ! worker.switched - self.timeout /= 2.0 # halve it for select() - build_app! unless preload_app - end - - def reopen_worker_logs(worker_nr) - logger.info "worker=#{worker_nr} reopening logs..." - Unicorn::Util.reopen_logs - logger.info "worker=#{worker_nr} done reopening logs" - init_self_pipe! - end - - # runs inside each forked worker, this sits around and waits - # for connections and doesn't die until the parent dies (or is - # given a INT, QUIT, or TERM signal) - def worker_loop(worker) - ppid = master_pid - init_worker_process(worker) - nr = 0 # this becomes negative if we need to reopen logs - alive = worker.tmp # tmp is our lifeline to the master process - ready = LISTENERS - - # closing anything we IO.select on will raise EBADF - trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil } - trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } } - [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown - logger.info "worker=#{worker.nr} ready" - m = 0 - - begin - nr < 0 and reopen_worker_logs(worker.nr) - nr = 0 - - # we're a goner in timeout seconds anyways if alive.chmod - # breaks, so don't trap the exception. Using fchmod() since - # futimes() is not available in base Ruby and I very strongly - # prefer temporary files to be unlinked for security, - # performance and reliability reasons, so utime is out. No-op - # changes with chmod doesn't update ctime on all filesystems; so - # we change our counter each and every time (after process_client - # and before IO.select). - alive.chmod(m = 0 == m ? 1 : 0) - - ready.each do |sock| - begin - process_client(sock.accept_nonblock) - nr += 1 - alive.chmod(m = 0 == m ? 
1 : 0) - rescue Errno::EAGAIN, Errno::ECONNABORTED - end - break if nr < 0 - end - - # make the following bet: if we accepted clients this round, - # we're probably reasonably busy, so avoid calling select() - # and do a speculative accept_nonblock on ready listeners - # before we sleep again in select(). - redo unless nr == 0 # (nr < 0) => reopen logs - - ppid == Process.ppid or return - alive.chmod(m = 0 == m ? 1 : 0) - begin - # timeout used so we can detect parent death: - ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo - ready = ret.first - rescue Errno::EINTR - ready = LISTENERS - rescue Errno::EBADF - nr < 0 or return - end - rescue => e - if alive - logger.error "Unhandled listen loop exception #{e.inspect}." - logger.error e.backtrace.join("\n") - end - end while alive - end - - # delivers a signal to a worker and fails gracefully if the worker - # is no longer running. - def kill_worker(signal, wpid) - begin - Process.kill(signal, wpid) - rescue Errno::ESRCH - worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil - end - end - - # delivers a signal to each worker - def kill_each_worker(signal) - WORKERS.keys.each { |wpid| kill_worker(signal, wpid) } - end - - # unlinks a PID file at given +path+ if it contains the current PID - # still potentially racy without locking the directory (which is - # non-portable and may interact badly with other programs), but the - # window for hitting the race condition is small - def unlink_pid_safe(path) - (File.read(path).to_i == $$ and File.unlink(path)) rescue nil - end - - # returns a PID if a given path contains a non-stale PID file, - # nil otherwise. - def valid_pid?(path) - wpid = File.read(path).to_i - wpid <= 0 and return nil - begin - Process.kill(0, wpid) - wpid - rescue Errno::ESRCH - # don't unlink stale pid files, racy without non-portable locking... - end - rescue Errno::ENOENT - end - - def load_config! - loaded_app = app - begin - logger.info "reloading config_file=#{config.config_file}" - config[:listeners].replace(init_listeners) - config.reload - config.commit!(self) - kill_each_worker(:QUIT) - Unicorn::Util.reopen_logs - self.app = orig_app - build_app! if preload_app - logger.info "done reloading config_file=#{config.config_file}" - rescue StandardError, LoadError, SyntaxError => e - logger.error "error reloading config_file=#{config.config_file}: " \ - "#{e.class} #{e.message} #{e.backtrace}" - self.app = loaded_app - end - end - - # returns an array of string names for the given listener array - def listener_names(listeners = LISTENERS) - listeners.map { |io| sock_name(io) } - end - - def build_app! - if app.respond_to?(:arity) && app.arity == 0 - if defined?(Gem) && Gem.respond_to?(:refresh) - logger.info "Refreshing Gem list" - Gem.refresh - end - self.app = app.call + require ru + Object.const_get(File.basename(ru, '.rb').capitalize) + end + + pp({ :inner_app => inner_app }) if $DEBUG + + # return value, matches rackup defaults based on env + case ENV["RACK_ENV"] + when "development" + Rack::Builder.new do + use Rack::CommonLogger, $stderr + use Rack::ShowExceptions + use Rack::Lint + run inner_app + end.to_app + when "deployment" + Rack::Builder.new do + use Rack::CommonLogger, $stderr + run inner_app + end.to_app + else + inner_app end end + end - def proc_name(tag) - $0 = ([ File.basename(START_CTX[0]), tag - ]).concat(START_CTX[:argv]).join(' ') - end - - def redirect_io(io, path) - File.open(path, 'ab') { |fp| io.reopen(fp) } if path - io.sync = true - end - - def init_self_pipe! 
- SELF_PIPE.each { |io| io.close rescue nil } - SELF_PIPE.replace(IO.pipe) - SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) } + # returns an array of strings representing TCP listen socket addresses + # and Unix domain socket paths. This is useful for use with + # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/ + def self.listener_names + Unicorn::HttpServer::LISTENERS.map do |io| + Unicorn::SocketHelper.sock_name(io) end - end end + +# raised inside TeeInput when a client closes the socket inside the +# application dispatch. This is always raised with an empty backtrace +# since there is nothing in the application stack that is responsible +# for client shutdowns/disconnects. +class Unicorn::ClientShutdown < EOFError; end + +require 'unicorn/const' +require 'unicorn/socket_helper' +require 'unicorn/tee_input' +require 'unicorn/http_request' +require 'unicorn/configurator' +require 'unicorn/tmpio' +require 'unicorn/util' +require 'unicorn/http_response' +require 'unicorn/worker' +require 'unicorn/http_server' diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb index 412c1d9..fea22f6 100644 --- a/lib/unicorn/app/exec_cgi.rb +++ b/lib/unicorn/app/exec_cgi.rb @@ -43,7 +43,7 @@ module Unicorn::App # Calls the app def call(env) - out, err = Unicorn::Util.tmpio, Unicorn::Util.tmpio + out, err = Unicorn::TmpIO.new, Unicorn::TmpIO.new inp = force_file_input(env) pid = fork { run_child(inp, out, err, env) } inp.close @@ -124,7 +124,7 @@ module Unicorn::App if inp.respond_to?(:size) && inp.size == 0 ::File.open('/dev/null', 'rb') else - tmp = Unicorn::Util.tmpio + tmp = Unicorn::TmpIO.new buf = inp.read(CHUNK_SIZE) begin diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb index 378a130..dd515a7 100644 --- a/lib/unicorn/configurator.rb +++ b/lib/unicorn/configurator.rb @@ -1,489 +1,521 @@ # -*- encoding: binary -*- - -require 'socket' require 'logger' -module Unicorn - - # Implements a simple DSL for configuring a Unicorn server. - # - # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and - # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb - # example configuration files. An example config file for use with - # nginx is also available at - # http://unicorn.bogomips.org/examples/nginx.conf - class Configurator < Struct.new(:set, :config_file, :after_reload) - # :stopdoc: - # used to stash stuff for deferred processing of cli options in - # config.ru after "working_directory" is bound. Do not rely on - # this being around later on... - RACKUP = {} - # :startdoc: - - # Default settings for Unicorn - DEFAULTS = { - :timeout => 60, - :logger => Logger.new($stderr), - :worker_processes => 1, - :after_fork => lambda { |server, worker| - server.logger.info("worker=#{worker.nr} spawned pid=#{$$}") - }, - :before_fork => lambda { |server, worker| - server.logger.info("worker=#{worker.nr} spawning...") - }, - :before_exec => lambda { |server| - server.logger.info("forked child re-executing...") - }, - :pid => nil, - :preload_app => false, - } - - def initialize(defaults = {}) #:nodoc: - self.set = Hash.new(:unset) - @use_defaults = defaults.delete(:use_defaults) - self.config_file = defaults.delete(:config_file) - - # after_reload is only used by unicorn_rails, unsupported otherwise - self.after_reload = defaults.delete(:after_reload) +# Implements a simple DSL for configuring a \Unicorn server. 
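Unicorn.listener_names exists so middleware can discover the bound sockets at runtime. A config.ru sketch using it with Raindrops::Middleware (the raindrops gem and its :listeners option are assumptions here; the per-listener statistics are Linux-only):

    require 'raindrops'
    # listener addresses come from the running unicorn master
    use Raindrops::Middleware, :listeners => Unicorn.listener_names
    run MyApp.new # MyApp is a placeholder application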
+# +# See http://unicorn.bogomips.org/examples/unicorn.conf.rb and +# http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb +# example configuration files. An example config file for use with +# nginx is also available at +# http://unicorn.bogomips.org/examples/nginx.conf +class Unicorn::Configurator + attr_accessor :set, :config_file, :after_reload + + # :stopdoc: + # used to stash stuff for deferred processing of cli options in + # config.ru after "working_directory" is bound. Do not rely on + # this being around later on... + RACKUP = { + :daemonize => false, + :host => Unicorn::Const::DEFAULT_HOST, + :port => Unicorn::Const::DEFAULT_PORT, + :set_listener => false, + :options => { :listeners => [] } + } + + # Default settings for Unicorn + DEFAULTS = { + :timeout => 60, + :logger => Logger.new($stderr), + :worker_processes => 1, + :after_fork => lambda { |server, worker| + server.logger.info("worker=#{worker.nr} spawned pid=#{$$}") + }, + :before_fork => lambda { |server, worker| + server.logger.info("worker=#{worker.nr} spawning...") + }, + :before_exec => lambda { |server| + server.logger.info("forked child re-executing...") + }, + :pid => nil, + :preload_app => false, + } + #:startdoc: + + def initialize(defaults = {}) #:nodoc: + self.set = Hash.new(:unset) + @use_defaults = defaults.delete(:use_defaults) + self.config_file = defaults.delete(:config_file) + + # after_reload is only used by unicorn_rails, unsupported otherwise + self.after_reload = defaults.delete(:after_reload) + + set.merge!(DEFAULTS) if @use_defaults + defaults.each { |key, value| self.__send__(key, value) } + Hash === set[:listener_opts] or + set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} } + Array === set[:listeners] or set[:listeners] = [] + reload(false) + end + def reload(merge_defaults = true) #:nodoc: + if merge_defaults && @use_defaults set.merge!(DEFAULTS) if @use_defaults - defaults.each { |key, value| self.__send__(key, value) } - Hash === set[:listener_opts] or - set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} } - Array === set[:listeners] or set[:listeners] = [] - reload(false) end + instance_eval(File.read(config_file), config_file) if config_file - def reload(merge_defaults = true) #:nodoc: - if merge_defaults && @use_defaults - set.merge!(DEFAULTS) if @use_defaults - end - instance_eval(File.read(config_file), config_file) if config_file + parse_rackup_file - parse_rackup_file + RACKUP[:set_listener] and + set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}" - # unicorn_rails creates dirs here after working_directory is bound - after_reload.call if after_reload + # unicorn_rails creates dirs here after working_directory is bound + after_reload.call if after_reload - # working_directory binds immediately (easier error checking that way), - # now ensure any paths we changed are correctly set. - [ :pid, :stderr_path, :stdout_path ].each do |var| - String === (path = set[var]) or next - path = File.expand_path(path) - File.writable?(path) || File.writable?(File.dirname(path)) or \ - raise ArgumentError, "directory for #{var}=#{path} not writable" - end + # working_directory binds immediately (easier error checking that way), + # now ensure any paths we changed are correctly set. 
+ [ :pid, :stderr_path, :stdout_path ].each do |var| + String === (path = set[var]) or next + path = File.expand_path(path) + File.writable?(path) || File.writable?(File.dirname(path)) or \ + raise ArgumentError, "directory for #{var}=#{path} not writable" end + end - def commit!(server, options = {}) #:nodoc: - skip = options[:skip] || [] - if ready_pipe = RACKUP.delete(:ready_pipe) - server.ready_pipe = ready_pipe - end - set.each do |key, value| - value == :unset and next - skip.include?(key) and next - server.__send__("#{key}=", value) - end + def commit!(server, options = {}) #:nodoc: + skip = options[:skip] || [] + if ready_pipe = RACKUP.delete(:ready_pipe) + server.ready_pipe = ready_pipe end - - def [](key) # :nodoc: - set[key] + set.each do |key, value| + value == :unset and next + skip.include?(key) and next + server.__send__("#{key}=", value) end + end - # sets object to the +new+ Logger-like object. The new logger-like - # object must respond to the following methods: - # +debug+, +info+, +warn+, +error+, +fatal+ - # The default Logger will log its output to the path specified - # by +stderr_path+. If you're running Unicorn daemonized, then - # you must specify a path to prevent error messages from going - # to /dev/null. - def logger(new) - %w(debug info warn error fatal).each do |m| - new.respond_to?(m) and next - raise ArgumentError, "logger=#{new} does not respond to method=#{m}" - end - - set[:logger] = new - end + def [](key) # :nodoc: + set[key] + end - # sets after_fork hook to a given block. This block will be called by - # the worker after forking. The following is an example hook which adds - # a per-process listener to every worker: - # - # after_fork do |server,worker| - # # per-process listener ports for debugging/admin: - # addr = "127.0.0.1:#{9293 + worker.nr}" - # - # # the negative :tries parameter indicates we will retry forever - # # waiting on the existing process to exit with a 5 second :delay - # # Existing options for Unicorn::Configurator#listen such as - # # :backlog, :rcvbuf, :sndbuf are available here as well. - # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128) - # - # # drop permissions to "www-data" in the worker - # # generally there's no reason to start Unicorn as a priviledged user - # # as it is not recommended to expose Unicorn to public clients. - # worker.user('www-data', 'www-data') if Process.euid == 0 - # end - def after_fork(*args, &block) - set_hook(:after_fork, block_given? ? block : args[0]) + # sets object to the +new+ Logger-like object. The new logger-like + # object must respond to the following methods: + # +debug+, +info+, +warn+, +error+, +fatal+ + # The default Logger will log its output to the path specified + # by +stderr_path+. If you're running Unicorn daemonized, then + # you must specify a path to prevent error messages from going + # to /dev/null. + def logger(new) + %w(debug info warn error fatal).each do |m| + new.respond_to?(m) and next + raise ArgumentError, "logger=#{new} does not respond to method=#{m}" end - # sets before_fork got be a given Proc object. This Proc - # object will be called by the master process before forking - # each worker. - def before_fork(*args, &block) - set_hook(:before_fork, block_given? ? block : args[0]) - end + set[:logger] = new + end - # sets the before_exec hook to a given Proc object. This - # Proc object will be called by the master process right - # before exec()-ing the new unicorn binary. 
This is useful
- # for freeing certain OS resources that you do NOT wish to
- # share with the reexeced child process.
- # There is no corresponding after_exec hook (for obvious reasons).
- def before_exec(*args, &block)
- set_hook(:before_exec, block_given? ? block : args[0], 1)
- end
+ # sets after_fork hook to a given block. This block will be called by
+ # the worker after forking. The following is an example hook which adds
+ # a per-process listener to every worker:
+ #
+ # after_fork do |server,worker|
+ # # per-process listener ports for debugging/admin:
+ # addr = "127.0.0.1:#{9293 + worker.nr}"
+ #
+ # # the negative :tries parameter indicates we will retry forever
+ # # waiting on the existing process to exit with a 5 second :delay
+ # # Existing options for Unicorn::Configurator#listen such as
+ # # :backlog, :rcvbuf, :sndbuf are available here as well.
+ # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
+ #
+ # # drop permissions to "www-data" in the worker
+ # # generally there's no reason to start Unicorn as a privileged user
+ # # as it is not recommended to expose Unicorn to public clients.
+ # worker.user('www-data', 'www-data') if Process.euid == 0
+ # end
+ def after_fork(*args, &block)
+ set_hook(:after_fork, block_given? ? block : args[0])
+ end
- # sets the timeout of worker processes to +seconds+. Workers
- # handling the request/app.call/response cycle taking longer than
- # this time period will be forcibly killed (via SIGKILL). This
- # timeout is enforced by the master process itself and not subject
- # to the scheduling limitations by the worker process. Due the
- # low-complexity, low-overhead implementation, timeouts of less
- # than 3.0 seconds can be considered inaccurate and unsafe.
- #
- # For running Unicorn behind nginx, it is recommended to set
- # "fail_timeout=0" for in your nginx configuration like this
- # to have nginx always retry backends that may have had workers
- # SIGKILL-ed due to timeouts.
- #
- # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
- # # on nginx upstream configuration:
- # upstream unicorn_backend {
- # # for UNIX domain socket setups:
- # server unix:/path/to/unicorn.sock fail_timeout=0;
- #
- # # for TCP setups
- # server 192.168.0.7:8080 fail_timeout=0;
- # server 192.168.0.8:8080 fail_timeout=0;
- # server 192.168.0.9:8080 fail_timeout=0;
- # }
- def timeout(seconds)
- Numeric === seconds or raise ArgumentError,
- "not numeric: timeout=#{seconds.inspect}"
- seconds >= 3 or raise ArgumentError,
- "too low: timeout=#{seconds.inspect}"
- set[:timeout] = seconds
- end
+ # sets before_fork to a given Proc object. This Proc
+ # object will be called by the master process before forking
+ # each worker.
+ def before_fork(*args, &block)
+ set_hook(:before_fork, block_given? ? block : args[0])
+ end
- # sets the current number of worker_processes to +nr+. Each worker
- # process will serve exactly one client at a time. You can
- # increment or decrement this value at runtime by sending SIGTTIN
- # or SIGTTOU respectively to the master process without reloading
- # the rest of your Unicorn configuration. See the SIGNALS document
- # for more information.
- def worker_processes(nr)
- Integer === nr or raise ArgumentError,
- "not an integer: worker_processes=#{nr.inspect}"
- nr >= 0 or raise ArgumentError,
- "not non-negative: worker_processes=#{nr.inspect}"
- set[:worker_processes] = nr
- end
+ # sets the before_exec hook to a given Proc object. This
+ # Proc object will be called by the master process right
+ # before exec()-ing the new unicorn binary. This is useful
+ # for freeing certain OS resources that you do NOT wish to
+ # share with the reexeced child process.
+ # There is no corresponding after_exec hook (for obvious reasons).
+ def before_exec(*args, &block)
+ set_hook(:before_exec, block_given? ? block : args[0], 1)
+ end
- # sets listeners to the given +addresses+, replacing or augmenting the
- # current set. This is for the global listener pool shared by all
- # worker processes. For per-worker listeners, see the after_fork example
- # This is for internal API use only, do not use it in your Unicorn
- # config file. Use listen instead.
- def listeners(addresses) # :nodoc:
- Array === addresses or addresses = Array(addresses)
- addresses.map! { |addr| expand_addr(addr) }
- set[:listeners] = addresses
- end
+ # sets the timeout of worker processes to +seconds+. Workers
+ # handling the request/app.call/response cycle taking longer than
+ # this time period will be forcibly killed (via SIGKILL). This
+ # timeout is enforced by the master process itself and not subject
+ # to the scheduling limitations of the worker process. Due to the
+ # low-complexity, low-overhead implementation, timeouts of less
+ # than 3.0 seconds can be considered inaccurate and unsafe.
+ #
+ # For running Unicorn behind nginx, it is recommended to set
+ # "fail_timeout=0" in your nginx configuration like this
+ # to have nginx always retry backends that may have had workers
+ # SIGKILL-ed due to timeouts.
+ #
+ # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
+ # # on nginx upstream configuration:
+ # upstream unicorn_backend {
+ # # for UNIX domain socket setups:
+ # server unix:/path/to/unicorn.sock fail_timeout=0;
+ #
+ # # for TCP setups
+ # server 192.168.0.7:8080 fail_timeout=0;
+ # server 192.168.0.8:8080 fail_timeout=0;
+ # server 192.168.0.9:8080 fail_timeout=0;
+ # }
+ def timeout(seconds)
+ Numeric === seconds or raise ArgumentError,
+ "not numeric: timeout=#{seconds.inspect}"
+ seconds >= 3 or raise ArgumentError,
+ "too low: timeout=#{seconds.inspect}"
+ set[:timeout] = seconds
+ end
- # adds an +address+ to the existing listener set.
- #
- # The following options may be specified (but are generally not needed):
- #
- # +:backlog+: this is the backlog of the listen() syscall.
- #
- # Some operating systems allow negative values here to specify the
- # maximum allowable value. In most cases, this number is only
- # recommendation and there are other OS-specific tunables and
- # variables that can affect this number. See the listen(2)
- # syscall documentation of your OS for the exact semantics of
- # this.
- #
- # If you are running unicorn on multiple machines, lowering this number
- # can help your load balancer detect when a machine is overloaded
- # and give requests to a different machine.
- #
- # Default: 1024
- #
- # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
- #
- # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
- # can be set via the setsockopt(2) syscall. Some kernels
- # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
- # there is no need (and it is sometimes detrimental) to specify them.
- #
- # See the socket API documentation of your operating system
- # to determine the exact semantics of these settings and
- # other operating system-specific knobs where they can be
- # specified.
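Taken together, the directives documented above compose a typical configuration file. A short sketch (worker counts, timeout and addresses are illustrative, not recommendations):

    # unicorn.conf.rb sketch
    worker_processes 4
    timeout 30

    before_fork do |server, worker|
      server.logger.info("master pid=#{Process.pid} about to fork worker #{worker.nr}")
    end

    after_fork do |server, worker|
      # per-worker admin listener, as in the after_fork RDoc above
      server.listen("127.0.0.1:#{9293 + worker.nr}", :tries => -1, :delay => 5)
    end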
- # - # Defaults: operating system defaults - # - # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets - # - # This has no effect on UNIX sockets. - # - # Default: operating system defaults (usually Nagle's algorithm enabled) - # - # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD - # - # This will prevent partial TCP frames from being sent out. - # Enabling +tcp_nopush+ is generally not needed or recommended as - # controlling +tcp_nodelay+ already provides sufficient latency - # reduction whereas Unicorn does not know when the best times are - # for flushing corked sockets. - # - # This has no effect on UNIX sockets. - # - # +:tries+: times to retry binding a socket if it is already in use - # - # A negative number indicates we will retry indefinitely, this is - # useful for migrations and upgrades when individual workers - # are binding to different ports. - # - # Default: 5 - # - # +:delay+: seconds to wait between successive +tries+ - # - # Default: 0.5 seconds - # - # +:umask+: sets the file mode creation mask for UNIX sockets - # - # Typically UNIX domain sockets are created with more liberal - # file permissions than the rest of the application. By default, - # we create UNIX domain sockets to be readable and writable by - # all local users to give them the same accessibility as - # locally-bound TCP listeners. - # - # This has no effect on TCP listeners. - # - # Default: 0 (world read/writable) - def listen(address, opt = {}) - address = expand_addr(address) - if String === address - [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key| - value = opt[key] or next - Integer === value or - raise ArgumentError, "not an integer: #{key}=#{value.inspect}" - end - [ :tcp_nodelay, :tcp_nopush ].each do |key| - (value = opt[key]).nil? and next - TrueClass === value || FalseClass === value or - raise ArgumentError, "not boolean: #{key}=#{value.inspect}" - end - unless (value = opt[:delay]).nil? - Numeric === value or - raise ArgumentError, "not numeric: delay=#{value.inspect}" - end - set[:listener_opts][address].merge!(opt) - end + # sets the current number of worker_processes to +nr+. Each worker + # process will serve exactly one client at a time. You can + # increment or decrement this value at runtime by sending SIGTTIN + # or SIGTTOU respectively to the master process without reloading + # the rest of your Unicorn configuration. See the SIGNALS document + # for more information. + def worker_processes(nr) + Integer === nr or raise ArgumentError, + "not an integer: worker_processes=#{nr.inspect}" + nr >= 0 or raise ArgumentError, + "not non-negative: worker_processes=#{nr.inspect}" + set[:worker_processes] = nr + end - set[:listeners] << address - end + # sets listeners to the given +addresses+, replacing or augmenting the + # current set. This is for the global listener pool shared by all + # worker processes. For per-worker listeners, see the after_fork example + # This is for internal API use only, do not use it in your Unicorn + # config file. Use listen instead. + def listeners(addresses) # :nodoc: + Array === addresses or addresses = Array(addresses) + addresses.map! { |addr| expand_addr(addr) } + set[:listeners] = addresses + end - # sets the +path+ for the PID file of the unicorn master process - def pid(path); set_path(:pid, path); end - - # Enabling this preloads an application before forking worker - # processes. 
This allows memory savings when using a - # copy-on-write-friendly GC but can cause bad things to happen when - # resources like sockets are opened at load time by the master - # process and shared by multiple children. People enabling this are - # highly encouraged to look at the before_fork/after_fork hooks to - # properly close/reopen sockets. Files opened for logging do not - # have to be reopened as (unbuffered-in-userspace) files opened with - # the File::APPEND flag are written to atomically on UNIX. - # - # In addition to reloading the unicorn-specific config settings, - # SIGHUP will reload application code in the working - # directory/symlink when workers are gracefully restarted when - # preload_app=false (the default). As reloading the application - # sometimes requires RubyGems updates, +Gem.refresh+ is always - # called before the application is loaded (for RubyGems users). - # - # During deployments, care should _always_ be taken to ensure your - # applications are properly deployed and running. Using - # preload_app=false (the default) means you _must_ check if - # your application is responding properly after a deployment. - # Improperly deployed applications can go into a spawn loop - # if the application fails to load. While your children are - # in a spawn loop, it is is possible to fix an application - # by properly deploying all required code and dependencies. - # Using preload_app=true means any application load error will - # cause the master process to exit with an error. - - def preload_app(bool) - case bool - when TrueClass, FalseClass - set[:preload_app] = bool - else - raise ArgumentError, "preload_app=#{bool.inspect} not a boolean" + # adds an +address+ to the existing listener set. + # + # The following options may be specified (but are generally not needed): + # + # +:backlog+: this is the backlog of the listen() syscall. + # + # Some operating systems allow negative values here to specify the + # maximum allowable value. In most cases, this number is only + # recommendation and there are other OS-specific tunables and + # variables that can affect this number. See the listen(2) + # syscall documentation of your OS for the exact semantics of + # this. + # + # If you are running unicorn on multiple machines, lowering this number + # can help your load balancer detect when a machine is overloaded + # and give requests to a different machine. + # + # Default: 1024 + # + # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets + # + # These correspond to the SO_RCVBUF and SO_SNDBUF settings which + # can be set via the setsockopt(2) syscall. Some kernels + # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and + # there is no need (and it is sometimes detrimental) to specify them. + # + # See the socket API documentation of your operating system + # to determine the exact semantics of these settings and + # other operating system-specific knobs where they can be + # specified. + # + # Defaults: operating system defaults + # + # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets + # + # This has no effect on UNIX sockets. + # + # Default: operating system defaults (usually Nagle's algorithm enabled) + # + # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD + # + # This will prevent partial TCP frames from being sent out. 
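A few listen calls exercising the options described here (socket paths, ports and values are illustrative):

    listen "/tmp/unicorn.sock", :backlog => 64, :umask => 0
    listen 8080, :tcp_nodelay => true, :tcp_nopush => false
    listen "127.0.0.1:8081", :tries => -1, :delay => 0.5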
+ # Enabling +tcp_nopush+ is generally not needed or recommended as + # controlling +tcp_nodelay+ already provides sufficient latency + # reduction whereas Unicorn does not know when the best times are + # for flushing corked sockets. + # + # This has no effect on UNIX sockets. + # + # +:tries+: times to retry binding a socket if it is already in use + # + # A negative number indicates we will retry indefinitely, this is + # useful for migrations and upgrades when individual workers + # are binding to different ports. + # + # Default: 5 + # + # +:delay+: seconds to wait between successive +tries+ + # + # Default: 0.5 seconds + # + # +:umask+: sets the file mode creation mask for UNIX sockets + # + # Typically UNIX domain sockets are created with more liberal + # file permissions than the rest of the application. By default, + # we create UNIX domain sockets to be readable and writable by + # all local users to give them the same accessibility as + # locally-bound TCP listeners. + # + # This has no effect on TCP listeners. + # + # Default: 0 (world read/writable) + # + # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only) + # + # For Linux 2.6.32 and later, this is the number of retransmits to + # defer an accept() for if no data arrives, but the client will + # eventually be accepted after the specified number of retransmits + # regardless of whether data is ready. + # + # For Linux before 2.6.32, this is a boolean option, and + # accepts are _always_ deferred indefinitely if no data arrives. + # This is similar to <code>:accept_filter => "dataready"</code> + # under FreeBSD. + # + # Specifying +true+ is synonymous for the default value(s) below, + # and +false+ or +nil+ is synonymous for a value of zero. + # + # A value of +1+ is a good optimization for local networks + # and trusted clients. For Rainbows! and Zbatery users, a higher + # value (e.g. +60+) provides more protection against some + # denial-of-service attacks. There is no good reason to ever + # disable this with a +zero+ value when serving HTTP. + # + # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+ + # + # +:accept_filter: defer accept() until data is ready (FreeBSD-only) + # + # This enables either the "dataready" or (default) "httpready" + # accept() filter under FreeBSD. This is intended as an + # optimization to reduce context switches with common GET/HEAD + # requests. For Rainbows! and Zbatery users, this provides + # some protection against certain denial-of-service attacks, too. + # + # There is no good reason to change from the default. + # + # Default: "httpready" + def listen(address, opt = {}) + address = expand_addr(address) + if String === address + [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key| + value = opt[key] or next + Integer === value or + raise ArgumentError, "not an integer: #{key}=#{value.inspect}" end + [ :tcp_nodelay, :tcp_nopush ].each do |key| + (value = opt[key]).nil? and next + TrueClass === value || FalseClass === value or + raise ArgumentError, "not boolean: #{key}=#{value.inspect}" + end + unless (value = opt[:delay]).nil? + Numeric === value or + raise ArgumentError, "not numeric: delay=#{value.inspect}" + end + set[:listener_opts][address].merge!(opt) end - # Allow redirecting $stderr to a given path. Unlike doing this from - # the shell, this allows the unicorn process to know the path its - # writing to and rotate the file if it is used for logging. 
The
- # file will be opened with the File::APPEND flag and writes
- # synchronized to the kernel (but not necessarily to _disk_) so
- # multiple processes can safely append to it.
- #
- # If you are daemonizing and using the default +logger+, it is important
- # to specify this as errors will otherwise be lost to /dev/null.
- # Some applications/libraries may also triggering warnings that go to
- # stderr, and they will end up here.
- def stderr_path(path)
- set_path(:stderr_path, path)
+ set[:listeners] << address
+ end
+
+ # sets the +path+ for the PID file of the unicorn master process
+ def pid(path); set_path(:pid, path); end
+
+ # Enabling this preloads an application before forking worker
+ # processes. This allows memory savings when using a
+ # copy-on-write-friendly GC but can cause bad things to happen when
+ # resources like sockets are opened at load time by the master
+ # process and shared by multiple children. People enabling this are
+ # highly encouraged to look at the before_fork/after_fork hooks to
+ # properly close/reopen sockets. Files opened for logging do not
+ # have to be reopened as (unbuffered-in-userspace) files opened with
+ # the File::APPEND flag are written to atomically on UNIX.
+ #
+ # In addition to reloading the unicorn-specific config settings,
+ # SIGHUP will reload application code in the working
+ # directory/symlink when workers are gracefully restarted when
+ # preload_app=false (the default). As reloading the application
+ # sometimes requires RubyGems updates, +Gem.refresh+ is always
+ # called before the application is loaded (for RubyGems users).
+ #
+ # During deployments, care should _always_ be taken to ensure your
+ # applications are properly deployed and running. Using
+ # preload_app=false (the default) means you _must_ check if
+ # your application is responding properly after a deployment.
+ # Improperly deployed applications can go into a spawn loop
+ # if the application fails to load. While your children are
+ # in a spawn loop, it is possible to fix an application
+ # by properly deploying all required code and dependencies.
+ # Using preload_app=true means any application load error will
+ # cause the master process to exit with an error.
+
+ def preload_app(bool)
+ case bool
+ when TrueClass, FalseClass
+ set[:preload_app] = bool
+ else
+ raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
+ end
+ end
+
+ # Allow redirecting $stderr to a given path. Unlike doing this from
+ # the shell, this allows the unicorn process to know the path it's
+ # writing to and rotate the file if it is used for logging. The
+ # file will be opened with the File::APPEND flag and writes
+ # synchronized to the kernel (but not necessarily to _disk_) so
+ # multiple processes can safely append to it.
+ #
+ # If you are daemonizing and using the default +logger+, it is important
+ # to specify this as errors will otherwise be lost to /dev/null.
+ # Some applications/libraries may also trigger warnings that go to
+ # stderr, and they will end up here.
+ def stderr_path(path)
+ set_path(:stderr_path, path)
+ end
+
+ # Same as stderr_path, except for $stdout. Not many Rack applications
+ # write to $stdout, but any that do will have their output written here.
+ # It is safe to point this to the same location as stderr_path.
+ # Like stderr_path, this defaults to /dev/null when daemonized.
+ def stdout_path(path)
+ set_path(:stdout_path, path)
+ end
- # Same as stderr_path, except for $stdout.
Not many Rack applications - # write to $stdout, but any that do will have their output written here. - # It is safe to point this to the same location a stderr_path. - # Like stderr_path, this defaults to /dev/null when daemonized. - def stdout_path(path) - set_path(:stdout_path, path) + # sets the working directory for Unicorn. This ensures SIGUSR2 will + # start a new instance of Unicorn in this directory. This may be + # a symlink, a common scenario for Capistrano users. Unlike + # all other Unicorn configuration directives, this binds immediately + # for error checking and cannot be undone by unsetting it in the + # configuration file and reloading. + def working_directory(path) + # just let chdir raise errors + path = File.expand_path(path) + if config_file && + config_file[0] != ?/ && + ! File.readable?("#{path}/#{config_file}") + raise ArgumentError, + "config_file=#{config_file} would not be accessible in" \ + " working_directory=#{path}" end + Dir.chdir(path) + Unicorn::HttpServer::START_CTX[:cwd] = ENV["PWD"] = path + end - # sets the working directory for Unicorn. This ensures SIGUSR2 will - # start a new instance of Unicorn in this directory. This may be - # a symlink, a common scenario for Capistrano users. Unlike - # all other Unicorn configuration directives, this binds immediately - # for error checking and cannot be undone by unsetting it in the - # configuration file and reloading. - def working_directory(path) - # just let chdir raise errors - path = File.expand_path(path) - if config_file && - config_file[0] != ?/ && - ! File.readable?("#{path}/#{config_file}") - raise ArgumentError, - "config_file=#{config_file} would not be accessible in" \ - " working_directory=#{path}" - end - Dir.chdir(path) - HttpServer::START_CTX[:cwd] = ENV["PWD"] = path + # Runs worker processes as the specified +user+ and +group+. + # The master process always stays running as the user who started it. + # This switch will occur after calling the after_fork hook, and only + # if the Worker#user method is not called in the after_fork hook + def user(user, group = nil) + # raises ArgumentError on invalid user/group + Etc.getpwnam(user) + Etc.getgrnam(group) if group + set[:user] = [ user, group ] + end + + # expands "unix:path/to/foo" to a socket relative to the current path + # expands pathnames of sockets if relative to "~" or "~username" + # expands "*:port and ":port" to "0.0.0.0:port" + def expand_addr(address) #:nodoc + return "0.0.0.0:#{address}" if Integer === address + return address unless String === address + + case address + when %r{\Aunix:(.*)\z} + File.expand_path($1) + when %r{\A~} + File.expand_path(address) + when %r{\A(?:\*:)?(\d+)\z} + "0.0.0.0:#$1" + when %r{\A(.*):(\d+)\z} + # canonicalize the name + packed = Socket.pack_sockaddr_in($2.to_i, $1) + Socket.unpack_sockaddr_in(packed).reverse!.join(':') + else + address end + end + +private - # Runs worker processes as the specified +user+ and +group+. - # The master process always stays running as the user who started it. 
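A sketch of the two directives above in a configuration file (the directory and account names are assumptions for illustration):

    # working_directory binds immediately and may point at a symlink,
    # e.g. a Capistrano-style "current" directory
    working_directory "/srv/app/current"

    # workers (never the master) switch to this user/group after
    # the after_fork hook runs
    user "www-data", "www-data"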
- # This switch will occur after calling the after_fork hook, and only - # if the Worker#user method is not called in the after_fork hook - def user(user, group = nil) - # raises ArgumentError on invalid user/group - Etc.getpwnam(user) - Etc.getgrnam(group) if group - set[:user] = [ user, group ] + def set_path(var, path) #:nodoc: + case path + when NilClass, String + set[var] = path + else + raise ArgumentError end + end - # expands "unix:path/to/foo" to a socket relative to the current path - # expands pathnames of sockets if relative to "~" or "~username" - # expands "*:port and ":port" to "0.0.0.0:port" - def expand_addr(address) #:nodoc - return "0.0.0.0:#{address}" if Integer === address - return address unless String === address - - case address - when %r{\Aunix:(.*)\z} - File.expand_path($1) - when %r{\A~} - File.expand_path(address) - when %r{\A(?:\*:)?(\d+)\z} - "0.0.0.0:#$1" - when %r{\A(.*):(\d+)\z} - # canonicalize the name - packed = Socket.pack_sockaddr_in($2.to_i, $1) - Socket.unpack_sockaddr_in(packed).reverse!.join(':') - else - address - end + def set_hook(var, my_proc, req_arity = 2) #:nodoc: + case my_proc + when Proc + arity = my_proc.arity + (arity == req_arity) or \ + raise ArgumentError, + "#{var}=#{my_proc.inspect} has invalid arity: " \ + "#{arity} (need #{req_arity})" + when NilClass + my_proc = DEFAULTS[var] + else + raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}" end + set[var] = my_proc + end - private + # this is called _after_ working_directory is bound. This only + # parses the embedded switches in .ru files + # (for "rackup" compatibility) + def parse_rackup_file # :nodoc: + ru = RACKUP[:file] or return # we only return here in unit tests - def set_path(var, path) #:nodoc: - case path - when NilClass, String - set[var] = path - else - raise ArgumentError - end + # :rails means use (old) Rails autodetect + if ru == :rails + File.readable?('config.ru') or return + ru = 'config.ru' end - def set_hook(var, my_proc, req_arity = 2) #:nodoc: - case my_proc - when Proc - arity = my_proc.arity - (arity == req_arity) or \ - raise ArgumentError, - "#{var}=#{my_proc.inspect} has invalid arity: " \ - "#{arity} (need #{req_arity})" - when NilClass - my_proc = DEFAULTS[var] - else - raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}" - end - set[var] = my_proc - end + File.readable?(ru) or + raise ArgumentError, "rackup file (#{ru}) not readable" - # this is called _after_ working_directory is bound. 
This only - # parses the embedded switches in .ru files - # (for "rackup" compatibility) - def parse_rackup_file # :nodoc: - ru = RACKUP[:file] or return # we only return here in unit tests + # it could be a .rb file, too, we don't parse those manually + ru =~ /\.ru\z/ or return - # :rails means use (old) Rails autodetect - if ru == :rails - File.readable?('config.ru') or return - ru = 'config.ru' - end + /^#\\(.*)/ =~ File.read(ru) or return + RACKUP[:optparse].parse!($1.split(/\s+/)) - File.readable?(ru) or - raise ArgumentError, "rackup file (#{ru}) not readable" - - # it could be a .rb file, too, we don't parse those manually - ru =~ /\.ru\z/ or return - - /^#\\(.*)/ =~ File.read(ru) or return - RACKUP[:optparse].parse!($1.split(/\s+/)) - - # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery) - host, port, set_listener, options, daemonize = - eval("[ host, port, set_listener, options, daemonize ]", - TOPLEVEL_BINDING) - - # XXX duplicate code from bin/unicorn{,_rails} - set[:listeners] << "#{host}:#{port}" if set_listener - - if daemonize - # unicorn_rails wants a default pid path, (not plain 'unicorn') - if after_reload - spid = set[:pid] - pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset - end - unless RACKUP[:daemonized] - Unicorn::Launcher.daemonize!(options) - RACKUP[:ready_pipe] = options.delete(:ready_pipe) - end + if RACKUP[:daemonize] + # unicorn_rails wants a default pid path, (not plain 'unicorn') + if after_reload + spid = set[:pid] + pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset + end + unless RACKUP[:daemonized] + Unicorn::Launcher.daemonize!(RACKUP[:options]) + RACKUP[:ready_pipe] = RACKUP[:options].delete(:ready_pipe) end end - end end diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb index 907e9eb..e7908d6 100644 --- a/lib/unicorn/const.rb +++ b/lib/unicorn/const.rb @@ -1,36 +1,37 @@ # -*- encoding: binary -*- -module Unicorn +# Frequently used constants when constructing requests or responses. +# Many times the constant just refers to a string with the same +# contents. Using these constants gave about a 3% to 10% performance +# improvement over using the strings directly. Symbols did not really +# improve things much compared to constants. +module Unicorn::Const - # Frequently used constants when constructing requests or responses. Many times - # the constant just refers to a string with the same contents. Using these constants - # gave about a 3% to 10% performance improvement over using the strings directly. - # Symbols did not really improve things much compared to constants. - module Const + # The current version of Unicorn, currently 2.0.0pre3 + UNICORN_VERSION = "2.0.0pre3" - # The current version of Unicorn, currently 1.0.2 - UNICORN_VERSION="1.0.2" + # default TCP listen host address (0.0.0.0, all interfaces) + DEFAULT_HOST = "0.0.0.0" - DEFAULT_HOST = "0.0.0.0" # default TCP listen host address - DEFAULT_PORT = 8080 # default TCP listen port - DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}" + # default TCP listen port (8080) + DEFAULT_PORT = 8080 - # The basic max request size we'll try to read. - CHUNK_SIZE=(16 * 1024) + # default TCP listen address and port (0.0.0.0:8080) + DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}" - # Maximum request body size before it is moved out of memory and into a - # temporary file for reading (112 kilobytes). - MAX_BODY=1024 * 112 + # The basic request body size we'll try to read at once (16 kilobytes). 
+ CHUNK_SIZE = 16 * 1024 - # common errors we'll send back - ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n" - ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n" - EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n" + # Maximum request body size before it is moved out of memory and into a + # temporary file for reading (112 kilobytes). + MAX_BODY = 1024 * 112 - # A frozen format for this is about 15% faster - REMOTE_ADDR="REMOTE_ADDR".freeze - RACK_INPUT="rack.input".freeze - HTTP_EXPECT="HTTP_EXPECT" - end + # :stopdoc: + # common errors we'll send back + ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n" + ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n" + EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n" + HTTP_EXPECT = "HTTP_EXPECT" + # :startdoc: end diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb index 65b09fa..2dcd839 100644 --- a/lib/unicorn/http_request.rb +++ b/lib/unicorn/http_request.rb @@ -1,72 +1,69 @@ # -*- encoding: binary -*- -require 'stringio' require 'unicorn_http' -module Unicorn - class HttpRequest +# TODO: remove redundant names +Unicorn.const_set(:HttpRequest, Unicorn::HttpParser) +class Unicorn::HttpParser - # default parameters we merge into the request env for Rack handlers - DEFAULTS = { - "rack.errors" => $stderr, - "rack.multiprocess" => true, - "rack.multithread" => false, - "rack.run_once" => false, - "rack.version" => [1, 1], - "SCRIPT_NAME" => "", + # default parameters we merge into the request env for Rack handlers + DEFAULTS = { + "rack.errors" => $stderr, + "rack.multiprocess" => true, + "rack.multithread" => false, + "rack.run_once" => false, + "rack.version" => [1, 1], + "SCRIPT_NAME" => "", - # this is not in the Rack spec, but some apps may rely on it - "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}" - } + # this is not in the Rack spec, but some apps may rely on it + "SERVER_SOFTWARE" => "Unicorn #{Unicorn::Const::UNICORN_VERSION}" + } - NULL_IO = StringIO.new("") - LOCALHOST = '127.0.0.1' + NULL_IO = StringIO.new("") - # Being explicitly single-threaded, we have certain advantages in - # not having to worry about variables being clobbered :) - BUF = "" - PARSER = HttpParser.new - REQ = {} + # :stopdoc: + # A frozen format for this is about 15% faster + REMOTE_ADDR = 'REMOTE_ADDR'.freeze + RACK_INPUT = 'rack.input'.freeze + TeeInput = Unicorn::TeeInput + # :startdoc: - # Does the majority of the IO processing. It has been written in - # Ruby using about 8 different IO processing strategies. - # - # It is currently carefully constructed to make sure that it gets - # the best possible performance for the common case: GET requests - # that are fully complete after a single read(2) - # - # Anyone who thinks they can make it faster is more than welcome to - # take a crack at it. - # - # returns an environment hash suitable for Rack if successful - # This does minimal exception trapping and it is up to the caller - # to handle any socket errors (e.g. user aborted upload). - def read(socket) - REQ.clear - PARSER.reset + # Does the majority of the IO processing. It has been written in + # Ruby using about 8 different IO processing strategies. + # + # It is currently carefully constructed to make sure that it gets + # the best possible performance for the common case: GET requests + # that are fully complete after a single read(2) + # + # Anyone who thinks they can make it faster is more than welcome to + # take a crack at it. 
+ # + # returns an environment hash suitable for Rack if successful + # This does minimal exception trapping and it is up to the caller + # to handle any socket errors (e.g. user aborted upload). + def read(socket) + reset + e = env - # From http://www.ietf.org/rfc/rfc3875: - # "Script authors should be aware that the REMOTE_ADDR and - # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9) - # may not identify the ultimate source of the request. They - # identify the client for the immediate request to the server; - # that client may be a proxy, gateway, or other intermediary - # acting on behalf of the actual source client." - REQ[Const::REMOTE_ADDR] = - TCPSocket === socket ? socket.peeraddr.last : LOCALHOST + # From http://www.ietf.org/rfc/rfc3875: + # "Script authors should be aware that the REMOTE_ADDR and + # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9) + # may not identify the ultimate source of the request. They + # identify the client for the immediate request to the server; + # that client may be a proxy, gateway, or other intermediary + # acting on behalf of the actual source client." + e[REMOTE_ADDR] = socket.kgio_addr - # short circuit the common case with small GET requests first - if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil? - # Parser is not done, queue up more data to read and continue parsing - # an Exception thrown from the PARSER will throw us out of the loop - begin - BUF << socket.readpartial(Const::CHUNK_SIZE) - end while PARSER.headers(REQ, BUF).nil? - end - REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ? - NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF) - REQ.update(DEFAULTS) + # short circuit the common case with small GET requests first + socket.kgio_read!(16384, buf) + if parse.nil? + # Parser is not done, queue up more data to read and continue parsing + # an Exception thrown from the parser will throw us out of the loop + begin + buf << socket.kgio_read!(16384) + end while parse.nil? end - + e[RACK_INPUT] = 0 == content_length ? NULL_IO : TeeInput.new(socket, self) + e.merge!(DEFAULTS) end end diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb index 96e484b..5725e25 100644 --- a/lib/unicorn/http_response.rb +++ b/lib/unicorn/http_response.rb @@ -1,75 +1,50 @@ # -*- encoding: binary -*- - require 'time' -module Unicorn - # Writes a Rack response to your client using the HTTP/1.1 specification. - # You use it by simply doing: - # - # status, headers, body = rack_app.call(env) - # HttpResponse.write(socket, [ status, headers, body ]) - # - # Most header correctness (including Content-Length and Content-Type) - # is the job of Rack, with the exception of the "Connection: close" - # and "Date" headers. - # - # A design decision was made to force the client to not pipeline or - # keepalive requests. HTTP/1.1 pipelining really kills the - # performance due to how it has to be handled and how unclear the - # standard is. To fix this the HttpResponse always gives a - # "Connection: close" header which forces the client to close right - # away. The bonus for this is that it gives a pretty nice speed boost - # to most clients since they can close their connection immediately. - - class HttpResponse - - # Every standard HTTP code mapped to the appropriate message. - CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)| - hash[code] = "#{code} #{msg}" - hash - } - - # Rack does not set/require a Date: header. 
We always override the - # Connection: and Date: headers no matter what (if anything) our - # Rack application sent us. - SKIP = { 'connection' => true, 'date' => true, 'status' => true } - - # writes the rack_response to socket as an HTTP response - def self.write(socket, rack_response, have_header = true) - status, headers, body = rack_response - - if have_header - status = CODES[status.to_i] || status - out = [] - - # Don't bother enforcing duplicate supression, it's a Hash most of - # the time anyways so just hope our app knows what it's doing - headers.each do |key, value| - next if SKIP.include?(key.downcase) - if value =~ /\n/ - # avoiding blank, key-only cookies with /\n+/ - out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }) - else - out << "#{key}: #{value}\r\n" - end +# Writes a Rack response to your client using the HTTP/1.1 specification. +# You use it by simply doing: +# +# status, headers, body = rack_app.call(env) +# http_response_write(socket, [ status, headers, body ]) +# +# Most header correctness (including Content-Length and Content-Type) +# is the job of Rack, with the exception of the "Date" and "Status" header. +# +# TODO: allow keepalive +module Unicorn::HttpResponse + + # Every standard HTTP code mapped to the appropriate message. + CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)| + hash[code] = "#{code} #{msg}" + hash + } + CRLF = "\r\n" + + # writes the rack_response to socket as an HTTP response + def http_response_write(socket, rack_response) + status, headers, body = rack_response + status = CODES[status.to_i] || status + + if headers + buf = "HTTP/1.1 #{status}\r\n" \ + "Date: #{Time.now.httpdate}\r\n" \ + "Status: #{status}\r\n" \ + "Connection: close\r\n" + headers.each do |key, value| + next if %r{\A(?:Date\z|Status\z|Connection\z)}i =~ key + if value =~ /\n/ + # avoiding blank, key-only cookies with /\n+/ + buf << value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }.join('') + else + buf << "#{key}: #{value}\r\n" end - - # Rack should enforce Content-Length or chunked transfer encoding, - # so don't worry or care about them. - # Date is required by HTTP/1.1 as long as our clock can be trusted. - # Some broken clients require a "Status" header so we accomodate them - socket.write("HTTP/1.1 #{status}\r\n" \ - "Date: #{Time.now.httpdate}\r\n" \ - "Status: #{status}\r\n" \ - "Connection: close\r\n" \ - "#{out.join('')}\r\n") end - - body.each { |chunk| socket.write(chunk) } - socket.close # flushes and uncorks the socket immediately - ensure - body.respond_to?(:close) and body.close + socket.write(buf << CRLF) end + body.each { |chunk| socket.write(chunk) } + socket.close # flushes and uncorks the socket immediately + ensure + body.respond_to?(:close) and body.close end end diff --git a/lib/unicorn/http_server.rb b/lib/unicorn/http_server.rb new file mode 100644 index 0000000..69b7cc8 --- /dev/null +++ b/lib/unicorn/http_server.rb @@ -0,0 +1,695 @@ +# -*- encoding: binary -*- + +# This is the process manager of Unicorn. This manages worker +# processes which in turn handle the I/O and application process. +# Listener sockets are started in the master process and shared with +# forked worker children. 
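A sketch of driving the http_response_write helper above by hand, assuming a class that includes Unicorn::HttpResponse, a connected client socket, and an already-parsed Rack env; inside unicorn this happens in the worker's request loop:

    status, headers, body = app.call(env)
    # emits the status line, the Date/Status/Connection headers and the
    # body, then closes the socket (no keepalive yet, per the TODO)
    http_response_write(client, [ status, headers, body ])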
+class Unicorn::HttpServer + attr_accessor :app, :request, :timeout, :worker_processes, + :before_fork, :after_fork, :before_exec, + :listener_opts, :preload_app, + :reexec_pid, :orig_app, :init_listeners, + :master_pid, :config, :ready_pipe, :user + attr_reader :pid, :logger + + # :stopdoc: + include Unicorn::SocketHelper + include Unicorn::HttpResponse + + # backwards compatibility with 1.x + Worker = Unicorn::Worker + + # prevents IO objects in here from being GC-ed + IO_PURGATORY = [] + + # all bound listener sockets + LISTENERS = [] + + # This hash maps PIDs to Workers + WORKERS = {} + + # We use SELF_PIPE differently in the master and worker processes: + # + # * The master process never closes or reinitializes this once + # initialized. Signal handlers in the master process will write to + # it to wake up the master from IO.select in exactly the same manner + # djb describes in http://cr.yp.to/docs/selfpipe.html + # + # * The workers immediately close the pipe they inherit from the + # master and replace it with a new pipe after forking. This new + # pipe is also used to wakeup from IO.select from inside (worker) + # signal handlers. However, workers *close* the pipe descriptors in + # the signal handlers to raise EBADF in IO.select instead of writing + # like we do in the master. We cannot easily use the reader set for + # IO.select because LISTENERS is already that set, and it's extra + # work (and cycles) to distinguish the pipe FD from the reader set + # once IO.select returns. So we're lazy and just close the pipe when + # a (rare) signal arrives in the worker and reinitialize the pipe later. + SELF_PIPE = [] + + # signal queue used for self-piping + SIG_QUEUE = [] + + # list of signals we care about and trap in master. + QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ] + + # :startdoc: + # We populate this at startup so we can figure out how to reexecute + # and upgrade the currently running instance of Unicorn + # This Hash is considered a stable interface and changing its contents + # will allow you to switch between different installations of Unicorn + # or even different installations of the same applications without + # downtime. Keys of this constant Hash are described as follows: + # + # * 0 - the path to the unicorn/unicorn_rails executable + # * :argv - a deep copy of the ARGV array the executable originally saw + # * :cwd - the working directory of the application, this is where + # you originally started Unicorn. + # + # To change your unicorn executable to a different path without downtime, + # you can set the following in your Unicorn config file, HUP and then + # continue with the traditional USR2 + QUIT upgrade steps: + # + # Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn" + START_CTX = { + :argv => ARGV.map { |arg| arg.dup }, + :cwd => lambda { + # favor ENV['PWD'] since it is (usually) symlink aware for + # Capistrano and like systems + begin + a = File.stat(pwd = ENV['PWD']) + b = File.stat(Dir.pwd) + a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd + rescue + Dir.pwd + end + }.call, + 0 => $0.dup, + } + + # Creates a working server on host:port (strange things happen if + # port isn't a Number). Use HttpServer::run to start the server and + # HttpServer.run.join to join the thread that's processing + # incoming requests on the socket. 
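The constructor documented above is what bin/unicorn ultimately drives. A minimal embedding sketch (the app, listener address and pid path are illustrative assumptions), followed by the signal that triggers the START_CTX-based upgrade:

    require 'unicorn'
    app = lambda { |env|
      [ 200,
        { 'Content-Type' => 'text/plain', 'Content-Length' => '3' },
        [ "ok\n" ] ]
    }
    Unicorn::HttpServer.new(app, :listeners => %w(127.0.0.1:8080)).start.join

    # from another process: re-execute the running binary via START_CTX
    Process.kill(:USR2, File.read("/tmp/unicorn.pid").to_i)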
+ def initialize(app, options = {}) + @app = app + @request = Unicorn::HttpRequest.new + self.reexec_pid = 0 + options = options.dup + self.ready_pipe = options.delete(:ready_pipe) + self.init_listeners = options[:listeners] ? options[:listeners].dup : [] + options[:use_defaults] = true + self.config = Unicorn::Configurator.new(options) + self.listener_opts = {} + + # we try inheriting listeners first, so we bind them later. + # we don't write the pid file until we've bound listeners in case + # unicorn was started twice by mistake. Even though our #pid= method + # checks for stale/existing pid files, race conditions are still + # possible (and difficult/non-portable to avoid) and can be likely + # to clobber the pid if the second start was in quick succession + # after the first, so we rely on the listener binding to fail in + # that case. Some tests (in and outside of this source tree) and + # monitoring tools may also rely on pid files existing before we + # attempt to connect to the listener(s) + config.commit!(self, :skip => [:listeners, :pid]) + self.orig_app = app + end + + # Runs the thing. Returns self so you can run join on it + def start + BasicSocket.do_not_reverse_lookup = true + + # inherit sockets from parents, they need to be plain Socket objects + # before they become Kgio::UNIXServer or Kgio::TCPServer + inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd| + io = Socket.for_fd(fd.to_i) + set_server_sockopt(io, listener_opts[sock_name(io)]) + IO_PURGATORY << io + logger.info "inherited addr=#{sock_name(io)} fd=#{fd}" + server_cast(io) + end + + config_listeners = config[:listeners].dup + LISTENERS.replace(inherited) + + # we start out with generic Socket objects that get cast to either + # Kgio::TCPServer or Kgio::UNIXServer objects; but since the Socket + # objects share the same OS-level file descriptor as the higher-level + # *Server objects; we need to prevent Socket objects from being + # garbage-collected + config_listeners -= listener_names + if config_listeners.empty? && LISTENERS.empty? + config_listeners << Unicorn::Const::DEFAULT_LISTEN + init_listeners << Unicorn::Const::DEFAULT_LISTEN + START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}" + end + config_listeners.each { |addr| listen(addr) } + raise ArgumentError, "no listeners" if LISTENERS.empty? + + # this pipe is used to wake us up from select(2) in #join when signals + # are trapped. See trap_deferred. + init_self_pipe! + + # setup signal handlers before writing pid file in case people get + # trigger happy and send signals as soon as the pid file exists. + # Note that signals don't actually get handled until the #join method + QUEUE_SIGS.each { |sig| trap(sig) { SIG_QUEUE << sig; awaken_master } } + trap(:CHLD) { awaken_master } + self.pid = config[:pid] + + self.master_pid = $$ + build_app! if preload_app + maintain_worker_count + self + end + + # replaces current listener set with +listeners+. This will + # close the socket if it will not exist in the new listener set + def listeners=(listeners) + cur_names, dead_names = [], [] + listener_names.each do |name| + if ?/ == name[0] + # mark unlinked sockets as dead so we can rebind them + (File.socket?(name) ? cur_names : dead_names) << name + else + cur_names << name + end + end + set_names = listener_names(listeners) + dead_names.concat(cur_names - set_names).uniq! + + LISTENERS.delete_if do |io| + if dead_names.include?(sock_name(io)) + IO_PURGATORY.delete_if do |pio| + pio.fileno == io.fileno && (pio.close rescue nil).nil? 
# true + end + (io.close rescue nil).nil? # true + else + set_server_sockopt(io, listener_opts[sock_name(io)]) + false + end + end + + (set_names - cur_names).each { |addr| listen(addr) } + end + + def stdout_path=(path); redirect_io($stdout, path); end + def stderr_path=(path); redirect_io($stderr, path); end + + def logger=(obj) + Unicorn::HttpRequest::DEFAULTS["rack.logger"] = @logger = obj + end + + # sets the path for the PID file of the master process + def pid=(path) + if path + if x = valid_pid?(path) + return path if pid && path == pid && x == $$ + if x == reexec_pid && pid =~ /\.oldbin\z/ + logger.warn("will not set pid=#{path} while reexec-ed "\ + "child is running PID:#{x}") + return + end + raise ArgumentError, "Already running on PID:#{x} " \ + "(or pid=#{path} is stale)" + end + end + unlink_pid_safe(pid) if pid + + if path + fp = begin + tmp = "#{File.dirname(path)}/#{rand}.#$$" + File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644) + rescue Errno::EEXIST + retry + end + fp.syswrite("#$$\n") + File.rename(fp.path, path) + fp.close + end + @pid = path + end + + # add a given address to the +listeners+ set, idempotently + # Allows workers to add a private, per-process listener via the + # after_fork hook. Very useful for debugging and testing. + # +:tries+ may be specified as an option for the number of times + # to retry, and +:delay+ may be specified as the time in seconds + # to delay between retries. + # A negative value for +:tries+ indicates the listen will be + # retried indefinitely, this is useful when workers belonging to + # different masters are spawned during a transparent upgrade. + def listen(address, opt = {}.merge(listener_opts[address] || {})) + address = config.expand_addr(address) + return if String === address && listener_names.include?(address) + + delay = opt[:delay] || 0.5 + tries = opt[:tries] || 5 + begin + io = bind_listen(address, opt) + unless Kgio::TCPServer === io || Kgio::UNIXServer === io + IO_PURGATORY << io + io = server_cast(io) + end + logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}" + LISTENERS << io + io + rescue Errno::EADDRINUSE => err + logger.error "adding listener failed addr=#{address} (in use)" + raise err if tries == 0 + tries -= 1 + logger.error "retrying in #{delay} seconds " \ + "(#{tries < 0 ? 'infinite' : tries} tries left)" + sleep(delay) + retry + rescue => err + logger.fatal "error adding listener addr=#{address}" + raise err + end + end + + # monitors children and receives signals forever + # (or until a termination signal is sent). This handles signals + # one-at-a-time time and we'll happily drop signals in case somebody + # is signalling us too often. + def join + respawn = true + last_check = Time.now + + proc_name 'master' + logger.info "master process ready" # test_exec.rb relies on this message + if ready_pipe + ready_pipe.syswrite($$.to_s) + ready_pipe.close rescue nil + self.ready_pipe = nil + end + begin + reap_all_workers + case SIG_QUEUE.shift + when nil + # avoid murdering workers after our master process (or the + # machine) comes out of suspend/hibernation + if (last_check + @timeout) >= (last_check = Time.now) + sleep_time = murder_lazy_workers + else + # wait for workers to wakeup on suspend + sleep_time = @timeout/2.0 + 1 + end + maintain_worker_count if respawn + master_sleep(sleep_time) + when :QUIT # graceful shutdown + break + when :TERM, :INT # immediate shutdown + stop(false) + break + when :USR1 # rotate logs + logger.info "master reopening logs..." 
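+        # USR1 is commonly delivered by a logrotate(8) postrotate hook;
+        # a hypothetical stanza might run (sketch, pid file path assumed):
+        #   kill -USR1 $(cat /path/to/unicorn.pid)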
+        Unicorn::Util.reopen_logs
+        logger.info "master done reopening logs"
+        kill_each_worker(:USR1)
+      when :USR2 # exec binary, stay alive in case something went wrong
+        reexec
+      when :WINCH
+        if Process.ppid == 1 || Process.getpgrp != $$
+          respawn = false
+          logger.info "gracefully stopping all workers"
+          kill_each_worker(:QUIT)
+          self.worker_processes = 0
+        else
+          logger.info "SIGWINCH ignored because we're not daemonized"
+        end
+      when :TTIN
+        respawn = true
+        self.worker_processes += 1
+      when :TTOU
+        self.worker_processes -= 1 if self.worker_processes > 0
+      when :HUP
+        respawn = true
+        if config.config_file
+          load_config!
+        else # exec binary and exit if there's no config file
+          logger.info "config_file not present, reexecuting binary"
+          reexec
+        end
+      end
+    rescue Errno::EINTR
+    rescue => e
+      logger.error "Unhandled master loop exception #{e.inspect}."
+      logger.error e.backtrace.join("\n")
+    end while true
+    stop # gracefully shut down all workers on our way out
+    logger.info "master complete"
+    unlink_pid_safe(pid) if pid
+  end
+
+  # Terminates all workers, but does not exit the master process
+  def stop(graceful = true)
+    self.listeners = []
+    limit = Time.now + timeout
+    until WORKERS.empty? || Time.now > limit
+      kill_each_worker(graceful ? :QUIT : :TERM)
+      sleep(0.1)
+      reap_all_workers
+    end
+    kill_each_worker(:KILL)
+  end
+
+  private
+
+  # wait for a signal handler to wake us up and then consume the pipe
+  def master_sleep(sec)
+    IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
+    SELF_PIPE[0].kgio_tryread(11)
+  end
+
+  def awaken_master
+    SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
+  end
+
+  # reaps all unreaped workers
+  def reap_all_workers
+    begin
+      wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+      wpid or return
+      if reexec_pid == wpid
+        logger.error "reaped #{status.inspect} exec()-ed"
+        self.reexec_pid = 0
+        self.pid = pid.chomp('.oldbin') if pid
+        proc_name 'master'
+      else
+        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
+        m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
+        status.success? ? logger.info(m) : logger.error(m)
+      end
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  # reexecutes the START_CTX with a new binary
+  def reexec
+    if reexec_pid > 0
+      begin
+        Process.kill(0, reexec_pid)
+        logger.error "reexec-ed child already running PID:#{reexec_pid}"
+        return
+      rescue Errno::ESRCH
+        self.reexec_pid = 0
+      end
+    end
+
+    if pid
+      old_pid = "#{pid}.oldbin"
+      prev_pid = pid.dup
+      begin
+        self.pid = old_pid # clear the path for a new pid file
+      rescue ArgumentError
+        logger.error "old PID:#{valid_pid?(old_pid)} running with " \
+                     "existing pid=#{old_pid}, refusing reexec"
+        return
+      rescue => e
+        logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
+        return
+      end
+    end
+
+    self.reexec_pid = fork do
+      listener_fds = LISTENERS.map { |sock| sock.fileno }
+      ENV['UNICORN_FD'] = listener_fds.join(',')
+      Dir.chdir(START_CTX[:cwd])
+      cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
+
+      # avoid leaking FDs we don't know about, but let before_exec
+      # unset FD_CLOEXEC, if anything else in the app eventually
+      # relies on FD inheritance.
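+      # For example, a before_exec hook may clear close-on-exec on one
+      # extra descriptor the new binary should keep (an illustrative
+      # sketch; $my_io is a made-up global holding an IO object):
+      #   before_exec do |server|
+      #     $my_io.fcntl(Fcntl::F_SETFD, 0) # unset FD_CLOEXEC
+      #   end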
+ (3..1024).each do |io| + next if listener_fds.include?(io) + io = IO.for_fd(io) rescue next + IO_PURGATORY << io + io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) + end + logger.info "executing #{cmd.inspect} (in #{Dir.pwd})" + before_exec.call(self) + exec(*cmd) + end + proc_name 'master (old)' + end + + # forcibly terminate all workers that haven't checked in in timeout + # seconds. The timeout is implemented using an unlinked File + # shared between the parent process and each worker. The worker + # runs File#chmod to modify the ctime of the File. If the ctime + # is stale for >timeout seconds, then we'll kill the corresponding + # worker. + def murder_lazy_workers + t = @timeout + next_sleep = 1 + WORKERS.dup.each_pair do |wpid, worker| + stat = worker.tmp.stat + # skip workers that disable fchmod or have never fchmod-ed + stat.mode == 0100600 and next + diff = Time.now - stat.ctime + if diff <= t + tmp = t - diff + next_sleep < tmp and next_sleep = tmp + next + end + logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \ + "(#{diff}s > #{t}s), killing" + kill_worker(:KILL, wpid) # take no prisoners for timeout violations + end + next_sleep + end + + def spawn_missing_workers + (0...worker_processes).each do |worker_nr| + WORKERS.values.include?(worker_nr) and next + worker = Worker.new(worker_nr, Unicorn::TmpIO.new) + before_fork.call(self, worker) + WORKERS[fork { + ready_pipe.close if ready_pipe + self.ready_pipe = nil + worker_loop(worker) + }] = worker + end + end + + def maintain_worker_count + (off = WORKERS.size - worker_processes) == 0 and return + off < 0 and return spawn_missing_workers + WORKERS.dup.each_pair { |wpid,w| + w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil + } + end + + # if we get any error, try to write something back to the client + # assuming we haven't closed the socket, but don't get hung up + # if the socket is already closed or broken. We'll always ensure + # the socket is closed at the end of this function + def handle_error(client, e) + msg = case e + when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF + Unicorn::Const::ERROR_500_RESPONSE + when HttpParserError # try to tell the client they're bad + Unicorn::Const::ERROR_400_RESPONSE + else + logger.error "Read error: #{e.inspect}" + logger.error e.backtrace.join("\n") + Unicorn::Const::ERROR_500_RESPONSE + end + client.kgio_trywrite(msg) + client.close + rescue + nil + end + + # once a client is accepted, it is processed in its entirety here + # in 3 easy steps: read request, call app, write app response + def process_client(client) + r = @app.call(env = @request.read(client)) + + if 100 == r[0].to_i + client.write(Unicorn::Const::EXPECT_100_RESPONSE) + env.delete(Unicorn::Const::HTTP_EXPECT) + r = @app.call(env) + end + # r may be frozen or const, so don't modify it + @request.headers? or r = [ r[0], nil, r[2] ] + http_response_write(client, r) + rescue => e + handle_error(client, e) + end + + # gets rid of stuff the worker has no business keeping track of + # to free some resources and drops all sig handlers. + # traps for USR1, USR2, and HUP may be set in the after_fork Proc + # by the user. + def init_worker_process(worker) + QUEUE_SIGS.each { |sig| trap(sig, nil) } + trap(:CHLD, 'DEFAULT') + SIG_QUEUE.clear + proc_name "worker[#{worker.nr}]" + START_CTX.clear + init_self_pipe! 
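+    # per-worker setup belongs in the after_fork hook invoked below;
+    # e.g. a private, per-worker debug listener as documented for
+    # #listen (a sketch; the port offset is an arbitrary example):
+    #   after_fork do |server, worker|
+    #     server.listen("127.0.0.1:#{9293 + worker.nr}", :tries => -1)
+    #   end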
+    WORKERS.values.each { |other| other.tmp.close rescue nil }
+    WORKERS.clear
+    LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    after_fork.call(self, worker) # can drop perms
+    worker.user(*user) if user.kind_of?(Array) && ! worker.switched
+    self.timeout /= 2.0 # halve it for select()
+    build_app! unless preload_app
+  end
+
+  def reopen_worker_logs(worker_nr)
+    logger.info "worker=#{worker_nr} reopening logs..."
+    Unicorn::Util.reopen_logs
+    logger.info "worker=#{worker_nr} done reopening logs"
+    init_self_pipe!
+  end
+
+  # runs inside each forked worker; this sits around and waits
+  # for connections and doesn't die until the parent dies (or is
+  # given an INT, QUIT, or TERM signal)
+  def worker_loop(worker)
+    ppid = master_pid
+    init_worker_process(worker)
+    nr = 0 # this becomes negative if we need to reopen logs
+    alive = worker.tmp # tmp is our lifeline to the master process
+    ready = LISTENERS
+
+    # closing anything we IO.select on will raise EBADF
+    trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
+    trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
+    [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+    logger.info "worker=#{worker.nr} ready"
+    m = 0
+
+    begin
+      nr < 0 and reopen_worker_logs(worker.nr)
+      nr = 0
+
+      # we're a goner in timeout seconds anyway if alive.chmod
+      # breaks, so don't trap the exception.  Using fchmod() since
+      # futimes() is not available in base Ruby and I very strongly
+      # prefer temporary files to be unlinked for security,
+      # performance and reliability reasons, so utime is out.  No-op
+      # changes with chmod don't update ctime on all filesystems, so
+      # we change our counter each and every time (after process_client
+      # and before IO.select).
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      ready.each do |sock|
+        if client = sock.kgio_tryaccept
+          process_client(client)
+          nr += 1
+          alive.chmod(m = 0 == m ? 1 : 0)
+        end
+        break if nr < 0
+      end
+
+      # make the following bet: if we accepted clients this round,
+      # we're probably reasonably busy, so avoid calling select()
+      # and do a speculative non-blocking accept() on ready listeners
+      # before we sleep again in select().
+      redo unless nr == 0 # (nr < 0) => reopen logs
+
+      ppid == Process.ppid or return
+      alive.chmod(m = 0 == m ? 1 : 0)
+
+      # timeout used so we can detect parent death:
+      ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) and ready = ret[0]
+    rescue Errno::EINTR
+      ready = LISTENERS
+    rescue Errno::EBADF
+      nr < 0 or return
+    rescue => e
+      if alive
+        logger.error "Unhandled listen loop exception #{e.inspect}."
+        logger.error e.backtrace.join("\n")
+      end
+    end while alive
+  end
+
+  # delivers a signal to a worker and fails gracefully if the worker
+  # is no longer running.
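+  # Typical callers are #stop (QUIT or TERM, then KILL) and
+  # #murder_lazy_workers; e.g. (usage sketch):
+  #   kill_worker(:QUIT, wpid) # let the worker finish its current request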
+ def kill_worker(signal, wpid) + Process.kill(signal, wpid) + rescue Errno::ESRCH + worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil + end + + # delivers a signal to each worker + def kill_each_worker(signal) + WORKERS.keys.each { |wpid| kill_worker(signal, wpid) } + end + + # unlinks a PID file at given +path+ if it contains the current PID + # still potentially racy without locking the directory (which is + # non-portable and may interact badly with other programs), but the + # window for hitting the race condition is small + def unlink_pid_safe(path) + (File.read(path).to_i == $$ and File.unlink(path)) rescue nil + end + + # returns a PID if a given path contains a non-stale PID file, + # nil otherwise. + def valid_pid?(path) + wpid = File.read(path).to_i + wpid <= 0 and return + Process.kill(0, wpid) + wpid + rescue Errno::ESRCH, Errno::ENOENT + # don't unlink stale pid files, racy without non-portable locking... + end + + def load_config! + loaded_app = app + logger.info "reloading config_file=#{config.config_file}" + config[:listeners].replace(init_listeners) + config.reload + config.commit!(self) + kill_each_worker(:QUIT) + Unicorn::Util.reopen_logs + self.app = orig_app + build_app! if preload_app + logger.info "done reloading config_file=#{config.config_file}" + rescue StandardError, LoadError, SyntaxError => e + logger.error "error reloading config_file=#{config.config_file}: " \ + "#{e.class} #{e.message} #{e.backtrace}" + self.app = loaded_app + end + + # returns an array of string names for the given listener array + def listener_names(listeners = LISTENERS) + listeners.map { |io| sock_name(io) } + end + + def build_app! + if app.respond_to?(:arity) && app.arity == 0 + if defined?(Gem) && Gem.respond_to?(:refresh) + logger.info "Refreshing Gem list" + Gem.refresh + end + self.app = app.call + end + end + + def proc_name(tag) + $0 = ([ File.basename(START_CTX[0]), tag + ]).concat(START_CTX[:argv]).join(' ') + end + + def redirect_io(io, path) + File.open(path, 'ab') { |fp| io.reopen(fp) } if path + io.sync = true + end + + def init_self_pipe! + SELF_PIPE.each { |io| io.close rescue nil } + SELF_PIPE.replace(Kgio::Pipe.new) + SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) } + end +end + diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb index 0d415dd..662b603 100644 --- a/lib/unicorn/launcher.rb +++ b/lib/unicorn/launcher.rb @@ -20,6 +20,7 @@ module Unicorn::Launcher # to pickup code changes if the original deployment directory # is a symlink or otherwise got replaced. 
def self.daemonize!(options) + cfg = Unicorn::Configurator $stdin.reopen("/dev/null") # We only start a new process group if we're not being reexecuted @@ -52,9 +53,9 @@ module Unicorn::Launcher end end # $stderr/$stderr can/will be redirected separately in the Unicorn config - Unicorn::Configurator::DEFAULTS[:stderr_path] ||= "/dev/null" - Unicorn::Configurator::DEFAULTS[:stdout_path] ||= "/dev/null" - Unicorn::Configurator::RACKUP[:daemonized] = true + cfg::DEFAULTS[:stderr_path] ||= "/dev/null" + cfg::DEFAULTS[:stdout_path] ||= "/dev/null" + cfg::RACKUP[:daemonized] = true end end diff --git a/lib/unicorn/preread_input.rb b/lib/unicorn/preread_input.rb new file mode 100644 index 0000000..ec83cb2 --- /dev/null +++ b/lib/unicorn/preread_input.rb @@ -0,0 +1,30 @@ +# -*- encoding: binary -*- + +module Unicorn +# This middleware is used to ensure input is buffered to memory +# or disk (depending on size) before the application is dispatched +# by entirely consuming it (from TeeInput) beforehand. +# +# Usage (in config.ru): +# +# require 'unicorn/preread_input' +# if defined?(Unicorn) +# use Unicorn::PrereadInput +# end +# run YourApp.new +class PrereadInput + def initialize(app) + @app = app + end + + def call(env) + buf = "" + input = env["rack.input"] + if buf = input.read(16384) + true while input.read(16384, buf) + input.rewind + end + @app.call(env) + end +end +end diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 9a4266d..7364937 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -1,11 +1,28 @@ # -*- encoding: binary -*- - +# :enddoc: require 'socket' module Unicorn module SocketHelper include Socket::Constants + # :stopdoc: + # internal interface, only used by Rainbows!/Zbatery + DEFAULTS = { + # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+ + # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9 + # This change shouldn't affect Unicorn users behind nginx (a + # value of 1 remains an optimization), but Rainbows! users may + # want to use a higher value on Linux 2.6.32+ to protect against + # denial-of-service attacks + :tcp_defer_accept => 1, + + # FreeBSD, we need to override this to 'dataready' when we + # eventually get HTTPS support + :accept_filter => 'httpready', + } + #:startdoc: + # configure platform-specific options (only tested on Linux 2.6 so far) case RUBY_PLATFORM when /linux/ @@ -14,22 +31,13 @@ module Unicorn # do not send out partial frames (Linux) TCP_CORK = 3 unless defined?(TCP_CORK) - when /freebsd(([1-4]\..{1,2})|5\.[0-4])/ - # Do nothing for httpready, just closing a bug when freebsd <= 5.4 - TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc: when /freebsd/ # do not send out partial frames (FreeBSD) TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) - # Use the HTTP accept filter if available. - # The struct made by pack() is defined in /usr/include/sys/socket.h - # as accept_filter_arg - unless `/sbin/sysctl -nq net.inet.accf.http`.empty? 
- # set set the "httpready" accept filter in FreeBSD if available - # if other protocols are to be supported, this may be - # String#replace-d with "dataready" arguments instead - FILTER_ARG = ['httpready', nil].pack('a16a240') - end + def accf_arg(af_name) + [ af_name, nil ].pack('a16a240') + end if defined?(SO_ACCEPTFILTER) end def set_tcp_sockopt(sock, opt) @@ -49,10 +57,25 @@ module Unicorn end # No good reason to ever have deferred accepts off + # (except maybe benchmarking) if defined?(TCP_DEFER_ACCEPT) - sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1) - elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG) - sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG) + # this differs from nginx, since nginx doesn't allow us to + # configure the the timeout... + tmp = DEFAULTS.merge(opt) + seconds = tmp[:tcp_defer_accept] + seconds = DEFAULTS[:tcp_defer_accept] if seconds == true + seconds = 0 unless seconds # nil/false means disable this + sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds) + elsif respond_to?(:accf_arg) + tmp = DEFAULTS.merge(opt) + if name = tmp[:accept_filter] + begin + sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name)) + rescue => e + logger.error("#{sock_name(sock)} " \ + "failed to set accept_filter=#{name} (#{e.inspect})") + end + end end end @@ -69,14 +92,11 @@ module Unicorn end sock.listen(opt[:backlog] || 1024) rescue => e - if respond_to?(:logger) - logger.error "error setting socket options: #{e.inspect}" - logger.error e.backtrace.join("\n") - end + logger.error "error setting socket options: #{e.inspect}" + logger.error e.backtrace.join("\n") end def log_buffer_sizes(sock, pfx = '') - respond_to?(:logger) or return rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i') sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i') logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}" @@ -91,10 +111,14 @@ module Unicorn sock = if address[0] == ?/ if File.exist?(address) if File.socket?(address) - if self.respond_to?(:logger) + begin + UNIXSocket.new(address).close + # fall through, try to bind(2) and fail with EADDRINUSE + # (or succeed from a small race condition we can't sanely avoid). + rescue Errno::ECONNREFUSED logger.info "unlinking existing socket=#{address}" + File.unlink(address) end - File.unlink(address) else raise ArgumentError, "socket=#{address} specified but it is not a socket!" @@ -102,12 +126,12 @@ module Unicorn end old_umask = File.umask(opt[:umask] || 0) begin - UNIXServer.new(address) + Kgio::UNIXServer.new(address) ensure File.umask(old_umask) end elsif address =~ /^(\d+\.\d+\.\d+\.\d+):(\d+)$/ - TCPServer.new($1, $2.to_i) + Kgio::TCPServer.new($1, $2.to_i) else raise ArgumentError, "Don't know how to bind: #{address}" end @@ -142,9 +166,9 @@ module Unicorn def server_cast(sock) begin Socket.unpack_sockaddr_in(sock.getsockname) - TCPServer.for_fd(sock.fileno) + Kgio::TCPServer.for_fd(sock.fileno) rescue ArgumentError - UNIXServer.for_fd(sock.fileno) + Kgio::UNIXServer.for_fd(sock.fileno) end end diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index 563747c..a3e01d2 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -1,224 +1,224 @@ # -*- encoding: binary -*- -module Unicorn - - # acts like tee(1) on an input input to provide a input-like stream - # while providing rewindable semantics through a File/StringIO backing - # store. On the first pass, the input is only read on demand so your - # Rack application can use input notification (upload progress and - # like). 
This should fully conform to the Rack::Lint::InputWrapper - # specification on the public API. This class is intended to be a - # strict interpretation of Rack::Lint::InputWrapper functionality and - # will not support any deviations from it. - # - # When processing uploads, Unicorn exposes a TeeInput object under - # "rack.input" of the Rack environment. - class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2) - - # Initializes a new TeeInput object. You normally do not have to call - # this unless you are writing an HTTP server. - def initialize(*args) - super(*args) - self.len = parser.content_length - self.tmp = len && len < Const::MAX_BODY ? StringIO.new("") : Util.tmpio - self.buf2 = "" - if buf.size > 0 - parser.filter_body(buf2, buf) and finalize_input - tmp.write(buf2) - tmp.seek(0) - end +# acts like tee(1) on an input input to provide a input-like stream +# while providing rewindable semantics through a File/StringIO backing +# store. On the first pass, the input is only read on demand so your +# Rack application can use input notification (upload progress and +# like). This should fully conform to the Rack::Lint::InputWrapper +# specification on the public API. This class is intended to be a +# strict interpretation of Rack::Lint::InputWrapper functionality and +# will not support any deviations from it. +# +# When processing uploads, Unicorn exposes a TeeInput object under +# "rack.input" of the Rack environment. +class Unicorn::TeeInput + attr_accessor :tmp, :socket, :parser, :env, :buf, :len, :buf2 + + # The maximum size (in +bytes+) to buffer in memory before + # resorting to a temporary file. Default is 112 kilobytes. + @@client_body_buffer_size = Unicorn::Const::MAX_BODY + + # The I/O chunk size (in +bytes+) for I/O operations where + # the size cannot be user-specified when a method is called. + # The default is 16 kilobytes. + @@io_chunk_size = Unicorn::Const::CHUNK_SIZE + + # Initializes a new TeeInput object. You normally do not have to call + # this unless you are writing an HTTP server. + def initialize(socket, request) + @socket = socket + @parser = request + @buf = request.buf + @env = request.env + @len = request.content_length + @tmp = @len && @len < @@client_body_buffer_size ? + StringIO.new("") : Unicorn::TmpIO.new + @buf2 = "" + if @buf.size > 0 + @parser.filter_body(@buf2, @buf) and finalize_input + @tmp.write(@buf2) + @tmp.rewind end + end - # :call-seq: - # ios.size => Integer - # - # Returns the size of the input. For requests with a Content-Length - # header value, this will not read data off the socket and just return - # the value of the Content-Length header as an Integer. - # - # For Transfer-Encoding:chunked requests, this requires consuming - # all of the input stream before returning since there's no other - # way to determine the size of the request body beforehand. - # - # This method is no longer part of the Rack specification as of - # Rack 1.2, so its use is not recommended. This method only exists - # for compatibility with Rack applications designed for Rack 1.1 and - # earlier. Most applications should only need to call +read+ with a - # specified +length+ in a loop until it returns +nil+. - def size - len and return len - - if socket - pos = tmp.pos - while tee(Const::CHUNK_SIZE, buf2) - end - tmp.seek(pos) + # :call-seq: + # ios.size => Integer + # + # Returns the size of the input. 
For requests with a Content-Length + # header value, this will not read data off the socket and just return + # the value of the Content-Length header as an Integer. + # + # For Transfer-Encoding:chunked requests, this requires consuming + # all of the input stream before returning since there's no other + # way to determine the size of the request body beforehand. + # + # This method is no longer part of the Rack specification as of + # Rack 1.2, so its use is not recommended. This method only exists + # for compatibility with Rack applications designed for Rack 1.1 and + # earlier. Most applications should only need to call +read+ with a + # specified +length+ in a loop until it returns +nil+. + def size + @len and return @len + + if socket + pos = @tmp.pos + while tee(@@io_chunk_size, @buf2) end - - self.len = tmp.size + @tmp.seek(pos) end - # :call-seq: - # ios.read([length [, buffer ]]) => string, buffer, or nil - # - # Reads at most length bytes from the I/O stream, or to the end of - # file if length is omitted or is nil. length must be a non-negative - # integer or nil. If the optional buffer argument is present, it - # must reference a String, which will receive the data. - # - # At end of file, it returns nil or "" depend on length. - # ios.read() and ios.read(nil) returns "". - # ios.read(length [, buffer]) returns nil. - # - # If the Content-Length of the HTTP request is known (as is the common - # case for POST requests), then ios.read(length [, buffer]) will block - # until the specified length is read (or it is the last chunk). - # Otherwise, for uncommon "Transfer-Encoding: chunked" requests, - # ios.read(length [, buffer]) will return immediately if there is - # any data and only block when nothing is available (providing - # IO#readpartial semantics). - def read(*args) - socket or return tmp.read(*args) - - length = args.shift - if nil == length - rv = tmp.read || "" - while tee(Const::CHUNK_SIZE, buf2) - rv << buf2 - end - rv + @len = @tmp.size + end + + # :call-seq: + # ios.read([length [, buffer ]]) => string, buffer, or nil + # + # Reads at most length bytes from the I/O stream, or to the end of + # file if length is omitted or is nil. length must be a non-negative + # integer or nil. If the optional buffer argument is present, it + # must reference a String, which will receive the data. + # + # At end of file, it returns nil or "" depend on length. + # ios.read() and ios.read(nil) returns "". + # ios.read(length [, buffer]) returns nil. + # + # If the Content-Length of the HTTP request is known (as is the common + # case for POST requests), then ios.read(length [, buffer]) will block + # until the specified length is read (or it is the last chunk). + # Otherwise, for uncommon "Transfer-Encoding: chunked" requests, + # ios.read(length [, buffer]) will return immediately if there is + # any data and only block when nothing is available (providing + # IO#readpartial semantics). + def read(*args) + @socket or return @tmp.read(*args) + + length = args.shift + if nil == length + rv = @tmp.read || "" + while tee(@@io_chunk_size, @buf2) + rv << @buf2 + end + rv + else + rv = args.shift || "" + diff = @tmp.size - @tmp.pos + if 0 == diff + ensure_length(tee(length, rv), length) else - rv = args.shift || "" - diff = tmp.size - tmp.pos - if 0 == diff - ensure_length(tee(length, rv), length) - else - ensure_length(tmp.read(diff > length ? length : diff, rv), length) - end + ensure_length(@tmp.read(diff > length ? 
length : diff, rv), length) end end + end - # :call-seq: - # ios.gets => string or nil - # - # Reads the next ``line'' from the I/O stream; lines are separated - # by the global record separator ($/, typically "\n"). A global - # record separator of nil reads the entire unread contents of ios. - # Returns nil if called at the end of file. - # This takes zero arguments for strict Rack::Lint compatibility, - # unlike IO#gets. - def gets - socket or return tmp.gets - sep = $/ or return read - - orig_size = tmp.size - if tmp.pos == orig_size - tee(Const::CHUNK_SIZE, buf2) or return nil - tmp.seek(orig_size) - end + # :call-seq: + # ios.gets => string or nil + # + # Reads the next ``line'' from the I/O stream; lines are separated + # by the global record separator ($/, typically "\n"). A global + # record separator of nil reads the entire unread contents of ios. + # Returns nil if called at the end of file. + # This takes zero arguments for strict Rack::Lint compatibility, + # unlike IO#gets. + def gets + @socket or return @tmp.gets + sep = $/ or return read + + orig_size = @tmp.size + if @tmp.pos == orig_size + tee(@@io_chunk_size, @buf2) or return nil + @tmp.seek(orig_size) + end - sep_size = Rack::Utils.bytesize(sep) - line = tmp.gets # cannot be nil here since size > pos - sep == line[-sep_size, sep_size] and return line + sep_size = Rack::Utils.bytesize(sep) + line = @tmp.gets # cannot be nil here since size > pos + sep == line[-sep_size, sep_size] and return line - # unlikely, if we got here, then tmp is at EOF - begin - orig_size = tmp.pos - tee(Const::CHUNK_SIZE, buf2) or break - tmp.seek(orig_size) - line << tmp.gets - sep == line[-sep_size, sep_size] and return line - # tmp is at EOF again here, retry the loop - end while true - - line - end + # unlikely, if we got here, then @tmp is at EOF + begin + orig_size = @tmp.pos + tee(@@io_chunk_size, @buf2) or break + @tmp.seek(orig_size) + line << @tmp.gets + sep == line[-sep_size, sep_size] and return line + # @tmp is at EOF again here, retry the loop + end while true - # :call-seq: - # ios.each { |line| block } => ios - # - # Executes the block for every ``line'' in *ios*, where lines are - # separated by the global record separator ($/, typically "\n"). - def each(&block) - while line = gets - yield line - end + line + end - self # Rack does not specify what the return value is here + # :call-seq: + # ios.each { |line| block } => ios + # + # Executes the block for every ``line'' in *ios*, where lines are + # separated by the global record separator ($/, typically "\n"). + def each(&block) + while line = gets + yield line end - # :call-seq: - # ios.rewind => 0 - # - # Positions the *ios* pointer to the beginning of input, returns - # the offset (zero) of the +ios+ pointer. Subsequent reads will - # start from the beginning of the previously-buffered input. - def rewind - tmp.rewind # Rack does not specify what the return value is here - end + self # Rack does not specify what the return value is here + end - private - - def client_error(e) - case e - when EOFError - # in case client only did a premature shutdown(SHUT_WR) - # we do support clients that shutdown(SHUT_WR) after the - # _entire_ request has been sent, and those will not have - # raised EOFError on us. 
- socket.close if socket - raise ClientShutdown, "bytes_read=#{tmp.size}", [] - when HttpParserError - e.set_backtrace([]) - end - raise e - end + # :call-seq: + # ios.rewind => 0 + # + # Positions the *ios* pointer to the beginning of input, returns + # the offset (zero) of the +ios+ pointer. Subsequent reads will + # start from the beginning of the previously-buffered input. + def rewind + @tmp.rewind # Rack does not specify what the return value is here + end - # tees off a +length+ chunk of data from the input into the IO - # backing store as well as returning it. +dst+ must be specified. - # returns nil if reading from the input returns nil - def tee(length, dst) - unless parser.body_eof? - if parser.filter_body(dst, socket.readpartial(length, buf)).nil? - tmp.write(dst) - tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug - return dst - end +private + + # tees off a +length+ chunk of data from the input into the IO + # backing store as well as returning it. +dst+ must be specified. + # returns nil if reading from the input returns nil + def tee(length, dst) + unless @parser.body_eof? + r = @socket.kgio_read(length, @buf) or eof! + unless @parser.filter_body(dst, @buf) + @tmp.write(dst) + @tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug + return dst end - finalize_input - rescue => e - client_error(e) end + finalize_input + end - def finalize_input - while parser.trailers(req, buf).nil? - # Don't worry about raising ClientShutdown here on EOFError, tee() - # will catch EOFError when app is processing it, otherwise in - # initialize we never get any chance to enter the app so the - # EOFError will just get trapped by Unicorn and not the Rack app - buf << socket.readpartial(Const::CHUNK_SIZE) - end - self.socket = nil + def finalize_input + while @parser.trailers(@env, @buf).nil? + r = @socket.kgio_read(@@io_chunk_size) or eof! + @buf << r end + @socket = nil + end - # tee()s into +dst+ until it is of +length+ bytes (or until - # we've reached the Content-Length of the request body). - # Returns +dst+ (the exact object, not a duplicate) - # To continue supporting applications that need near-real-time - # streaming input bodies, this is a no-op for - # "Transfer-Encoding: chunked" requests. - def ensure_length(dst, length) - # len is nil for chunked bodies, so we can't ensure length for those - # since they could be streaming bidirectionally and we don't want to - # block the caller in that case. - return dst if dst.nil? || len.nil? - - while dst.size < length && tee(length - dst.size, buf2) - dst << buf2 - end - - dst + # tee()s into +dst+ until it is of +length+ bytes (or until + # we've reached the Content-Length of the request body). + # Returns +dst+ (the exact object, not a duplicate) + # To continue supporting applications that need near-real-time + # streaming input bodies, this is a no-op for + # "Transfer-Encoding: chunked" requests. + def ensure_length(dst, length) + # len is nil for chunked bodies, so we can't ensure length for those + # since they could be streaming bidirectionally and we don't want to + # block the caller in that case. + return dst if dst.nil? || @len.nil? + + while dst.size < length && tee(length - dst.size, @buf2) + dst << @buf2 end + dst + end + + def eof! + # in case client only did a premature shutdown(SHUT_WR) + # we do support clients that shutdown(SHUT_WR) after the + # _entire_ request has been sent, and those will not have + # raised EOFError on us. 
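+    # a hypothetical Rack app that streams request bodies could rescue
+    # Unicorn::ClientShutdown to tell aborted uploads apart from other
+    # errors (app-side sketch):
+    #   begin
+    #     env["rack.input"].read(16384)
+    #   rescue Unicorn::ClientShutdown
+    #     # client gave up mid-request; discard partial input
+    #   end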
+ @socket.close if @socket + raise Unicorn::ClientShutdown, "bytes_read=#{@tmp.size}", [] end end diff --git a/lib/unicorn/tmpio.rb b/lib/unicorn/tmpio.rb new file mode 100644 index 0000000..a3c530d --- /dev/null +++ b/lib/unicorn/tmpio.rb @@ -0,0 +1,29 @@ +# -*- encoding: binary -*- +# :stopdoc: +require 'tmpdir' + +# some versions of Ruby had a broken Tempfile which didn't work +# well with unlinked files. This one is much shorter, easier +# to understand, and slightly faster. +class Unicorn::TmpIO < File + + # creates and returns a new File object. The File is unlinked + # immediately, switched to binary mode, and userspace output + # buffering is disabled + def self.new + fp = begin + super("#{Dir::tmpdir}/#{rand}", RDWR|CREAT|EXCL, 0600) + rescue Errno::EEXIST + retry + end + unlink(fp.path) + fp.binmode + fp.sync = true + fp + end + + # for easier env["rack.input"] compatibility with Rack <= 1.1 + def size + stat.size + end +end diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb index e8c09d0..82329eb 100644 --- a/lib/unicorn/util.rb +++ b/lib/unicorn/util.rb @@ -1,87 +1,67 @@ # -*- encoding: binary -*- -require 'fcntl' -require 'tmpdir' +module Unicorn::Util -module Unicorn +# :stopdoc: + def self.is_log?(fp) + append_flags = File::WRONLY | File::APPEND - class TmpIO < ::File + ! fp.closed? && + fp.sync && + fp.path[0] == ?/ && + (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags + rescue IOError, Errno::EBADF + false + end - # for easier env["rack.input"] compatibility - def size - # flush if sync - stat.size + def self.chown_logs(uid, gid) + ObjectSpace.each_object(File) do |fp| + fp.chown(uid, gid) if is_log?(fp) end end +# :startdoc: - module Util - class << self - - def is_log?(fp) - append_flags = File::WRONLY | File::APPEND + # This reopens ALL logfiles in the process that have been rotated + # using logrotate(8) (without copytruncate) or similar tools. + # A +File+ object is considered for reopening if it is: + # 1) opened with the O_APPEND and O_WRONLY flags + # 2) opened with an absolute path (starts with "/") + # 3) the current open file handle does not match its original open path + # 4) unbuffered (as far as userspace buffering goes, not O_SYNC) + # Returns the number of files reopened + def self.reopen_logs + to_reopen = [] + nr = 0 + ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp } - ! fp.closed? && - fp.sync && - fp.path[0] == ?/ && - (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags + to_reopen.each do |fp| + orig_st = begin + fp.stat + rescue IOError, Errno::EBADF + next end - def chown_logs(uid, gid) - ObjectSpace.each_object(File) do |fp| - fp.chown(uid, gid) if is_log?(fp) - end + begin + b = File.stat(fp.path) + next if orig_st.ino == b.ino && orig_st.dev == b.dev + rescue Errno::ENOENT end - # This reopens ALL logfiles in the process that have been rotated - # using logrotate(8) (without copytruncate) or similar tools. 
- # A +File+ object is considered for reopening if it is: - # 1) opened with the O_APPEND and O_WRONLY flags - # 2) opened with an absolute path (starts with "/") - # 3) the current open file handle does not match its original open path - # 4) unbuffered (as far as userspace buffering goes, not O_SYNC) - # Returns the number of files reopened - def reopen_logs - to_reopen = [] - nr = 0 - ObjectSpace.each_object(File) { |fp| is_log?(fp) and to_reopen << fp } - - to_reopen.each do |fp| - orig_st = fp.stat - begin - b = File.stat(fp.path) - next if orig_st.ino == b.ino && orig_st.dev == b.dev - rescue Errno::ENOENT - end + begin + File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) } + fp.sync = true + new_st = fp.stat - File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) } - fp.sync = true - new_st = fp.stat - if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid - fp.chown(orig_st.uid, orig_st.gid) - end - nr += 1 + # this should only happen in the master: + if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid + fp.chown(orig_st.uid, orig_st.gid) end - nr + nr += 1 + rescue IOError, Errno::EBADF + # not much we can do... end - - # creates and returns a new File object. The File is unlinked - # immediately, switched to binary mode, and userspace output - # buffering is disabled - def tmpio - fp = begin - TmpIO.open("#{Dir::tmpdir}/#{rand}", - File::RDWR|File::CREAT|File::EXCL, 0600) - rescue Errno::EEXIST - retry - end - File.unlink(fp.path) - fp.binmode - fp.sync = true - fp - end - end - + nr end end diff --git a/lib/unicorn/worker.rb b/lib/unicorn/worker.rb new file mode 100644 index 0000000..fd8d20e --- /dev/null +++ b/lib/unicorn/worker.rb @@ -0,0 +1,40 @@ +# -*- encoding: binary -*- + +# This class and its members can be considered a stable interface +# and will not change in a backwards-incompatible fashion between +# releases of Unicorn. You may need to access it in the +# before_fork/after_fork hooks. See the Unicorn::Configurator RDoc +# for examples. +class Unicorn::Worker < Struct.new(:nr, :tmp, :switched) + + # worker objects may be compared to just plain numbers + def ==(other_nr) + self.nr == other_nr + end + + # Changes the worker process to the specified +user+ and +group+ + # This is only intended to be called from within the worker + # process from the +after_fork+ hook. This should be called in + # the +after_fork+ hook after any priviledged functions need to be + # run (e.g. to set per-worker CPU affinity, niceness, etc) + # + # Any and all errors raised within this method will be propagated + # directly back to the caller (usually the +after_fork+ hook. + # These errors commonly include ArgumentError for specifying an + # invalid user/group and Errno::EPERM for insufficient priviledges + def user(user, group = nil) + # we do not protect the caller, checking Process.euid == 0 is + # insufficient because modern systems have fine-grained + # capabilities. Let the caller handle any and all errors. 
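+    # a typical call site is the after_fork hook in a Unicorn config
+    # file (sketch; the user/group names are examples only):
+    #   after_fork do |server, worker|
+    #     worker.user('www-data', 'www-data') if Process.euid == 0
+    #   end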
+ uid = Etc.getpwnam(user).uid + gid = Etc.getgrnam(group).gid if group + Unicorn::Util.chown_logs(uid, gid) + tmp.chown(uid, gid) + if gid && Process.egid != gid + Process.initgroups(user, gid) + Process::GID.change_privilege(gid) + end + Process.euid != uid and Process::UID.change_privilege(uid) + self.switched = true + end +end diff --git a/local.mk.sample b/local.mk.sample index c950d87..25bca5d 100644 --- a/local.mk.sample +++ b/local.mk.sample @@ -37,15 +37,6 @@ else RUBY := $(prefix)/bin/ruby --disable-gems endif -# FIXME: use isolate more -ifndef RUBYLIB - gems := rack-1.1.0 - gem_paths := $(addprefix $(HOME)/lib/ruby/gems/1.8/gems/,$(gems)) - sp := - sp += - export RUBYLIB := $(subst $(sp),:,$(addsuffix /lib,$(gem_paths))) -endif - # pipefail is THE reason to use bash (v3+) or never revisions of ksh93 # SHELL := /bin/bash -e -o pipefail SHELL := /bin/ksh93 -e -o pipefail diff --git a/script/isolate_for_tests b/script/isolate_for_tests new file mode 100755 index 0000000..5cea47c --- /dev/null +++ b/script/isolate_for_tests @@ -0,0 +1,45 @@ +#!/usr/bin/env ruby +# scripts/Makefiles can read and eval the output of this script and +# use it as RUBYLIB +require 'rubygems' +require 'isolate' +fp = File.open(__FILE__, "rb") +fp.flock(File::LOCK_EX) + +ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby' +opts = { + :system => false, + # we want "ruby-1.8.7" and not "ruby-1.8", so disable :multiruby + :multiruby => false, + :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}", +} + +pid = fork do + Isolate.now!(opts) do + gem 'sqlite3-ruby', '1.2.5' + gem 'kgio', '1.3.1' + gem 'rack', '1.1.0' + end +end +_, status = Process.waitpid2(pid) +status.success? or abort status.inspect +lib_paths = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) } +dst = "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}.mk" +File.open("#{dst}.#$$", "w") do |fp| + fp.puts "ISOLATE_LIBS=#{lib_paths.join(':')}" +end +File.rename("#{dst}.#$$", dst) + +# pure Ruby gems can be shared across all Rubies +%w(3.0.0).each do |rails_ver| + opts[:path] = "tmp/isolate/rails-#{rails_ver}" + pid = fork do + Isolate.now!(opts) do + gem 'rails', rails_ver + end + end + _, status = Process.waitpid2(pid) + status.success? or abort status.inspect + more = Dir["#{opts[:path]}/gems/*-*/lib"].map { |x| File.expand_path(x) } + lib_paths.concat(more) +end diff --git a/t/GNUmakefile b/t/GNUmakefile index e80c43a..8f2668c 100644 --- a/t/GNUmakefile +++ b/t/GNUmakefile @@ -17,6 +17,12 @@ endif RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))') export RUBY_ENGINE +isolate_libs := ../tmp/isolate/$(RUBY_ENGINE)-$(RUBY_VERSION).mk +$(isolate_libs): ../script/isolate_for_tests + @cd .. 
&& $(RUBY) script/isolate_for_tests +-include $(isolate_libs) +MYLIBS := $(RUBYLIB):$(ISOLATE_LIBS) + T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh) all:: $(T) @@ -58,7 +64,7 @@ $(test_prefix)/.stamp: $(T): export RUBY := $(RUBY) $(T): export RAKE := $(RAKE) $(T): export PATH := $(test_prefix)/bin:$(PATH) -$(T): export RUBYLIB := $(test_prefix)/lib:$(RUBYLIB) +$(T): export RUBYLIB := $(test_prefix)/lib:$(MYLIBS) $(T): dep $(test_prefix)/.stamp trash/.gitignore $(TRACER) $(SHELL) $(SH_TEST_OPTS) $@ $(TEST_OPTS) diff --git a/t/preread_input.ru b/t/preread_input.ru new file mode 100644 index 0000000..79685c4 --- /dev/null +++ b/t/preread_input.ru @@ -0,0 +1,17 @@ +#\-E none +require 'digest/sha1' +require 'unicorn/preread_input' +use Rack::ContentLength +use Rack::ContentType, "text/plain" +use Unicorn::PrereadInput +nr = 0 +run lambda { |env| + $stderr.write "app dispatch: #{nr += 1}\n" + input = env["rack.input"] + dig = Digest::SHA1.new + while buf = input.read(16384) + dig.update(buf) + end + + [ 200, {}, [ "#{dig.hexdigest}\n" ] ] +} diff --git a/t/rails3-app/Gemfile b/t/rails3-app/Gemfile index 491e910..d996f34 100644 --- a/t/rails3-app/Gemfile +++ b/t/rails3-app/Gemfile @@ -1,6 +1,6 @@ source 'http://rubygems.org' -# gem 'rails', '3.0.0.beta3' +# gem 'rails', '3.0.0' # Bundle edge Rails instead: # gem 'rails', :git => 'git://github.com/rails/rails.git' diff --git a/t/t0003-working_directory.sh b/t/t0003-working_directory.sh index 53345ae..79988d8 100755 --- a/t/t0003-working_directory.sh +++ b/t/t0003-working_directory.sh @@ -1,9 +1,4 @@ #!/bin/sh -if test -n "$RBX_SKIP" -then - echo "$0 is broken under Rubinius for now" - exit 0 -fi . ./test-lib.sh t_plan 4 "config.ru inside alt working_directory" diff --git a/t/t0010-reap-logging.sh b/t/t0010-reap-logging.sh new file mode 100755 index 0000000..93d8c60 --- /dev/null +++ b/t/t0010-reap-logging.sh @@ -0,0 +1,55 @@ +#!/bin/sh +. ./test-lib.sh +t_plan 9 "reap worker logging messages" + +t_begin "setup and start" && { + unicorn_setup + cat >> $unicorn_config <<EOF +after_fork { |s,w| File.open('$fifo','w') { |f| f.write '.' } } +EOF + unicorn -c $unicorn_config pid.ru & + test '.' = $(cat $fifo) + unicorn_wait_start +} + +t_begin "kill 1st worker=0" && { + pid_1=$(curl http://$listen/) + kill -9 $pid_1 +} + +t_begin "wait for 2nd worker to start" && { + test '.' = $(cat $fifo) +} + +t_begin "ensure log of 1st reap is an ERROR" && { + dbgcat r_err + grep 'ERROR.*reaped.*worker=0' $r_err | grep $pid_1 + dbgcat r_err + > $r_err +} + +t_begin "kill 2nd worker gracefully" && { + pid_2=$(curl http://$listen/) + kill -QUIT $pid_2 +} + +t_begin "wait for 3rd worker=0 to start " && { + test '.' = $(cat $fifo) +} + +t_begin "ensure log of 2nd reap is a INFO" && { + grep 'INFO.*reaped.*worker=0' $r_err | grep $pid_2 + > $r_err +} + +t_begin "killing succeeds" && { + kill $unicorn_pid + wait + kill -0 $unicorn_pid && false +} + +t_begin "check stderr" && { + check_stderr +} + +t_done diff --git a/t/t0011-active-unix-socket.sh b/t/t0011-active-unix-socket.sh new file mode 100644 index 0000000..6f9ac53 --- /dev/null +++ b/t/t0011-active-unix-socket.sh @@ -0,0 +1,79 @@ +#!/bin/sh +. 
./test-lib.sh +t_plan 11 "existing UNIX domain socket check" + +read_pid_unix () { + x=$(printf 'GET / HTTP/1.0\r\n\r\n' | \ + socat - UNIX:$unix_socket | \ + tail -1) + test -n "$x" + y="$(expr "$x" : '\([0-9]\+\)')" + test x"$x" = x"$y" + test -n "$y" + echo "$y" +} + +t_begin "setup and start" && { + rtmpfiles unix_socket unix_config + rm -f $unix_socket + unicorn_setup + grep -v ^listen < $unicorn_config > $unix_config + echo "listen '$unix_socket'" >> $unix_config + unicorn -D -c $unix_config pid.ru + unicorn_wait_start + orig_master_pid=$unicorn_pid +} + +t_begin "get pid of worker" && { + worker_pid=$(read_pid_unix) + t_info "worker_pid=$worker_pid" +} + +t_begin "fails to start with existing pid file" && { + rm -f $ok + unicorn -D -c $unix_config pid.ru || echo ok > $ok + test x"$(cat $ok)" = xok +} + +t_begin "worker pid unchanged" && { + test x"$(read_pid_unix)" = x$worker_pid + > $r_err +} + +t_begin "fails to start with listening UNIX domain socket bound" && { + rm $ok $pid + unicorn -D -c $unix_config pid.ru || echo ok > $ok + test x"$(cat $ok)" = xok + > $r_err +} + +t_begin "worker pid unchanged (again)" && { + test x"$(read_pid_unix)" = x$worker_pid +} + +t_begin "nuking the existing Unicorn succeeds" && { + kill -9 $unicorn_pid $worker_pid + while kill -0 $unicorn_pid + do + sleep 1 + done + check_stderr +} + +t_begin "succeeds in starting with leftover UNIX domain socket bound" && { + test -S $unix_socket + unicorn -D -c $unix_config pid.ru + unicorn_wait_start +} + +t_begin "worker pid changed" && { + test x"$(read_pid_unix)" != x$worker_pid +} + +t_begin "killing succeeds" && { + kill $unicorn_pid +} + +t_begin "no errors" && check_stderr + +t_done diff --git a/t/t0303-rails3-alt-working_directory_config.ru.sh b/t/t0303-rails3-alt-working_directory_config.ru.sh index 444f05a..1433f94 100755 --- a/t/t0303-rails3-alt-working_directory_config.ru.sh +++ b/t/t0303-rails3-alt-working_directory_config.ru.sh @@ -1,9 +1,4 @@ #!/bin/sh -if test -n "$RBX_SKIP" -then - echo "$0 is broken under Rubinius for now" - exit 0 -fi . ./test-rails3.sh t_plan 5 "Rails 3 (beta) inside alt working_directory (w/ config.ru)" diff --git a/t/t9000-preread-input.sh b/t/t9000-preread-input.sh new file mode 100755 index 0000000..b9da05e --- /dev/null +++ b/t/t9000-preread-input.sh @@ -0,0 +1,48 @@ +#!/bin/sh +. ./test-lib.sh +t_plan 9 "PrereadInput middleware tests" + +t_begin "setup and start" && { + random_blob_sha1=$(rsha1 < random_blob) + unicorn_setup + unicorn -D -c $unicorn_config preread_input.ru + unicorn_wait_start +} + +t_begin "single identity request" && { + curl -sSf -T random_blob http://$listen/ > $tmp +} + +t_begin "sha1 matches" && { + test x"$(cat $tmp)" = x"$random_blob_sha1" +} + +t_begin "single chunked request" && { + curl -sSf -T- < random_blob http://$listen/ > $tmp +} + +t_begin "sha1 matches" && { + test x"$(cat $tmp)" = x"$random_blob_sha1" +} + +t_begin "app only dispatched twice" && { + test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )" +} + +t_begin "aborted chunked request" && { + rm -f $tmp + curl -sSf -T- < $fifo http://$listen/ > $tmp & + curl_pid=$! + kill -9 $curl_pid + wait +} + +t_begin "app only dispatched twice" && { + test 2 -eq "$(grep 'app dispatch:' < $r_err | wc -l )" +} + +t_begin "killing succeeds" && { + kill -QUIT $unicorn_pid +} + +t_done diff --git a/t/test-rails3.sh b/t/test-rails3.sh index beeccf5..907ef0d 100644 --- a/t/test-rails3.sh +++ b/t/test-rails3.sh @@ -1,5 +1,5 @@ . 
./test-lib.sh -RAILS_VERSION=${RAILS_VERSION-3.0.0.beta4} +RAILS_VERSION=${RAILS_VERSION-3.0.0} case $RUBY_VERSION in 1.8.7|1.9.2) ;; *) @@ -13,7 +13,7 @@ rails_gems=../tmp/isolate/rails-$RAILS_VERSION/gems rails_bin="$rails_gems/rails-$RAILS_VERSION/bin/rails" if ! test -d "$arch_gems" || ! test -d "$rails_gems" || ! test -x "$rails_bin" then - ( cd ../ && $RAKE isolate ) + ( cd ../ && ./script/isolate_for_tests ) fi for i in $arch_gems/*-* $rails_gems/*-* diff --git a/test/exec/test_exec.rb b/test/exec/test_exec.rb index 1d24ca3..581d5d5 100644 --- a/test/exec/test_exec.rb +++ b/test/exec/test_exec.rb @@ -614,7 +614,7 @@ EOF results = retry_hit(["http://#{@addr}:#{@port}/"]) assert_equal String, results[0].class assert_shutdown(pid) - end unless ENV['RBX_SKIP'] + end def test_config_ru_alt_path config_path = "#{@tmpdir}/foo.ru" diff --git a/test/test_helper.rb b/test/test_helper.rb index f0da9c1..c4e56a2 100644 --- a/test/test_helper.rb +++ b/test/test_helper.rb @@ -32,7 +32,6 @@ require 'tempfile' require 'fileutils' require 'logger' require 'unicorn' -require 'unicorn_http' if ENV['DEBUG'] require 'ruby-debug' diff --git a/test/unit/test_http_parser_ng.rb b/test/unit/test_http_parser_ng.rb index cb30f32..65b843e 100644 --- a/test/unit/test_http_parser_ng.rb +++ b/test/unit/test_http_parser_ng.rb @@ -388,6 +388,7 @@ class HttpParserNgTest < Test::Unit::TestCase "*" => { qs => "", pi => "" }, }.each do |uri,expect| assert_equal req, @parser.headers(req.clear, str % [ uri ]) + req = req.dup @parser.reset assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch" assert_equal expect[qs], req[qs], "#{qs} mismatch" @@ -412,6 +413,7 @@ class HttpParserNgTest < Test::Unit::TestCase "/1?a=b;c=d&e=f" => { qs => "a=b;c=d&e=f", pi => "/1" }, }.each do |uri,expect| assert_equal req, @parser.headers(req.clear, str % [ uri ]) + req = req.dup @parser.reset assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch" assert_equal "example.com", req["HTTP_HOST"], "Host: mismatch" @@ -440,6 +442,17 @@ class HttpParserNgTest < Test::Unit::TestCase end end + def test_backtrace_is_empty + begin + @parser.headers({}, "AAADFSFDSFD\r\n\r\n") + assert false, "should never get here line:#{__LINE__}" + rescue HttpParserError => e + assert_equal [], e.backtrace + return + end + assert false, "should never get here line:#{__LINE__}" + end + def test_ignore_version_header http = "GET / HTTP/1.1\r\nVersion: hello\r\n\r\n" req = {} @@ -460,4 +473,29 @@ class HttpParserNgTest < Test::Unit::TestCase assert_equal expect, req end + def test_pipelined_requests + expect = { + "HTTP_HOST" => "example.com", + "SERVER_NAME" => "example.com", + "REQUEST_PATH" => "/", + "rack.url_scheme" => "http", + "SERVER_PROTOCOL" => "HTTP/1.1", + "PATH_INFO" => "/", + "HTTP_VERSION" => "HTTP/1.1", + "REQUEST_URI" => "/", + "SERVER_PORT" => "80", + "REQUEST_METHOD" => "GET", + "QUERY_STRING" => "" + } + str = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n" + @parser.buf << (str * 2) + env1 = @parser.parse.dup + assert_equal expect, env1 + assert_equal str, @parser.buf + assert @parser.keepalive? 
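+    # env1 came from the first request; the second request's bytes are
+    # still sitting in @parser.buf until we reset and parse again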
+ @parser.reset + env2 = @parser.parse.dup + assert_equal expect, env2 + assert_equal "", @parser.buf + end end diff --git a/test/unit/test_request.rb b/test/unit/test_request.rb index 1896300..67ac1b9 100644 --- a/test/unit/test_request.rb +++ b/test/unit/test_request.rb @@ -11,7 +11,11 @@ class RequestTest < Test::Unit::TestCase class MockRequest < StringIO alias_method :readpartial, :sysread + alias_method :kgio_read!, :sysread alias_method :read_nonblock, :sysread + def kgio_addr + '127.0.0.1' + end end def setup @@ -159,6 +163,14 @@ class RequestTest < Test::Unit::TestCase buf = (' ' * bs).freeze length = bs * count client = Tempfile.new('big_put') + def client.kgio_addr; '127.0.0.1'; end + def client.kgio_read(*args) + readpartial(*args) + rescue EOFError + end + def client.kgio_read!(*args) + readpartial(*args) + end client.syswrite( "PUT / HTTP/1.1\r\n" \ "Host: foo\r\n" \ diff --git a/test/unit/test_response.rb b/test/unit/test_response.rb index f9eda8e..e5245e8 100644 --- a/test/unit/test_response.rb +++ b/test/unit/test_response.rb @@ -11,10 +11,11 @@ require 'test/test_helper' include Unicorn class ResponseTest < Test::Unit::TestCase - + include Unicorn::HttpResponse + def test_response_headers out = StringIO.new - HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]]) + http_response_write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]]) assert out.closed? assert out.length > 0, "output didn't have data" @@ -22,7 +23,7 @@ class ResponseTest < Test::Unit::TestCase def test_response_string_status out = StringIO.new - HttpResponse.write(out,['200', {}, []]) + http_response_write(out,['200', {}, []]) assert out.closed? assert out.length > 0, "output didn't have data" assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/).size @@ -32,7 +33,7 @@ class ResponseTest < Test::Unit::TestCase old_ofs = $, $, = "\f\v" out = StringIO.new - HttpResponse.write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]]) + http_response_write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]]) assert out.closed? resp = out.string assert ! resp.include?("\f\v"), "output didn't use $, ($OFS)" @@ -42,7 +43,7 @@ class ResponseTest < Test::Unit::TestCase def test_response_200 io = StringIO.new - HttpResponse.write(io, [200, {}, []]) + http_response_write(io, [200, {}, []]) assert io.closed? assert io.length > 0, "output didn't have data" end @@ -50,7 +51,7 @@ class ResponseTest < Test::Unit::TestCase def test_response_with_default_reason code = 400 io = StringIO.new - HttpResponse.write(io, [code, {}, []]) + http_response_write(io, [code, {}, []]) assert io.closed? lines = io.string.split(/\r\n/) assert_match(/.* Bad Request$/, lines.first, @@ -59,7 +60,7 @@ class ResponseTest < Test::Unit::TestCase def test_rack_multivalue_headers out = StringIO.new - HttpResponse.write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []]) + http_response_write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []]) assert out.closed? assert_match(/^X-Whatever: stuff\r\nX-Whatever: bleh\r\n/, out.string) end @@ -68,7 +69,7 @@ class ResponseTest < Test::Unit::TestCase # some broken clients still rely on it def test_status_header_added out = StringIO.new - HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, []]) + http_response_write(out,[200, {"X-Whatever" => "stuff"}, []]) assert out.closed? 
diff --git a/test/unit/test_response.rb b/test/unit/test_response.rb
index f9eda8e..e5245e8 100644
--- a/test/unit/test_response.rb
+++ b/test/unit/test_response.rb
@@ -11,10 +11,11 @@ require 'test/test_helper'
 include Unicorn
 
 class ResponseTest < Test::Unit::TestCase
-
+  include Unicorn::HttpResponse
+
   def test_response_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]])
+    http_response_write(out,[200, {"X-Whatever" => "stuff"}, ["cool"]])
     assert out.closed?
 
     assert out.length > 0, "output didn't have data"
@@ -22,7 +23,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_response_string_status
     out = StringIO.new
-    HttpResponse.write(out,['200', {}, []])
+    http_response_write(out,['200', {}, []])
     assert out.closed?
     assert out.length > 0, "output didn't have data"
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/).size
@@ -32,7 +33,7 @@ class ResponseTest < Test::Unit::TestCase
     old_ofs = $,
     $, = "\f\v"
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]])
+    http_response_write(out,[200, {"X-k" => "cd","X-y" => "z"}, ["cool"]])
     assert out.closed?
     resp = out.string
     assert ! resp.include?("\f\v"), "output didn't use $, ($OFS)"
@@ -42,7 +43,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_response_200
     io = StringIO.new
-    HttpResponse.write(io, [200, {}, []])
+    http_response_write(io, [200, {}, []])
     assert io.closed?
     assert io.length > 0, "output didn't have data"
   end
@@ -50,7 +51,7 @@ class ResponseTest < Test::Unit::TestCase
   def test_response_with_default_reason
     code = 400
     io = StringIO.new
-    HttpResponse.write(io, [code, {}, []])
+    http_response_write(io, [code, {}, []])
     assert io.closed?
     lines = io.string.split(/\r\n/)
     assert_match(/.* Bad Request$/, lines.first,
@@ -59,7 +60,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_rack_multivalue_headers
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []])
+    http_response_write(out,[200, {"X-Whatever" => "stuff\nbleh"}, []])
     assert out.closed?
     assert_match(/^X-Whatever: stuff\r\nX-Whatever: bleh\r\n/, out.string)
   end
@@ -68,7 +69,7 @@ class ResponseTest < Test::Unit::TestCase
   # some broken clients still rely on it
   def test_status_header_added
     out = StringIO.new
-    HttpResponse.write(out,[200, {"X-Whatever" => "stuff"}, []])
+    http_response_write(out,[200, {"X-Whatever" => "stuff"}, []])
     assert out.closed?
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
   end
@@ -79,7 +80,7 @@ class ResponseTest < Test::Unit::TestCase
   def test_status_header_ignores_app_hash
     out = StringIO.new
     header_hash = {"X-Whatever" => "stuff", 'StaTus' => "666" }
-    HttpResponse.write(out,[200, header_hash, []])
+    http_response_write(out,[200, header_hash, []])
     assert out.closed?
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status: 200 OK/i).size
     assert_equal 1, out.string.split(/\r\n/).grep(/^Status:/i).size
@@ -90,7 +91,7 @@ class ResponseTest < Test::Unit::TestCase
     body = StringIO.new(expect_body)
     body.rewind
     out = StringIO.new
-    HttpResponse.write(out,[200, {}, body])
+    http_response_write(out,[200, {}, body])
     assert out.closed?
     assert body.closed?
     assert_match(expect_body, out.string.split(/\r\n/).last)
@@ -98,7 +99,7 @@ class ResponseTest < Test::Unit::TestCase
 
   def test_unknown_status_pass_through
     out = StringIO.new
-    HttpResponse.write(out,["666 I AM THE BEAST", {}, [] ])
+    http_response_write(out,["666 I AM THE BEAST", {}, [] ])
     assert out.closed?
     headers = out.string.split(/\r\n\r\n/).first.split(/\r\n/)
     assert %r{\AHTTP/\d\.\d 666 I AM THE BEAST\z}.match(headers[0])
diff --git a/test/unit/test_signals.rb b/test/unit/test_signals.rb
index 7c78b44..71cf8f4 100644
--- a/test/unit/test_signals.rb
+++ b/test/unit/test_signals.rb
@@ -166,7 +166,7 @@ class SignalsTest < Test::Unit::TestCase
     expect = @bs * @count
     assert_equal(expect, got, "expect=#{expect} got=#{got}")
     assert_nothing_raised { sock.close }
-  end unless ENV['RBX_SKIP']
+  end
 
   def test_request_read
     app = lambda { |env|
diff --git a/test/unit/test_socket_helper.rb b/test/unit/test_socket_helper.rb
index 36b2dc2..c6d0d42 100644
--- a/test/unit/test_socket_helper.rb
+++ b/test/unit/test_socket_helper.rb
@@ -101,7 +101,14 @@ class TestSocketHelper < Test::Unit::TestCase
 
   def test_bind_listen_unix_rebind
     test_bind_listen_unix
-    new_listener = bind_listen(@unix_listener_path)
+    new_listener = nil
+    assert_raises(Errno::EADDRINUSE) do
+      new_listener = bind_listen(@unix_listener_path)
+    end
+    assert_nothing_raised do
+      File.unlink(@unix_listener_path)
+      new_listener = bind_listen(@unix_listener_path)
+    end
     assert UNIXServer === new_listener
     assert new_listener.fileno != @unix_listener.fileno
     assert_equal sock_name(new_listener), sock_name(@unix_listener)
@@ -146,4 +153,28 @@ class TestSocketHelper < Test::Unit::TestCase
     sock_name(@unix_server)
   end
 
+  def test_tcp_defer_accept_default
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert cur >= 1
+  end if defined?(TCP_DEFER_ACCEPT)
+
+  def test_tcp_defer_accept_disable
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name, :tcp_defer_accept => false)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert_equal 0, cur
+  end if defined?(TCP_DEFER_ACCEPT)
+
+  def test_tcp_defer_accept_nr
+    port = unused_port @test_addr
+    name = "#@test_addr:#{port}"
+    sock = bind_listen(name, :tcp_defer_accept => 60)
+    cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0]
+    assert cur > 1
+  end if defined?(TCP_DEFER_ACCEPT)
+
 end
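
For reference, TCP_DEFER_ACCEPT is a plain integer socket option on Linux,
which is why the new tests can read it back via getsockopt and unpack('i').
The kernel also rounds the supplied timeout internally, so a value set to 60
may read back as a different number, which would explain the loose
"assert cur > 1" rather than an equality check. A rough standalone
illustration, assuming a Ruby whose socket extension exposes
Socket::TCP_DEFER_ACCEPT:

    require 'socket'

    if defined?(Socket::TCP_DEFER_ACCEPT)
      srv = TCPServer.new('127.0.0.1', 0)
      # wake accept(2) only once data arrives, or after roughly 60s
      srv.setsockopt(Socket::SOL_TCP, Socket::TCP_DEFER_ACCEPT, 60)
      cur = srv.getsockopt(Socket::SOL_TCP,
                           Socket::TCP_DEFER_ACCEPT).unpack('i')[0]
      puts "TCP_DEFER_ACCEPT reads back as #{cur}"
    end
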
diff --git a/test/unit/test_tee_input.rb b/test/unit/test_tee_input.rb
index a127882..a10ca34 100644
--- a/test/unit/test_tee_input.rb
+++ b/test/unit/test_tee_input.rb
@@ -9,7 +9,7 @@ class TestTeeInput < Test::Unit::TestCase
   def setup
     @rs = $/
     @env = {}
-    @rd, @wr = IO.pipe
+    @rd, @wr = Kgio::UNIXSocket.pair
     @rd.sync = @wr.sync = true
     @start_pid = $$
   end
@@ -27,8 +27,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_long
-    init_parser("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
+    ti = Unicorn::TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -48,8 +48,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_gets_short
-    init_parser("hello", 5 + "#$/foo".size)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request("hello", 5 + "#$/foo".size)
+    ti = Unicorn::TeeInput.new(@rd, r)
     status = line = nil
     pid = fork {
       @rd.close
@@ -67,8 +67,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_small_body
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_equal StringIO, ti.tmp.class
@@ -80,8 +80,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_read_with_buffer
-    init_parser('hello')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('hello')
+    ti = Unicorn::TeeInput.new(@rd, r)
     buf = ''
     rv = ti.read(4, buf)
     assert_equal 'hell', rv
@@ -95,8 +95,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body
-    init_parser('.' * Unicorn::Const::MAX_BODY << 'a')
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.' * Unicorn::Const::MAX_BODY << 'a')
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal 0, @parser.content_length
     assert @parser.body_eof?
     assert_kind_of File, ti.tmp
@@ -106,9 +106,9 @@ class TestTeeInput < Test::Unit::TestCase
 
   def test_read_in_full_if_content_length
     a, b = 300, 3
-    init_parser('.' * b, 300)
+    r = init_request('.' * b, 300)
     assert_equal 300, @parser.content_length
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, r)
     pid = fork {
       @wr.write('.' * 197)
       sleep 1 # still a *potential* race here that would make the test moot...
@@ -121,8 +121,8 @@ class TestTeeInput < Test::Unit::TestCase
   end
 
   def test_big_body_multi
-    init_parser('.', Unicorn::Const::MAX_BODY + 1)
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    r = init_request('.', Unicorn::Const::MAX_BODY + 1)
+    ti = Unicorn::TeeInput.new(@rd, r)
     assert_equal Unicorn::Const::MAX_BODY, @parser.content_length
     assert ! @parser.body_eof?
     assert_kind_of File, ti.tmp
@@ -163,7 +163,7 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("0\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -201,7 +201,7 @@ class TestTeeInput < Test::Unit::TestCase
       end
       @wr.write("0\r\n\r\n")
     }
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
    assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -230,7 +230,7 @@ class TestTeeInput < Test::Unit::TestCase
       @wr.write("Hello: World\r\n\r\n")
     }
     @wr.close
-    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    ti = Unicorn::TeeInput.new(@rd, @parser)
     assert_nil @parser.content_length
     assert_nil ti.len
     assert ! @parser.body_eof?
@@ -243,7 +243,7 @@ class TestTeeInput < Test::Unit::TestCase
 
 private
 
-  def init_parser(body, size = nil)
+  def init_request(body, size = nil)
     @parser = Unicorn::HttpParser.new
     body = body.to_s.freeze
     @buf = "POST / HTTP/1.1\r\n" \
@@ -252,6 +252,7 @@ private
       "\r\n#{body}"
     assert_equal @env, @parser.headers(@env, @buf)
    assert_equal body, @buf
+    @parser
   end
 
 end
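
The setup change from IO.pipe to Kgio::UNIXSocket.pair is what lets
TeeInput's new kgio-based reads work against the test descriptors: a plain
pipe lacks the kgio_* methods, while a kgio socketpair provides them
natively. A minimal sketch of the difference:

    require 'kgio'

    rd, wr = Kgio::UNIXSocket.pair
    wr.write "hello"
    wr.close
    rd.kgio_read(5)  # => "hello"
    rd.kgio_read(5)  # => nil at EOF (kgio_read! would raise EOFError)

    rd2, wr2 = IO.pipe
    rd2.respond_to?(:kgio_read)  # => false; hence the switch in setup

The other visible shift is TeeInput's constructor going from
(socket, env, parser, buf) to (socket, request), with init_request returning
the parser itself to serve as the request object.
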
diff --git a/unicorn.gemspec b/unicorn.gemspec
index 973ca09..fd8f0e6 100644
--- a/unicorn.gemspec
+++ b/unicorn.gemspec
@@ -48,6 +48,7 @@ Gem::Specification.new do |s|
   # commented out.  Nevertheless, upgrading to Rails 2.3.4 or later is
   # *strongly* recommended for security reasons.
   s.add_dependency(%q<rack>)
+  s.add_dependency(%q<kgio>, '~> 1.3.1')
 
   s.add_development_dependency('isolate', '~> 3.0.0')
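
The new kgio dependency uses a pessimistic version constraint: '~> 1.3.1'
permits any 1.3.x release at or above 1.3.1, but not 1.4.0. A quick check
with RubyGems (the kgio version numbers here are only for illustration):

    require 'rubygems'

    dep = Gem::Dependency.new('kgio', '~> 1.3.1')
    dep.match?('kgio', '1.3.2')  # => true
    dep.match?('kgio', '1.4.0')  # => false
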