rainbows.git
Unicorn for sleepy apps and slow clients
blob 602a02a2aa33ec484e8f4e11ee564d0764f506d2 8230 bytes (raw)
$ git show v0.3.0:lib/rainbows/rev.rb	# shows this blob on the CLI

# -*- encoding: binary -*-
require 'rev'

# workaround revactor 0.1.4 still using the old Rev::Buffer
# ref: http://rubyforge.org/pipermail/revactor-talk/2009-October/000034.html
defined?(Rev::Buffer) or Rev::Buffer = IO::Buffer

module Rainbows

  # Implements a basic single-threaded event model with
  # {Rev}[http://rev.rubyforge.org/].  It is capable of handling
  # thousands of simultaneous client connections, but with only
  # single-threaded app dispatch.  It is suited for slow clients and
  # fast applications (applications that do not have slow network
  # dependencies) or applications that use DevFdResponse for deferrable
  # response bodies.  It does not require your Rack application to be
  # thread-safe; reentrancy is only required for the DevFdResponse body
  # generator.
  #
  # Compatibility: Whatever \Rev itself supports, currently Ruby
  # 1.8/1.9.
  #
  # This model does not implement a streaming "rack.input", which
  # would allow the Rack application to process data as it arrives.  This
  # means "rack.input" will be fully buffered in memory or to a
  # temporary file before the application is entered.
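  #
  # A minimal configuration sketch for selecting this model from a
  # Rainbows! config file; the :Rev symbol and the worker_connections
  # knob follow the Rainbows configuration DSL and are shown here only
  # as an illustration; they are not defined in this file:
  #
  #   Rainbows! do
  #     use :Rev
  #     worker_connections 400 # upper bound on simultaneous clients
  #   end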

  module Rev

    include Base

    class Client < ::Rev::IO
      include Unicorn
      include Rainbows::Const
      G = Rainbows::G

      # Queued, optional response bodies; these should only be unpollable
      # "fast" devices where read(2) is uninterruptible.  Unfortunately, NFS
      # and the like are also part of this.  We'll also stick DeferredResponse
      # bodies in here to prevent connections from being closed on us.
      attr_reader :deferred_bodies

      def initialize(io)
        G.cur += 1
        super(io)
        @remote_addr = ::TCPSocket === io ? io.peeraddr.last : LOCALHOST
        @env = {}
        @hp = HttpParser.new
        @state = :headers # [ :body [ :trailers ] ] :app_call :close
        @buf = ""
        @deferred_bodies = [] # for (fast) regular files only
      end

      # graceful exit, like SIGQUIT
      def quit
        @deferred_bodies.clear
        @state = :close
      end

      def handle_error(e)
        quit
        msg = case e
        when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
          ERROR_500_RESPONSE
        when HttpParserError # try to tell the client they're bad
          ERROR_400_RESPONSE
        else
          G.logger.error "Read error: #{e.inspect}"
          G.logger.error e.backtrace.join("\n")
          ERROR_500_RESPONSE
        end
        write(msg)
      end

      def app_call
        begin
          (@env[RACK_INPUT] = @input).rewind
          alive = @hp.keepalive?
          @env[REMOTE_ADDR] = @remote_addr
          response = G.app.call(@env.update(RACK_DEFAULTS))
          alive &&= G.alive
          out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if @hp.headers?

          DeferredResponse.write(self, response, out)
          if alive
            @env.clear
            @hp.reset
            @state = :headers
            # keepalive requests are always body-less, so @input is unchanged
            @hp.headers(@env, @buf) and next
          else
            @state = :close
          end
          return
        end while true
      end

      def on_write_complete
        if body = @deferred_bodies.first
          return if DeferredResponse === body
          begin
            begin
              write(body.sysread(CHUNK_SIZE))
            rescue EOFError # expected at file EOF
              @deferred_bodies.shift
              body.close
            end
          rescue Object => e
            handle_error(e)
          end
        else
          close if :close == @state
        end
      end

      def on_close
        G.cur -= 1
      end

      def tmpio
        io = Util.tmpio
        def io.size
          # already sync=true at creation, so no need to flush before stat
          stat.size
        end
        io
      end

      # TeeInput doesn't map too well to this right now...
      def on_read(data)
        case @state
        when :headers
          @hp.headers(@env, @buf << data) or return
          @state = :body
          len = @hp.content_length
          if len == 0
            @input = HttpRequest::NULL_IO
            app_call # common case
          else # nil or len > 0
            # since we don't do streaming input, we have no choice but
            # to take over 100-continue handling from the Rack application
            if @env[HTTP_EXPECT] =~ /\A100-continue\z/i
              write(EXPECT_100_RESPONSE)
              @env.delete(HTTP_EXPECT)
            end
            @input = len && len <= MAX_BODY ? StringIO.new("") : tmpio
            @hp.filter_body(@buf2 = @buf.dup, @buf)
            @input << @buf2
            on_read("")
          end
        when :body
          if @hp.body_eof?
            @state = :trailers
            on_read(data)
          elsif data.size > 0
            @hp.filter_body(@buf2, @buf << data)
            @input << @buf2
            on_read("")
          end
        when :trailers
          @hp.trailers(@env, @buf << data) and app_call
        end
      rescue Object => e
        handle_error(e)
      end
    end

    class Server < ::Rev::IO
      G = Rainbows::G

      def on_readable
        return if G.cur >= G.max
        begin
          Client.new(@_io.accept_nonblock).attach(::Rev::Loop.default)
        rescue Errno::EAGAIN, Errno::ECONNABORTED
        end
      end

    end

    class DeferredResponse < ::Rev::IO
      include Unicorn
      include Rainbows::Const
      G = Rainbows::G

      def self.defer!(client, response, out)
        body = response.last
        headers = Rack::Utils::HeaderHash.new(response[1])

        # to_io is not part of the Rack spec, but make an exception
        # here since we can't get here without checking to_path first
        io = body.to_io if body.respond_to?(:to_io)
        io ||= ::IO.new($1.to_i) if body.to_path =~ %r{\A/dev/fd/(\d+)\z}
        io ||= File.open(File.expand_path(body.to_path), 'rb')
        st = io.stat

        if st.socket? || st.pipe?
          do_chunk = !!(headers['Transfer-Encoding'] =~ %r{\Achunked\z}i)
          do_chunk = false if headers.delete('X-Rainbows-Autochunk') == 'no'
          # too tricky to support keepalive/pipelining when a response can
          # take an indeterminate amount of time here.
          out[0] = CONN_CLOSE

          io = new(io, client, do_chunk, body).attach(::Rev::Loop.default)
        elsif st.file?
          headers.delete('Transfer-Encoding')
          headers['Content-Length'] ||= st.size.to_s
        else # char/block device, directory, whatever... nobody cares
          return response
        end
        client.deferred_bodies << io
        [ response.first, headers.to_hash, [] ]
      end

      def self.write(client, response, out)
        response.last.respond_to?(:to_path) and
          response = defer!(client, response, out)
        HttpResponse.write(client, response, out)
      end

      def initialize(io, client, do_chunk, body)
        super(io)
        @client, @do_chunk, @body = client, do_chunk, body
      end

      def on_read(data)
        @do_chunk and @client.write(sprintf("%x\r\n", data.size))
        @client.write(data)
        @do_chunk and @client.write("\r\n")
      end

      def on_close
        @do_chunk and @client.write("0\r\n\r\n")
        @client.quit
        @body.respond_to?(:close) and @body.close
      end
    end

    # This timer handles the fchmod heartbeat to prevent our master
    # from killing us.
    class Heartbeat < ::Rev::TimerWatcher
      G = Rainbows::G

      def initialize(tmp)
        @m, @tmp = 0, tmp
        super(1, true)
      end

      def on_timer
        @tmp.chmod(@m = 0 == @m ? 1 : 0)
        exit if (! G.alive && G.cur <= 0)
      end
    end

    # Runs inside each forked worker; this sits around and waits
    # for connections and doesn't die until the parent dies (or is
    # given an INT, QUIT, or TERM signal)
    def worker_loop(worker)
      init_worker_process(worker)
      rloop = ::Rev::Loop.default
      Heartbeat.new(worker.tmp).attach(rloop)
      LISTENERS.map! { |s| Server.new(s).attach(rloop) }
      rloop.run
    end

  end
end

git clone https://yhbt.net/rainbows.git
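
The to_path check in DeferredResponse.write above lets a Rack application
hand back a file-backed body: defer! sets Content-Length from stat and
queues the opened file on client.deferred_bodies, and on_write_complete
then streams it CHUNK_SIZE bytes at a time.  A hedged config.ru sketch of
such a body follows; the FileBody class and the file path are illustrative
and not part of rainbows:

    # config.ru -- illustrative only
    class FileBody
      def initialize(path)
        @path = path
      end

      # DeferredResponse.write looks for to_path; for a regular file the
      # response body is then streamed by the event loop instead of each
      def to_path
        @path
      end

      # required by the Rack spec, but unused under this model because
      # defer! intercepts the body via to_path
      def each
        File.open(@path, 'rb') do |f|
          while buf = f.read(16384)
            yield buf
          end
        end
      end
    end

    run(lambda do |env|
      [ 200,
        { 'Content-Type' => 'application/octet-stream' },
        FileBody.new('/tmp/example.bin') ] # hypothetical path
    end)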