rainbows.git
Unicorn for sleepy apps and slow clients
blob cf59cbf9624a29a43ef52a3b25530dda4ef5f8d3 7680 bytes
$ git show v0.97.0:lib/rainbows/event_machine.rb	# shows this blob on the CLI

# -*- encoding: binary -*-
require 'eventmachine'
EM::VERSION >= '0.12.10' or abort 'eventmachine 0.12.10 is required'
require 'rainbows/ev_core'

module Rainbows

  # Implements a basic single-threaded event model with
  # {EventMachine}[http://rubyeventmachine.com/].  It is capable of
  # handling thousands of simultaneous client connections, but application
  # dispatch remains single-threaded.  It is suited for slow clients
  # and can work with slow applications via asynchronous libraries such as
  # {async_sinatra}[http://github.com/raggi/async_sinatra],
  # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp],
  # and {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool].
  #
  # It does not require your Rack application to be thread-safe;
  # reentrancy is only required for the DevFdResponse body
  # generator.
  #
  # Compatibility: Whatever \EventMachine ~> 0.12.10 and Unicorn both
  # support, currently Ruby 1.8/1.9.
  #
  # This model is compatible with users of "async.callback" in the Rack
  # environment such as
  # {async_sinatra}[http://github.com/raggi/async_sinatra].
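  #
  # A minimal, hypothetical endpoint using "async.callback" directly
  # (the +DelayedHello+ name is made up for illustration): the response
  # triple is handed back to the server from a timer instead of being
  # returned from +call+:
  #
  #   class DelayedHello
  #     def call(env)
  #       EM.add_timer(1) do
  #         env['async.callback'].call(
  #           [ 200, { 'Content-Type' => 'text/plain' }, [ "delayed\n" ] ])
  #       end
  #       throw :async # tell the server the response will arrive later
  #     end
  #   end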
  #
  # For a complete asynchronous framework,
  # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp] is fully
  # supported when using this concurrency model.
  #
  # This model is fully-compatible with
  # {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool]
  # which allows each request to run inside its own \Fiber after
  # all request processing is complete.
  #
  # Merb (and other frameworks/apps) supporting +deferred?+ execution as
  # documented at http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin
  # will also get the ability to conditionally defer request processing
  # to a separate thread.
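  #
  # A sketch of an application opting in to deferral, assuming the
  # per-request +deferred?(env)+ check described in the article above
  # (the +SometimesSlow+ name is made up for illustration):
  #
  #   class SometimesSlow
  #     # answering +true+ asks the server to run this request in a
  #     # background thread instead of on the reactor thread
  #     def deferred?(env)
  #       env['PATH_INFO'] =~ %r{\A/slow}
  #     end
  #
  #     def call(env)
  #       [ 200, { 'Content-Type' => 'text/plain' }, [ "done\n" ] ]
  #     end
  #   end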
  #
  # This model does not implement a streaming "rack.input", which would
  # allow the Rack application to process data as it arrives.  This means
  # "rack.input" will be fully buffered in memory or to a temporary file
  # before the application is entered.

  module EventMachine

    include Base
    autoload :ResponsePipe, 'rainbows/event_machine/response_pipe'
    autoload :ResponseChunkPipe, 'rainbows/event_machine/response_chunk_pipe'
    autoload :TryDefer, 'rainbows/event_machine/try_defer'

    class Client < EM::Connection # :nodoc: all
      attr_writer :body
      include Rainbows::EvCore
      G = Rainbows::G

      def initialize(io)
        @_io = io
        @body = nil
      end

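      # the response code in this class calls +write+; for an
      # EM::Connection that means send_data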
      alias write send_data

      def receive_data(data)
        # To avoid clobbering the current streaming response
        # (often a static file), we do not attempt to process another
        # request on the same connection until the first is complete
        if @body
          @buf << data
          @_io.shutdown(Socket::SHUT_RD) if @buf.size > 0x1c000
          return EM.next_tick { receive_data('') }
        else
          on_read(data)
        end
      end

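      # finish this connection: run the shared EvCore shutdown, then ask
      # EM to close the socket once all queued writes are flushed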
      def quit
        super
        close_connection_after_writing
      end

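      # dispatch the fully-buffered request to the Rack application,
      # write the response, and decide whether this connection may be
      # kept alive for another request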
      def app_call
        set_comm_inactivity_timeout 0
        @env[RACK_INPUT] = @input
        @env[REMOTE_ADDR] = @remote_addr
        @env[ASYNC_CALLBACK] = method(:em_write_response)
        @env[ASYNC_CLOSE] = EM::DefaultDeferrable.new

        response = catch(:async) { APP.call(@env.update(RACK_DEFAULTS)) }

        # too tricky to support pipelining with :async since the
        # second (pipelined) request could get stuck behind a
        # long-running async response
        (response.nil? || -1 == response[0]) and return @state = :close

        alive = @hp.keepalive? && G.alive && G.kato > 0
        em_write_response(response, alive)
        if alive
          @env.clear
          @hp.reset
          @state = :headers
          if @buf.empty?
            set_comm_inactivity_timeout(G.kato)
          else
            EM.next_tick { receive_data('') }
          end
        end
      end

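      # writes +response+ (a Rack [status, headers, body] triple) to the
      # client.  Deferrable bodies are streamed asynchronously, regular
      # file bodies go through EM's stream_file_data, and pipe/socket
      # bodies are proxied via ResponsePipe/ResponseChunkPipe; anything
      # else falls back to a plain body.each write loop.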
      def em_write_response(response, alive = false)
        status, headers, body = response
        if @hp.headers?
          headers = HH.new(headers)
          headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
        else
          headers = nil
        end

        if body.respond_to?(:errback) && body.respond_to?(:callback)
          @body = body
          body.callback { quit }
          body.errback { quit }
          # async response, this could be a trickle as is in comet-style apps
          headers[CONNECTION] = CLOSE if headers
          alive = true
        elsif body.respond_to?(:to_path)
          st = File.stat(path = body.to_path)

          if st.file?
            write(response_header(status, headers)) if headers
            @body = stream_file_data(path)
            @body.errback do
              body.close if body.respond_to?(:close)
              quit
            end
            @body.callback do
              body.close if body.respond_to?(:close)
              @body = nil
              alive ? receive_data('') : quit
            end
            return
          elsif st.socket? || st.pipe?
            @body = io = body_to_io(body)
            chunk = stream_response_headers(status, headers) if headers
            m = chunk ? ResponseChunkPipe : ResponsePipe
            return EM.watch(io, m, self, alive, body).notify_readable = true
          end
          # char or block device... WTF? fall through to body.each
        end

        write(response_header(status, headers)) if headers
        write_body_each(self, body)
        quit unless alive
      end

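      # called by EventMachine when the connection is torn down: fire the
      # "async.close" deferrable, fail any streaming body still in
      # flight, and make sure the underlying IO object really is closed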
      def unbind
        async_close = @env[ASYNC_CLOSE] and async_close.succeed
        @body.respond_to?(:fail) and @body.fail
        begin
          @_io.close
        rescue Errno::EBADF
          # EventMachine's EventableDescriptor::Close() may close
          # the underlying file descriptor without invalidating the
          # associated IO object on errors, so @_io.closed? isn't
          # sufficient.
        end
      end
    end

    module Server # :nodoc: all
      include Rainbows::Acceptor

      def close
        detach
        @io.close
      end

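      # a listener socket became readable: accept a new client and
      # register it with the reactor unless we are already at the
      # connection limit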
      def notify_readable
        return if CUR.size >= MAX
        io = accept(@io) or return
        sig = EM.attach_fd(io.fileno, false)
        CUR[sig] = CL.new(sig, io)
      end
    end

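    # wire the Rainbows::Response helpers into our Client class before
    # running the normal Rainbows! worker setup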
    def init_worker_process(worker) # :nodoc:
      Rainbows::Response.setup(Rainbows::EventMachine::Client)
      super
    end

    # runs inside each forked worker; this sits around and waits
    # for connections and doesn't die until the parent dies (or is
    # given an INT, QUIT, or TERM signal)
    def worker_loop(worker) # :nodoc:
      init_worker_process(worker)
      G.server.app.respond_to?(:deferred?) and
        G.server.app = TryDefer[G.server.app]

      # enable both epoll and kqueue; this should be non-fatal if
      # either is unsupported
      EM.epoll
      EM.kqueue
      logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
      client_class = Rainbows.const_get(@use).const_get(:Client)
      Server.const_set(:MAX, worker_connections + LISTENERS.size)
      Server.const_set(:CL, client_class)
      client_class.const_set(:APP, G.server.app)
      EM.run {
        conns = EM.instance_variable_get(:@conns) or
          raise RuntimeError, "EM @conns instance variable not accessible!"
        Server.const_set(:CUR, conns)
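        # heartbeat once a second; when the server is shutting down, ask
        # every client to finish up and stop the reactor once the last
        # connection is gone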
        EM.add_periodic_timer(1) do
          unless G.tick
            conns.each_value { |c| client_class === c and c.quit }
            EM.stop if conns.empty? && EM.reactor_running?
          end
        end
        LISTENERS.map! do |s|
          EM.watch(s, Server) { |c| c.notify_readable = true }
        end
      }
    end

  end
end

git clone https://yhbt.net/rainbows.git