From 5531ceb42a993ef8a68ed557fc77d052b89bccfb Mon Sep 17 00:00:00 2001
From: Eric Wong
Date: Mon, 19 Oct 2009 20:46:34 -0700
Subject: rev: fix static file responses under HTTP/0.9

Since HTTP/0.9 responses have no headers to write, the
on_write_complete handler we rely on never got triggered, leading to
additional reads never getting queued up. Additionally, we need to
explicitly detect and close client sockets once we've written the
last response body, since HTTP/0.9 clients never know when it's time
to close a connection.
---
 lib/rainbows/rev.rb          |   24 ++++++++++++++++--------
 t/lib-large-file-response.sh |   19 +++++++++++++++++--
 2 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/lib/rainbows/rev.rb b/lib/rainbows/rev.rb
index c5a8b1e..257c686 100644
--- a/lib/rainbows/rev.rb
+++ b/lib/rainbows/rev.rb
@@ -30,12 +30,6 @@ module Rainbows
       include Rainbows::Const
       G = Rainbows::G
 
-      # queued, optional response bodies, it should only be unpollable "fast"
-      # devices where read(2) is uninterruptable. Unfortunately, NFS and ilk
-      # are also part of this. We'll also stick DeferredResponse bodies in
-      # here to prevent connections from being closed on us.
-      attr_reader :deferred_bodies
-
       def initialize(io)
         G.cur += 1
         super(io)
@@ -53,6 +47,15 @@ module Rainbows
         @state = :close
       end
 
+      # queued, optional response bodies, it should only be unpollable "fast"
+      # devices where read(2) is uninterruptable. Unfortunately, NFS and ilk
+      # are also part of this. We'll also stick DeferredResponse bodies in
+      # here to prevent connections from being closed on us.
+      def defer_body(io)
+        @deferred_bodies << io
+        on_write_complete unless @hp.headers? # triggers a write
+      end
+
       def handle_error(e)
         quit
         msg = case e
@@ -100,6 +103,7 @@ module Rainbows
           rescue EOFError # expected at file EOF
             @deferred_bodies.shift
             body.close
+            close if :close == @state && @deferred_bodies.empty?
           end
         rescue Object => e
           handle_error(e)
@@ -195,7 +199,11 @@ module Rainbows
           do_chunk = false if headers.delete('X-Rainbows-Autochunk') == 'no'
           # too tricky to support keepalive/pipelining when a response can
           # take an indeterminate amount of time here.
-          out[0] = CONN_CLOSE
+          if out.nil?
+            do_chunk = false
+          else
+            out[0] = CONN_CLOSE
+          end
           io = new(io, client, do_chunk, body).attach(::Rev::Loop.default)
         elsif st.file?
@@ -204,7 +212,7 @@ module Rainbows
         else # char/block device, directory, whatever... nobody cares
          return response
         end
-        client.deferred_bodies << io
+        client.defer_body(io)
         [ response.first, headers.to_hash, [] ]
       end
 
diff --git a/t/lib-large-file-response.sh b/t/lib-large-file-response.sh
index 830812a..9866982 100644
--- a/t/lib-large-file-response.sh
+++ b/t/lib-large-file-response.sh
@@ -7,7 +7,7 @@ then
 fi
 echo "large file response slurp avoidance for model=$model"
 eval $(unused_listen)
-rtmpfiles unicorn_config tmp r_err r_out pid ok
+rtmpfiles unicorn_config tmp r_err r_out pid ok fifo
 
 cat > $unicorn_config <<EOF
[...]
-	size=$( (curl -sSfv http://$listen/random_blob && echo ok > $ok) | wc -c)
+	size=$( (curl -sSfv http://$listen/random_blob && echo ok >$ok) |wc -c)
 	test $size -eq $random_blob_size
 	test xok = x$(cat $ok)
 done
+echo "HTTP/1.0 test" # this was a problem during development
+size=$( (curl -0 -sSfv http://$listen/random_blob && echo ok >$ok) |wc -c)
+test $size -eq $random_blob_size
+test xok = x$(cat $ok)
+
+echo "HTTP/0.9 test"
+(
+  printf 'GET /random_blob\r\n'
+  cat $fifo > $tmp &
+  wait
+  echo ok > $ok
+) | socat - TCP:$listen > $fifo
+cmp $tmp random_blob
+test xok = x$(cat $ok)
+
 dbgcat r_err
 curl -v http://$listen/rss
 rss_after=$(curl -sSfv http://$listen/rss)
--
cgit v1.2.3-24-ge0c7
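
A note on the HTTP/0.9 exchange the new test exercises: an HTTP/0.9
"simple request" carries no version token and no headers, and the
response is the bare entity body with no status line or headers, so
the only end-of-response signal the client ever gets is the server
closing the connection. A minimal way to reproduce the exchange by
hand with socat is sketched below; the address and output filename
are only examples and assume a Rainbows! instance serving random_blob
at /random_blob on 127.0.0.1:8080:

    # HTTP/0.9 simple request: no "HTTP/x.y" token, no headers
    printf 'GET /random_blob\r\n' | socat - TCP:127.0.0.1:8080 > blob.out

    # the response is headerless, so the only useful check is that the
    # body arrived intact and the server closed the socket afterwards
    cmp blob.out random_blob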