From 160e768fe8d6043af1e435daeb35d5c92e05de11 Mon Sep 17 00:00:00 2001
From: Eric Wong
Date: Thu, 27 Jun 2013 03:54:39 +0000
Subject: rbuf: reattach/reuse read buffers when possible

Reattaching/reusing read buffers allows us to avoid repeated
reallocation/growth/free cycles when clients repeatedly send us
large headers.  This may also increase cache hits by favoring
recently-used buffers, as long as fragmentation is kept in check.
Fragmentation should be no worse than it is currently, due to the
existing detach behavior of rbufs.
---
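For illustration, a minimal standalone sketch of the keep-the-larger-buffer
policy mog_rbuf_reattach_and_null() implements.  The struct layout,
rbuf_new() helper, and main() driver here are simplified stand-ins for
the real cmogstored types, not the actual implementation; only the swap
logic mirrors the alloc.c hunk below:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct rbuf {
	size_t rcapa;	/* usable capacity of rptr */
	char rptr[];	/* buffered bytes follow the struct */
};

/* one cached buffer per thread, like tls_rbuf in alloc.c */
static __thread struct rbuf *tls_rbuf;

static struct rbuf *rbuf_new(size_t capa)
{
	struct rbuf *rb = malloc(sizeof(*rb) + capa);

	assert(rb && "sketch skips real OOM handling");
	rb->rcapa = capa;
	return rb;
}

/* keep whichever of (detached rbuf, thread-local rbuf) is larger */
static void rbuf_reattach_and_null(struct rbuf **ptrptr)
{
	struct rbuf *rbuf = *ptrptr;

	if (!rbuf)
		return;
	*ptrptr = NULL;

	assert(rbuf != tls_rbuf && "cannot reattach, already attached");
	if (tls_rbuf) {
		if (rbuf->rcapa < tls_rbuf->rcapa) {
			free(rbuf);	/* cached one is bigger, drop ours */
			return;
		}
		free(tls_rbuf);	/* ours is bigger, evict the cached one */
	}
	tls_rbuf = rbuf;	/* reattach for the next client */
}

int main(void)
{
	struct rbuf *client_rbuf = rbuf_new(8192);

	tls_rbuf = rbuf_new(4096);		/* small cached buffer */
	rbuf_reattach_and_null(&client_rbuf);	/* 8192 wins, 4096 freed */
	assert(client_rbuf == NULL);
	printf("cached rcapa: %zu\n", tls_rbuf->rcapa);	/* 8192 */
	free(tls_rbuf);
	return 0;
}

Under this policy a thread's cached rbuf capacity never shrinks, so a
thread that has parsed one large header should not need to regrow its
buffer for the next large-header client.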
 alloc.c      | 27 +++++++++++++++++++++++++++
 cmogstored.h |  1 +
 http.c       |  2 +-
 http_put.c   |  8 ++++----
 mgmt.c       |  6 +++---
 5 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/alloc.c b/alloc.c
index bddad4d..7a14173 100644
--- a/alloc.c
+++ b/alloc.c
@@ -193,3 +193,30 @@ void *mog_fsbuf_get(size_t *size)
 
 	return ptr;
 }
+
+/*
+ * attempts to reattach an rbuf belonging to a previously-idle client
+ * if it makes sense to reattach.
+ *
+ * We want to favor rbufs attached to clients if they are bigger than
+ * the thread-local one.
+ */
+void mog_rbuf_reattach_and_null(struct mog_rbuf **ptrptr)
+{
+	struct mog_rbuf *rbuf = *ptrptr;
+
+	if (!rbuf)
+		return;
+	*ptrptr = NULL;
+
+	assert(rbuf != tls_rbuf && "cannot reattach, already attached");
+	if (tls_rbuf) {
+		/* we never want to swap a small buffer for a big buffer */
+		if (rbuf->rcapa < tls_rbuf->rcapa) {
+			mog_rbuf_free(rbuf);
+			return;
+		}
+		free(tls_rbuf);
+	}
+	tls_rbuf = rbuf;
+}
diff --git a/cmogstored.h b/cmogstored.h
index f37a84c..bb7455d 100644
--- a/cmogstored.h
+++ b/cmogstored.h
@@ -334,6 +334,7 @@ struct mog_rbuf *mog_rbuf_detach(struct mog_rbuf *rbuf);
 struct mog_rbuf *mog_rbuf_grow(struct mog_rbuf *);
 void mog_rbuf_free(struct mog_rbuf *);
 void mog_rbuf_free_and_null(struct mog_rbuf **);
+void mog_rbuf_reattach_and_null(struct mog_rbuf **);
 void *mog_fsbuf_get(size_t *size);
 void mog_alloc_quit(void);
 void mog_oom_if_null(const void *);
diff --git a/http.c b/http.c
index 5509e08..2f0b435 100644
--- a/http.c
+++ b/http.c
@@ -46,7 +46,7 @@ http_defer_rbuf(struct mog_http *http, struct mog_rbuf *rbuf, size_t buf_len)
 	assert(defer_bytes <= MOG_RBUF_MAX_SIZE && "defer bytes overflow");
 
 	if (defer_bytes == 0) {
-		mog_rbuf_free_and_null(&http->rbuf);
+		mog_rbuf_reattach_and_null(&http->rbuf);
 	} else if (old) { /* no allocation needed, reuse existing */
 		assert(old == rbuf && "http->rbuf not reused properly");
 		memmove(old->rptr, src, defer_bytes);
diff --git a/http_put.c b/http_put.c
index 877e215..277faa7 100644
--- a/http_put.c
+++ b/http_put.c
@@ -177,7 +177,7 @@ static void stash_advance_rbuf(struct mog_http *http, char *buf, size_t buf_len)
 
 	if (http->_p.line_end == 0 || buf_len <= end) {
 		http->_p.buf_off = 0;
-		mog_rbuf_free_and_null(&http->rbuf);
+		mog_rbuf_reattach_and_null(&http->rbuf);
 		return;
 	}
 
@@ -383,7 +383,7 @@ void mog_http_put(struct mog_fd *mfd, char *buf, size_t buf_len)
 	if (buf_len == http->_p.buf_off) {
 		/* we got the HTTP header in one read() */
 		if (http->_p.chunked) {
-			mog_rbuf_free_and_null(&http->rbuf);
+			mog_rbuf_reattach_and_null(&http->rbuf);
 			mog_chunk_init(http);
 			http->_p.buf_off = buf_len;
 		}
@@ -584,14 +584,14 @@ chunk_state_trailer:
 		if (in_trailer)
 			assert(0 && "bad chunk state: size");
 		/* client is trickling chunk size :< */
-		mog_rbuf_free_and_null(&http->rbuf);
+		mog_rbuf_reattach_and_null(&http->rbuf);
 		http->_p.buf_off = 0;
 		goto again;
 	case MOG_CHUNK_STATE_DATA:
 		if (in_trailer)
 			assert(0 && "bad chunk state: data");
 		/* client is trickling final chunk/trailer */
-		mog_rbuf_free_and_null(&http->rbuf);
+		mog_rbuf_reattach_and_null(&http->rbuf);
 		goto again;
 	case MOG_CHUNK_STATE_TRAILER:
 		stash_advance_rbuf(http, buf, buf_len);
diff --git a/mgmt.c b/mgmt.c
index 5d52a3e..1aacb41 100644
--- a/mgmt.c
+++ b/mgmt.c
@@ -72,7 +72,7 @@ MOG_NOINLINE static void mgmt_close(struct mog_fd *mfd)
 {
 	struct mog_mgmt *mgmt = &mfd->as.mgmt;
 
-	mog_rbuf_free(mgmt->rbuf);
+	mog_rbuf_reattach_and_null(&mgmt->rbuf);
 	assert((mgmt->wbuf == NULL || mgmt->wbuf == MOG_WR_ERROR) &&
 	       "would leak mgmt->wbuf on close");
 
@@ -99,7 +99,7 @@ void mog_mgmt_writev(struct mog_mgmt *mgmt, struct iovec *iov, int iovcnt)
 
 static enum mog_next mgmt_iostat_forever(struct mog_mgmt *mgmt)
 {
-	mog_rbuf_free_and_null(&mgmt->rbuf); /* no coming back from this */
+	mog_rbuf_reattach_and_null(&mgmt->rbuf); /* no coming back from this */
 	mog_notify(MOG_NOTIFY_DEVICE_REFRESH);
 	mog_svc_devstats_subscribe(mgmt);
 
@@ -136,7 +136,7 @@ mgmt_defer_rbuf(struct mog_mgmt *mgmt, struct mog_rbuf *rbuf, size_t buf_len)
 	assert(defer_bytes <= MOG_RBUF_MAX_SIZE && "defer bytes overflow");
 
 	if (defer_bytes == 0) {
-		mog_rbuf_free_and_null(&mgmt->rbuf);
+		mog_rbuf_reattach_and_null(&mgmt->rbuf);
 	} else if (old) { /* no allocation needed, reuse existing */
 		assert(old == rbuf && "mgmt->rbuf not reused properly");
 		memmove(old->rptr, src, defer_bytes);
--
cgit v1.2.3-24-ge0c7