diff options
author | Eric Wong <normalperson@yhbt.net> | 2013-06-27 03:54:39 +0000 |
---|---|---|
committer | Eric Wong <normalperson@yhbt.net> | 2013-07-10 00:55:55 +0000 |
commit | 160e768fe8d6043af1e435daeb35d5c92e05de11 (patch) | |
tree | 13399e0ff1e82be38a531cbce29127b419d2954c | |
parent | 331e7a1300ae59a052763ffecc77b45a56e2deb3 (diff) | |
download | cmogstored-160e768fe8d6043af1e435daeb35d5c92e05de11.tar.gz |
Reattaching/reusing read buffers allows us to avoid repeated reallocation/growth/free when clients repeatedly send us large headers. This may also increase cache hits by favoring recently-used buffers, as long as fragmentation is kept in check. The fragmentation should be no worse than it is currently, due to the existing detached nature of rbufs.
-rw-r--r-- | alloc.c | 27 | ||||
-rw-r--r-- | cmogstored.h | 1 | ||||
-rw-r--r-- | http.c | 2 | ||||
-rw-r--r-- | http_put.c | 8 | ||||
-rw-r--r-- | mgmt.c | 6 |
5 files changed, 36 insertions, 8 deletions
@@ -193,3 +193,30 @@ void *mog_fsbuf_get(size_t *size) return ptr; } + +/* + * attempts to reattach an rbuf belonging to a previously-idle client + * if it makes sense to reattach. + * + * We want to favor rbufs attached to clients if they are bigger than + * the thread-local one. + */ +void mog_rbuf_reattach_and_null(struct mog_rbuf **ptrptr) +{ + struct mog_rbuf *rbuf = *ptrptr; + + if (!rbuf) + return; + *ptrptr = NULL; + + assert(rbuf != tls_rbuf && "cannot reattach, already attached"); + if (tls_rbuf) { + /* we never want to swap a small buffer for a big buffer */ + if (rbuf->rcapa < tls_rbuf->rcapa) { + mog_rbuf_free(rbuf); + return; + } + free(tls_rbuf); + } + tls_rbuf = rbuf; +} diff --git a/cmogstored.h b/cmogstored.h index f37a84c..bb7455d 100644 --- a/cmogstored.h +++ b/cmogstored.h @@ -334,6 +334,7 @@ struct mog_rbuf *mog_rbuf_detach(struct mog_rbuf *rbuf); struct mog_rbuf *mog_rbuf_grow(struct mog_rbuf *); void mog_rbuf_free(struct mog_rbuf *); void mog_rbuf_free_and_null(struct mog_rbuf **); +void mog_rbuf_reattach_and_null(struct mog_rbuf **); void *mog_fsbuf_get(size_t *size); void mog_alloc_quit(void); void mog_oom_if_null(const void *); @@ -46,7 +46,7 @@ http_defer_rbuf(struct mog_http *http, struct mog_rbuf *rbuf, size_t buf_len) assert(defer_bytes <= MOG_RBUF_MAX_SIZE && "defer bytes overflow"); if (defer_bytes == 0) { - mog_rbuf_free_and_null(&http->rbuf); + mog_rbuf_reattach_and_null(&http->rbuf); } else if (old) { /* no allocation needed, reuse existing */ assert(old == rbuf && "http->rbuf not reused properly"); memmove(old->rptr, src, defer_bytes); @@ -177,7 +177,7 @@ static void stash_advance_rbuf(struct mog_http *http, char *buf, size_t buf_len) if (http->_p.line_end == 0 || buf_len <= end) { http->_p.buf_off = 0; - mog_rbuf_free_and_null(&http->rbuf); + mog_rbuf_reattach_and_null(&http->rbuf); return; } @@ -383,7 +383,7 @@ void mog_http_put(struct mog_fd *mfd, char *buf, size_t buf_len) if (buf_len == http->_p.buf_off) { /* we got the 
HTTP header in one read() */ if (http->_p.chunked) { - mog_rbuf_free_and_null(&http->rbuf); + mog_rbuf_reattach_and_null(&http->rbuf); mog_chunk_init(http); http->_p.buf_off = buf_len; } @@ -584,14 +584,14 @@ chunk_state_trailer: if (in_trailer) assert(0 && "bad chunk state: size"); /* client is trickling chunk size :< */ - mog_rbuf_free_and_null(&http->rbuf); + mog_rbuf_reattach_and_null(&http->rbuf); http->_p.buf_off = 0; goto again; case MOG_CHUNK_STATE_DATA: if (in_trailer) assert(0 && "bad chunk state: data"); /* client is trickling final chunk/trailer */ - mog_rbuf_free_and_null(&http->rbuf); + mog_rbuf_reattach_and_null(&http->rbuf); goto again; case MOG_CHUNK_STATE_TRAILER: stash_advance_rbuf(http, buf, buf_len); @@ -72,7 +72,7 @@ MOG_NOINLINE static void mgmt_close(struct mog_fd *mfd) { struct mog_mgmt *mgmt = &mfd->as.mgmt; - mog_rbuf_free(mgmt->rbuf); + mog_rbuf_reattach_and_null(&mgmt->rbuf); assert((mgmt->wbuf == NULL || mgmt->wbuf == MOG_WR_ERROR) && "would leak mgmt->wbuf on close"); @@ -99,7 +99,7 @@ void mog_mgmt_writev(struct mog_mgmt *mgmt, struct iovec *iov, int iovcnt) static enum mog_next mgmt_iostat_forever(struct mog_mgmt *mgmt) { - mog_rbuf_free_and_null(&mgmt->rbuf); /* no coming back from this */ + mog_rbuf_reattach_and_null(&mgmt->rbuf); /* no coming back from this */ mog_notify(MOG_NOTIFY_DEVICE_REFRESH); mog_svc_devstats_subscribe(mgmt); @@ -136,7 +136,7 @@ mgmt_defer_rbuf(struct mog_mgmt *mgmt, struct mog_rbuf *rbuf, size_t buf_len) assert(defer_bytes <= MOG_RBUF_MAX_SIZE && "defer bytes overflow"); if (defer_bytes == 0) { - mog_rbuf_free_and_null(&mgmt->rbuf); + mog_rbuf_reattach_and_null(&mgmt->rbuf); } else if (old) { /* no allocation needed, reuse existing */ assert(old == rbuf && "mgmt->rbuf not reused properly"); memmove(old->rptr, src, defer_bytes); |