From 4edbdd6ba3686a60a8ddeed8f6f26e55abf0b207 Mon Sep 17 00:00:00 2001
From: Eric Wong
Date: Sun, 14 Jul 2013 07:26:36 +0000
Subject: downgrade thread/device-count fields to unsigned int

It's unlikely we'll even come close to seeing 2-4 billion devices in a
MogileFS instance for a while.  Meanwhile, it's also unlikely the kernel
will ever run that many threads.  So make it easier to pack and shrink
data structures to save a few bytes and perhaps get better memory
alignment.

For reference, the POSIX semaphore API also specifies initial values as
unsigned (int).

This leads to a minor size reduction (and we're not even packing):

$ ~/linux/scripts/bloat-o-meter cmogstored.before cmogstored
add/remove: 0/0 grow/shrink: 0/13 up/down: 0/-86 (-86)
function                          old     new   delta
mog_svc_dev_quit_prepare           13      12      -1
mog_mgmt_fn_aio_threads           147     146      -1
mog_dev_user_rescale_i             27      26      -1
mog_ioq_requeue_prepare            52      50      -2
mog_ioq_init                       80      78      -2
mog_thrpool_start                 101      96      -5
mog_svc_dev_user_rescale          143     137      -6
mog_svc_start_each                264     256      -8
mog_svc_aio_threads_handler       257     249      -8
mog_ioq_ready                     263     255      -8
mog_ioq_next                      303     295      -8
mog_svc_thrpool_rescale           206     197      -9
mog_thrpool_set_size             1028    1001     -27
---
 cmogstored.h | 26 +++++++++++++-------------
 ioq.c        |  2 +-
 mgmt_fn.c    |  2 +-
 svc.c        | 16 ++++++++--------
 svc_dev.c    | 10 +++++-----
 thrpool.c    | 20 ++++++++++----------
 6 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/cmogstored.h b/cmogstored.h
index ffea4b0..c3ebf80 100644
--- a/cmogstored.h
+++ b/cmogstored.h
@@ -97,8 +97,8 @@ enum mog_next {
 };
 
 struct mog_ioq {
-	size_t cur;
-	size_t max;
+	unsigned cur;
+	unsigned max;
 	pthread_mutex_t mtx;
 	SIMPLEQ_HEAD(ioq_head, mog_fd) ioq_head;
 	bool contended;
@@ -156,10 +156,10 @@ struct mog_svc {
 	int docroot_fd;
 	const char *docroot;
 	unsigned persist_client;
-	size_t nmogdev;
-	size_t user_set_aio_threads; /* only touched by main/notify thread */
-	size_t user_req_aio_threads; /* protected by aio_threads_lock */
-	size_t thr_per_dev;
+	unsigned nmogdev;
+	unsigned user_set_aio_threads; /* only touched by main/notify thread */
+	unsigned user_req_aio_threads; /* protected by aio_threads_lock */
+	unsigned thr_per_dev;
 
 	/* private */
 	DIR *dir;
@@ -232,8 +232,8 @@ struct mog_thread {
 
 struct mog_thrpool {
 	pthread_mutex_t lock;
-	size_t n_threads;
-	size_t want_threads;
+	unsigned n_threads;
+	unsigned want_threads;
 	struct mog_thread *threads;
 	void *(*start_fn)(void *);
 	void *start_arg;
@@ -369,8 +369,8 @@ typedef int (*mog_scandev_cb)(const struct mog_dev *, struct mog_svc *);
 size_t mog_svc_each(Hash_processor processor, void *data);
 void mog_svc_upgrade_prepare(void);
 bool mog_svc_start_each(void *svc_ptr, void *have_mgmt_ptr);
-void mog_svc_thrpool_rescale(struct mog_svc *, size_t ndev_new);
-void mog_svc_aio_threads_enqueue(struct mog_svc *, size_t nr);
+void mog_svc_thrpool_rescale(struct mog_svc *, unsigned ndev_new);
+void mog_svc_aio_threads_enqueue(struct mog_svc *, unsigned nr);
 void mog_svc_aio_threads_handler(void);
 
 /* dev.c */
@@ -437,11 +437,11 @@ char *mog_canonpath_die(const char *path, enum canonicalize_mode_t canon_mode);
 /* thrpool.c */
 void mog_thr_test_quit(void);
 bool mog_thr_prepare_quit(void) MOG_CHECK;
-void mog_thrpool_start(struct mog_thrpool *, size_t n,
+void mog_thrpool_start(struct mog_thrpool *, unsigned n,
 			void *(*start_fn)(void *), void *arg);
 void mog_thrpool_quit(struct mog_thrpool *, struct mog_queue *);
 void mog_thrpool_process_queue(void);
-void mog_thrpool_set_size(struct mog_thrpool *, size_t size);
+void mog_thrpool_set_size(struct mog_thrpool *, unsigned size);
 
 /* mgmt.c */
 void mog_mgmt_writev(struct mog_mgmt *, struct iovec *, int iovcnt);
@@ -645,7 +645,7 @@ void mog_yield(void);
 
 /* ioq.c */
 extern __thread struct mog_ioq *mog_ioq_current;
-void mog_ioq_init(struct mog_ioq *, struct mog_svc *, size_t val);
+void mog_ioq_init(struct mog_ioq *, struct mog_svc *, unsigned val);
 bool mog_ioq_ready(struct mog_ioq *, struct mog_fd *) MOG_CHECK;
 bool mog_ioq_contended(void) MOG_CHECK;
 void mog_ioq_next(struct mog_ioq *);
diff --git a/ioq.c b/ioq.c
index 0000bb8..9b0bd9a 100644
--- a/ioq.c
+++ b/ioq.c
@@ -14,7 +14,7 @@
  */
 __thread struct mog_ioq *mog_ioq_current;
 
-void mog_ioq_init(struct mog_ioq *ioq, struct mog_svc *svc, size_t val)
+void mog_ioq_init(struct mog_ioq *ioq, struct mog_svc *svc, unsigned val)
 {
 	ioq->cur = val;
 	ioq->max = val;
diff --git a/mgmt_fn.c b/mgmt_fn.c
index 81a1edf..8bdec2d 100644
--- a/mgmt_fn.c
+++ b/mgmt_fn.c
@@ -193,7 +193,7 @@ void mog_mgmt_fn_aio_threads(struct mog_mgmt *mgmt, char *buf)
 	assert(*end == 0 && "ragel misfed mog_mgmt_fn_set_aio_threads");
 
 	if (nr > 0 && nr <= (size_t)INT_MAX)
-		mog_svc_aio_threads_enqueue(mgmt->svc, nr);
+		mog_svc_aio_threads_enqueue(mgmt->svc, (unsigned)nr);
 
 	mog_mgmt_fn_blank(mgmt);
 }
diff --git a/svc.c b/svc.c
index 7d6a6ac..4a44493 100644
--- a/svc.c
+++ b/svc.c
@@ -163,9 +163,9 @@ void mog_svc_upgrade_prepare(void)
 }
 
 /* this is only called by the main (notify) thread */
-void mog_svc_thrpool_rescale(struct mog_svc *svc, size_t ndev_new)
+void mog_svc_thrpool_rescale(struct mog_svc *svc, unsigned ndev_new)
 {
-	size_t size = ndev_new * svc->thr_per_dev;
+	unsigned size = ndev_new * svc->thr_per_dev;
 	struct mog_thrpool *tp = &svc->queue->thrpool;
 
 	/* respect user-setting */
@@ -175,7 +175,7 @@ void mog_svc_thrpool_rescale(struct mog_svc *svc, size_t ndev_new)
 			return;
 
 		syslog(LOG_WARNING,
-		       "server aio_threads=%zu is less than devcount=%zu",
+		       "server aio_threads=%u is less than devcount=%u",
 		       tp->n_threads, ndev_new);
 
 		return;
@@ -186,7 +186,7 @@ void mog_svc_thrpool_rescale(struct mog_svc *svc, size_t ndev_new)
 
 	if (svc->nmogdev)
 		syslog(LOG_INFO,
-		       "devcount(%zu->%zu), updating server aio_threads=%zu",
+		       "devcount(%u->%u), updating server aio_threads=%u",
 		       svc->nmogdev, ndev_new, size);
 	mog_thrpool_set_size(tp, size);
 }
@@ -248,9 +248,9 @@ bool mog_svc_start_each(void *svc_ptr, void *main_ptr)
  *
  * Called by threads inside the thrpool to wake-up the main/notify thread.
  */
-void mog_svc_aio_threads_enqueue(struct mog_svc *svc, size_t size)
+void mog_svc_aio_threads_enqueue(struct mog_svc *svc, unsigned size)
 {
-	size_t prev_enq;
+	unsigned prev_enq;
 
 	CHECK(int, 0, pthread_mutex_lock(&aio_threads_lock));
 
@@ -273,7 +273,7 @@ void mog_svc_aio_threads_handler(void)
 
 	/* guard against requests bundled in one wakeup by looping here */
 	for (;;) {
-		size_t req_size = 0;
+		unsigned req_size = 0;
 
 		CHECK(int, 0, pthread_mutex_lock(&aio_threads_lock));
 		svc = SIMPLEQ_FIRST(&aio_threads_qhead);
@@ -291,7 +291,7 @@ void mog_svc_aio_threads_handler(void)
 		if (svc == NULL || req_size == 0)
 			return;
 
-		syslog(LOG_INFO, "server aio_threads=%zu", req_size);
+		syslog(LOG_INFO, "server aio_threads=%u", req_size);
 		svc->user_set_aio_threads = req_size;
 		if (svc->nmogdev)
 			mog_svc_dev_user_rescale(svc, svc->nmogdev);
diff --git a/svc_dev.c b/svc_dev.c
index e57f0b6..b2beec3 100644
--- a/svc_dev.c
+++ b/svc_dev.c
@@ -95,7 +95,7 @@ static void svc_init_dev_hash(struct mog_svc *svc)
 	mog_oom_if_null(svc->by_st_dev);
 }
 
-static int svc_scandev(struct mog_svc *svc, size_t *nr, mog_scandev_cb cb)
+static int svc_scandev(struct mog_svc *svc, unsigned *nr, mog_scandev_cb cb)
 {
 	struct dirent *ent;
 	int rc = 0;
@@ -278,7 +278,7 @@ void mog_svc_dev_shutdown(void)
 static bool svc_mkusage_each(void *svcptr, void *ignored)
 {
 	struct mog_svc *svc = svcptr;
-	size_t ndev = 0;
+	unsigned ndev = 0;
 
 	svc_scandev(svc, &ndev, mog_dev_mkusage);
 
@@ -295,16 +295,16 @@ void mog_mkusage_all(void)
 }
 
 /* we should never set ioq_max == 0 */
-static void svc_rescale_warn_fix_capa(struct mog_svc *svc, size_t ndev_new)
+static void svc_rescale_warn_fix_capa(struct mog_svc *svc, unsigned ndev_new)
 {
 	if (svc->thr_per_dev != 0)
 		return;
 
 	syslog(LOG_WARNING,
-	       "serving %s with fewer aio_threads(%zu) than devices(%zu)",
+	       "serving %s with fewer aio_threads(%u) than devices(%u)",
 	       svc->docroot, svc->user_set_aio_threads, ndev_new);
 	syslog(LOG_WARNING,
-	       "set \"server aio_threads = %zu\" or higher via sidechannel",
+	       "set \"server aio_threads = %u\" or higher via sidechannel",
 	       ndev_new);
 
 	svc->thr_per_dev = 1;
diff --git a/thrpool.c b/thrpool.c
index 918fef8..8ed5963 100644
--- a/thrpool.c
+++ b/thrpool.c
@@ -93,7 +93,7 @@ static void poke(pthread_t thr, int sig)
 }
 
 static bool
-thr_create_fail_retry(struct mog_thrpool *tp, size_t size,
+thr_create_fail_retry(struct mog_thrpool *tp, unsigned size,
 			unsigned long *nr_eagain, int err)
 {
 	/* do not leave the pool w/o threads at all */
@@ -108,14 +108,14 @@ thr_create_fail_retry(struct mog_thrpool *tp, size_t size,
 	} else {
 		errno = err;
 		syslog(LOG_ERR,
-		       "pthread_create: %m, only running %lu of %lu threads",
-		       (unsigned long)tp->n_threads, (unsigned long)size);
+		       "pthread_create: %m, only running %u of %u threads",
+		       tp->n_threads, size);
 		return false;
 	}
 }
 
 static bool
-thrpool_add(struct mog_thrpool *tp, size_t size, unsigned long *nr_eagain)
+thrpool_add(struct mog_thrpool *tp, unsigned size, unsigned long *nr_eagain)
 {
 	struct mog_thr_start_arg arg = {
 		.mtx = PTHREAD_MUTEX_INITIALIZER,
@@ -158,7 +158,7 @@ thrpool_add(struct mog_thrpool *tp, size_t size, unsigned long *nr_eagain)
 	return true;
 }
 
-void mog_thrpool_set_size(struct mog_thrpool *tp, size_t size)
+void mog_thrpool_set_size(struct mog_thrpool *tp, unsigned size)
 {
 	unsigned long nr_eagain = 0;
 
@@ -168,7 +168,7 @@ void mog_thrpool_set_size(struct mog_thrpool *tp, size_t size)
 		/* nothing */;
 
 	if (tp->n_threads > size) {
-		size_t i;
+		unsigned i;
 		int err;
 
 		/* set the do_quit flag for all threads we kill */
@@ -197,19 +197,19 @@ void mog_thrpool_set_size(struct mog_thrpool *tp, size_t size)
 }
 
 void
-mog_thrpool_start(struct mog_thrpool *tp, size_t n,
+mog_thrpool_start(struct mog_thrpool *tp, unsigned nthr,
 	void *(*start_fn)(void *), void *arg)
 {
 	/* we may be started on a new server before device dirs exist */
-	if (n == 0)
-		n = 1;
+	if (nthr == 0)
+		nthr = 1;
 
 	tp->threads = NULL;
 	tp->n_threads = 0;
 	tp->start_fn = start_fn;
 	tp->start_arg = arg;
 	CHECK(int, 0, pthread_mutex_init(&tp->lock, NULL));
-	mog_thrpool_set_size(tp, n);
+	mog_thrpool_set_size(tp, nthr);
 }
 
 void mog_thrpool_quit(struct mog_thrpool *tp, struct mog_queue *q)
--
cgit v1.2.3-24-ge0c7
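
The size win claimed in the commit message comes from size_t being 8 bytes
on LP64 platforms while unsigned int is 4, so two adjacent counters can now
share a single 8-byte alignment slot.  The sketch below is illustrative only
and is not part of the patch: struct ioq_before and struct ioq_after are
hypothetical stand-ins modeled loosely on struct mog_ioq, and the exact byte
counts are ABI-dependent.

/* illustrative sketch only -- not from the cmogstored tree */
#include <pthread.h>
#include <stdio.h>

/* hypothetical "before": two size_t counters, 8 bytes each on LP64 */
struct ioq_before {
	size_t cur;
	size_t max;
	pthread_mutex_t mtx;
};

/* hypothetical "after": both counters pack into one 8-byte slot */
struct ioq_after {
	unsigned cur;
	unsigned max;
	pthread_mutex_t mtx;
};

int main(void)
{
	/* prints 56 vs. 48 on glibc/x86_64; exact values are ABI-dependent */
	printf("before: %zu\nafter:  %zu\n",
	       sizeof(struct ioq_before), sizeof(struct ioq_after));
	return 0;
}

Note that the mgmt_fn.c hunk keeps the existing nr > 0 && nr <= (size_t)INT_MAX
bound before casting to unsigned, so narrowing the parameter type cannot
silently truncate a user-supplied aio_threads value.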