From: Alex Rusuf <yorha.op@gmail.com>
To: damon@lists.linux.dev
Cc: sj@kernel.org
Subject: [RFC PATCH v1 2/7] mm/damon/core: list-based contexts organization
Date: Wed, 15 May 2024 18:24:52 +0300 [thread overview]
Message-ID: <20240515152457.603724-3-yorha.op@gmail.com> (raw)
In-Reply-To: <20240515152457.603724-1-yorha.op@gmail.com>
This patch implements a list-based approach to managing DAMON
contexts, as described in the previous patch: each kdamond now
holds a list of damon_ctx structures instead of a single
context pointer.
Signed-off-by: Alex Rusuf <yorha.op@gmail.com>
---
include/linux/damon.h | 22 ++++++-
mm/damon/core.c | 98 ++++++++++++++++++++++-------
mm/damon/sysfs.c | 140 ++++++++++++++++++++++++++++++------------
3 files changed, 197 insertions(+), 63 deletions(-)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 089d4a9cf..ec291a2eb 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -571,8 +571,7 @@ struct damon_attrs {
struct kdamond_struct {
struct mutex lock;
struct task_struct *self;
- /* TODO: support multiple contexts */
- struct damon_ctx *ctx;
+ struct list_head contexts;
size_t nr_ctxs;
/* private: */
@@ -626,9 +625,11 @@ struct damon_ctx {
* update
*/
unsigned long next_ops_update_sis;
+ unsigned long sz_limit;
/* public: */
struct kdamond_struct *kdamond;
+ struct list_head list;
struct damon_operations ops;
struct damon_callback callback;
@@ -662,6 +663,15 @@ static inline unsigned long damon_sz_region(struct damon_region *r)
return r->ar.end - r->ar.start;
}
+static inline struct damon_target *damon_first_target(struct damon_ctx *ctx)
+{
+ return list_first_entry(&ctx->adaptive_targets, struct damon_target, list);
+}
+
+static inline struct damon_ctx *damon_first_ctx(struct kdamond_struct *kdamond)
+{
+ return list_first_entry(&kdamond->contexts, struct damon_ctx, list);
+}
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
@@ -684,6 +694,12 @@ static inline unsigned long damon_sz_region(struct damon_region *r)
#define damon_for_each_scheme_safe(s, next, ctx) \
list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+#define damon_for_each_context(c, kdamond) \
+ list_for_each_entry(c, &(kdamond)->contexts, list)
+
+#define damon_for_each_context_safe(c, next, kdamond) \
+ list_for_each_entry_safe(c, next, &(kdamond)->contexts, list)
+
#define damos_for_each_quota_goal(goal, quota) \
list_for_each_entry(goal, &quota->goals, list)
@@ -745,8 +761,10 @@ void damon_destroy_target(struct damon_target *t);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
+void damon_add_ctx(struct kdamond_struct *kdamond, struct damon_ctx *ctx);
struct kdamond_struct *damon_new_kdamond(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
+void damon_destroy_ctxs(struct kdamond_struct *kdamond);
void damon_destroy_kdamond(struct kdamond_struct *kdamond);
bool damon_kdamond_running(struct kdamond_struct *kdamond);
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index b592a2865..899487f38 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -502,10 +502,17 @@ struct damon_ctx *damon_new_ctx(void)
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
+ INIT_LIST_HEAD(&ctx->list);
return ctx;
}
+void damon_add_ctx(struct kdamond_struct *kdamond, struct damon_ctx *ctx)
+{
+ list_add_tail(&ctx->list, &kdamond->contexts);
+ ++kdamond->nr_ctxs;
+}
+
struct kdamond_struct *damon_new_kdamond(void)
{
struct kdamond_struct *kdamond;
@@ -517,6 +524,8 @@ struct kdamond_struct *damon_new_kdamond(void)
init_completion(&kdamond->kdamond_started);
mutex_init(&kdamond->lock);
+ INIT_LIST_HEAD(&kdamond->contexts);
+
return kdamond;
}
@@ -533,6 +542,11 @@ static void damon_destroy_targets(struct damon_ctx *ctx)
damon_destroy_target(t);
}
+static inline void damon_del_ctx(struct damon_ctx *ctx)
+{
+ list_del(&ctx->list);
+}
+
void damon_destroy_ctx(struct damon_ctx *ctx)
{
struct damos *s, *next_s;
@@ -542,12 +556,21 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
damon_for_each_scheme_safe(s, next_s, ctx)
damon_destroy_scheme(s);
+ damon_del_ctx(ctx);
kfree(ctx);
}
+void damon_destroy_ctxs(struct kdamond_struct *kdamond)
+{
+ struct damon_ctx *c, *next;
+
+ damon_for_each_context_safe(c, next, kdamond)
+ damon_destroy_ctx(c);
+}
+
void damon_destroy_kdamond(struct kdamond_struct *kdamond)
{
- damon_destroy_ctx(kdamond->ctx);
+ damon_destroy_ctxs(kdamond);
mutex_destroy(&kdamond->lock);
kfree(kdamond);
}
@@ -1596,30 +1619,68 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
}
}
+static bool kdamond_init_ctx(struct damon_ctx *ctx)
+{
+ if (ctx->ops.init)
+ ctx->ops.init(ctx);
+ if (ctx->callback.before_start && ctx->callback.before_start(ctx))
+ return false;
+
+ kdamond_init_intervals_sis(ctx);
+ ctx->sz_limit = damon_region_sz_limit(ctx);
+
+ return true;
+}
+
+static bool kdamond_init_ctxs(struct kdamond_struct *kdamond)
+{
+ struct damon_ctx *c;
+
+ damon_for_each_context(c, kdamond)
+ if (!kdamond_init_ctx(c))
+ return false;
+ return true;
+}
+
+static void kdamond_finish_ctx(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct damon_region *r, *next;
+
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region_safe(r, next, t)
+ damon_destroy_region(r, t);
+ }
+
+ if (ctx->callback.before_terminate)
+ ctx->callback.before_terminate(ctx);
+ if (ctx->ops.cleanup)
+ ctx->ops.cleanup(ctx);
+}
+
+static void kdamond_finish_ctxs(struct kdamond_struct *kdamond)
+{
+ struct damon_ctx *c;
+
+ damon_for_each_context(c, kdamond)
+ kdamond_finish_ctx(c);
+}
+
/*
* The monitoring daemon that runs as a kernel thread
*/
static int kdamond_fn(void *data)
{
struct kdamond_struct *kdamond = data;
- struct damon_ctx *ctx = kdamond->ctx;
- struct damon_target *t;
- struct damon_region *r, *next;
+ struct damon_ctx *ctx = damon_first_ctx(kdamond);
unsigned int max_nr_accesses = 0;
- unsigned long sz_limit = 0;
pr_debug("kdamond (%d) starts\n", current->pid);
complete(&kdamond->kdamond_started);
- kdamond_init_intervals_sis(ctx);
-
- if (ctx->ops.init)
- ctx->ops.init(ctx);
- if (ctx->callback.before_start && ctx->callback.before_start(ctx))
+ if (!kdamond_init_ctxs(kdamond))
goto done;
- sz_limit = damon_region_sz_limit(ctx);
-
while (!kdamond_need_stop(ctx)) {
/*
* ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
@@ -1631,6 +1692,7 @@ static int kdamond_fn(void *data)
unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
unsigned long sample_interval = ctx->attrs.sample_interval;
+ unsigned long sz_limit = ctx->sz_limit;
if (kdamond_wait_activation(ctx))
break;
@@ -1681,19 +1743,11 @@ static int kdamond_fn(void *data)
sample_interval;
if (ctx->ops.update)
ctx->ops.update(ctx);
- sz_limit = damon_region_sz_limit(ctx);
+ ctx->sz_limit = damon_region_sz_limit(ctx);
}
}
done:
- damon_for_each_target(t, ctx) {
- damon_for_each_region_safe(r, next, t)
- damon_destroy_region(r, t);
- }
-
- if (ctx->callback.before_terminate)
- ctx->callback.before_terminate(ctx);
- if (ctx->ops.cleanup)
- ctx->ops.cleanup(ctx);
+ kdamond_finish_ctxs(kdamond);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&kdamond->lock);
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index c55c0b200..db2d48361 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1271,46 +1271,82 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
*/
static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *sys_kdamond)
{
- struct damon_ctx *ctx = sys_kdamond->kdamond->ctx;
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
- if (!ctx)
+ if (!sys_kdamond->kdamond)
return -EINVAL;
- damon_sysfs_schemes_update_stats(
- sys_kdamond->contexts->contexts_arr[0]->schemes, ctx);
+
+ sysfs_ctxs = sys_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ struct damon_sysfs_context *sysfs_ctx = *sysfs_ctxs;
+
+ damon_sysfs_schemes_update_stats(sysfs_ctx->schemes, c);
+ ++sysfs_ctxs;
+ }
return 0;
}
static int damon_sysfs_upd_schemes_regions_start(
struct damon_sysfs_kdamond *sys_kdamond, bool total_bytes_only)
{
- struct damon_ctx *ctx = sys_kdamond->kdamond->ctx;
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
+ int err;
- if (!ctx)
+ if (!sys_kdamond->kdamond)
return -EINVAL;
- return damon_sysfs_schemes_update_regions_start(
- sys_kdamond->contexts->contexts_arr[0]->schemes, ctx,
- total_bytes_only);
+
+ sysfs_ctxs = sys_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ struct damon_sysfs_context *sysfs_ctx = *sysfs_ctxs;
+
+ err = damon_sysfs_schemes_update_regions_start(sysfs_ctx->schemes, c,
+ total_bytes_only);
+ if (err)
+ return err;
+ ++sysfs_ctxs;
+ }
+ return 0;
}
static int damon_sysfs_upd_schemes_regions_stop(
struct damon_sysfs_kdamond *sys_kdamond)
{
- struct damon_ctx *ctx = sys_kdamond->kdamond->ctx;
+ struct damon_ctx *c;
+ int err;
- if (!ctx)
+ if (!sys_kdamond->kdamond)
return -EINVAL;
- return damon_sysfs_schemes_update_regions_stop(ctx);
+
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ err = damon_sysfs_schemes_update_regions_stop(c);
+ if (err)
+ return err;
+ }
+ return 0;
}
static int damon_sysfs_clear_schemes_regions(
struct damon_sysfs_kdamond *sys_kdamond)
{
- struct damon_ctx *ctx = sys_kdamond->kdamond->ctx;
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
+ int err;
- if (!ctx)
+ if (!sys_kdamond->kdamond)
return -EINVAL;
- return damon_sysfs_schemes_clear_regions(
- sys_kdamond->contexts->contexts_arr[0]->schemes, ctx);
+
+ sysfs_ctxs = sys_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ struct damon_sysfs_context *sysfs_ctx = *sysfs_ctxs;
+
+ err = damon_sysfs_schemes_clear_regions(sysfs_ctx->schemes, c);
+ if (err)
+ return err;
+ ++sysfs_ctxs;
+ }
+ return 0;
}
static inline bool damon_sysfs_kdamond_running(
@@ -1345,21 +1381,32 @@ static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
*/
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *sys_kdamond)
{
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
+ int err;
+
if (!damon_sysfs_kdamond_running(sys_kdamond))
return -EINVAL;
/* TODO: Support multiple contexts per kdamond */
if (sys_kdamond->contexts->nr != 1)
return -EINVAL;
- return damon_sysfs_apply_inputs(sys_kdamond->kdamond->ctx,
- sys_kdamond->contexts->contexts_arr[0]);
+ sysfs_ctxs = sys_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ err = damon_sysfs_apply_inputs(c, *sysfs_ctxs);
+ if (err)
+ return err;
+ ++sysfs_ctxs;
+ }
+ return 0;
}
static int damon_sysfs_commit_schemes_quota_goals(
struct damon_sysfs_kdamond *sysfs_kdamond)
{
- struct damon_ctx *ctx;
- struct damon_sysfs_context *sysfs_ctx;
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
+ int err;
if (!damon_sysfs_kdamond_running(sysfs_kdamond))
return -EINVAL;
@@ -1367,9 +1414,16 @@ static int damon_sysfs_commit_schemes_quota_goals(
if (sysfs_kdamond->contexts->nr != 1)
return -EINVAL;
- ctx = sysfs_kdamond->kdamond->ctx;
- sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
- return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
+ sysfs_ctxs = sysfs_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sysfs_kdamond->kdamond) {
+ struct damon_sysfs_context *sysfs_ctx = *sysfs_ctxs;
+
+ err = damos_sysfs_set_quota_scores(sysfs_ctx->schemes, c);
+ if (err)
+ return err;
+ ++sysfs_ctxs;
+ }
+ return 0;
}
/*
@@ -1385,12 +1439,19 @@ static int damon_sysfs_commit_schemes_quota_goals(
static int damon_sysfs_upd_schemes_effective_quotas(
struct damon_sysfs_kdamond *sys_kdamond)
{
- struct damon_ctx *ctx = sys_kdamond->kdamond->ctx;
+ struct damon_ctx *c;
+ struct damon_sysfs_context **sysfs_ctxs;
- if (!ctx)
+ if (!sys_kdamond->kdamond)
return -EINVAL;
- damos_sysfs_update_effective_quotas(
- sys_kdamond->contexts->contexts_arr[0]->schemes, ctx);
+
+ sysfs_ctxs = sys_kdamond->contexts->contexts_arr;
+ damon_for_each_context(c, sys_kdamond->kdamond) {
+ struct damon_sysfs_context *sysfs_ctx = *sysfs_ctxs;
+
+ damos_sysfs_update_effective_quotas(sysfs_ctx->schemes, c);
+ ++sysfs_ctxs;
+ }
return 0;
}
@@ -1522,24 +1583,24 @@ static struct damon_ctx *damon_sysfs_build_ctx(
}
static struct kdamond_struct *damon_sysfs_build_kdamond(
- struct damon_sysfs_context *sys_ctx)
+ struct damon_sysfs_context **sys_ctx, size_t nr_ctxs)
{
struct damon_ctx *ctx;
- struct kdamond_struct *kdamond = damon_new_kdamond();
+ struct kdamond_struct *kdamond;
+ kdamond = damon_new_kdamond();
if (!kdamond)
return ERR_PTR(-ENOMEM);
- ctx = damon_sysfs_build_ctx(sys_ctx);
- if (IS_ERR(ctx)) {
- damon_destroy_kdamond(kdamond);
- return ERR_PTR(PTR_ERR(ctx));
+ for (size_t i = 0; i < nr_ctxs; ++i) {
+ ctx = damon_sysfs_build_ctx(sys_ctx[i]);
+ if (IS_ERR(ctx)) {
+ damon_destroy_kdamond(kdamond);
+ return ERR_PTR(PTR_ERR(ctx));
+ }
+ ctx->kdamond = kdamond;
+ damon_add_ctx(kdamond, ctx);
}
- ctx->kdamond = kdamond;
-
- kdamond->ctx = ctx;
- kdamond->nr_ctxs = 1;
-
return kdamond;
}
@@ -1560,7 +1621,8 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *sys_kdamond)
damon_destroy_kdamond(sys_kdamond->kdamond);
sys_kdamond->kdamond = NULL;
- kdamond = damon_sysfs_build_kdamond(sys_kdamond->contexts->contexts_arr[0]);
+ kdamond = damon_sysfs_build_kdamond(sys_kdamond->contexts->contexts_arr,
+ sys_kdamond->contexts->nr);
if (IS_ERR(kdamond))
return PTR_ERR(kdamond);
err = damon_start(kdamond, false);
--
2.42.0
next prev parent reply other threads:[~2024-05-15 15:25 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-15 15:24 [RFC PATCH v1 0/7] DAMON multiple contexts support Alex Rusuf
2024-05-15 15:24 ` [RFC PATCH v1 1/7] mm/damon/core: kdamond_struct abstraction layer Alex Rusuf
2024-05-15 15:24 ` Alex Rusuf [this message]
2024-05-15 15:24 ` [RFC PATCH v1 3/7] mm/damon/lru_sort: " Alex Rusuf
2024-05-15 15:24 ` [RFC PATCH v1 4/7] mm/damon/reclaim: kdamon_struct " Alex Rusuf
2024-05-15 15:24 ` [RFC PATCH v1 5/7] mm/damon/core: rename nr_running_ctxs -> nr_running_kdamonds Alex Rusuf
2024-05-15 15:24 ` [RFC PATCH v1 6/7] mm/damon/core: multi-context support Alex Rusuf
2024-05-15 15:24 ` [RFC PATCH v1 7/7] mm/damon/core: multi-context awarness for trace events Alex Rusuf
2024-05-16 22:17 ` [RFC PATCH v1 0/7] DAMON multiple contexts support SeongJae Park
2024-05-17 8:51 ` Alex Rusuf
2024-05-17 22:59 ` SeongJae Park
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240515152457.603724-3-yorha.op@gmail.com \
--to=yorha.op@gmail.com \
--cc=damon@lists.linux.dev \
--cc=sj@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).