[RFC,v2,3/9] mm/damon: Make monitoring target regions init/update configurable

Message ID 20200603141135.10575-4-sjpark@amazon.com (mailing list archive)
State New, archived
Series DAMON: Support Access Monitoring of Any Address Space Including Physical Memory

Commit Message

SeongJae Park June 3, 2020, 2:11 p.m. UTC
From: SeongJae Park <sjpark@amazon.de>

This commit allows DAMON users to configure their own monitoring target
regions initializer / updater.  Using this, users can confine the
monitored address space as they want.  For example, users can track
only the stack, the heap, shared memory areas, or specific file-backed
areas.
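
As an illustration, a user who wants to monitor only one fixed address
range per target could plug in callbacks roughly as in the sketch
below.  This is not part of the patch: the range is arbitrary, and the
sketch assumes the region construction helpers of mm/damon.c
(damon_new_region() and damon_add_region(), which are static at this
point) are reachable from the caller.

    /* Illustrative only: monitor a single fixed range per target */
    static void fixed_range_init_regions(struct damon_ctx *ctx)
    {
            struct damon_task *t;
            struct damon_region *r;

            damon_for_each_task(t, ctx) {
                    /* assumed helper: new region for [start, end) */
                    r = damon_new_region(ctx, 0x100000000UL,
                                    0x140000000UL);
                    if (r)
                            damon_add_region(r, t); /* assumed helper */
            }
    }

    static void fixed_range_update_regions(struct damon_ctx *ctx)
    {
            /* the target range never changes; nothing to update */
    }

    /* before starting the monitoring */
    static void fixed_range_setup(struct damon_ctx *ctx)
    {
            ctx->init_target_regions = fixed_range_init_regions;
            ctx->update_target_regions = fixed_range_update_regions;
    }

With such callbacks set, kdamond drives the same monitoring logic over
the user-defined regions, while kdamond_init_vm_regions() and
kdamond_update_vm_regions() remain the defaults for the virtual address
space case.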

Signed-off-by: SeongJae Park <sjpark@amazon.de>
---
 include/linux/damon.h | 13 +++++++++++++
 mm/damon.c            | 17 ++++++++++-------
 2 files changed, 23 insertions(+), 7 deletions(-)

Patch

diff --git a/include/linux/damon.h b/include/linux/damon.h
index b4b06ca905a2..a1b6810ce0eb 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -158,9 +158,16 @@  struct damos {
  * @tasks_list:		Head of monitoring target tasks (&damon_task) list.
  * @schemes_list:	Head of schemes (&damos) list.
  *
+ * @init_target_regions:	Constructs initial monitoring target regions.
+ * @update_target_regions:	Updates monitoring target regions.
  * @sample_cb:			Called for each sampling interval.
  * @aggregate_cb:		Called for each aggregation interval.
  *
+ * The monitoring thread calls @init_target_regions before starting the
+ * monitoring, and @update_target_regions for each @regions_update_interval.
+ * By setting these callbacks to appropriate functions, users can therefore
+ * monitor a specific range of the virtual address space.
+ *
  * @sample_cb and @aggregate_cb are called from @kdamond for each of the
  * sampling intervals and aggregation intervals, respectively.  Therefore,
  * users can safely access to the monitoring results via @tasks_list without
@@ -190,10 +197,16 @@  struct damon_ctx {
 	struct list_head schemes_list;	/* 'damos' objects */
 
 	/* callbacks */
+	void (*init_target_regions)(struct damon_ctx *context);
+	void (*update_target_regions)(struct damon_ctx *context);
 	void (*sample_cb)(struct damon_ctx *context);
 	void (*aggregate_cb)(struct damon_ctx *context);
 };
 
+/* Reference callback implementations for virtual memory */
+void kdamond_init_vm_regions(struct damon_ctx *ctx);
+void kdamond_update_vm_regions(struct damon_ctx *ctx);
+
 int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids);
 int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
 		unsigned long aggr_int, unsigned long regions_update_int,
diff --git a/mm/damon.c b/mm/damon.c
index f6dd34425185..2a3c1abb9b47 100644
--- a/mm/damon.c
+++ b/mm/damon.c
@@ -72,6 +72,9 @@  static struct damon_ctx damon_user_ctx = {
 	.regions_update_interval = 1000 * 1000,
 	.min_nr_regions = 10,
 	.max_nr_regions = 1000,
+
+	.init_target_regions = kdamond_init_vm_regions,
+	.update_target_regions = kdamond_update_vm_regions,
 };
 
 /*
@@ -324,7 +327,7 @@  static void swap_ranges(struct damon_addr_range *r1,
  *
  * This function receives an address space and finds three regions in it which
  * separated by the two biggest unmapped regions in the space.  Please refer to
- * below comments of 'damon_init_regions_of()' function to know why this is
+ * the comments of 'damon_init_vm_regions_of()' below to know why this is
  * necessary.
  *
  * Returns 0 if success, or negative error code otherwise.
@@ -436,7 +439,7 @@  static int damon_three_regions_of(struct damon_task *t,
  *   <BIG UNMAPPED REGION 2>
  *   <stack>
  */
-static void damon_init_regions_of(struct damon_ctx *c, struct damon_task *t)
+static void damon_init_vm_regions_of(struct damon_ctx *c, struct damon_task *t)
 {
 	struct damon_region *r, *m = NULL;
 	struct damon_addr_range regions[3];
@@ -465,12 +468,12 @@  static void damon_init_regions_of(struct damon_ctx *c, struct damon_task *t)
 }
 
 /* Initialize '->regions_list' of every task */
-static void kdamond_init_regions(struct damon_ctx *ctx)
+void kdamond_init_vm_regions(struct damon_ctx *ctx)
 {
 	struct damon_task *t;
 
 	damon_for_each_task(t, ctx)
-		damon_init_regions_of(ctx, t);
+		damon_init_vm_regions_of(ctx, t);
 }
 
 static void damon_mkold(struct mm_struct *mm, unsigned long addr)
@@ -1039,7 +1042,7 @@  static void damon_apply_three_regions(struct damon_ctx *ctx,
 /*
  * Update regions for current memory mappings
  */
-static void kdamond_update_regions(struct damon_ctx *ctx)
+void kdamond_update_vm_regions(struct damon_ctx *ctx)
 {
 	struct damon_addr_range three_regions[3];
 	struct damon_task *t;
@@ -1101,7 +1104,7 @@  static int kdamond_fn(void *data)
 	unsigned int max_nr_accesses = 0;
 
 	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
-	kdamond_init_regions(ctx);
+	ctx->init_target_regions(ctx);
 
 	kdamond_write_record_header(ctx);
 
@@ -1124,7 +1127,7 @@  static int kdamond_fn(void *data)
 		}
 
 		if (kdamond_need_update_regions(ctx))
-			kdamond_update_regions(ctx);
+			ctx->update_target_regions(ctx);
 	}
 	damon_flush_rbuffer(ctx);
 	damon_for_each_task(t, ctx) {