@@ -12,6 +12,48 @@
#include <linux/time64.h>
#include <linux/types.h>
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @list: List head for siblings.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ struct list_head list;
+};
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @id: Unique identifier for this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @id of each target should be unique among the targets of the context. For
+ * example, in the virtual address monitoring context, it could be a pidfd or
+ * an address of an mm_struct.
+ */
+struct damon_target {
+ unsigned long id;
+ struct list_head regions_list;
+ struct list_head list;
+};
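
A context, its targets, and their regions form a three-level hierarchy: a context owns targets, and each target owns regions. Below is a minimal sketch of building that hierarchy with the constructor helpers added later in this patch; the id value (42), the address range, and example_build_target() itself are placeholders for illustration only, not part of the patch.

/* Illustrative only: one target with one region, attached to @ctx. */
static int example_build_target(struct damon_ctx *ctx)
{
        struct damon_target *t = damon_new_target(42);  /* placeholder id */
        struct damon_region *r;

        if (!t)
                return -ENOMEM;
        r = damon_new_region(0x100000, 0x200000);       /* placeholder range */
        if (!r) {
                damon_free_target(t);
                return -ENOMEM;
        }
        damon_add_region(r, t);
        damon_add_target(ctx, t);
        return 0;
}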
+
struct damon_ctx;
/**
@@ -36,7 +78,7 @@ struct damon_ctx;
*
* @init should initialize primitive-internal data structures. For example,
* this could be used to construct proper monitoring target regions and link
- * those to @damon_ctx.target.
+ * those to @damon_ctx.region_targets.
* @update should update the primitive-internal data structures. For example,
* this could be used to update monitoring target regions for current status.
* @prepare_access_checks should manipulate the monitoring regions to be
@@ -130,7 +172,7 @@ struct damon_callback {
* @primitive: Set of monitoring primitives for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
*
- * @target: Pointer to the user-defined monitoring target.
+ * @region_targets: Head of monitoring targets (&damon_target) list.
*/
struct damon_ctx {
unsigned long sample_interval;
@@ -149,11 +191,40 @@ struct damon_ctx {
struct damon_primitive primitive;
struct damon_callback callback;
- void *target;
+ struct list_head region_targets;
};
+#define damon_next_region(r) \
+ (container_of(r->list.next, struct damon_region, list))
+
+#define damon_prev_region(r) \
+ (container_of(r->list.prev, struct damon_region, list))
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->region_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->region_targets, list)
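
To show how the traversal macros compose, the hypothetical helper below (not part of the patch) walks every region of every target in a context and sums the aggregated access counts.

/* Hypothetical helper: total nr_accesses over all regions of all targets. */
static unsigned long example_total_nr_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;
        unsigned long total = 0;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t)
                        total += r->nr_accesses;
        }
        return total;
}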
+
#ifdef CONFIG_DAMON
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next);
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r);
+
+struct damon_target *damon_new_target(unsigned long id);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t);
+
struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
@@ -15,6 +15,101 @@
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
+/*
+ * Construct a damon_region struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+struct damon_region *damon_new_region(unsigned long start, unsigned long end)
+{
+ struct damon_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ region->ar.start = start;
+ region->ar.end = end;
+ region->nr_accesses = 0;
+ INIT_LIST_HEAD(&region->list);
+
+ return region;
+}
+
+/*
+ * Add a region between two other regions
+ */
+inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t)
+{
+ list_add_tail(&r->list, &t->regions_list);
+}
+
+static void damon_del_region(struct damon_region *r)
+{
+ list_del(&r->list);
+}
+
+static void damon_free_region(struct damon_region *r)
+{
+ kfree(r);
+}
+
+void damon_destroy_region(struct damon_region *r)
+{
+ damon_del_region(r);
+ damon_free_region(r);
+}
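
damon_insert_region() makes in-place splitting cheap: a new region can be linked directly between an existing region and its successor without walking the list. A hedged sketch of such a split follows; example_split_region() is illustrative only and omits alignment and minimum-size checks.

/* Illustrative only: split @r into two regions at its midpoint. */
static int example_split_region(struct damon_region *r)
{
        struct damon_region *new;
        unsigned long mid = r->ar.start + (r->ar.end - r->ar.start) / 2;

        new = damon_new_region(mid, r->ar.end);
        if (!new)
                return -ENOMEM;
        r->ar.end = mid;
        damon_insert_region(new, r, damon_next_region(r));
        return 0;
}

Note that when @r is the last region, damon_next_region(r) resolves to the list head's container, but only its embedded list_head is dereferenced by __list_add(), so the insertion still lands right after @r.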
+
+/*
+ * Construct a damon_target struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+struct damon_target *damon_new_target(unsigned long id)
+{
+ struct damon_target *t;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return NULL;
+
+ t->id = id;
+ INIT_LIST_HEAD(&t->regions_list);
+
+ return t;
+}
+
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
+{
+ list_add_tail(&t->list, &ctx->region_targets);
+}
+
+static void damon_del_target(struct damon_target *t)
+{
+ list_del(&t->list);
+}
+
+void damon_free_target(struct damon_target *t)
+{
+ struct damon_region *r, *next;
+
+ damon_for_each_region_safe(r, next, t)
+ damon_free_region(r);
+ kfree(t);
+}
+
+void damon_destroy_target(struct damon_target *t)
+{
+ damon_del_target(t);
+ damon_free_target(t);
+}
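
The asymmetry between the two teardown helpers is worth keeping in mind: damon_free_target() only releases memory (the target and all of its regions), while damon_destroy_target() additionally unlinks the target from its list first, so the latter is the right call for targets already added to a context. A short illustrative sketch, with example_teardown() and the @unlinked parameter made up for this example:

/*
 * Illustrative only: a target that was never added to a context is just
 * freed; targets already on @ctx are unlinked and then freed.
 */
static void example_teardown(struct damon_ctx *ctx,
                struct damon_target *unlinked)
{
        struct damon_target *t, *next;

        damon_free_target(unlinked);
        damon_for_each_target_safe(t, next, ctx)
                damon_destroy_target(t);
}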
+
struct damon_ctx *damon_new_ctx(void)
{
struct damon_ctx *ctx;
@@ -32,15 +127,27 @@ struct damon_ctx *damon_new_ctx(void)
mutex_init(&ctx->kdamond_lock);
- ctx->target = NULL;
+ INIT_LIST_HEAD(&ctx->region_targets);
return ctx;
}
-void damon_destroy_ctx(struct damon_ctx *ctx)
+static void damon_destroy_targets(struct damon_ctx *ctx)
{
- if (ctx->primitive.cleanup)
+ struct damon_target *t, *next_t;
+
+ if (ctx->primitive.cleanup) {
ctx->primitive.cleanup(ctx);
+ return;
+ }
+
+ damon_for_each_target_safe(t, next_t, ctx)
+ damon_destroy_target(t);
+}
+
+void damon_destroy_ctx(struct damon_ctx *ctx)
+{
+ damon_destroy_targets(ctx);
kfree(ctx);
}
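
With the target list now owned by the context, damon_destroy_ctx() can tear everything down in one call: it delegates to the primitive's cleanup() when one is registered, and otherwise destroys the targets itself. A minimal usage sketch, assuming no primitive has been configured, with example_ctx_lifecycle() made up for illustration:

/* Minimal sketch: no primitive.cleanup is set, so damon_destroy_ctx()
 * falls back to destroying the targets (and their regions) itself. */
static int example_ctx_lifecycle(void)
{
        struct damon_ctx *ctx = damon_new_ctx();
        struct damon_target *t;

        if (!ctx)
                return -ENOMEM;
        t = damon_new_target(1);        /* placeholder id */
        if (!t) {
                damon_destroy_ctx(ctx);
                return -ENOMEM;
        }
        damon_add_target(ctx, t);
        damon_destroy_ctx(ctx);
        return 0;
}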
@@ -217,6 +324,21 @@ static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
ctx->aggr_interval);
}
+/*
+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
+ */
+static void kdamond_reset_aggregated(struct damon_ctx *c)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, c) {
+ struct damon_region *r;
+
+ damon_for_each_region(r, t)
+ r->nr_accesses = 0;
+ }
+}
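
Because kdamond_fn() invokes the after_aggregation callback before calling kdamond_reset_aggregated() (see the main loop further below), that callback is the natural place to consume nr_accesses before it is zeroed. A hedged sketch of such a callback follows; example_after_aggregation() is made up, and its int-returning, context-taking signature is assumed from the call site in kdamond_fn().

/* Illustrative callback: log per-region results just before the reset. */
static int example_after_aggregation(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t)
                        pr_debug("%lu-%lu: %u\n", r->ar.start, r->ar.end,
                                        r->nr_accesses);
        }
        return 0;       /* non-zero asks kdamond to stop */
}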
+
/*
* Check whether it is time to check and apply the target monitoring regions
*
@@ -238,6 +360,7 @@ static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
*/
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
+ struct damon_target *t;
bool stop;
mutex_lock(&ctx->kdamond_lock);
@@ -249,7 +372,12 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
if (!ctx->primitive.target_valid)
return false;
- return !ctx->primitive.target_valid(ctx->target);
+ damon_for_each_target(t, ctx) {
+ if (ctx->primitive.target_valid(t))
+ return false;
+ }
+
+ return true;
}
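
Since monitoring now continues as long as any single target remains valid, each primitive supplies a target_valid() check. A minimal hypothetical check is sketched below; example_target_valid() is not part of the patch, and the void pointer parameter is an assumption based on the target pointer passed at the call site above.

/* Hypothetical check: a target stays valid while it has regions left.
 * A real primitive would check address-space state instead, e.g. whether
 * the monitored process still exists. */
static bool example_target_valid(void *target)
{
        struct damon_target *t = target;

        return !list_empty(&t->regions_list);
}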
static void set_kdamond_stop(struct damon_ctx *ctx)
@@ -265,6 +393,8 @@ static void set_kdamond_stop(struct damon_ctx *ctx)
static int kdamond_fn(void *data)
{
struct damon_ctx *ctx = (struct damon_ctx *)data;
+ struct damon_target *t;
+ struct damon_region *r, *next;
pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
@@ -289,6 +419,7 @@ static int kdamond_fn(void *data)
if (ctx->callback.after_aggregation &&
ctx->callback.after_aggregation(ctx))
set_kdamond_stop(ctx);
+ kdamond_reset_aggregated(ctx);
if (ctx->primitive.reset_aggregated)
ctx->primitive.reset_aggregated(ctx);
}
@@ -298,6 +429,10 @@ static int kdamond_fn(void *data)
ctx->primitive.update(ctx);
}
}
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region_safe(r, next, t)
+ damon_destroy_region(r);
+ }
if (ctx->callback.before_terminate &&
ctx->callback.before_terminate(ctx))