@@ -66,6 +66,14 @@ struct damon_task {
* in case of virtual memory monitoring) and applies the changes for each
* @regions_update_interval. All time intervals are in micro-seconds.
*
+ * @rbuf: In-memory buffer for monitoring result recording.
+ * @rbuf_len: The length of @rbuf.
+ * @rbuf_offset: The offset for next write to @rbuf.
+ * @rfile_path: Record file path.
+ *
+ * If @rbuf, @rbuf_len, and @rfile_path are set, the monitoring results are
+ * automatically stored in the file at @rfile_path.
+ *
* @kdamond: Kernel thread who does the monitoring.
* @kdamond_stop: Notifies whether kdamond should stop.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
@@ -100,6 +108,11 @@ struct damon_ctx {
struct timespec64 last_aggregation;
struct timespec64 last_regions_update;
+ unsigned char *rbuf;
+ unsigned int rbuf_len;
+ unsigned int rbuf_offset;
+ char *rfile_path;
+
struct task_struct *kdamond;
bool kdamond_stop;
struct mutex kdamond_lock;
@@ -115,6 +128,8 @@ int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long regions_update_int,
unsigned long min_nr_reg, unsigned long max_nr_reg);
+int damon_set_recording(struct damon_ctx *ctx,
+ unsigned int rbuf_len, char *rfile_path);
int damon_start(struct damon_ctx *ctx);
int damon_stop(struct damon_ctx *ctx);
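As a usage sketch (not part of the patch): a caller that already holds an initialized struct damon_ctx could enable recording by combining the calls declared above. The helper name and every parameter value below are illustrative assumptions.

/*
 * Hypothetical example: enable recording before starting the monitoring.
 * The attribute values, buffer length, and file path are arbitrary.
 */
static int example_start_recorded_monitoring(struct damon_ctx *ctx,
		int *pids, ssize_t nr_pids)
{
	int err;

	/* sample every 5ms, aggregate every 100ms, update regions every 1s */
	err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
	if (err)
		return err;

	/* target tasks to monitor */
	err = damon_set_pids(ctx, pids, nr_pids);
	if (err)
		return err;

	/* 1 MiB record buffer, flushed to /damon.data whenever it fills up */
	err = damon_set_recording(ctx, 1024 * 1024, "/damon.data");
	if (err)
		return err;

	return damon_start(ctx);
}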
@@ -58,6 +58,9 @@
#define damon_for_each_task_safe(t, next, ctx) \
list_for_each_entry_safe(t, next, &(ctx)->tasks_list, list)
+#define MAX_RECORD_BUFFER_LEN (4 * 1024 * 1024)
+#define MAX_RFILE_PATH_LEN 256
+
/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32() % (r - l))
@@ -676,16 +679,80 @@ static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
}
/*
- * Reset the aggregated monitoring results
+ * Flush the content of the result buffer to the result file
+ */
+static void damon_flush_rbuffer(struct damon_ctx *ctx)
+{
+ ssize_t sz;
+ loff_t pos = 0;
+ struct file *rfile;
+
+ rfile = filp_open(ctx->rfile_path, O_CREAT | O_RDWR | O_APPEND, 0644);
+ if (IS_ERR(rfile)) {
+ pr_err("Cannot open the result file %s\n",
+ ctx->rfile_path);
+ return;
+ }
+
+ while (ctx->rbuf_offset) {
+ sz = kernel_write(rfile, ctx->rbuf, ctx->rbuf_offset, &pos);
+ if (sz < 0)
+ break;
+ ctx->rbuf_offset -= sz;
+ }
+ filp_close(rfile, NULL);
+}
+
+/*
+ * Write data into the result buffer
+ */
+static void damon_write_rbuf(struct damon_ctx *ctx, void *data, ssize_t size)
+{
+ if (!ctx->rbuf_len || !ctx->rbuf)
+ return;
+ if (ctx->rbuf_offset + size > ctx->rbuf_len)
+ damon_flush_rbuffer(ctx);
+
+ memcpy(&ctx->rbuf[ctx->rbuf_offset], data, size);
+ ctx->rbuf_offset += size;
+}
+
+/*
+ * Flush the aggregated monitoring results to the result buffer
+ *
+ * Stores the current tracking results in the result buffer and resets
+ * 'nr_accesses' of each region. The format of the result buffer is as below:
+ *
+ * <time> <number of tasks> <array of task infos>
+ *
+ * task info: <pid> <number of regions> <array of region infos>
+ * region info: <start address> <end address> <nr_accesses>
*/
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
struct damon_task *t;
- struct damon_region *r;
+ struct timespec64 now;
+ unsigned int nr;
+
+ ktime_get_coarse_ts64(&now);
+
+ damon_write_rbuf(c, &now, sizeof(struct timespec64));
+ nr = nr_damon_tasks(c);
+ damon_write_rbuf(c, &nr, sizeof(nr));
damon_for_each_task(t, c) {
- damon_for_each_region(r, t)
+ struct damon_region *r;
+
+ damon_write_rbuf(c, &t->pid, sizeof(t->pid));
+ nr = nr_damon_regions(t);
+ damon_write_rbuf(c, &nr, sizeof(nr));
+ damon_for_each_region(r, t) {
+ damon_write_rbuf(c, &r->vm_start, sizeof(r->vm_start));
+ damon_write_rbuf(c, &r->vm_end, sizeof(r->vm_end));
+ damon_write_rbuf(c, &r->nr_accesses,
+ sizeof(r->nr_accesses));
r->nr_accesses = 0;
+ }
}
}
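To make the layout above concrete, here is a sketch of how a user-space tool might decode one aggregation snapshot. It is not part of the patch; read_exact() and decode_snapshot() are made-up names, and the field widths (struct timespec64 as two 64-bit words, pid and the counters as 32 bits, addresses as 64 bits) assume a 64-bit kernel and a decoder running on the same machine.

#include <stdint.h>
#include <stdio.h>

/* read exactly sz bytes or fail */
static int read_exact(FILE *f, void *buf, size_t sz)
{
	return fread(buf, 1, sz, f) == sz ? 0 : -1;
}

/* decode one <time> <number of tasks> <array of task infos> snapshot */
static int decode_snapshot(FILE *f)
{
	int64_t sec, nsec;
	uint32_t nr_tasks, nr_regions, nr_accesses, t, r;
	int32_t pid;
	uint64_t start, end;

	if (read_exact(f, &sec, 8) || read_exact(f, &nsec, 8) ||
	    read_exact(f, &nr_tasks, 4))
		return -1;
	printf("%lld.%09lld: %u tasks\n",
	       (long long)sec, (long long)nsec, nr_tasks);
	for (t = 0; t < nr_tasks; t++) {
		if (read_exact(f, &pid, 4) || read_exact(f, &nr_regions, 4))
			return -1;
		printf("  pid %d: %u regions\n", pid, nr_regions);
		for (r = 0; r < nr_regions; r++) {
			if (read_exact(f, &start, 8) ||
			    read_exact(f, &end, 8) ||
			    read_exact(f, &nr_accesses, 4))
				return -1;
			printf("    %#llx-%#llx: %u accesses\n",
			       (unsigned long long)start,
			       (unsigned long long)end, nr_accesses);
		}
	}
	return 0;
}

The record file itself additionally starts with a format header; that part is covered after kdamond_write_record_header() below.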
@@ -866,6 +933,14 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
return true;
}
+static void kdamond_write_record_header(struct damon_ctx *ctx)
+{
+ int recfmt_ver = 1;
+
+ damon_write_rbuf(ctx, "damon_recfmt_ver", 16);
+ damon_write_rbuf(ctx, &recfmt_ver, sizeof(recfmt_ver));
+}
+
/*
* The monitoring daemon that runs as a kernel thread
*/
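Complementing the snapshot decoder sketched earlier, the record file begins with the fixed header written by kdamond_write_record_header() above: the 16-byte magic string "damon_recfmt_ver" with no terminating NUL, followed by the format version as a native-endian integer (its 4-byte width is an assumption about the kernel's int). A hypothetical user-space check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* consume and validate the record-file header written by the kernel */
static int check_record_header(FILE *f, uint32_t *version)
{
	char magic[16];

	if (fread(magic, 1, sizeof(magic), f) != sizeof(magic) ||
	    memcmp(magic, "damon_recfmt_ver", sizeof(magic)))
		return -1;
	if (fread(version, 1, sizeof(*version), f) != sizeof(*version))
		return -1;
	return 0;
}

A decoder would call this once at the start of the file and then read snapshots until EOF.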
@@ -878,6 +953,9 @@ static int kdamond_fn(void *data)
pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
kdamond_init_regions(ctx);
+
+ kdamond_write_record_header(ctx);
+
while (!kdamond_need_stop(ctx)) {
kdamond_prepare_access_checks(ctx);
if (ctx->sample_cb)
@@ -898,6 +976,7 @@ static int kdamond_fn(void *data)
if (kdamond_need_update_regions(ctx))
kdamond_update_regions(ctx);
}
+ damon_flush_rbuffer(ctx);
damon_for_each_task(t, ctx) {
damon_for_each_region_safe(r, next, t)
damon_destroy_region(r);
@@ -1000,6 +1079,52 @@ int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids)
return 0;
}
+/**
+ * damon_set_recording() - Set attributes for the recording.
+ * @ctx: target kdamond context
+ * @rbuf_len: length of the result buffer in bytes
+ * @rfile_path: path to the monitoring result file
+ *
+ * Setting @rbuf_len to 0 disables the recording.
+ *
+ * This function should not be called while the kdamond is running.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_set_recording(struct damon_ctx *ctx,
+ unsigned int rbuf_len, char *rfile_path)
+{
+ size_t rfile_path_len;
+
+ if (rbuf_len > MAX_RECORD_BUFFER_LEN) {
+ pr_err("too long (>%d) result buffer length\n",
+ MAX_RECORD_BUFFER_LEN);
+ return -EINVAL;
+ }
+ rfile_path_len = strnlen(rfile_path, MAX_RFILE_PATH_LEN);
+ if (rfile_path_len >= MAX_RFILE_PATH_LEN) {
+ pr_err("too long (>%d) result file path %s\n",
+ MAX_RFILE_PATH_LEN, rfile_path);
+ return -EINVAL;
+ }
+ ctx->rbuf_len = rbuf_len;
+ kvfree(ctx->rbuf);
+ kfree(ctx->rfile_path);
+ ctx->rfile_path = NULL;
+ if (!rbuf_len) {
+ ctx->rbuf = NULL;
+ } else {
+ ctx->rbuf = kvmalloc(rbuf_len, GFP_KERNEL);
+ if (!ctx->rbuf)
+ return -ENOMEM;
+ }
+ ctx->rfile_path = kmalloc(rfile_path_len + 1, GFP_KERNEL);
+ if (!ctx->rfile_path)
+ return -ENOMEM;
+ strncpy(ctx->rfile_path, rfile_path, rfile_path_len + 1);
+ return 0;
+}
+
/**
* damon_set_attrs() - Set attributes for the monitoring.
* @ctx: monitoring context