diff mbox series

[v5,bpf-next,1/4] bpf: introduce task_vma bpf_iter

Message ID 20210208225255.3089073-2-songliubraving@fb.com (mailing list archive)
State New, archived
Headers show
Series introduce bpf_iter for task_vma | expand

Commit Message

Song Liu Feb. 8, 2021, 10:52 p.m. UTC
Introduce task_vma bpf_iter to print memory information of a process. It
can be used to print customized information similar to /proc/<pid>/maps.

Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
vma's of a process. However, this information is not flexible enough to
cover all use cases. For example, if a vma covers mixed 2MB pages and 4kB
pages (x86_64), there is no easy way to tell which address ranges are
backed by 2MB pages. task_vma solves the problem by enabling the user to
generate customized information based on the vma (and vma->vm_mm,
vma->vm_file, etc.).

To access the vma safely in the BPF program, task_vma iterator holds
target mmap_lock while calling the BPF program. If the mmap_lock is
contended, task_vma unlocks mmap_lock between iterations to unblock the
writer(s). This lock contention avoidance mechanism is similar to the one
used in show_smaps_rollup().

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 216 insertions(+), 1 deletion(-)

Comments

Yonghong Song Feb. 8, 2021, 11:12 p.m. UTC | #1
On 2/8/21 2:52 PM, Song Liu wrote:
> Introduce task_vma bpf_iter to print memory information of a process. It
> can be used to print customized information similar to /proc/<pid>/maps.
> 
> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
> vma's of a process. However, these information are not flexible enough to
> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
> pages (x86_64), there is no easy way to tell which address ranges are
> backed by 2MB pages. task_vma solves the problem by enabling the user to
> generate customize information based on the vma (and vma->vm_mm,
> vma->vm_file, etc.).
> 
> To access the vma safely in the BPF program, task_vma iterator holds
> target mmap_lock while calling the BPF program. If the mmap_lock is
> contended, task_vma unlocks mmap_lock between iterations to unblock the
> writer(s). This lock contention avoidance mechanism is similar to the one
> used in show_smaps_rollup().
> 
> Signed-off-by: Song Liu <songliubraving@fb.com>

Acked-by: Yonghong Song <yhs@fb.com>
Alexei Starovoitov Feb. 9, 2021, 9:30 p.m. UTC | #2
On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
> Introduce task_vma bpf_iter to print memory information of a process. It
> can be used to print customized information similar to /proc/<pid>/maps.
> 
> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
> vma's of a process. However, these information are not flexible enough to
> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
> pages (x86_64), there is no easy way to tell which address ranges are
> backed by 2MB pages. task_vma solves the problem by enabling the user to
> generate customize information based on the vma (and vma->vm_mm,
> vma->vm_file, etc.).
> 
> To access the vma safely in the BPF program, task_vma iterator holds
> target mmap_lock while calling the BPF program. If the mmap_lock is
> contended, task_vma unlocks mmap_lock between iterations to unblock the
> writer(s). This lock contention avoidance mechanism is similar to the one
> used in show_smaps_rollup().
> 
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
>  kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 216 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
> index 175b7b42bfc46..a0d469f0f481c 100644
> --- a/kernel/bpf/task_iter.c
> +++ b/kernel/bpf/task_iter.c
> @@ -286,9 +286,198 @@ static const struct seq_operations task_file_seq_ops = {
>  	.show	= task_file_seq_show,
>  };
>  
> +struct bpf_iter_seq_task_vma_info {
> +	/* The first field must be struct bpf_iter_seq_task_common.
> +	 * this is assumed by {init, fini}_seq_pidns() callback functions.
> +	 */
> +	struct bpf_iter_seq_task_common common;
> +	struct task_struct *task;
> +	struct vm_area_struct *vma;
> +	u32 tid;
> +	unsigned long prev_vm_start;
> +	unsigned long prev_vm_end;
> +};
> +
> +enum bpf_task_vma_iter_find_op {
> +	task_vma_iter_first_vma,   /* use mm->mmap */
> +	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
> +	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
> +};
> +
> +static struct vm_area_struct *
> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
> +{
> +	struct pid_namespace *ns = info->common.ns;
> +	enum bpf_task_vma_iter_find_op op;
> +	struct vm_area_struct *curr_vma;
> +	struct task_struct *curr_task;
> +	u32 curr_tid = info->tid;
> +
> +	/* If this function returns a non-NULL vma, it holds a reference to
> +	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
> +	 * If this function returns NULL, it does not hold any reference or
> +	 * lock.
> +	 */
> +	if (info->task) {
> +		curr_task = info->task;
> +		curr_vma = info->vma;
> +		/* In case of lock contention, drop mmap_lock to unblock
> +		 * the writer.
> +		 */
> +		if (mmap_lock_is_contended(curr_task->mm)) {
> +			info->prev_vm_start = curr_vma->vm_start;
> +			info->prev_vm_end = curr_vma->vm_end;
> +			op = task_vma_iter_find_vma;
> +			mmap_read_unlock(curr_task->mm);
> +			if (mmap_read_lock_killable(curr_task->mm))
> +				goto finish;

in case of contention the vma will be seen by bpf prog again?
It looks like the 4 cases of overlapping vmas (after newly acquired lock)
that show_smaps_rollup() is dealing with are not handled here?

> +		} else {
> +			op = task_vma_iter_next_vma;
> +		}
> +	} else {
> +again:
> +		curr_task = task_seq_get_next(ns, &curr_tid, true);
> +		if (!curr_task) {
> +			info->tid = curr_tid + 1;
> +			goto finish;
> +		}
> +
> +		if (curr_tid != info->tid) {
> +			info->tid = curr_tid;
> +			op = task_vma_iter_first_vma;
> +		} else {
> +			op = task_vma_iter_find_vma;

what will happen if there was no contention on the lock and no seq_stop
when this line was hit and set op = find_vma; ?
If I'm reading this correctly prev_vm_start/end could still
belong to some previous task.
My understanding is that if read buffer is big the bpf_seq_read()
will keep doing while(space in buffer) {seq->op->show(), seq->op->next();}
and task_vma_seq_get_next() will iterate over all vmas of one task and
will proceed into the next task, but if there was no contention and no stop
then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) will be lucky
and will go into first vma of the new task) or prev_vm_end is some address
of some previous task's vma. In this case find_vma may return wrong vma
for the new task.
It seems to me prev_vm_end/start should be set by this task_vma_seq_get_next()
function instead of relying on stop callback.

> +		}
> +
> +		if (!curr_task->mm)
> +			goto next_task;
> +
> +		if (mmap_read_lock_killable(curr_task->mm))
> +			goto finish;
> +	}
> +
> +	switch (op) {
> +	case task_vma_iter_first_vma:
> +		curr_vma = curr_task->mm->mmap;
> +		break;
> +	case task_vma_iter_next_vma:
> +		curr_vma = curr_vma->vm_next;
> +		break;
> +	case task_vma_iter_find_vma:
> +		/* We dropped mmap_lock so it is necessary to use find_vma
> +		 * to find the next vma. This is similar to the  mechanism
> +		 * in show_smaps_rollup().
> +		 */
> +		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
> +
> +		if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
> +			curr_vma = curr_vma->vm_next;
> +		break;
> +	}
> +	if (!curr_vma) {
> +		mmap_read_unlock(curr_task->mm);
> +		goto next_task;
> +	}
> +	info->task = curr_task;
> +	info->vma = curr_vma;
> +	return curr_vma;
> +
> +next_task:
> +	put_task_struct(curr_task);
> +	info->task = NULL;
> +	curr_tid++;
> +	goto again;
> +
> +finish:
> +	if (curr_task)
> +		put_task_struct(curr_task);
> +	info->task = NULL;
> +	info->vma = NULL;
> +	return NULL;
> +}
> +
> +static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> +	struct bpf_iter_seq_task_vma_info *info = seq->private;
> +	struct vm_area_struct *vma;
> +
> +	vma = task_vma_seq_get_next(info);
> +	if (vma && *pos == 0)
> +		++*pos;
> +
> +	return vma;
> +}
> +
> +static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> +{
> +	struct bpf_iter_seq_task_vma_info *info = seq->private;
> +
> +	++*pos;
> +	return task_vma_seq_get_next(info);
> +}
> +
> +struct bpf_iter__task_vma {
> +	__bpf_md_ptr(struct bpf_iter_meta *, meta);
> +	__bpf_md_ptr(struct task_struct *, task);
> +	__bpf_md_ptr(struct vm_area_struct *, vma);
> +};
> +
> +DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
> +		     struct task_struct *task, struct vm_area_struct *vma)
> +
> +static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
> +{
> +	struct bpf_iter_seq_task_vma_info *info = seq->private;
> +	struct bpf_iter__task_vma ctx;
> +	struct bpf_iter_meta meta;
> +	struct bpf_prog *prog;
> +
> +	meta.seq = seq;
> +	prog = bpf_iter_get_info(&meta, in_stop);
> +	if (!prog)
> +		return 0;
> +
> +	ctx.meta = &meta;
> +	ctx.task = info->task;
> +	ctx.vma = info->vma;
> +	return bpf_iter_run_prog(prog, &ctx);
> +}
> +
> +static int task_vma_seq_show(struct seq_file *seq, void *v)
> +{
> +	return __task_vma_seq_show(seq, false);
> +}
> +
> +static void task_vma_seq_stop(struct seq_file *seq, void *v)
> +{
> +	struct bpf_iter_seq_task_vma_info *info = seq->private;
> +
> +	if (!v) {
> +		(void)__task_vma_seq_show(seq, true);
> +	} else {
> +		/* Set prev_vm_start to ~0UL, so that we don't skip the
> +		 * vma returned by the next find_vma(). Please refer to
> +		 * case task_vma_iter_find_vma in task_vma_seq_get_next().
> +		 */
> +		info->prev_vm_start = ~0UL;
> +		info->prev_vm_end = info->vma->vm_end;
> +		mmap_read_unlock(info->task->mm);
> +		put_task_struct(info->task);
> +		info->task = NULL;
> +	}
> +}
> +
> +static const struct seq_operations task_vma_seq_ops = {
> +	.start	= task_vma_seq_start,
> +	.next	= task_vma_seq_next,
> +	.stop	= task_vma_seq_stop,
> +	.show	= task_vma_seq_show,
> +};
> +
>  BTF_ID_LIST(btf_task_file_ids)
>  BTF_ID(struct, task_struct)
>  BTF_ID(struct, file)
> +BTF_ID(struct, vm_area_struct)
>  
>  static const struct bpf_iter_seq_info task_seq_info = {
>  	.seq_ops		= &task_seq_ops,
> @@ -328,6 +517,26 @@ static struct bpf_iter_reg task_file_reg_info = {
>  	.seq_info		= &task_file_seq_info,
>  };
>  
> +static const struct bpf_iter_seq_info task_vma_seq_info = {
> +	.seq_ops		= &task_vma_seq_ops,
> +	.init_seq_private	= init_seq_pidns,
> +	.fini_seq_private	= fini_seq_pidns,
> +	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_vma_info),
> +};
> +
> +static struct bpf_iter_reg task_vma_reg_info = {
> +	.target			= "task_vma",
> +	.feature		= BPF_ITER_RESCHED,
> +	.ctx_arg_info_size	= 2,
> +	.ctx_arg_info		= {
> +		{ offsetof(struct bpf_iter__task_vma, task),
> +		  PTR_TO_BTF_ID_OR_NULL },
> +		{ offsetof(struct bpf_iter__task_vma, vma),
> +		  PTR_TO_BTF_ID_OR_NULL },
> +	},
> +	.seq_info		= &task_vma_seq_info,
> +};
> +
>  static int __init task_iter_init(void)
>  {
>  	int ret;
> @@ -339,6 +548,12 @@ static int __init task_iter_init(void)
>  
>  	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
>  	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
> -	return bpf_iter_reg_target(&task_file_reg_info);
> +	ret =  bpf_iter_reg_target(&task_file_reg_info);
> +	if (ret)
> +		return ret;
> +
> +	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
> +	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[2];
> +	return bpf_iter_reg_target(&task_vma_reg_info);
>  }
>  late_initcall(task_iter_init);
> -- 
> 2.24.1
> 

--
Song Liu Feb. 9, 2021, 10:08 p.m. UTC | #3
> On Feb 9, 2021, at 1:30 PM, Alexei Starovoitov <alexei.starovoitov@gmail.com> wrote:
> 
> On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
>> Introduce task_vma bpf_iter to print memory information of a process. It
>> can be used to print customized information similar to /proc/<pid>/maps.
>> 
>> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
>> vma's of a process. However, these information are not flexible enough to
>> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
>> pages (x86_64), there is no easy way to tell which address ranges are
>> backed by 2MB pages. task_vma solves the problem by enabling the user to
>> generate customize information based on the vma (and vma->vm_mm,
>> vma->vm_file, etc.).
>> 
>> To access the vma safely in the BPF program, task_vma iterator holds
>> target mmap_lock while calling the BPF program. If the mmap_lock is
>> contended, task_vma unlocks mmap_lock between iterations to unblock the
>> writer(s). This lock contention avoidance mechanism is similar to the one
>> used in show_smaps_rollup().
>> 
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>> kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>> 1 file changed, 216 insertions(+), 1 deletion(-)
>> 
>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>> index 175b7b42bfc46..a0d469f0f481c 100644
>> --- a/kernel/bpf/task_iter.c
>> +++ b/kernel/bpf/task_iter.c
>> @@ -286,9 +286,198 @@ static const struct seq_operations task_file_seq_ops = {
>> 	.show	= task_file_seq_show,
>> };
>> 
>> +struct bpf_iter_seq_task_vma_info {
>> +	/* The first field must be struct bpf_iter_seq_task_common.
>> +	 * this is assumed by {init, fini}_seq_pidns() callback functions.
>> +	 */
>> +	struct bpf_iter_seq_task_common common;
>> +	struct task_struct *task;
>> +	struct vm_area_struct *vma;
>> +	u32 tid;
>> +	unsigned long prev_vm_start;
>> +	unsigned long prev_vm_end;
>> +};
>> +
>> +enum bpf_task_vma_iter_find_op {
>> +	task_vma_iter_first_vma,   /* use mm->mmap */
>> +	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
>> +	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
>> +};
>> +
>> +static struct vm_area_struct *
>> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
>> +{
>> +	struct pid_namespace *ns = info->common.ns;
>> +	enum bpf_task_vma_iter_find_op op;
>> +	struct vm_area_struct *curr_vma;
>> +	struct task_struct *curr_task;
>> +	u32 curr_tid = info->tid;
>> +
>> +	/* If this function returns a non-NULL vma, it holds a reference to
>> +	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
>> +	 * If this function returns NULL, it does not hold any reference or
>> +	 * lock.
>> +	 */
>> +	if (info->task) {
>> +		curr_task = info->task;
>> +		curr_vma = info->vma;
>> +		/* In case of lock contention, drop mmap_lock to unblock
>> +		 * the writer.
>> +		 */
>> +		if (mmap_lock_is_contended(curr_task->mm)) {
>> +			info->prev_vm_start = curr_vma->vm_start;
>> +			info->prev_vm_end = curr_vma->vm_end;
>> +			op = task_vma_iter_find_vma;
>> +			mmap_read_unlock(curr_task->mm);
>> +			if (mmap_read_lock_killable(curr_task->mm))
>> +				goto finish;
> 
> in case of contention the vma will be seen by bpf prog again?
> It looks like the 4 cases of overlaping vmas (after newly acquired lock)
> that show_smaps_rollup() is dealing with are not handled here?

I am not sure I am following here. The logic below should avoid showing 
the same vma again:
 
	curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
	if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
		curr_vma = curr_vma->vm_next;

This logic handles case 1, 2, 3 same as show_smaps_rollup(). For case 4, 
this logic skips the changed vma (from [prev_vm_start, prev_vm_end] to 
[prev_vm_start, prev_vm_end + something]); while show_smaps_rollup() will
process the new vma.  I think skipping or processing the new vma are both 
correct, as we already processed part of it [prev_vm_start, prev_vm_end] 
once. 

> 
>> +		} else {
>> +			op = task_vma_iter_next_vma;
>> +		}
>> +	} else {
>> +again:
>> +		curr_task = task_seq_get_next(ns, &curr_tid, true);
>> +		if (!curr_task) {
>> +			info->tid = curr_tid + 1;
>> +			goto finish;
>> +		}
>> +
>> +		if (curr_tid != info->tid) {
>> +			info->tid = curr_tid;
>> +			op = task_vma_iter_first_vma;
>> +		} else {
>> +			op = task_vma_iter_find_vma;
> 
> what will happen if there was no contetion on the lock and no seq_stop
> when this line was hit and set op = find_vma; ?
> If I'm reading this correctly prev_vm_start/end could still
> belong to some previous task.

In that case, we should be in "curr_tid != info->tid" path, no? 

> My understanding that if read buffer is big the bpf_seq_read()
> will keep doing while(space in buffer) {seq->op->show(), seq->op->next();}
> and task_vma_seq_get_next() will iterate over all vmas of one task and
> will proceed into the next task, but if there was no contention and no stop
> then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) will be lucky
> and will go into first vma of the new task) or perf_vm_end is some address
> of some previous task's vma. In this case find_vma may return wrong vma
> for the new task.
> It seems to me prev_vm_end/start should be set by this task_vma_seq_get_next()
> function instead of relying on stop callback.
> 
>> +		}
>> +
>> +		if (!curr_task->mm)
>> +			goto next_task;
>> +
>> +		if (mmap_read_lock_killable(curr_task->mm))
>> +			goto finish;
>> +	}
>> +
>> +	switch (op) {
>> +	case task_vma_iter_first_vma:
>> +		curr_vma = curr_task->mm->mmap;
>> +		break;
>> +	case task_vma_iter_next_vma:
>> +		curr_vma = curr_vma->vm_next;
>> +		break;
>> +	case task_vma_iter_find_vma:
>> +		/* We dropped mmap_lock so it is necessary to use find_vma
>> +		 * to find the next vma. This is similar to the  mechanism
>> +		 * in show_smaps_rollup().
>> +		 */
>> +		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
>> +
>> +		if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
>> +			curr_vma = curr_vma->vm_next;
>> +		break;
>> +	}
>> +	if (!curr_vma) {
>> +		mmap_read_unlock(curr_task->mm);
>> +		goto next_task;
>> +	}
>> +	info->task = curr_task;
>> +	info->vma = curr_vma;
>> +	return curr_vma;
>> +
>> +next_task:
>> +	put_task_struct(curr_task);
>> +	info->task = NULL;
>> +	curr_tid++;
>> +	goto again;
>> +
>> +finish:
>> +	if (curr_task)
>> +		put_task_struct(curr_task);
>> +	info->task = NULL;
>> +	info->vma = NULL;
>> +	return NULL;
>> +}

[...]
Alexei Starovoitov Feb. 10, 2021, 3 a.m. UTC | #4
On 2/9/21 2:08 PM, Song Liu wrote:
> 
> 
>> On Feb 9, 2021, at 1:30 PM, Alexei Starovoitov <alexei.starovoitov@gmail.com> wrote:
>>
>> On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
>>> Introduce task_vma bpf_iter to print memory information of a process. It
>>> can be used to print customized information similar to /proc/<pid>/maps.
>>>
>>> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
>>> vma's of a process. However, these information are not flexible enough to
>>> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
>>> pages (x86_64), there is no easy way to tell which address ranges are
>>> backed by 2MB pages. task_vma solves the problem by enabling the user to
>>> generate customize information based on the vma (and vma->vm_mm,
>>> vma->vm_file, etc.).
>>>
>>> To access the vma safely in the BPF program, task_vma iterator holds
>>> target mmap_lock while calling the BPF program. If the mmap_lock is
>>> contended, task_vma unlocks mmap_lock between iterations to unblock the
>>> writer(s). This lock contention avoidance mechanism is similar to the one
>>> used in show_smaps_rollup().
>>>
>>> Signed-off-by: Song Liu <songliubraving@fb.com>
>>> ---
>>> kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>>> 1 file changed, 216 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>>> index 175b7b42bfc46..a0d469f0f481c 100644
>>> --- a/kernel/bpf/task_iter.c
>>> +++ b/kernel/bpf/task_iter.c
>>> @@ -286,9 +286,198 @@ static const struct seq_operations task_file_seq_ops = {
>>> 	.show	= task_file_seq_show,
>>> };
>>>
>>> +struct bpf_iter_seq_task_vma_info {
>>> +	/* The first field must be struct bpf_iter_seq_task_common.
>>> +	 * this is assumed by {init, fini}_seq_pidns() callback functions.
>>> +	 */
>>> +	struct bpf_iter_seq_task_common common;
>>> +	struct task_struct *task;
>>> +	struct vm_area_struct *vma;
>>> +	u32 tid;
>>> +	unsigned long prev_vm_start;
>>> +	unsigned long prev_vm_end;
>>> +};
>>> +
>>> +enum bpf_task_vma_iter_find_op {
>>> +	task_vma_iter_first_vma,   /* use mm->mmap */
>>> +	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
>>> +	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
>>> +};
>>> +
>>> +static struct vm_area_struct *
>>> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
>>> +{
>>> +	struct pid_namespace *ns = info->common.ns;
>>> +	enum bpf_task_vma_iter_find_op op;
>>> +	struct vm_area_struct *curr_vma;
>>> +	struct task_struct *curr_task;
>>> +	u32 curr_tid = info->tid;
>>> +
>>> +	/* If this function returns a non-NULL vma, it holds a reference to
>>> +	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
>>> +	 * If this function returns NULL, it does not hold any reference or
>>> +	 * lock.
>>> +	 */
>>> +	if (info->task) {
>>> +		curr_task = info->task;
>>> +		curr_vma = info->vma;
>>> +		/* In case of lock contention, drop mmap_lock to unblock
>>> +		 * the writer.
>>> +		 */
>>> +		if (mmap_lock_is_contended(curr_task->mm)) {
>>> +			info->prev_vm_start = curr_vma->vm_start;
>>> +			info->prev_vm_end = curr_vma->vm_end;
>>> +			op = task_vma_iter_find_vma;
>>> +			mmap_read_unlock(curr_task->mm);
>>> +			if (mmap_read_lock_killable(curr_task->mm))
>>> +				goto finish;
>>
>> in case of contention the vma will be seen by bpf prog again?
>> It looks like the 4 cases of overlaping vmas (after newly acquired lock)
>> that show_smaps_rollup() is dealing with are not handled here?
> 
> I am not sure I am following here. The logic below should avoid showing
> the same vma again:
>   
> 	curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
> 	if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
> 		curr_vma = curr_vma->vm_next;
> 
> This logic handles case 1, 2, 3 same as show_smaps_rollup(). For case 4,
> this logic skips the changed vma (from [prev_vm_start, prev_vm_end] to
> [prev_vm_start, prev_vm_end + something]); while show_smaps_rollup() will
> process the new vma.  I think skipping or processing the new vma are both
> correct, as we already processed part of it [prev_vm_start, prev_vm_end]
> once.

Got it. Yeah, if there is a new vma that has extra range after
prev_vm_end while prev_vm_start(s) are the same, arguably,
bpf prog shouldn't process the same range again,
but it will miss the upper part of the range.
In other words there is no equivalent here to 'start'
argument that smap_gather_stats has.
It's fine, but let's document this subtle difference.

>>
>>> +		} else {
>>> +			op = task_vma_iter_next_vma;
>>> +		}
>>> +	} else {
>>> +again:
>>> +		curr_task = task_seq_get_next(ns, &curr_tid, true);
>>> +		if (!curr_task) {
>>> +			info->tid = curr_tid + 1;
>>> +			goto finish;
>>> +		}
>>> +
>>> +		if (curr_tid != info->tid) {
>>> +			info->tid = curr_tid;
>>> +			op = task_vma_iter_first_vma;
>>> +		} else {
>>> +			op = task_vma_iter_find_vma;
>>
>> what will happen if there was no contetion on the lock and no seq_stop
>> when this line was hit and set op = find_vma; ?
>> If I'm reading this correctly prev_vm_start/end could still
>> belong to some previous task.
> 
> In that case, we should be in "curr_tid != info->tid" path, no?
> 
>> My understanding that if read buffer is big the bpf_seq_read()
>> will keep doing while(space in buffer) {seq->op->show(), seq->op->next();}
>> and task_vma_seq_get_next() will iterate over all vmas of one task and
>> will proceed into the next task, but if there was no contention and no stop
>> then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) will be lucky
>> and will go into first vma of the new task) or perf_vm_end is some address
>> of some previous task's vma. In this case find_vma may return wrong vma
>> for the new task.
>> It seems to me prev_vm_end/start should be set by this task_vma_seq_get_next()
>> function instead of relying on stop callback.

Yeah. I misread where the 'op' goes.
But I think the problem still exists. Consider the loop of
show;next;show;next;...
Here it will be: case first_vma; case next_vma; case next_vma;
Now it goes into new task and 'curr_tid != info->tid',
so it does op = first_vma and info->tid = curr_tid.
But we got unlucky and the process got suspended (with ctrl-z)
and mmap_read_lock_killable returned eintr.
The 'if' below will jump to finish.
It will set info->task = NULL
The process is supposed to continue sys_read after resume.
It will come back here to 'again:', but now it will do 'case find_vma'
and will search for wrong prev_vm_end.

Maybe I'm missing something again.
It's hard for me to follow the code.
Could you please add diagrams like show_smaps_rollup() does and
document what happens with all the 'op's.

>>> +		}
>>> +
>>> +		if (!curr_task->mm)
>>> +			goto next_task;
>>> +
>>> +		if (mmap_read_lock_killable(curr_task->mm))
>>> +			goto finish;
>>> +	}
>>> +
>>> +	switch (op) {
>>> +	case task_vma_iter_first_vma:
>>> +		curr_vma = curr_task->mm->mmap;
>>> +		break;
>>> +	case task_vma_iter_next_vma:
>>> +		curr_vma = curr_vma->vm_next;
>>> +		break;
>>> +	case task_vma_iter_find_vma:
>>> +		/* We dropped mmap_lock so it is necessary to use find_vma
>>> +		 * to find the next vma. This is similar to the  mechanism
>>> +		 * in show_smaps_rollup().
>>> +		 */
>>> +		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
>>> +
>>> +		if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
>>> +			curr_vma = curr_vma->vm_next;
>>> +		break;
>>> +	}
>>> +	if (!curr_vma) {
>>> +		mmap_read_unlock(curr_task->mm);
>>> +		goto next_task;
>>> +	}
>>> +	info->task = curr_task;
>>> +	info->vma = curr_vma;
>>> +	return curr_vma;
>>> +
>>> +next_task:
>>> +	put_task_struct(curr_task);
>>> +	info->task = NULL;
>>> +	curr_tid++;
>>> +	goto again;
>>> +
>>> +finish:
>>> +	if (curr_task)
>>> +		put_task_struct(curr_task);
>>> +	info->task = NULL;
>>> +	info->vma = NULL;
>>> +	return NULL;
>>> +}
> 
> [...]
>
Song Liu Feb. 10, 2021, 8 a.m. UTC | #5
> On Feb 9, 2021, at 7:00 PM, Alexei Starovoitov <ast@fb.com> wrote:
> 
> On 2/9/21 2:08 PM, Song Liu wrote:
>>> On Feb 9, 2021, at 1:30 PM, Alexei Starovoitov <alexei.starovoitov@gmail.com> wrote:
>>> 
>>> On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
>>>> Introduce task_vma bpf_iter to print memory information of a process. It
>>>> can be used to print customized information similar to /proc/<pid>/maps.
>>>> 
>>>> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
>>>> vma's of a process. However, these information are not flexible enough to
>>>> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
>>>> pages (x86_64), there is no easy way to tell which address ranges are
>>>> backed by 2MB pages. task_vma solves the problem by enabling the user to
>>>> generate customize information based on the vma (and vma->vm_mm,
>>>> vma->vm_file, etc.).
>>>> 
>>>> To access the vma safely in the BPF program, task_vma iterator holds
>>>> target mmap_lock while calling the BPF program. If the mmap_lock is
>>>> contended, task_vma unlocks mmap_lock between iterations to unblock the
>>>> writer(s). This lock contention avoidance mechanism is similar to the one
>>>> used in show_smaps_rollup().
>>>> 
>>>> Signed-off-by: Song Liu <songliubraving@fb.com>
>>>> ---
>>>> kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>>>> 1 file changed, 216 insertions(+), 1 deletion(-)
>>>> 
>>>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>>>> index 175b7b42bfc46..a0d469f0f481c 100644
>>>> --- a/kernel/bpf/task_iter.c
>>>> +++ b/kernel/bpf/task_iter.c
>>>> @@ -286,9 +286,198 @@ static const struct seq_operations task_file_seq_ops = {
>>>> 	.show	= task_file_seq_show,
>>>> };
>>>> 
>>>> +struct bpf_iter_seq_task_vma_info {
>>>> +	/* The first field must be struct bpf_iter_seq_task_common.
>>>> +	 * this is assumed by {init, fini}_seq_pidns() callback functions.
>>>> +	 */
>>>> +	struct bpf_iter_seq_task_common common;
>>>> +	struct task_struct *task;
>>>> +	struct vm_area_struct *vma;
>>>> +	u32 tid;
>>>> +	unsigned long prev_vm_start;
>>>> +	unsigned long prev_vm_end;
>>>> +};
>>>> +
>>>> +enum bpf_task_vma_iter_find_op {
>>>> +	task_vma_iter_first_vma,   /* use mm->mmap */
>>>> +	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
>>>> +	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
>>>> +};
>>>> +
>>>> +static struct vm_area_struct *
>>>> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
>>>> +{
>>>> +	struct pid_namespace *ns = info->common.ns;
>>>> +	enum bpf_task_vma_iter_find_op op;
>>>> +	struct vm_area_struct *curr_vma;
>>>> +	struct task_struct *curr_task;
>>>> +	u32 curr_tid = info->tid;
>>>> +
>>>> +	/* If this function returns a non-NULL vma, it holds a reference to
>>>> +	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
>>>> +	 * If this function returns NULL, it does not hold any reference or
>>>> +	 * lock.
>>>> +	 */
>>>> +	if (info->task) {
>>>> +		curr_task = info->task;
>>>> +		curr_vma = info->vma;
>>>> +		/* In case of lock contention, drop mmap_lock to unblock
>>>> +		 * the writer.
>>>> +		 */
>>>> +		if (mmap_lock_is_contended(curr_task->mm)) {
>>>> +			info->prev_vm_start = curr_vma->vm_start;
>>>> +			info->prev_vm_end = curr_vma->vm_end;
>>>> +			op = task_vma_iter_find_vma;
>>>> +			mmap_read_unlock(curr_task->mm);
>>>> +			if (mmap_read_lock_killable(curr_task->mm))
>>>> +				goto finish;
>>> 
>>> in case of contention the vma will be seen by bpf prog again?
>>> It looks like the 4 cases of overlaping vmas (after newly acquired lock)
>>> that show_smaps_rollup() is dealing with are not handled here?
>> I am not sure I am following here. The logic below should avoid showing
>> the same vma again:
>>  	curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
>> 	if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
>> 		curr_vma = curr_vma->vm_next;
>> This logic handles case 1, 2, 3 same as show_smaps_rollup(). For case 4,
>> this logic skips the changed vma (from [prev_vm_start, prev_vm_end] to
>> [prev_vm_start, prev_vm_end + something]); while show_smaps_rollup() will
>> process the new vma.  I think skipping or processing the new vma are both
>> correct, as we already processed part of it [prev_vm_start, prev_vm_end]
>> once.
> 
> Got it. Yeah, if there is a new vma that has extra range after
> prem_vm_end while prev_vm_start(s) are the same, arguably,
> bpf prog shouldn't process the same range again,
> but it will miss the upper part of the range.
> In other words there is no equivalent here to 'start'
> argument that smap_gather_stats has.
> It's fine, but lets document this subtle difference.

Make sense. I will add information in the comment.  

> 
>>> 
>>>> +		} else {
>>>> +			op = task_vma_iter_next_vma;
>>>> +		}
>>>> +	} else {
>>>> +again:
>>>> +		curr_task = task_seq_get_next(ns, &curr_tid, true);
>>>> +		if (!curr_task) {
>>>> +			info->tid = curr_tid + 1;
>>>> +			goto finish;
>>>> +		}
>>>> +
>>>> +		if (curr_tid != info->tid) {
>>>> +			info->tid = curr_tid;
>>>> +			op = task_vma_iter_first_vma;
>>>> +		} else {
>>>> +			op = task_vma_iter_find_vma;
>>> 
>>> what will happen if there was no contetion on the lock and no seq_stop
>>> when this line was hit and set op = find_vma; ?
>>> If I'm reading this correctly prev_vm_start/end could still
>>> belong to some previous task.
>> In that case, we should be in "curr_tid != info->tid" path, no?
>>> My understanding that if read buffer is big the bpf_seq_read()
>>> will keep doing while(space in buffer) {seq->op->show(), seq->op->next();}
>>> and task_vma_seq_get_next() will iterate over all vmas of one task and
>>> will proceed into the next task, but if there was no contention and no stop
>>> then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) will be lucky
>>> and will go into first vma of the new task) or prev_vm_end is some address
>>> of some previous task's vma. In this case find_vma may return wrong vma
>>> for the new task.
>>> It seems to me prev_vm_end/start should be set by this task_vma_seq_get_next()
>>> function instead of relying on stop callback.
> 
> Yeah. I misread where the 'op' goes.
> But I think the problem still exists. Consider the loop of
> show;next;show;next;...
> Here it will be: case first_vma; case next_vma; case next_vma;
> Now it goes into new task and 'curr_tid != info->tid',
> so it does op = first_vma and info->tid = curr_tid.
> But we got unlucky and the process got suspended (with ctrl-z)
> and mmap_read_lock_killable returned eintr.
> The 'if' below will jump to finish.
> It will set info->task = NULL
> The process is supposed to continue sys_read after resume.
> It will come back here to 'again:', but now it will do 'case find_vma'
> and will search for wrong prev_vm_end.

You are right. We will hit the issue in this case. Let me fix it in 
the next version. 

> 
> Maybe I'm missing something again.
> It's hard for me to follow the code.
> Could you please add diagrams like show_smaps_rollup() does and
> document what happens with all the 'op's.

Will also add more documents here. 

Thanks,
Song
Yonghong Song Feb. 10, 2021, 11:02 p.m. UTC | #6
On 2/9/21 7:00 PM, Alexei Starovoitov wrote:
> On 2/9/21 2:08 PM, Song Liu wrote:
>>
>>
>>> On Feb 9, 2021, at 1:30 PM, Alexei Starovoitov 
>>> <alexei.starovoitov@gmail.com> wrote:
>>>
>>> On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
>>>> Introduce task_vma bpf_iter to print memory information of a 
>>>> process. It
>>>> can be used to print customized information similar to 
>>>> /proc/<pid>/maps.
>>>>
>>>> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
>>>> vma's of a process. However, these information are not flexible 
>>>> enough to
>>>> cover all use cases. For example, if a vma cover mixed 2MB pages and 
>>>> 4kB
>>>> pages (x86_64), there is no easy way to tell which address ranges are
>>>> backed by 2MB pages. task_vma solves the problem by enabling the 
>>>> user to
>>>> generate customize information based on the vma (and vma->vm_mm,
>>>> vma->vm_file, etc.).
>>>>
>>>> To access the vma safely in the BPF program, task_vma iterator holds
>>>> target mmap_lock while calling the BPF program. If the mmap_lock is
>>>> contended, task_vma unlocks mmap_lock between iterations to unblock the
>>>> writer(s). This lock contention avoidance mechanism is similar to 
>>>> the one
>>>> used in show_smaps_rollup().
>>>>
>>>> Signed-off-by: Song Liu <songliubraving@fb.com>
>>>> ---
>>>> kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>>>> 1 file changed, 216 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>>>> index 175b7b42bfc46..a0d469f0f481c 100644
>>>> --- a/kernel/bpf/task_iter.c
>>>> +++ b/kernel/bpf/task_iter.c
>>>> @@ -286,9 +286,198 @@ static const struct seq_operations 
>>>> task_file_seq_ops = {
>>>>     .show    = task_file_seq_show,
>>>> };
>>>>
>>>> +struct bpf_iter_seq_task_vma_info {
>>>> +    /* The first field must be struct bpf_iter_seq_task_common.
>>>> +     * this is assumed by {init, fini}_seq_pidns() callback functions.
>>>> +     */
>>>> +    struct bpf_iter_seq_task_common common;
>>>> +    struct task_struct *task;
>>>> +    struct vm_area_struct *vma;
>>>> +    u32 tid;
>>>> +    unsigned long prev_vm_start;
>>>> +    unsigned long prev_vm_end;
>>>> +};
>>>> +
>>>> +enum bpf_task_vma_iter_find_op {
>>>> +    task_vma_iter_first_vma,   /* use mm->mmap */
>>>> +    task_vma_iter_next_vma,    /* use curr_vma->vm_next */
>>>> +    task_vma_iter_find_vma,    /* use find_vma() to find next vma */
>>>> +};
>>>> +
>>>> +static struct vm_area_struct *
>>>> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
>>>> +{
>>>> +    struct pid_namespace *ns = info->common.ns;
>>>> +    enum bpf_task_vma_iter_find_op op;
>>>> +    struct vm_area_struct *curr_vma;
>>>> +    struct task_struct *curr_task;
>>>> +    u32 curr_tid = info->tid;
>>>> +
>>>> +    /* If this function returns a non-NULL vma, it holds a 
>>>> reference to
>>>> +     * the task_struct, and holds read lock on vma->mm->mmap_lock.
>>>> +     * If this function returns NULL, it does not hold any 
>>>> reference or
>>>> +     * lock.
>>>> +     */
>>>> +    if (info->task) {
>>>> +        curr_task = info->task;
>>>> +        curr_vma = info->vma;
>>>> +        /* In case of lock contention, drop mmap_lock to unblock
>>>> +         * the writer.
>>>> +         */
>>>> +        if (mmap_lock_is_contended(curr_task->mm)) {
>>>> +            info->prev_vm_start = curr_vma->vm_start;
>>>> +            info->prev_vm_end = curr_vma->vm_end;
>>>> +            op = task_vma_iter_find_vma;
>>>> +            mmap_read_unlock(curr_task->mm);
>>>> +            if (mmap_read_lock_killable(curr_task->mm))
>>>> +                goto finish;
>>>
>>> in case of contention the vma will be seen by bpf prog again?
>>> It looks like the 4 cases of overlaping vmas (after newly acquired lock)
>>> that show_smaps_rollup() is dealing with are not handled here?
>>
>> I am not sure I am following here. The logic below should avoid showing
>> the same vma again:
>>     curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
>>     if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
>>         curr_vma = curr_vma->vm_next;
>>
>> This logic handles case 1, 2, 3 same as show_smaps_rollup(). For case 4,
>> this logic skips the changed vma (from [prev_vm_start, prev_vm_end] to
>> [prev_vm_start, prev_vm_end + something]); while show_smaps_rollup() will
>> process the new vma.  I think skipping or processing the new vma are both
>> correct, as we already processed part of it [prev_vm_start, prev_vm_end]
>> once.
> 
> Got it. Yeah, if there is a new vma that has extra range after
> prev_vm_end while prev_vm_start(s) are the same, arguably,
> bpf prog shouldn't process the same range again,
> but it will miss the upper part of the range.
> In other words there is no equivalent here to 'start'
> argument that smap_gather_stats has.
> It's fine, but lets document this subtle difference.
> 
>>>
>>>> +        } else {
>>>> +            op = task_vma_iter_next_vma;
>>>> +        }
>>>> +    } else {
>>>> +again:
>>>> +        curr_task = task_seq_get_next(ns, &curr_tid, true);
>>>> +        if (!curr_task) {
>>>> +            info->tid = curr_tid + 1;
>>>> +            goto finish;
>>>> +        }
>>>> +
>>>> +        if (curr_tid != info->tid) {
>>>> +            info->tid = curr_tid;
>>>> +            op = task_vma_iter_first_vma;
>>>> +        } else {
>>>> +            op = task_vma_iter_find_vma;
>>>
>>> what will happen if there was no contention on the lock and no seq_stop
>>> when this line was hit and set op = find_vma; ?
>>> If I'm reading this correctly prev_vm_start/end could still
>>> belong to some previous task.
>>
>> In that case, we should be in "curr_tid != info->tid" path, no?
>>
>>> My understanding that if read buffer is big the bpf_seq_read()
>>> will keep doing while(space in buffer) {seq->op->show(), 
>>> seq->op->next();}
>>> and task_vma_seq_get_next() will iterate over all vmas of one task and
>>> will proceed into the next task, but if there was no contention and 
>>> no stop
>>> then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) 
>>> will be lucky
>>> and will go into first vma of the new task) or prev_vm_end is some 
>>> address
>>> of some previous task's vma. In this case find_vma may return wrong vma
>>> for the new task.
>>> It seems to me prev_vm_end/start should be set by this 
>>> task_vma_seq_get_next()
>>> function instead of relying on stop callback.
> 
> Yeah. I misread where the 'op' goes.
> But I think the problem still exists. Consider the loop of
> show;next;show;next;...
> Here it will be: case first_vma; case next_vma; case next_vma;
> Now it goes into new task and 'curr_tid != info->tid',
> so it does op = first_vma and info->tid = curr_tid.
> But we got unlucky and the process got suspended (with ctrl-z)
> and mmap_read_lock_killable returned eintr.
> The 'if' below will jump to finish.
> It will set info->task = NULL
> The process is supposed to continue sys_read after resume.
> It will come back here to 'again:', but now it will do 'case find_vma'
> and will search for wrong prev_vm_end.

Thanks for catching this. I have discussed with Song about return value
for mmap_read_lock_killable(). We only considered ctrl-c case but
did not realize ctrl-z case :-(

> Song, you can return an ERR_PTR(-EAGAIN) here. This ERR_PTR will be
available to your seq_ops->stop() function as well so you can handle
properly there too.

> 
> Maybe I'm missing something again.
> It's hard for me to follow the code.
> Could you please add diagrams like show_smaps_rollup() does and
> document what happens with all the 'op's.
> 
[...]
Song Liu Feb. 12, 2021, 1:41 a.m. UTC | #7
> On Feb 10, 2021, at 3:02 PM, Yonghong Song <yhs@fb.com> wrote:
> 
> 
> 
> On 2/9/21 7:00 PM, Alexei Starovoitov wrote:
>> On 2/9/21 2:08 PM, Song Liu wrote:
>>> 
>>> 
>>>> On Feb 9, 2021, at 1:30 PM, Alexei Starovoitov <alexei.starovoitov@gmail.com> wrote:
>>>> 
>>>> On Mon, Feb 08, 2021 at 02:52:52PM -0800, Song Liu wrote:
>>>>> Introduce task_vma bpf_iter to print memory information of a process. It
>>>>> can be used to print customized information similar to /proc/<pid>/maps.
>>>>> 
>>>>> Current /proc/<pid>/maps and /proc/<pid>/smaps provide information of
>>>>> vma's of a process. However, these information are not flexible enough to
>>>>> cover all use cases. For example, if a vma cover mixed 2MB pages and 4kB
>>>>> pages (x86_64), there is no easy way to tell which address ranges are
>>>>> backed by 2MB pages. task_vma solves the problem by enabling the user to
>>>>> generate customize information based on the vma (and vma->vm_mm,
>>>>> vma->vm_file, etc.).
>>>>> 
>>>>> To access the vma safely in the BPF program, task_vma iterator holds
>>>>> target mmap_lock while calling the BPF program. If the mmap_lock is
>>>>> contended, task_vma unlocks mmap_lock between iterations to unblock the
>>>>> writer(s). This lock contention avoidance mechanism is similar to the one
>>>>> used in show_smaps_rollup().
>>>>> 
>>>>> Signed-off-by: Song Liu <songliubraving@fb.com>
>>>>> ---
>>>>> kernel/bpf/task_iter.c | 217 ++++++++++++++++++++++++++++++++++++++++-
>>>>> 1 file changed, 216 insertions(+), 1 deletion(-)
>>>>> 
>>>>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>>>>> index 175b7b42bfc46..a0d469f0f481c 100644
>>>>> --- a/kernel/bpf/task_iter.c
>>>>> +++ b/kernel/bpf/task_iter.c
>>>>> @@ -286,9 +286,198 @@ static const struct seq_operations task_file_seq_ops = {
>>>>>     .show    = task_file_seq_show,
>>>>> };
>>>>> 
>>>>> +struct bpf_iter_seq_task_vma_info {
>>>>> +    /* The first field must be struct bpf_iter_seq_task_common.
>>>>> +     * this is assumed by {init, fini}_seq_pidns() callback functions.
>>>>> +     */
>>>>> +    struct bpf_iter_seq_task_common common;
>>>>> +    struct task_struct *task;
>>>>> +    struct vm_area_struct *vma;
>>>>> +    u32 tid;
>>>>> +    unsigned long prev_vm_start;
>>>>> +    unsigned long prev_vm_end;
>>>>> +};
>>>>> +
>>>>> +enum bpf_task_vma_iter_find_op {
>>>>> +    task_vma_iter_first_vma,   /* use mm->mmap */
>>>>> +    task_vma_iter_next_vma,    /* use curr_vma->vm_next */
>>>>> +    task_vma_iter_find_vma,    /* use find_vma() to find next vma */
>>>>> +};
>>>>> +
>>>>> +static struct vm_area_struct *
>>>>> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
>>>>> +{
>>>>> +    struct pid_namespace *ns = info->common.ns;
>>>>> +    enum bpf_task_vma_iter_find_op op;
>>>>> +    struct vm_area_struct *curr_vma;
>>>>> +    struct task_struct *curr_task;
>>>>> +    u32 curr_tid = info->tid;
>>>>> +
>>>>> +    /* If this function returns a non-NULL vma, it holds a reference to
>>>>> +     * the task_struct, and holds read lock on vma->mm->mmap_lock.
>>>>> +     * If this function returns NULL, it does not hold any reference or
>>>>> +     * lock.
>>>>> +     */
>>>>> +    if (info->task) {
>>>>> +        curr_task = info->task;
>>>>> +        curr_vma = info->vma;
>>>>> +        /* In case of lock contention, drop mmap_lock to unblock
>>>>> +         * the writer.
>>>>> +         */
>>>>> +        if (mmap_lock_is_contended(curr_task->mm)) {
>>>>> +            info->prev_vm_start = curr_vma->vm_start;
>>>>> +            info->prev_vm_end = curr_vma->vm_end;
>>>>> +            op = task_vma_iter_find_vma;
>>>>> +            mmap_read_unlock(curr_task->mm);
>>>>> +            if (mmap_read_lock_killable(curr_task->mm))
>>>>> +                goto finish;
>>>> 
>>>> in case of contention the vma will be seen by bpf prog again?
>>>> It looks like the 4 cases of overlaping vmas (after newly acquired lock)
>>>> that show_smaps_rollup() is dealing with are not handled here?
>>> 
>>> I am not sure I am following here. The logic below should avoid showing
>>> the same vma again:
>>>     curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
>>>     if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
>>>         curr_vma = curr_vma->vm_next;
>>> 
>>> This logic handles case 1, 2, 3 same as show_smaps_rollup(). For case 4,
>>> this logic skips the changed vma (from [prev_vm_start, prev_vm_end] to
>>> [prev_vm_start, prev_vm_end + something]); while show_smaps_rollup() will
>>> process the new vma.  I think skipping or processing the new vma are both
>>> correct, as we already processed part of it [prev_vm_start, prev_vm_end]
>>> once.
>> Got it. Yeah, if there is a new vma that has extra range after
>> prev_vm_end while prev_vm_start(s) are the same, arguably,
>> bpf prog shouldn't process the same range again,
>> but it will miss the upper part of the range.
>> In other words there is no equivalent here to 'start'
>> argument that smap_gather_stats has.
>> It's fine, but lets document this subtle difference.
>>>> 
>>>>> +        } else {
>>>>> +            op = task_vma_iter_next_vma;
>>>>> +        }
>>>>> +    } else {
>>>>> +again:
>>>>> +        curr_task = task_seq_get_next(ns, &curr_tid, true);
>>>>> +        if (!curr_task) {
>>>>> +            info->tid = curr_tid + 1;
>>>>> +            goto finish;
>>>>> +        }
>>>>> +
>>>>> +        if (curr_tid != info->tid) {
>>>>> +            info->tid = curr_tid;
>>>>> +            op = task_vma_iter_first_vma;
>>>>> +        } else {
>>>>> +            op = task_vma_iter_find_vma;
>>>> 
>>>> what will happen if there was no contention on the lock and no seq_stop
>>>> when this line was hit and set op = find_vma; ?
>>>> If I'm reading this correctly prev_vm_start/end could still
>>>> belong to some previous task.
>>> 
>>> In that case, we should be in "curr_tid != info->tid" path, no?
>>> 
>>>> My understanding that if read buffer is big the bpf_seq_read()
>>>> will keep doing while(space in buffer) {seq->op->show(), seq->op->next();}
>>>> and task_vma_seq_get_next() will iterate over all vmas of one task and
>>>> will proceed into the next task, but if there was no contention and no stop
>>>> then prev_vm_end will either be still zero (so find_vma(mm, 0 - 1) will be lucky
>>>> and will go into first vma of the new task) or perf_vm_end is some address
>>>> of some previous task's vma. In this case find_vma may return wrong vma
>>>> for the new task.
>>>> It seems to me prev_vm_end/start should be set by this task_vma_seq_get_next()
>>>> function instead of relying on stop callback.
>> Yeah. I misread where the 'op' goes.
>> But I think the problem still exists. Consider the loop of
>> show;next;show;next;...
>> Here it will be: case first_vma; case next_vma; case next_vma;
>> Now it goes into new task and 'curr_tid != info->tid',
>> so it does op = first_vma and info->tid = curr_tid.
>> But we got unlucky and the process got suspended (with ctrl-z)
>> and mmap_read_lock_killable returned eintr.
>> The 'if' below will jump to finish.
>> It will set info->task = NULL
>> The process is supposed to continue sys_read after resume.
>> It will come back here to 'again:', but now it will do 'case find_vma'
>> and will search for wrong prev_vm_end.
> 
> Thanks for catching this. I have discussed with Song about return value
> for mmap_read_lock_killable(). We only considered ctrl-c case but
> did not realize ctrl-z case :-(

Actually, we don't need to handle the ctrl-z case. Ctrl-z (or kill -STOP) 
will not cause mmap_read_lock_killable() to return -EINTR. I also confirmed 
this in the experiments. Something like the following will occasionally 
trigger mmap_read_lock_killable() to return -EINTR,

  cat /sys/fs/bpf/task_vma & sleep 0.0001 ; kill $!

while the following (using kill -STOP) will not:

  cat /sys/fs/bpf/task_vma & sleep 0.0001 ; kill -STOP $!

Thanks,
Song

> 
> Song, you can return an ERR_PTR(-EAGAIN) here. This ERR_PTR will be
> available to your seq_ops->stop() function as well so you can handle
> properly there too.
> 
>> Maybe I'm missing something again.
>> It's hard for me to follow the code.
>> Could you please add diagrams like show_smaps_rollup() does and
>> document what happens with all the 'op's.
> [...]
diff mbox series

Patch

diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 175b7b42bfc46..a0d469f0f481c 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -286,9 +286,198 @@  static const struct seq_operations task_file_seq_ops = {
 	.show	= task_file_seq_show,
 };
 
+struct bpf_iter_seq_task_vma_info {
+	/* The first field must be struct bpf_iter_seq_task_common.
+	 * this is assumed by {init, fini}_seq_pidns() callback functions.
+	 */
+	struct bpf_iter_seq_task_common common;
+	struct task_struct *task;
+	struct vm_area_struct *vma;
+	u32 tid;
+	unsigned long prev_vm_start;
+	unsigned long prev_vm_end;
+};
+
+enum bpf_task_vma_iter_find_op {
+	task_vma_iter_first_vma,   /* use mm->mmap */
+	task_vma_iter_next_vma,    /* use curr_vma->vm_next */
+	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
+};
+
+static struct vm_area_struct *
+task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
+{
+	struct pid_namespace *ns = info->common.ns;
+	enum bpf_task_vma_iter_find_op op;
+	struct vm_area_struct *curr_vma;
+	struct task_struct *curr_task;
+	u32 curr_tid = info->tid;
+
+	/* If this function returns a non-NULL vma, it holds a reference to
+	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
+	 * If this function returns NULL, it does not hold any reference or
+	 * lock.
+	 */
+	if (info->task) {
+		curr_task = info->task;
+		curr_vma = info->vma;
+		/* In case of lock contention, drop mmap_lock to unblock
+		 * the writer.
+		 */
+		if (mmap_lock_is_contended(curr_task->mm)) {
+			info->prev_vm_start = curr_vma->vm_start;
+			info->prev_vm_end = curr_vma->vm_end;
+			op = task_vma_iter_find_vma;
+			mmap_read_unlock(curr_task->mm);
+			if (mmap_read_lock_killable(curr_task->mm))
+				goto finish;
+		} else {
+			op = task_vma_iter_next_vma;
+		}
+	} else {
+again:
+		curr_task = task_seq_get_next(ns, &curr_tid, true);
+		if (!curr_task) {
+			info->tid = curr_tid + 1;
+			goto finish;
+		}
+
+		if (curr_tid != info->tid) {
+			info->tid = curr_tid;
+			op = task_vma_iter_first_vma;
+		} else {
+			op = task_vma_iter_find_vma;
+		}
+
+		if (!curr_task->mm)
+			goto next_task;
+
+		if (mmap_read_lock_killable(curr_task->mm))
+			goto finish;
+	}
+
+	switch (op) {
+	case task_vma_iter_first_vma:
+		curr_vma = curr_task->mm->mmap;
+		break;
+	case task_vma_iter_next_vma:
+		curr_vma = curr_vma->vm_next;
+		break;
+	case task_vma_iter_find_vma:
+		/* We dropped mmap_lock so it is necessary to use find_vma
+		 * to find the next vma. This is similar to the  mechanism
+		 * in show_smaps_rollup().
+		 */
+		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
+
+		if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
+			curr_vma = curr_vma->vm_next;
+		break;
+	}
+	if (!curr_vma) {
+		mmap_read_unlock(curr_task->mm);
+		goto next_task;
+	}
+	info->task = curr_task;
+	info->vma = curr_vma;
+	return curr_vma;
+
+next_task:
+	put_task_struct(curr_task);
+	info->task = NULL;
+	curr_tid++;
+	goto again;
+
+finish:
+	if (curr_task)
+		put_task_struct(curr_task);
+	info->task = NULL;
+	info->vma = NULL;
+	return NULL;
+}
+
+static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct bpf_iter_seq_task_vma_info *info = seq->private;
+	struct vm_area_struct *vma;
+
+	vma = task_vma_seq_get_next(info);
+	if (vma && *pos == 0)
+		++*pos;
+
+	return vma;
+}
+
+static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bpf_iter_seq_task_vma_info *info = seq->private;
+
+	++*pos;
+	return task_vma_seq_get_next(info);
+}
+
+struct bpf_iter__task_vma {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct task_struct *, task);
+	__bpf_md_ptr(struct vm_area_struct *, vma);
+};
+
+DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
+		     struct task_struct *task, struct vm_area_struct *vma)
+
+static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
+{
+	struct bpf_iter_seq_task_vma_info *info = seq->private;
+	struct bpf_iter__task_vma ctx;
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, in_stop);
+	if (!prog)
+		return 0;
+
+	ctx.meta = &meta;
+	ctx.task = info->task;
+	ctx.vma = info->vma;
+	return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int task_vma_seq_show(struct seq_file *seq, void *v)
+{
+	return __task_vma_seq_show(seq, false);
+}
+
+static void task_vma_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_seq_task_vma_info *info = seq->private;
+
+	if (!v) {
+		(void)__task_vma_seq_show(seq, true);
+	} else {
+		/* Set prev_vm_start to ~0UL, so that we don't skip the
+		 * vma returned by the next find_vma(). Please refer to
+		 * case task_vma_iter_find_vma in task_vma_seq_get_next().
+		 */
+		info->prev_vm_start = ~0UL;
+		info->prev_vm_end = info->vma->vm_end;
+		mmap_read_unlock(info->task->mm);
+		put_task_struct(info->task);
+		info->task = NULL;
+	}
+}
+
+static const struct seq_operations task_vma_seq_ops = {
+	.start	= task_vma_seq_start,
+	.next	= task_vma_seq_next,
+	.stop	= task_vma_seq_stop,
+	.show	= task_vma_seq_show,
+};
+
 BTF_ID_LIST(btf_task_file_ids)
 BTF_ID(struct, task_struct)
 BTF_ID(struct, file)
+BTF_ID(struct, vm_area_struct)
 
 static const struct bpf_iter_seq_info task_seq_info = {
 	.seq_ops		= &task_seq_ops,
@@ -328,6 +517,26 @@  static struct bpf_iter_reg task_file_reg_info = {
 	.seq_info		= &task_file_seq_info,
 };
 
+static const struct bpf_iter_seq_info task_vma_seq_info = {
+	.seq_ops		= &task_vma_seq_ops,
+	.init_seq_private	= init_seq_pidns,
+	.fini_seq_private	= fini_seq_pidns,
+	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_vma_info),
+};
+
+static struct bpf_iter_reg task_vma_reg_info = {
+	.target			= "task_vma",
+	.feature		= BPF_ITER_RESCHED,
+	.ctx_arg_info_size	= 2,
+	.ctx_arg_info		= {
+		{ offsetof(struct bpf_iter__task_vma, task),
+		  PTR_TO_BTF_ID_OR_NULL },
+		{ offsetof(struct bpf_iter__task_vma, vma),
+		  PTR_TO_BTF_ID_OR_NULL },
+	},
+	.seq_info		= &task_vma_seq_info,
+};
+
 static int __init task_iter_init(void)
 {
 	int ret;
@@ -339,6 +548,12 @@  static int __init task_iter_init(void)
 
 	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
 	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
-	return bpf_iter_reg_target(&task_file_reg_info);
+	ret =  bpf_iter_reg_target(&task_file_reg_info);
+	if (ret)
+		return ret;
+
+	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
+	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[2];
+	return bpf_iter_reg_target(&task_vma_reg_info);
 }
 late_initcall(task_iter_init);