
fs/proc/task_mmu: move mmu notification mechanism inside mm lock

Message ID 20240109112445.590736-1-usama.anjum@collabora.com (mailing list archive)
State New
Series fs/proc/task_mmu: move mmu notification mechanism inside mm lock

Commit Message

Muhammad Usama Anjum Jan. 9, 2024, 11:24 a.m. UTC
Move the mmu notification mechanism inside the mm lock to prevent a race
condition in other components which depend on it. The notifier invalidates
a memory range; depending on the number of iterations, different memory
ranges are invalidated.

This patch removes the following warning:
WARNING: CPU: 0 PID: 5067 at arch/x86/kvm/../../../virt/kvm/kvm_main.c:734 kvm_mmu_notifier_change_pte+0x860/0x960 arch/x86/kvm/../../../virt/kvm/kvm_main.c:734

There is no behavioural or performance change with this patch when no
component is registered with the mmu notifier.

Fixes: 52526ca7fdb9 ("fs/proc/task_mmu: implement IOCTL to get and optionally clear info about PTEs")
Reported-by: syzbot+81227d2bd69e9dedb802@syzkaller.appspotmail.com
Link: https://lore.kernel.org/all/000000000000f6d051060c6785bc@google.com/
Cc: Sean Christopherson <seanjc@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
---
 fs/proc/task_mmu.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
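
The sketch below is an illustrative aid, not part of the patch: a minimal
userspace caller that reaches do_pagemap_scan() with PM_SCAN_WP_MATCHING set,
i.e. the path whose invalidation is being moved under the mmap read lock. It
assumes Linux 6.7+ uAPI headers (PAGEMAP_SCAN, PM_SCAN_WP_MATCHING and
struct pm_scan_arg from <linux/fs.h>; UFFD_FEATURE_WP_ASYNC from
<linux/userfaultfd.h>), and creating the userfaultfd may additionally need
CAP_SYS_PTRACE or vm.unprivileged_userfaultfd=1. Error handling is
deliberately minimal.

/* Illustrative sketch only, under the assumptions noted above. */
#include <fcntl.h>
#include <linux/fs.h>            /* PAGEMAP_SCAN, struct pm_scan_arg */
#include <linux/userfaultfd.h>   /* UFFD_FEATURE_WP_ASYNC, UFFDIO_* */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LEN (16 * 4096UL)

int main(void)
{
	/* Page-aligned anonymous buffer, dirtied so its pages are "written". */
	char *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 1, LEN);

	/*
	 * PM_SCAN_WP_MATCHING requires the range to be registered for
	 * userfaultfd write-protect in async mode beforehand.
	 */
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_WP_ASYNC,
	};
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)buf, .len = LEN },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) ||
	    ioctl(uffd, UFFDIO_REGISTER, &reg)) {
		perror("userfaultfd setup");
		return 1;
	}

	/*
	 * Report written pages and write-protect them in the same call.
	 * This is the path where mmu_notifier_invalidate_range_start() now
	 * runs under mmap_read_lock(), once per walk_page_range() iteration.
	 */
	struct page_region regions[16];
	struct pm_scan_arg arg = {
		.size = sizeof(arg),
		.flags = PM_SCAN_WP_MATCHING,
		.start = (unsigned long)buf,
		.end = (unsigned long)buf + LEN,
		.vec = (unsigned long)regions,
		.vec_len = 16,
		.category_mask = PAGE_IS_WRITTEN,
		.return_mask = PAGE_IS_WRITTEN,
	};
	int pagemap = open("/proc/self/pagemap", O_RDONLY);
	if (pagemap < 0) {
		perror("open /proc/self/pagemap");
		return 1;
	}
	long n = ioctl(pagemap, PAGEMAP_SCAN, &arg);
	if (n < 0) {
		perror("PAGEMAP_SCAN");
		return 1;
	}
	printf("write-protected %ld written region(s)\n", n);
	return 0;
}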

Comments

Sean Christopherson Jan. 9, 2024, 4:28 p.m. UTC | #1
On Tue, Jan 09, 2024, Muhammad Usama Anjum wrote:
> Move the mmu notification mechanism inside the mm lock to prevent a race
> condition in other components which depend on it. The notifier invalidates
> a memory range; depending on the number of iterations, different memory
> ranges are invalidated.
> 
> This patch removes the following warning:
> WARNING: CPU: 0 PID: 5067 at arch/x86/kvm/../../../virt/kvm/kvm_main.c:734 kvm_mmu_notifier_change_pte+0x860/0x960 arch/x86/kvm/../../../virt/kvm/kvm_main.c:734
> 
> There is no behavioural or performance change with this patch when no
> component is registered with the mmu notifier.
> 
> Fixes: 52526ca7fdb9 ("fs/proc/task_mmu: implement IOCTL to get and optionally clear info about PTEs")
> Reported-by: syzbot+81227d2bd69e9dedb802@syzkaller.appspotmail.com
> Link: https://lore.kernel.org/all/000000000000f6d051060c6785bc@google.com/
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: stable@vger.kernel.org
> Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
> ---
>  fs/proc/task_mmu.c | 22 ++++++++++++----------
>  1 file changed, 12 insertions(+), 10 deletions(-)
> 
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 62b16f42d5d2..56c2e7357494 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -2448,13 +2448,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
>  	if (ret)
>  		return ret;
>  
> -	/* Protection change for the range is going to happen. */
> -	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
> -		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
> -					mm, p.arg.start, p.arg.end);
> -		mmu_notifier_invalidate_range_start(&range);
> -	}
> -
>  	for (walk_start = p.arg.start; walk_start < p.arg.end;
>  			walk_start = p.arg.walk_end) {
>  		long n_out;

Nit, might be worth moving

		struct mmu_notifier_range range;

inside the loop to guard against stale usage, but that's definitely optional.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Andrew Morton Jan. 10, 2024, 4:43 a.m. UTC | #2
On Tue, 9 Jan 2024 08:28:06 -0800 Sean Christopherson <seanjc@google.com> wrote:

> > -	/* Protection change for the range is going to happen. */
> > -	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
> > -		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
> > -					mm, p.arg.start, p.arg.end);
> > -		mmu_notifier_invalidate_range_start(&range);
> > -	}
> > -
> >  	for (walk_start = p.arg.start; walk_start < p.arg.end;
> >  			walk_start = p.arg.walk_end) {
> >  		long n_out;
> 
> Nit, might be worth moving
> 
> 		struct mmu_notifier_range range;
> 
> inside the loop to guard against stale usage, but that's definitely optional.

Yes, I think that's nicer.

--- a/fs/proc/task_mmu.c~fs-proc-task_mmu-move-mmu-notification-mechanism-inside-mm-lock-fix
+++ a/fs/proc/task_mmu.c
@@ -2432,7 +2432,6 @@ static long pagemap_scan_flush_buffer(st
 
 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 {
-	struct mmu_notifier_range range;
 	struct pagemap_scan_private p = {0};
 	unsigned long walk_start;
 	size_t n_ranges_out = 0;
@@ -2450,6 +2449,7 @@ static long do_pagemap_scan(struct mm_st
 
 	for (walk_start = p.arg.start; walk_start < p.arg.end;
 			walk_start = p.arg.walk_end) {
+		struct mmu_notifier_range range;
 		long n_out;
 
 		if (fatal_signal_pending(current)) {

Patch

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 62b16f42d5d2..56c2e7357494 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2448,13 +2448,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 	if (ret)
 		return ret;
 
-	/* Protection change for the range is going to happen. */
-	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
-		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
-					mm, p.arg.start, p.arg.end);
-		mmu_notifier_invalidate_range_start(&range);
-	}
-
 	for (walk_start = p.arg.start; walk_start < p.arg.end;
 			walk_start = p.arg.walk_end) {
 		long n_out;
@@ -2467,8 +2460,20 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 		ret = mmap_read_lock_killable(mm);
 		if (ret)
 			break;
+
+		/* Protection change for the range is going to happen. */
+		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
+			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
+						mm, walk_start, p.arg.end);
+			mmu_notifier_invalidate_range_start(&range);
+		}
+
 		ret = walk_page_range(mm, walk_start, p.arg.end,
 				      &pagemap_scan_ops, &p);
+
+		if (p.arg.flags & PM_SCAN_WP_MATCHING)
+			mmu_notifier_invalidate_range_end(&range);
+
 		mmap_read_unlock(mm);
 
 		n_out = pagemap_scan_flush_buffer(&p);
@@ -2494,9 +2499,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 	if (pagemap_scan_writeback_args(&p.arg, uarg))
 		ret = -EFAULT;
 
-	if (p.arg.flags & PM_SCAN_WP_MATCHING)
-		mmu_notifier_invalidate_range_end(&range);
-
 	kfree(p.vec_buf);
 	return ret;
 }