diff mbox series

arm: use mmap_write_(un)lock for copy_to_user

Message ID 20200926192854.22164-1-chunkeey@gmail.com (mailing list archive)
State New, archived
Headers show
Series arm: use mmap_write_(un)lock for copy_to_user | expand

Commit Message

Christian Lamparter Sept. 26, 2020, 7:28 p.m. UTC
changes ARM's copy_to_user to use mmap_*write*_lock
variants. This is because the data is written to
user-space and not read.

Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
 arch/arm/lib/uaccess_with_memcpy.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Comments

Mike Rapoport Sept. 29, 2020, 9:26 a.m. UTC | #1
On Sat, Sep 26, 2020 at 09:28:54PM +0200, Christian Lamparter wrote:
> changes ARM's copy_to_user to use mmap_*write*_lock
> variants. This is because the data is written to
> user-space and not read.

The mmap lock protects internals of 'struct mm_struct' and they do not
change when the data is copied regardless of its direction.

> Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
> ---
>  arch/arm/lib/uaccess_with_memcpy.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
> index 106f83a5ea6d..7491c13fdf0e 100644
> --- a/arch/arm/lib/uaccess_with_memcpy.c
> +++ b/arch/arm/lib/uaccess_with_memcpy.c
> @@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  	atomic = faulthandler_disabled();
>  
>  	if (!atomic)
> -		mmap_read_lock(current->mm);
> +		mmap_write_lock(current->mm);
>  	while (n) {
>  		pte_t *pte;
>  		spinlock_t *ptl;
> @@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  
>  		while (!pin_page_for_write(to, &pte, &ptl)) {
>  			if (!atomic)
> -				mmap_read_unlock(current->mm);
> +				mmap_write_unlock(current->mm);
>  			if (__put_user(0, (char __user *)to))
>  				goto out;
>  			if (!atomic)
> -				mmap_read_lock(current->mm);
> +				mmap_write_lock(current->mm);
>  		}
>  
>  		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
> @@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  			spin_unlock(ptl);
>  	}
>  	if (!atomic)
> -		mmap_read_unlock(current->mm);
> +		mmap_write_unlock(current->mm);
>  
>  out:
>  	return n;
> -- 
> 2.28.0
>
Russell King (Oracle) Sept. 29, 2020, 9:31 a.m. UTC | #2
On Sat, Sep 26, 2020 at 09:28:54PM +0200, Christian Lamparter wrote:
> changes ARM's copy_to_user to use mmap_*write*_lock
> variants. This is because the data is written to
> user-space and not read.

The "read" lock is there to ensure that the page tables are not changed
(e.g. due to a page fault in another thread) while we are making changes
to the page. It is a "read" lock because we can tolerate other threads
reading the page tables and mm structures, but not making changes to
those structures.

This has nothing to do with whether we are reading or writing userspace.

Therefore, your patch is incorrect.

What problem are you seeing?

> 
> Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
> ---
>  arch/arm/lib/uaccess_with_memcpy.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
> index 106f83a5ea6d..7491c13fdf0e 100644
> --- a/arch/arm/lib/uaccess_with_memcpy.c
> +++ b/arch/arm/lib/uaccess_with_memcpy.c
> @@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  	atomic = faulthandler_disabled();
>  
>  	if (!atomic)
> -		mmap_read_lock(current->mm);
> +		mmap_write_lock(current->mm);
>  	while (n) {
>  		pte_t *pte;
>  		spinlock_t *ptl;
> @@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  
>  		while (!pin_page_for_write(to, &pte, &ptl)) {
>  			if (!atomic)
> -				mmap_read_unlock(current->mm);
> +				mmap_write_unlock(current->mm);
>  			if (__put_user(0, (char __user *)to))
>  				goto out;
>  			if (!atomic)
> -				mmap_read_lock(current->mm);
> +				mmap_write_lock(current->mm);
>  		}
>  
>  		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
> @@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
>  			spin_unlock(ptl);
>  	}
>  	if (!atomic)
> -		mmap_read_unlock(current->mm);
> +		mmap_write_unlock(current->mm);
>  
>  out:
>  	return n;
> -- 
> 2.28.0
> 
>
Christian Lamparter Sept. 29, 2020, 6:56 p.m. UTC | #3
Hello,

On Tue, Sep 29, 2020 at 11:32 AM Russell King - ARM Linux admin
<linux@armlinux.org.uk> wrote:
>
> On Sat, Sep 26, 2020 at 09:28:54PM +0200, Christian Lamparter wrote:
> > changes ARM's copy_to_user to use mmap_*write*_lock
> > variants. This is because the data is written to
> > user-space and not read.
>
> The "read" lock is there to ensure that the page tables are not changed
> (e.g. due to a page fault in another thread) while we are making changes
> to the page. It is a "read" lock because we can tolerate other threads
> reading the page tables and mm structures, but not making changes to
> those structures.
>
> This has nothing to do with whether we are reading or writing userspace.
>
> Therefore, your patch is incorrect.

I was looking at ARM's copy_to_user because a faulty out-of-tree RPI
patch mixed read and write locks, and this got me confused. Thanks
to your excellent explanation, I now know as well that this patch is incorrect.

Cheers,
Christian
diff mbox series

Patch

diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 106f83a5ea6d..7491c13fdf0e 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -101,7 +101,7 @@  __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	atomic = faulthandler_disabled();
 
 	if (!atomic)
-		mmap_read_lock(current->mm);
+		mmap_write_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
@@ -109,11 +109,11 @@  __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 
 		while (!pin_page_for_write(to, &pte, &ptl)) {
 			if (!atomic)
-				mmap_read_unlock(current->mm);
+				mmap_write_unlock(current->mm);
 			if (__put_user(0, (char __user *)to))
 				goto out;
 			if (!atomic)
-				mmap_read_lock(current->mm);
+				mmap_write_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -133,7 +133,7 @@  __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			spin_unlock(ptl);
 	}
 	if (!atomic)
-		mmap_read_unlock(current->mm);
+		mmap_write_unlock(current->mm);
 
 out:
 	return n;