[v2,1/3] userfaultfd: use vma_pagesize for all huge page size calculation

Message ID 20190927070032.2129-1-richardw.yang@linux.intel.com (mailing list archive)
State New, archived
Series [v2,1/3] userfaultfd: use vma_pagesize for all huge page size calculation

Commit Message

Wei Yang Sept. 27, 2019, 7 a.m. UTC
In function __mcopy_atomic_hugetlb(), we handle the huge page size in two
different ways: through the local variable vma_hpagesize and through the
hstate helper huge_page_size().

Since both yield the same value, there is no need for two different
mechanisms. This patch makes the function consistent by using vma_hpagesize
throughout.
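
The two are interchangeable because vma_hpagesize is taken from the
destination VMA, which for a hugetlb mapping reports the hstate's huge page
size. A simplified sketch of the relationship (not the exact kernel code):

	/* in __mcopy_atomic_hugetlb(), roughly: */
	struct hstate *h = hstate_vma(dst_vma);
	unsigned long vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * For a hugetlb VMA, vma_kernel_pagesize() resolves to
	 * huge_page_size(hstate_vma(vma)), so the hstate helpers can all
	 * be expressed through vma_hpagesize:
	 */
	VM_BUG_ON(vma_hpagesize != huge_page_size(h));
	VM_BUG_ON((vma_hpagesize - 1) != ~huge_page_mask(h));
	VM_BUG_ON(vma_hpagesize / PAGE_SIZE != pages_per_huge_page(h));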

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
---
 mm/userfaultfd.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

Comments

Andrew Morton Sept. 27, 2019, 10:10 p.m. UTC | #1
On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:

> In function __mcopy_atomic_hugetlb, we use two variables to deal with
> huge page size: vma_hpagesize and huge_page_size.
> 
> Since they are the same, it is not necessary to use two different
> mechanism. This patch makes it consistent by all using vma_hpagesize.
> 
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>  		pte_t dst_pteval;
>  
>  		BUG_ON(dst_addr >= dst_start + len);
> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>  
>  		/*
>  		 * Serialize via hugetlb_fault_mutex
> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>  
>  		err = -ENOMEM;
> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>  		if (!dst_pte) {
>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>  			goto out_unlock;
> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>  
>  			err = copy_huge_page_from_user(page,
>  						(const void __user *)src_addr,
> -						pages_per_huge_page(h), true);
> +						vma_hpagesize / PAGE_SIZE,
> +						true);
>  			if (unlikely(err)) {
>  				err = -EFAULT;
>  				goto out;

Looks right.

We could go ahead and remove local variable `h', given that
hugetlb_fault_mutex_hash() doesn't actually use its first arg..
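
Roughly like this, maybe (untested sketch, hunk positions approximate; it
would also need hugetlb_fault_mutex_hash() itself to lose the unused hstate
argument, which is a separate cleanup):

--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ ... @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
-	struct hstate *h;
@@ ... @@
-	h = hstate_vma(dst_vma);
@@ ... @@
-		hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+		hash = hugetlb_fault_mutex_hash(mapping, idx);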
Mike Kravetz Sept. 27, 2019, 10:21 p.m. UTC | #2
On 9/27/19 3:10 PM, Andrew Morton wrote:
> On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:
> 
>> In function __mcopy_atomic_hugetlb, we use two variables to deal with
>> huge page size: vma_hpagesize and huge_page_size.
>>
>> Since they are the same, it is not necessary to use two different
>> mechanism. This patch makes it consistent by all using vma_hpagesize.
>>
>> --- a/mm/userfaultfd.c
>> +++ b/mm/userfaultfd.c
>> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  		pte_t dst_pteval;
>>  
>>  		BUG_ON(dst_addr >= dst_start + len);
>> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
>> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>>  
>>  		/*
>>  		 * Serialize via hugetlb_fault_mutex
>> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>>  
>>  		err = -ENOMEM;
>> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
>> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>>  		if (!dst_pte) {
>>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>>  			goto out_unlock;
>> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  
>>  			err = copy_huge_page_from_user(page,
>>  						(const void __user *)src_addr,
>> -						pages_per_huge_page(h), true);
>> +						vma_hpagesize / PAGE_SIZE,
>> +						true);
>>  			if (unlikely(err)) {
>>  				err = -EFAULT;
>>  				goto out;
> 
> Looks right.
> 
> We could go ahead and remove local variable `h', given that
> hugetlb_fault_mutex_hash() doesn't actually use its first arg..

Good catch Andrew.  I missed that, but I also wrote the original code that
is being cleaned up. :)

You can add,
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
to the series.
Wei Yang Sept. 29, 2019, 12:45 a.m. UTC | #3
On Fri, Sep 27, 2019 at 03:10:33PM -0700, Andrew Morton wrote:
>On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:
>
>> In function __mcopy_atomic_hugetlb, we use two variables to deal with
>> huge page size: vma_hpagesize and huge_page_size.
>> 
>> Since they are the same, it is not necessary to use two different
>> mechanism. This patch makes it consistent by all using vma_hpagesize.
>> 
>> --- a/mm/userfaultfd.c
>> +++ b/mm/userfaultfd.c
>> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  		pte_t dst_pteval;
>>  
>>  		BUG_ON(dst_addr >= dst_start + len);
>> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
>> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>>  
>>  		/*
>>  		 * Serialize via hugetlb_fault_mutex
>> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>>  
>>  		err = -ENOMEM;
>> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
>> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>>  		if (!dst_pte) {
>>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>>  			goto out_unlock;
>> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>  
>>  			err = copy_huge_page_from_user(page,
>>  						(const void __user *)src_addr,
>> -						pages_per_huge_page(h), true);
>> +						vma_hpagesize / PAGE_SIZE,
>> +						true);
>>  			if (unlikely(err)) {
>>  				err = -EFAULT;
>>  				goto out;
>
>Looks right.
>
>We could go ahead and remove local variable `h', given that
>hugetlb_fault_mutex_hash() doesn't actually use its first arg..

Oops, I hadn't noticed that h is not used in the function.


Is there any historical reason to pass h to hugetlb_fault_mutex_hash()?
Neither of its two definitions uses it.
Wei Yang Oct. 5, 2019, 12:34 a.m. UTC | #4
On Fri, Sep 27, 2019 at 03:21:38PM -0700, Mike Kravetz wrote:
>On 9/27/19 3:10 PM, Andrew Morton wrote:
>> On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:
>> 
>>> In function __mcopy_atomic_hugetlb, we use two variables to deal with
>>> huge page size: vma_hpagesize and huge_page_size.
>>>
>>> Since they are the same, it is not necessary to use two different
>>> mechanism. This patch makes it consistent by all using vma_hpagesize.
>>>
>>> --- a/mm/userfaultfd.c
>>> +++ b/mm/userfaultfd.c
>>> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  		pte_t dst_pteval;
>>>  
>>>  		BUG_ON(dst_addr >= dst_start + len);
>>> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
>>> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>>>  
>>>  		/*
>>>  		 * Serialize via hugetlb_fault_mutex
>>> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>>>  
>>>  		err = -ENOMEM;
>>> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
>>> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>>>  		if (!dst_pte) {
>>>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>>>  			goto out_unlock;
>>> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  
>>>  			err = copy_huge_page_from_user(page,
>>>  						(const void __user *)src_addr,
>>> -						pages_per_huge_page(h), true);
>>> +						vma_hpagesize / PAGE_SIZE,
>>> +						true);
>>>  			if (unlikely(err)) {
>>>  				err = -EFAULT;
>>>  				goto out;
>> 
>> Looks right.
>> 
>> We could go ahead and remove local variable `h', given that
>> hugetlb_fault_mutex_hash() doesn't actually use its first arg..
>
>Good catch Andrew.  I missed that, but I also wrote the original code that
>is being cleaned up. :)
>

I did a cleanup to remove the first parameter of hugetlb_fault_mutex_hash().
Looking forward to your comments.

>You can add,
>Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
>to the series.
>-- 
>Mike Kravetz
Mike Kravetz Oct. 7, 2019, 10:55 p.m. UTC | #5
On 9/28/19 5:45 PM, Wei Yang wrote:
> On Fri, Sep 27, 2019 at 03:10:33PM -0700, Andrew Morton wrote:
>> On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:
>>
>>> In function __mcopy_atomic_hugetlb, we use two variables to deal with
>>> huge page size: vma_hpagesize and huge_page_size.
>>>
>>> Since they are the same, it is not necessary to use two different
>>> mechanism. This patch makes it consistent by all using vma_hpagesize.
>>>
>>> --- a/mm/userfaultfd.c
>>> +++ b/mm/userfaultfd.c
>>> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  		pte_t dst_pteval;
>>>  
>>>  		BUG_ON(dst_addr >= dst_start + len);
>>> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
>>> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>>>  
>>>  		/*
>>>  		 * Serialize via hugetlb_fault_mutex
>>> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>>>  
>>>  		err = -ENOMEM;
>>> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
>>> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>>>  		if (!dst_pte) {
>>>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>>>  			goto out_unlock;
>>> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>  
>>>  			err = copy_huge_page_from_user(page,
>>>  						(const void __user *)src_addr,
>>> -						pages_per_huge_page(h), true);
>>> +						vma_hpagesize / PAGE_SIZE,
>>> +						true);
>>>  			if (unlikely(err)) {
>>>  				err = -EFAULT;
>>>  				goto out;
>>
>> Looks right.
>>
>> We could go ahead and remove local variable `h', given that
>> hugetlb_fault_mutex_hash() doesn't actually use its first arg..
> 
> Oops, haven't imagine h is not used in the function.
> 
> 
> Any historical reason to pass h in hugetlb_fault_mutex_hash()? Neither these
> two definition use it.

See 1b426bac66e6 ("hugetlb: use same fault hash key for shared and private
mappings").  Prior to that change, the hash key for private mappings was
created by:

	key[0] = (unsigned long) mm;
	key[1] = address >> huge_page_shift(h);

When removing that code, I should have removed 'h'.
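
After that commit only the shared-mapping key is left, built from the mapping
and the index alone, so 'h' is never referenced. From memory, the remaining
CONFIG_SMP definition looks roughly like this:

u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
			     pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	/* 'h' is still accepted but never used */
	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
	return hash & (num_fault_mutexes - 1);
}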
Wei Yang Oct. 8, 2019, 12:57 a.m. UTC | #6
On Mon, Oct 07, 2019 at 03:55:21PM -0700, Mike Kravetz wrote:
>On 9/28/19 5:45 PM, Wei Yang wrote:
>> On Fri, Sep 27, 2019 at 03:10:33PM -0700, Andrew Morton wrote:
>>> On Fri, 27 Sep 2019 15:00:30 +0800 Wei Yang <richardw.yang@linux.intel.com> wrote:
>>>
>>>> In function __mcopy_atomic_hugetlb, we use two variables to deal with
>>>> huge page size: vma_hpagesize and huge_page_size.
>>>>
>>>> Since they are the same, it is not necessary to use two different
>>>> mechanism. This patch makes it consistent by all using vma_hpagesize.
>>>>
>>>> --- a/mm/userfaultfd.c
>>>> +++ b/mm/userfaultfd.c
>>>> @@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>>  		pte_t dst_pteval;
>>>>  
>>>>  		BUG_ON(dst_addr >= dst_start + len);
>>>> -		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
>>>> +		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
>>>>  
>>>>  		/*
>>>>  		 * Serialize via hugetlb_fault_mutex
>>>> @@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
>>>>  
>>>>  		err = -ENOMEM;
>>>> -		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
>>>> +		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
>>>>  		if (!dst_pte) {
>>>>  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>>>>  			goto out_unlock;
>>>> @@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
>>>>  
>>>>  			err = copy_huge_page_from_user(page,
>>>>  						(const void __user *)src_addr,
>>>> -						pages_per_huge_page(h), true);
>>>> +						vma_hpagesize / PAGE_SIZE,
>>>> +						true);
>>>>  			if (unlikely(err)) {
>>>>  				err = -EFAULT;
>>>>  				goto out;
>>>
>>> Looks right.
>>>
>>> We could go ahead and remove local variable `h', given that
>>> hugetlb_fault_mutex_hash() doesn't actually use its first arg..
>> 
>> Oops, haven't imagine h is not used in the function.
>> 
>> 
>> Any historical reason to pass h in hugetlb_fault_mutex_hash()? Neither these
>> two definition use it.
>
>See 1b426bac66e6 ("hugetlb: use same fault hash key for shared and private
>mappings").  Prior to that change, the hash key for private mappings was
>created by:
>
>	key[0] = (unsigned long) mm;
>	key[1] = address >> huge_page_shift(h);
>
>When removing that code, I should have removed 'h'.

Thanks for this information.

>-- 
>Mike Kravetz

Patch

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index a998c1b4d8a1..01ad48621bb7 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -262,7 +262,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		pte_t dst_pteval;
 
 		BUG_ON(dst_addr >= dst_start + len);
-		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
+		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
 
 		/*
 		 * Serialize via hugetlb_fault_mutex
@@ -273,7 +273,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		err = -ENOMEM;
-		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
+		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			goto out_unlock;
@@ -300,7 +300,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
 			err = copy_huge_page_from_user(page,
 						(const void __user *)src_addr,
-						pages_per_huge_page(h), true);
+						vma_hpagesize / PAGE_SIZE,
+						true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;