[v3,14/16] mm/mmap: Use PHYS_PFN in mmap_region()

Message ID: 20240704182718.2653918-15-Liam.Howlett@oracle.com
State: New
Series: Avoid MAP_FIXED gap exposure

Commit Message

Liam R. Howlett July 4, 2024, 6:27 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Instead of open-coding the right shift of the length by PAGE_SHIFT, use
the PHYS_PFN() helper.  Also use the existing pglen local variable
everywhere instead of recomputing the page count at each use.
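
PHYS_PFN() is defined in include/linux/pfn.h as

	#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

so this is the same shift spelled through a helper; there is no
functional change.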

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mmap.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

Comments

Lorenzo Stoakes July 8, 2024, 12:21 p.m. UTC | #1
On Thu, Jul 04, 2024 at 02:27:16PM GMT, Liam R. Howlett wrote:
[snip]

Maybe I should literally look ahead before making comments :)) thanks for
reading my mind and doing what I asked though! ;)

However I don't think you've fixed the duplication of PHYS_PFN(vm_end -
vm_start) in count_vma_pages_range() - still worth doing I think.
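
(For illustration only - assuming the loop body there computes the span
twice, the hoist would be something like:

	unsigned long pglen = PHYS_PFN(vm_end - vm_start);

	nr_pages += pglen;
	if (vma->vm_flags & VM_ACCOUNT)
		*nr_accounted += pglen;

untested, and the names are just illustrative.)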

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Liam R. Howlett July 9, 2024, 6:35 p.m. UTC | #2
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [240708 08:21]:
> On Thu, Jul 04, 2024 at 02:27:16PM GMT, Liam R. Howlett wrote:
[snip]
> 
> Maybe I should literally look ahead before making comments :)) thanks for
> reading my mind and doing what I asked though! ;)
> 
> However I don't think you've fixed the duplication of PHYS_PFN(vm_end -
> vm_start) in count_vma_pages_range() - still worth doing I think.

I drop that function in the last patch, so it's probably not worth
doing.  This is just a few patches before the axe drops.

> 
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Lorenzo Stoakes July 9, 2024, 6:42 p.m. UTC | #3
On Tue, Jul 09, 2024 at 02:35:16PM GMT, Liam R. Howlett wrote:
[snip]

> >
> > Maybe I should literally look ahead before making comments :)) thanks for
> > reading my mind and doing what I asked though! ;)
> >
> > However I don't think you've fixed the duplication of PHYS_PFN(vm_end -
> > vm_start) in count_vma_pages_range() - still worth doing I think.
>
> I drop that function in the last patch so probably not worth doing.
> This is just a few patches before the axe drops.
>

Actually that's a fair point - I think it's fine to do without this nit
given that context!

This is the peril of reviewing forwards through the series and being
surprised later when things are addressed in subsequent patches (or become,
ultimately, irrelevant!).

> >
> > Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Suren Baghdasaryan July 10, 2024, 5:32 p.m. UTC | #4
On Mon, Jul 8, 2024 at 5:21 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Thu, Jul 04, 2024 at 02:27:16PM GMT, Liam R. Howlett wrote:
[snip]
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

Patch

diff --git a/mm/mmap.c b/mm/mmap.c
index 0c334eeae8cd..b14da6bd257f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2935,7 +2935,7 @@  unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = NULL;
 	struct vm_area_struct *next, *prev, *merge;
-	pgoff_t pglen = len >> PAGE_SHIFT;
+	pgoff_t pglen = PHYS_PFN(len);
 	unsigned long charged = 0;
 	struct vma_munmap_struct vms;
 	struct ma_state mas_detach;
@@ -2955,7 +2955,7 @@  unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * MAP_FIXED may remove pages of mappings that intersects with requested
 	 * mapping. Account for the pages it would unmap.
 	 */
-	if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages))
+	if (!may_expand_vm(mm, vm_flags, pglen - nr_pages))
 		return -ENOMEM;
 
 	if (unlikely(!can_modify_mm(mm, addr, end)))
@@ -2990,7 +2990,7 @@  unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * Private writable mapping: check memory availability
 	 */
 	if (accountable_mapping(file, vm_flags)) {
-		charged = len >> PAGE_SHIFT;
+		charged = pglen;
 		charged -= nr_accounted;
 		if (security_vm_enough_memory_mm(mm, charged))
 			goto abort_munmap;
@@ -3149,14 +3149,14 @@  unsigned long mmap_region(struct file *file, unsigned long addr,
 	if (vms.nr_pages)
 		vms_complete_munmap_vmas(&vms, &mas_detach);
 
-	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+	vm_stat_account(mm, vm_flags, pglen);
 	if (vm_flags & VM_LOCKED) {
 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
 					is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm))
 			vm_flags_clear(vma, VM_LOCKED_MASK);
 		else
-			mm->locked_vm += (len >> PAGE_SHIFT);
+			mm->locked_vm += pglen;
 	}
 
 	if (file)
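
For anyone who wants to convince themselves the conversion is a no-op,
here is a minimal userspace sketch of the equivalence (PAGE_SHIFT
hardcoded to 12 for 4K pages; illustrative only, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

	int main(void)
	{
		unsigned long len = 5UL << PAGE_SHIFT;	/* 5 pages */

		/* Both spellings yield the same page count: prints "5 5" */
		printf("%lu %lu\n", PHYS_PFN(len), len >> PAGE_SHIFT);
		return 0;
	}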