Message ID | 20240704182718.2653918-13-Liam.Howlett@oracle.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Avoid MAP_FIXED gap exposure | expand |
On Thu, Jul 04, 2024 at 02:27:14PM GMT, Liam R. Howlett wrote: > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com> > > With the only caller to unmap_region() being the error path of > mmap_region(), the argument list can be significantly reduced. > > There is also no need to forward declare the static function any > longer. > > Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com> > --- > mm/mmap.c | 25 +++++++++---------------- > 1 file changed, 9 insertions(+), 16 deletions(-) > > diff --git a/mm/mmap.c b/mm/mmap.c > index 45443a53be76..5d458c5f080e 100644 > --- a/mm/mmap.c > +++ b/mm/mmap.c > @@ -76,11 +76,6 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; > static bool ignore_rlimit_data; > core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); > > -static void unmap_region(struct mm_struct *mm, struct ma_state *mas, > - struct vm_area_struct *vma, struct vm_area_struct *prev, > - struct vm_area_struct *next, unsigned long start, > - unsigned long end, unsigned long tree_end, bool mm_wr_locked); > - > static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) > { > return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); > @@ -2398,22 +2393,21 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) > * > * Called with the mm semaphore held. 
> */ > -static void unmap_region(struct mm_struct *mm, struct ma_state *mas, > - struct vm_area_struct *vma, struct vm_area_struct *prev, > - struct vm_area_struct *next, unsigned long start, > - unsigned long end, unsigned long tree_end, bool mm_wr_locked) > +static void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, > + struct vm_area_struct *prev, struct vm_area_struct *next) > { > + struct mm_struct *mm = vma->vm_mm; > struct mmu_gather tlb; > - unsigned long mt_start = mas->index; > > lru_add_drain(); > tlb_gather_mmu(&tlb, mm); > update_hiwater_rss(mm); > - unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); > - mas_set(mas, mt_start); OK so the 'raise something in one patch only for it to be removed in the next' pattern continues :) more aesthetically pleasing. > + unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, > + /* mm_wr_locked = */ true); > + mas_set(mas, vma->vm_end); > free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, > - next ? next->vm_start : USER_PGTABLES_CEILING, > - mm_wr_locked); > + next ? next->vm_start : USER_PGTABLES_CEILING, > + /* mm_wr_locked = */ true); > tlb_finish_mmu(&tlb); > } > > @@ -3186,8 +3180,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, > > vma_iter_set(&vmi, vma->vm_end); > /* Undo any partial mapping done by a device driver. */ > - unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, > - vma->vm_end, vma->vm_end, true); > + unmap_region(&vmi.mas, vma, prev, next); > } > if (writable_file_mapping) > mapping_unmap_writable(file->f_mapping); > -- > 2.43.0 > Lovely reduction in parameters. Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
diff --git a/mm/mmap.c b/mm/mmap.c index 45443a53be76..5d458c5f080e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -76,11 +76,6 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); -static void unmap_region(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma, struct vm_area_struct *prev, - struct vm_area_struct *next, unsigned long start, - unsigned long end, unsigned long tree_end, bool mm_wr_locked); - static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); @@ -2398,22 +2393,21 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) * * Called with the mm semaphore held. */ -static void unmap_region(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma, struct vm_area_struct *prev, - struct vm_area_struct *next, unsigned long start, - unsigned long end, unsigned long tree_end, bool mm_wr_locked) +static void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, + struct vm_area_struct *prev, struct vm_area_struct *next) { + struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; - unsigned long mt_start = mas->index; lru_add_drain(); tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); - unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); - mas_set(mas, mt_start); + unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, + /* mm_wr_locked = */ true); + mas_set(mas, vma->vm_end); free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, - next ? next->vm_start : USER_PGTABLES_CEILING, - mm_wr_locked); + next ? 
next->vm_start : USER_PGTABLES_CEILING, + /* mm_wr_locked = */ true); tlb_finish_mmu(&tlb); } @@ -3186,8 +3180,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_iter_set(&vmi, vma->vm_end); /* Undo any partial mapping done by a device driver. */ - unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, - vma->vm_end, vma->vm_end, true); + unmap_region(&vmi.mas, vma, prev, next); } if (writable_file_mapping) mapping_unmap_writable(file->f_mapping);