Message ID | 20230125083851.27759-3-surenb@google.com (mailing list archive) |
---|---|
State | Not Applicable |
Headers | show |
Series | introduce vm_flags modifier functions | expand |
On Wed 25-01-23 00:38:47, Suren Baghdasaryan wrote: > To simplify the usage of VM_LOCKED_CLEAR_MASK in clear_vm_flags(), > replace it with VM_LOCKED_MASK bitmask and convert all users. > > Signed-off-by: Suren Baghdasaryan <surenb@google.com> Acked-by: Michal Hocko <mhocko@suse.com> > --- > include/linux/mm.h | 4 ++-- > kernel/fork.c | 2 +- > mm/hugetlb.c | 4 ++-- > mm/mlock.c | 6 +++--- > mm/mmap.c | 6 +++--- > mm/mremap.c | 2 +- > 6 files changed, 12 insertions(+), 12 deletions(-) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index b71f2809caac..da62bdd627bf 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -421,8 +421,8 @@ extern unsigned int kobjsize(const void *objp); > /* This mask defines which mm->def_flags a process can inherit its parent */ > #define VM_INIT_DEF_MASK VM_NOHUGEPAGE > > -/* This mask is used to clear all the VMA flags used by mlock */ > -#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) > +/* This mask represents all the VMA flag bits used by mlock */ > +#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) > > /* Arch-specific flags to clear when updating VM flags on protection change */ > #ifndef VM_ARCH_CLEAR > diff --git a/kernel/fork.c b/kernel/fork.c > index 6683c1b0f460..03d472051236 100644 > --- a/kernel/fork.c > +++ b/kernel/fork.c > @@ -669,7 +669,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, > tmp->anon_vma = NULL; > } else if (anon_vma_fork(tmp, mpnt)) > goto fail_nomem_anon_vma_fork; > - tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); > + clear_vm_flags(tmp, VM_LOCKED_MASK); > file = tmp->vm_file; > if (file) { > struct address_space *mapping = file->f_mapping; > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > index d20c8b09890e..4ecdbad9a451 100644 > --- a/mm/hugetlb.c > +++ b/mm/hugetlb.c > @@ -6973,8 +6973,8 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, > unsigned long s_end = sbase + PUD_SIZE; > > /* Allow segments to share if only one is marked 
locked */ > - unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > - unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; > + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; > + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; > > /* > * match the virtual addresses, permission and the alignment of the > diff --git a/mm/mlock.c b/mm/mlock.c > index 0336f52e03d7..5c4fff93cd6b 100644 > --- a/mm/mlock.c > +++ b/mm/mlock.c > @@ -497,7 +497,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, > if (vma->vm_start != tmp) > return -ENOMEM; > > - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > + newflags = vma->vm_flags & ~VM_LOCKED_MASK; > newflags |= flags; > /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ > tmp = vma->vm_end; > @@ -661,7 +661,7 @@ static int apply_mlockall_flags(int flags) > struct vm_area_struct *vma, *prev = NULL; > vm_flags_t to_add = 0; > > - current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; > + current->mm->def_flags &= ~VM_LOCKED_MASK; > if (flags & MCL_FUTURE) { > current->mm->def_flags |= VM_LOCKED; > > @@ -681,7 +681,7 @@ static int apply_mlockall_flags(int flags) > for_each_vma(vmi, vma) { > vm_flags_t newflags; > > - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > + newflags = vma->vm_flags & ~VM_LOCKED_MASK; > newflags |= to_add; > > /* Ignore errors */ > diff --git a/mm/mmap.c b/mm/mmap.c > index d4abc6feced1..323bd253b25a 100644 > --- a/mm/mmap.c > +++ b/mm/mmap.c > @@ -2671,7 +2671,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, > if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || > is_vm_hugetlb_page(vma) || > vma == get_gate_vma(current->mm)) > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + clear_vm_flags(vma, VM_LOCKED_MASK); > else > mm->locked_vm += (len >> PAGE_SHIFT); > } > @@ -3340,8 +3340,8 @@ static struct vm_area_struct *__install_special_mapping( > vma->vm_start = addr; > vma->vm_end = addr + len; > > - vma->vm_flags = vm_flags | 
mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + init_vm_flags(vma, (vm_flags | mm->def_flags | > + VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); > vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); > > vma->vm_ops = ops; > diff --git a/mm/mremap.c b/mm/mremap.c > index 1b3ee02bead7..35db9752cb6a 100644 > --- a/mm/mremap.c > +++ b/mm/mremap.c > @@ -687,7 +687,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, > > if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { > /* We always clear VM_LOCKED[ONFAULT] on the old vma */ > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + clear_vm_flags(vma, VM_LOCKED_MASK); > > /* > * anon_vma links of the old vma is no longer needed after its page > -- > 2.39.1
On Wed, Jan 25, 2023 at 12:38:47AM -0800, Suren Baghdasaryan wrote: > To simplify the usage of VM_LOCKED_CLEAR_MASK in clear_vm_flags(), > replace it with VM_LOCKED_MASK bitmask and convert all users. > > Signed-off-by: Suren Baghdasaryan <surenb@google.com> Acked-by: Mike Rapoport (IBM) <rppt@kernel.org> > --- > include/linux/mm.h | 4 ++-- > kernel/fork.c | 2 +- > mm/hugetlb.c | 4 ++-- > mm/mlock.c | 6 +++--- > mm/mmap.c | 6 +++--- > mm/mremap.c | 2 +- > 6 files changed, 12 insertions(+), 12 deletions(-) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index b71f2809caac..da62bdd627bf 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -421,8 +421,8 @@ extern unsigned int kobjsize(const void *objp); > /* This mask defines which mm->def_flags a process can inherit its parent */ > #define VM_INIT_DEF_MASK VM_NOHUGEPAGE > > -/* This mask is used to clear all the VMA flags used by mlock */ > -#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) > +/* This mask represents all the VMA flag bits used by mlock */ > +#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) > > /* Arch-specific flags to clear when updating VM flags on protection change */ > #ifndef VM_ARCH_CLEAR > diff --git a/kernel/fork.c b/kernel/fork.c > index 6683c1b0f460..03d472051236 100644 > --- a/kernel/fork.c > +++ b/kernel/fork.c > @@ -669,7 +669,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, > tmp->anon_vma = NULL; > } else if (anon_vma_fork(tmp, mpnt)) > goto fail_nomem_anon_vma_fork; > - tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); > + clear_vm_flags(tmp, VM_LOCKED_MASK); > file = tmp->vm_file; > if (file) { > struct address_space *mapping = file->f_mapping; > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > index d20c8b09890e..4ecdbad9a451 100644 > --- a/mm/hugetlb.c > +++ b/mm/hugetlb.c > @@ -6973,8 +6973,8 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, > unsigned long s_end = sbase + PUD_SIZE; > > /* Allow segments to share 
if only one is marked locked */ > - unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > - unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; > + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; > + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; > > /* > * match the virtual addresses, permission and the alignment of the > diff --git a/mm/mlock.c b/mm/mlock.c > index 0336f52e03d7..5c4fff93cd6b 100644 > --- a/mm/mlock.c > +++ b/mm/mlock.c > @@ -497,7 +497,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, > if (vma->vm_start != tmp) > return -ENOMEM; > > - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > + newflags = vma->vm_flags & ~VM_LOCKED_MASK; > newflags |= flags; > /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ > tmp = vma->vm_end; > @@ -661,7 +661,7 @@ static int apply_mlockall_flags(int flags) > struct vm_area_struct *vma, *prev = NULL; > vm_flags_t to_add = 0; > > - current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; > + current->mm->def_flags &= ~VM_LOCKED_MASK; > if (flags & MCL_FUTURE) { > current->mm->def_flags |= VM_LOCKED; > > @@ -681,7 +681,7 @@ static int apply_mlockall_flags(int flags) > for_each_vma(vmi, vma) { > vm_flags_t newflags; > > - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; > + newflags = vma->vm_flags & ~VM_LOCKED_MASK; > newflags |= to_add; > > /* Ignore errors */ > diff --git a/mm/mmap.c b/mm/mmap.c > index d4abc6feced1..323bd253b25a 100644 > --- a/mm/mmap.c > +++ b/mm/mmap.c > @@ -2671,7 +2671,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, > if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || > is_vm_hugetlb_page(vma) || > vma == get_gate_vma(current->mm)) > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + clear_vm_flags(vma, VM_LOCKED_MASK); > else > mm->locked_vm += (len >> PAGE_SHIFT); > } > @@ -3340,8 +3340,8 @@ static struct vm_area_struct *__install_special_mapping( > vma->vm_start = addr; > vma->vm_end = addr + len; > > - 
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + init_vm_flags(vma, (vm_flags | mm->def_flags | > + VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); > vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); > > vma->vm_ops = ops; > diff --git a/mm/mremap.c b/mm/mremap.c > index 1b3ee02bead7..35db9752cb6a 100644 > --- a/mm/mremap.c > +++ b/mm/mremap.c > @@ -687,7 +687,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, > > if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { > /* We always clear VM_LOCKED[ONFAULT] on the old vma */ > - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; > + clear_vm_flags(vma, VM_LOCKED_MASK); > > /* > * anon_vma links of the old vma is no longer needed after its page > -- > 2.39.1 > >
diff --git a/include/linux/mm.h b/include/linux/mm.h index b71f2809caac..da62bdd627bf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -421,8 +421,8 @@ extern unsigned int kobjsize(const void *objp); /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE -/* This mask is used to clear all the VMA flags used by mlock */ -#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) +/* This mask represents all the VMA flag bits used by mlock */ +#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) /* Arch-specific flags to clear when updating VM flags on protection change */ #ifndef VM_ARCH_CLEAR diff --git a/kernel/fork.c b/kernel/fork.c index 6683c1b0f460..03d472051236 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -669,7 +669,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp->anon_vma = NULL; } else if (anon_vma_fork(tmp, mpnt)) goto fail_nomem_anon_vma_fork; - tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); + clear_vm_flags(tmp, VM_LOCKED_MASK); file = tmp->vm_file; if (file) { struct address_space *mapping = file->f_mapping; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d20c8b09890e..4ecdbad9a451 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6973,8 +6973,8 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, unsigned long s_end = sbase + PUD_SIZE; /* Allow segments to share if only one is marked locked */ - unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; - unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; /* * match the virtual addresses, permission and the alignment of the diff --git a/mm/mlock.c b/mm/mlock.c index 0336f52e03d7..5c4fff93cd6b 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -497,7 +497,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, if (vma->vm_start != tmp) return 
-ENOMEM; - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; + newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ tmp = vma->vm_end; @@ -661,7 +661,7 @@ static int apply_mlockall_flags(int flags) struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; - current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; + current->mm->def_flags &= ~VM_LOCKED_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; @@ -681,7 +681,7 @@ static int apply_mlockall_flags(int flags) for_each_vma(vmi, vma) { vm_flags_t newflags; - newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; + newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= to_add; /* Ignore errors */ diff --git a/mm/mmap.c b/mm/mmap.c index d4abc6feced1..323bd253b25a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2671,7 +2671,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + clear_vm_flags(vma, VM_LOCKED_MASK); else mm->locked_vm += (len >> PAGE_SHIFT); } @@ -3340,8 +3340,8 @@ static struct vm_area_struct *__install_special_mapping( vma->vm_start = addr; vma->vm_end = addr + len; - vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + init_vm_flags(vma, (vm_flags | mm->def_flags | + VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; diff --git a/mm/mremap.c b/mm/mremap.c index 1b3ee02bead7..35db9752cb6a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -687,7 +687,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { /* We always clear VM_LOCKED[ONFAULT] on the old vma */ - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + clear_vm_flags(vma, VM_LOCKED_MASK); /* * anon_vma links of the old 
vma is no longer needed after its page
To simplify the usage of VM_LOCKED_CLEAR_MASK in clear_vm_flags(), replace it with the VM_LOCKED_MASK bitmask and convert all users. Signed-off-by: Suren Baghdasaryan <surenb@google.com> --- include/linux/mm.h | 4 ++-- kernel/fork.c | 2 +- mm/hugetlb.c | 4 ++-- mm/mlock.c | 6 +++--- mm/mmap.c | 6 +++--- mm/mremap.c | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-)