Message ID | 20241112194635.444146-4-surenb@google.com (mailing list archive)
---|---
State | New
Series | move per-vma lock into vm_area_struct
On Tue, Nov 12, 2024 at 11:46:33AM -0800, Suren Baghdasaryan wrote:
> Current implementation does not set detached flag when a VMA is first
> allocated. This does not represent the real state of the VMA, which is
> detached until it is added into mm's VMA tree. Fix this by marking new
> VMAs as detached and resetting detached flag only after VMA is added
> into a tree.
>

This seems very sensible.

> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Aside from nits/refactoring suggestions below:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> ---
>  include/linux/mm.h               | 10 +++++++++-
>  mm/memory.c                      |  2 +-
>  mm/mmap.c                        |  2 ++
>  mm/nommu.c                       |  2 ++
>  mm/vma.c                         |  3 +++
>  tools/testing/vma/vma_internal.h |  3 ++-

Just want to say THANK YOU for taking the time to update the testing
stubs :) Much appreciated!

>  6 files changed, 19 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index a5eb0be3e351..245a85caf4c3 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -812,6 +812,11 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
>  	vma->detached = detached;
>  }
>
> +static inline bool is_vma_detached(struct vm_area_struct *vma)
> +{
> +	return vma->detached;
> +}
> +
>  static inline void release_fault_lock(struct vm_fault *vmf)
>  {
>  	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> @@ -874,7 +879,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>  	vma->vm_mm = mm;
>  	vma->vm_ops = &vma_dummy_vm_ops;
>  	INIT_LIST_HEAD(&vma->anon_vma_chain);
> -	vma_mark_detached(vma, false);

How did this work before? Oh, I guess we initialised the VMA lock earlier,
right?

> +#ifdef CONFIG_PER_VMA_LOCK
> +	/* vma is not locked, can't use vma_mark_detached() */
> +	vma->detached = true;
> +#endif
>  	vma_numab_state_init(vma);
>  	vma_lock_init(vma);
>  }
> diff --git a/mm/memory.c b/mm/memory.c
> index 209885a4134f..d0197a0c0996 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -6279,7 +6279,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
>  		goto inval;
>
>  	/* Check if the VMA got isolated after we found it */
> -	if (vma->detached) {
> +	if (is_vma_detached(vma)) {
>  		vma_end_read(vma);
>  		count_vm_vma_lock_event(VMA_LOCK_MISS);
>  		/* The area was replaced with another one */
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 386429f7db5a..1295c4cedaf4 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1570,6 +1570,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
>  	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
>  		goto mas_store_fail;
>
> +	vma_mark_detached(vma, false);
>  	mm->map_count++;
>  	validate_mm(mm);
>  	ksm_add_vma(vma);
> @@ -1890,6 +1891,7 @@ static struct vm_area_struct *__install_special_mapping(
>  	if (ret)
>  		goto out;
>
> +	vma_mark_detached(vma, false);

Similar to the vma_iter_store() comment, maybe worth putting this in
insert_vm_struct()?

>  	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
>
>  	perf_event_mmap(vma);
> diff --git a/mm/nommu.c b/mm/nommu.c
> index 9cb6e99215e2..6afd5c2bd97d 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -1192,6 +1192,7 @@ unsigned long do_mmap(struct file *file,
>  	current->mm->map_count++;
>  	/* add the VMA to the tree */
>  	vma_iter_store(&vmi, vma);
> +	vma_mark_detached(vma, false);

Since we seem to always do this with vma_iter_store(), do we want to put
this there? Or maybe make a wrapper around the two if that seems not to
separate concerns enough?

>
>  	/* we flush the region from the icache only when the first executable
>  	 * mapping of it is made */
> @@ -1357,6 +1358,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
>  	setup_vma_to_mm(vma, mm);
>  	setup_vma_to_mm(new, mm);
>  	vma_iter_store(vmi, new);
> +	vma_mark_detached(new, false);
>  	mm->map_count++;
>  	return 0;
>
> diff --git a/mm/vma.c b/mm/vma.c
> index 8a454a7bbc80..1426871fa6e0 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -275,6 +275,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
>  	 * (it may either follow vma or precede it).
>  	 */
>  	vma_iter_store(vmi, vp->insert);
> +	vma_mark_detached(vp->insert, false);
>  	mm->map_count++;
>  }
>
> @@ -1690,6 +1691,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
>
>  	vma_start_write(vma);
>  	vma_iter_store(&vmi, vma);
> +	vma_mark_detached(vma, false);
>  	vma_link_file(vma);
>  	mm->map_count++;
>  	validate_mm(mm);
> @@ -2369,6 +2371,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
>  	/* Lock the VMA since it is modified after insertion into VMA tree */
>  	vma_start_write(vma);
>  	vma_iter_store(vmi, vma);
> +	vma_mark_detached(vma, false);
>  	map->mm->map_count++;
>  	vma_link_file(vma);
>
> diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
> index 1d9fc97b8e80..fdb60978821f 100644
> --- a/tools/testing/vma/vma_internal.h
> +++ b/tools/testing/vma/vma_internal.h
> @@ -438,7 +438,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>  	vma->vm_mm = mm;
>  	vma->vm_ops = &vma_dummy_vm_ops;
>  	INIT_LIST_HEAD(&vma->anon_vma_chain);
> -	vma_mark_detached(vma, false);
> +	/* vma is not locked, can't use vma_mark_detached() */
> +	vma->detached = true;

You're the best :)

>  }
>
>  static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
> --
> 2.47.0.277.g8800431eea-goog
>
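For context on the "vma is not locked, can't use vma_mark_detached()"
comment in vma_init(): the quoted hunk shows only the tail of
vma_mark_detached(), so the following is an assumed shape, not the
patch's code. The sketch assumes the helper asserts the per-VMA write
lock when detaching, which a freshly allocated VMA cannot hold, hence
the direct assignment in vma_init().

/*
 * Assumed shape of the helper (illustrative only): detaching a VMA is
 * only safe while it is write-locked, so the helper asserts that. A
 * brand-new VMA from vma_init() holds no lock and would trip this
 * assert, which is why vma_init() writes vma->detached directly.
 */
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{
	if (detached)
		vma_assert_write_locked(vma);	/* would fire on a new VMA */
	vma->detached = detached;
}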
On Wed, Nov 13, 2024 at 6:43 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Tue, Nov 12, 2024 at 11:46:33AM -0800, Suren Baghdasaryan wrote:
> > Current implementation does not set detached flag when a VMA is first
> > allocated. This does not represent the real state of the VMA, which is
> > detached until it is added into mm's VMA tree. Fix this by marking new
> > VMAs as detached and resetting detached flag only after VMA is added
> > into a tree.
> >
>
> This seems very sensible.
>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
>
> Aside from nits/refactoring suggestions below:
>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> > ---
> >  include/linux/mm.h               | 10 +++++++++-
> >  mm/memory.c                      |  2 +-
> >  mm/mmap.c                        |  2 ++
> >  mm/nommu.c                       |  2 ++
> >  mm/vma.c                         |  3 +++
> >  tools/testing/vma/vma_internal.h |  3 ++-
>
> Just want to say THANK YOU for taking the time to update the testing
> stubs :) Much appreciated!
>
> >  6 files changed, 19 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index a5eb0be3e351..245a85caf4c3 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -812,6 +812,11 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
> >  	vma->detached = detached;
> >  }
> >
> > +static inline bool is_vma_detached(struct vm_area_struct *vma)
> > +{
> > +	return vma->detached;
> > +}
> > +
> >  static inline void release_fault_lock(struct vm_fault *vmf)
> >  {
> >  	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> > @@ -874,7 +879,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
> >  	vma->vm_mm = mm;
> >  	vma->vm_ops = &vma_dummy_vm_ops;
> >  	INIT_LIST_HEAD(&vma->anon_vma_chain);
> > -	vma_mark_detached(vma, false);
>
> How did this work before? Oh, I guess we initialised the VMA lock earlier,
> right?

Yes.

> > +#ifdef CONFIG_PER_VMA_LOCK
> > +	/* vma is not locked, can't use vma_mark_detached() */
> > +	vma->detached = true;
> > +#endif
> >  	vma_numab_state_init(vma);
> >  	vma_lock_init(vma);
> >  }
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 209885a4134f..d0197a0c0996 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -6279,7 +6279,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> >  		goto inval;
> >
> >  	/* Check if the VMA got isolated after we found it */
> > -	if (vma->detached) {
> > +	if (is_vma_detached(vma)) {
> >  		vma_end_read(vma);
> >  		count_vm_vma_lock_event(VMA_LOCK_MISS);
> >  		/* The area was replaced with another one */
> > diff --git a/mm/mmap.c b/mm/mmap.c
> > index 386429f7db5a..1295c4cedaf4 100644
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -1570,6 +1570,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
> >  	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
> >  		goto mas_store_fail;
> >
> > +	vma_mark_detached(vma, false);
> >  	mm->map_count++;
> >  	validate_mm(mm);
> >  	ksm_add_vma(vma);
> > @@ -1890,6 +1891,7 @@ static struct vm_area_struct *__install_special_mapping(
> >  	if (ret)
> >  		goto out;
> >
> > +	vma_mark_detached(vma, false);
>
> Similar to the vma_iter_store() comment, maybe worth putting this in
> insert_vm_struct()?

Ah, this one I think is not needed because we already have
insert_vm_struct() -> vma_link() -> vma_mark_detached(vma, false)

> >  	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
> >
> >  	perf_event_mmap(vma);
> > diff --git a/mm/nommu.c b/mm/nommu.c
> > index 9cb6e99215e2..6afd5c2bd97d 100644
> > --- a/mm/nommu.c
> > +++ b/mm/nommu.c
> > @@ -1192,6 +1192,7 @@ unsigned long do_mmap(struct file *file,
> >  	current->mm->map_count++;
> >  	/* add the VMA to the tree */
> >  	vma_iter_store(&vmi, vma);
> > +	vma_mark_detached(vma, false);
>
> Since we seem to always do this with vma_iter_store(), do we want to put
> this there? Or maybe make a wrapper around the two if that seems not to
> separate concerns enough?

I think a wrapper would be helpful. I'll try that and see if that looks
better.

> >
> >  	/* we flush the region from the icache only when the first executable
> >  	 * mapping of it is made */
> > @@ -1357,6 +1358,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
> >  	setup_vma_to_mm(vma, mm);
> >  	setup_vma_to_mm(new, mm);
> >  	vma_iter_store(vmi, new);
> > +	vma_mark_detached(new, false);
> >  	mm->map_count++;
> >  	return 0;
> >
> > diff --git a/mm/vma.c b/mm/vma.c
> > index 8a454a7bbc80..1426871fa6e0 100644
> > --- a/mm/vma.c
> > +++ b/mm/vma.c
> > @@ -275,6 +275,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
> >  	 * (it may either follow vma or precede it).
> >  	 */
> >  	vma_iter_store(vmi, vp->insert);
> > +	vma_mark_detached(vp->insert, false);
> >  	mm->map_count++;
> >  }
> >
> > @@ -1690,6 +1691,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
> >
> >  	vma_start_write(vma);
> >  	vma_iter_store(&vmi, vma);
> > +	vma_mark_detached(vma, false);
> >  	vma_link_file(vma);
> >  	mm->map_count++;
> >  	validate_mm(mm);
> > @@ -2369,6 +2371,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
> >  	/* Lock the VMA since it is modified after insertion into VMA tree */
> >  	vma_start_write(vma);
> >  	vma_iter_store(vmi, vma);
> > +	vma_mark_detached(vma, false);
> >  	map->mm->map_count++;
> >  	vma_link_file(vma);
> >
> > diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
> > index 1d9fc97b8e80..fdb60978821f 100644
> > --- a/tools/testing/vma/vma_internal.h
> > +++ b/tools/testing/vma/vma_internal.h
> > @@ -438,7 +438,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
> >  	vma->vm_mm = mm;
> >  	vma->vm_ops = &vma_dummy_vm_ops;
> >  	INIT_LIST_HEAD(&vma->anon_vma_chain);
> > -	vma_mark_detached(vma, false);
> > +	/* vma is not locked, can't use vma_mark_detached() */
> > +	vma->detached = true;
>
> You're the best :)

Thanks!

> >  }
> >
> >  static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
> > --
> > 2.47.0.277.g8800431eea-goog
> >
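As a concrete illustration of the wrapper idea agreed on above: the name
vma_iter_store_attached() below is hypothetical, invented here for the
sketch, not taken from this series. The point is simply to pair tree
insertion with clearing the detached flag so no caller can do one
without the other.

/*
 * Hypothetical wrapper (name and placement illustrative only): insert
 * the VMA into the mm's VMA tree and mark it attached in one step.
 */
static inline void vma_iter_store_attached(struct vma_iterator *vmi,
					   struct vm_area_struct *vma)
{
	vma_iter_store(vmi, vma);	/* make the VMA reachable in the tree */
	vma_mark_detached(vma, false);	/* reflect that it is now attached */
}

With something like this, each vma_iter_store()/vma_mark_detached() pair
added by the patch could collapse into a single call.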
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5eb0be3e351..245a85caf4c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -812,6 +812,11 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
 	vma->detached = detached;
 }

+static inline bool is_vma_detached(struct vm_area_struct *vma)
+{
+	return vma->detached;
+}
+
 static inline void release_fault_lock(struct vm_fault *vmf)
 {
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
@@ -874,7 +879,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &vma_dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma_mark_detached(vma, false);
+#ifdef CONFIG_PER_VMA_LOCK
+	/* vma is not locked, can't use vma_mark_detached() */
+	vma->detached = true;
+#endif
 	vma_numab_state_init(vma);
 	vma_lock_init(vma);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 209885a4134f..d0197a0c0996 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6279,7 +6279,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 		goto inval;

 	/* Check if the VMA got isolated after we found it */
-	if (vma->detached) {
+	if (is_vma_detached(vma)) {
 		vma_end_read(vma);
 		count_vm_vma_lock_event(VMA_LOCK_MISS);
 		/* The area was replaced with another one */
diff --git a/mm/mmap.c b/mm/mmap.c
index 386429f7db5a..1295c4cedaf4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1570,6 +1570,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 		goto mas_store_fail;

+	vma_mark_detached(vma, false);
 	mm->map_count++;
 	validate_mm(mm);
 	ksm_add_vma(vma);
@@ -1890,6 +1891,7 @@ static struct vm_area_struct *__install_special_mapping(
 	if (ret)
 		goto out;

+	vma_mark_detached(vma, false);
 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

 	perf_event_mmap(vma);
diff --git a/mm/nommu.c b/mm/nommu.c
index 9cb6e99215e2..6afd5c2bd97d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1192,6 +1192,7 @@ unsigned long do_mmap(struct file *file,
 	current->mm->map_count++;
 	/* add the VMA to the tree */
 	vma_iter_store(&vmi, vma);
+	vma_mark_detached(vma, false);

 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
@@ -1357,6 +1358,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	setup_vma_to_mm(vma, mm);
 	setup_vma_to_mm(new, mm);
 	vma_iter_store(vmi, new);
+	vma_mark_detached(new, false);
 	mm->map_count++;
 	return 0;

diff --git a/mm/vma.c b/mm/vma.c
index 8a454a7bbc80..1426871fa6e0 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -275,6 +275,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
 	 * (it may either follow vma or precede it).
 	 */
 	vma_iter_store(vmi, vp->insert);
+	vma_mark_detached(vp->insert, false);
 	mm->map_count++;
 }

@@ -1690,6 +1691,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)

 	vma_start_write(vma);
 	vma_iter_store(&vmi, vma);
+	vma_mark_detached(vma, false);
 	vma_link_file(vma);
 	mm->map_count++;
 	validate_mm(mm);
@@ -2369,6 +2371,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
 	/* Lock the VMA since it is modified after insertion into VMA tree */
 	vma_start_write(vma);
 	vma_iter_store(vmi, vma);
+	vma_mark_detached(vma, false);
 	map->mm->map_count++;
 	vma_link_file(vma);

diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 1d9fc97b8e80..fdb60978821f 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -438,7 +438,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &vma_dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma_mark_detached(vma, false);
+	/* vma is not locked, can't use vma_mark_detached() */
+	vma->detached = true;
 }

 static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
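The consumer of the flag is lock_vma_under_rcu() in the mm/memory.c hunk
above. Below is a condensed, illustrative view of that lookup path, not
a verbatim copy of the function; it assumes vma_start_read() returns
whether the per-VMA read lock was taken.

/*
 * Sketch of the race the detached check guards against: the VMA can be
 * removed from the tree between the RCU-protected tree walk and taking
 * the per-VMA read lock.
 */
rcu_read_lock();
vma = mas_walk(&mas);			/* find candidate VMA under RCU */
if (vma && vma_start_read(vma)) {
	/* Check if the VMA got isolated after we found it */
	if (is_vma_detached(vma)) {
		vma_end_read(vma);
		vma = NULL;		/* caller falls back to mmap_lock */
	}
}
rcu_read_unlock();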
Current implementation does not set detached flag when a VMA is first
allocated. This does not represent the real state of the VMA, which is
detached until it is added into mm's VMA tree. Fix this by marking new
VMAs as detached and resetting detached flag only after VMA is added
into a tree.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm.h               | 10 +++++++++-
 mm/memory.c                      |  2 +-
 mm/mmap.c                        |  2 ++
 mm/nommu.c                       |  2 ++
 mm/vma.c                         |  3 +++
 tools/testing/vma/vma_internal.h |  3 ++-
 6 files changed, 19 insertions(+), 3 deletions(-)
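To make the commit message's invariant concrete, here is an illustrative
attach path showing the detached flag's lifecycle after this change.
example_attach() is not a function from the series; it condenses the
pattern the patch applies at each insertion site.

/*
 * Illustrative only: a new VMA starts out detached and becomes
 * attached exactly when it is inserted into the mm's VMA tree.
 */
static void example_attach(struct vma_iterator *vmi, struct mm_struct *mm,
			   struct vm_area_struct *vma)
{
	/* vma_init() now leaves a freshly allocated VMA marked detached */
	VM_WARN_ON_ONCE(!is_vma_detached(vma));

	vma_start_write(vma);		/* lock before it becomes reachable */
	vma_iter_store(vmi, vma);	/* insert into the mm's VMA tree */
	vma_mark_detached(vma, false);	/* only now is it attached */
	mm->map_count++;
}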