| Message ID | 20230711202047.3818697-7-willy@infradead.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Avoid the mmap lock for fault-around |
On Tue, Jul 11, 2023 at 1:21 PM Matthew Wilcox (Oracle) <willy@infradead.org> wrote:
>
> Perform the check at the start of do_read_fault(), do_cow_fault()
> and do_shared_fault() instead.  Should be no performance change from
> the last commit.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

> ---
>  mm/memory.c | 20 +++++++++++++++-----
>  1 file changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 88cf9860f17e..709bffee8aa2 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4547,6 +4547,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
>         vm_fault_t ret = 0;
>         struct folio *folio;
>
> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> +               vma_end_read(vmf->vma);
> +               return VM_FAULT_RETRY;
> +       }
> +
>         /*
>          * Let's call ->map_pages() first and use ->fault() as fallback
>          * if page by the offset is not ready to be mapped (cold cache or
> @@ -4575,6 +4580,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
>         struct vm_area_struct *vma = vmf->vma;
>         vm_fault_t ret;
>
> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> +               vma_end_read(vma);
> +               return VM_FAULT_RETRY;
> +       }
> +
>         if (unlikely(anon_vma_prepare(vma)))
>                 return VM_FAULT_OOM;
>
> @@ -4615,6 +4625,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
>         vm_fault_t ret, tmp;
>         struct folio *folio;
>
> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> +               vma_end_read(vma);
> +               return VM_FAULT_RETRY;
> +       }
> +
>         ret = __do_fault(vmf);
>         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
>                 return ret;
> @@ -4661,11 +4676,6 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
>         struct mm_struct *vm_mm = vma->vm_mm;
>         vm_fault_t ret;
>
> -       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> -               vma_end_read(vma);
> -               return VM_FAULT_RETRY;
> -       }
> -
>         /*
>          * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
>          */
> --
> 2.39.2
>
diff --git a/mm/memory.c b/mm/memory.c
index 88cf9860f17e..709bffee8aa2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4547,6 +4547,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
        vm_fault_t ret = 0;
        struct folio *folio;

+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vmf->vma);
+               return VM_FAULT_RETRY;
+       }
+
        /*
         * Let's call ->map_pages() first and use ->fault() as fallback
         * if page by the offset is not ready to be mapped (cold cache or
@@ -4575,6 +4580,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret;

+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vma);
+               return VM_FAULT_RETRY;
+       }
+
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;

@@ -4615,6 +4625,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
        vm_fault_t ret, tmp;
        struct folio *folio;

+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vma);
+               return VM_FAULT_RETRY;
+       }
+
        ret = __do_fault(vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
@@ -4661,11 +4676,6 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
        struct mm_struct *vm_mm = vma->vm_mm;
        vm_fault_t ret;

-       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-               vma_end_read(vma);
-               return VM_FAULT_RETRY;
-       }
-
        /*
         * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
         */
Perform the check at the start of do_read_fault(), do_cow_fault()
and do_shared_fault() instead.  Should be no performance change from
the last commit.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
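For readers following the series: the VM_FAULT_RETRY returned by these handlers under FAULT_FLAG_VMA_LOCK is consumed by the architecture page-fault code, which abandons the per-VMA lock attempt and retries the fault under the mmap_lock. The sketch below is a simplified illustration of that caller-side flow, loosely modelled on x86's do_user_addr_fault(); it is not the literal kernel source, and the function name and elided slow-path details are placeholders.

/*
 * Simplified sketch (not literal kernel code) of how the arch fault
 * handler reacts when a file-backed fault bails out of do_read_fault()
 * and friends with VM_FAULT_RETRY under FAULT_FLAG_VMA_LOCK.
 */
static void fault_path_sketch(struct mm_struct *mm, unsigned long address,
			      unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* Try the lockless per-VMA path first. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY))
		return;		/* handled entirely under the VMA read lock */

	/*
	 * e.g. a file-backed fault that returned VM_FAULT_RETRY from
	 * do_read_fault()/do_cow_fault()/do_shared_fault(): fall back
	 * to the traditional path under mmap_lock.
	 */
lock_mmap:
	mmap_read_lock(mm);
	/* ... look up the VMA and call handle_mm_fault() again without
	 * FAULT_FLAG_VMA_LOCK ... */
	mmap_read_unlock(mm);
}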