Message ID | 20240402075142.196265-5-wangkefeng.wang@huawei.com (mailing list archive)
---|---
State | Superseded
Series | arch/mm/fault: accelerate pagefault when badaccess
On Tue, Apr 2, 2024 at 12:53 AM Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> The vm_flags of vma already checked under per-VMA lock, if it is a
> bad access, directly handle error and return, there is no need to
> lock_mm_and_find_vma() and check vm_flags again.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Code-wise this looks correct to me, and it is almost identical to the
x86 change, but someone with more experience with this architecture
should review it.

> ---
>  arch/powerpc/mm/fault.c | 33 ++++++++++++++++++++-------------
>  1 file changed, 20 insertions(+), 13 deletions(-)
>
> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
> index 53335ae21a40..215690452495 100644
> --- a/arch/powerpc/mm/fault.c
> +++ b/arch/powerpc/mm/fault.c
> @@ -71,23 +71,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add
>  	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
>  }
>
> -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
> +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
> +		      struct mm_struct *mm, struct vm_area_struct *vma)
>  {
> -	struct mm_struct *mm = current->mm;
>
>  	/*
>  	 * Something tried to access memory that isn't in our memory map..
>  	 * Fix it, but check if it's kernel or user first..
>  	 */
> -	mmap_read_unlock(mm);
> +	if (mm)
> +		mmap_read_unlock(mm);
> +	else
> +		vma_end_read(vma);
>
>  	return __bad_area_nosemaphore(regs, address, si_code);
>  }
>
>  static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
> +				    struct mm_struct *mm,
>  				    struct vm_area_struct *vma)
>  {
> -	struct mm_struct *mm = current->mm;
>  	int pkey;
>
>  	/*
> @@ -109,7 +112,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
>  	 */
>  	pkey = vma_pkey(vma);
>
> -	mmap_read_unlock(mm);
> +	if (mm)
> +		mmap_read_unlock(mm);
> +	else
> +		vma_end_read(vma);
>
>  	/*
>  	 * If we are in kernel mode, bail out with a SEGV, this will
> @@ -124,9 +130,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
>  	return 0;
>  }
>
> -static noinline int bad_access(struct pt_regs *regs, unsigned long address)
> +static noinline int bad_access(struct pt_regs *regs, unsigned long address,
> +			       struct mm_struct *mm, struct vm_area_struct *vma)
>  {
> -	return __bad_area(regs, address, SEGV_ACCERR);
> +	return __bad_area(regs, address, SEGV_ACCERR, mm, vma);
>  }
>
>  static int do_sigbus(struct pt_regs *regs, unsigned long address,
> @@ -479,13 +486,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
>
>  	if (unlikely(access_pkey_error(is_write, is_exec,
>  				       (error_code & DSISR_KEYFAULT), vma))) {
> -		vma_end_read(vma);
> -		goto lock_mmap;
> +		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> +		return bad_access_pkey(regs, address, NULL, vma);
>  	}
>
>  	if (unlikely(access_error(is_write, is_exec, vma))) {
> -		vma_end_read(vma);
> -		goto lock_mmap;
> +		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> +		return bad_access(regs, address, NULL, vma);
>  	}
>
>  	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
> @@ -521,10 +528,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
>
>  	if (unlikely(access_pkey_error(is_write, is_exec,
>  				       (error_code & DSISR_KEYFAULT), vma)))
> -		return bad_access_pkey(regs, address, vma);
> +		return bad_access_pkey(regs, address, mm, vma);
>
>  	if (unlikely(access_error(is_write, is_exec, vma)))
> -		return bad_access(regs, address);
> +		return bad_access(regs, address, mm, vma);
>
>  	/*
>  	 * If for any reason at all we couldn't handle the fault,
> --
> 2.27.0
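For readers following along, the win is purely control flow: before this
patch, a permission failure detected under the per-VMA lock dropped that
lock and redid the whole VMA lookup and permission check under mmap_lock,
only to fail the same check again; now the error is reported straight from
the per-VMA path. Below is a minimal standalone C model of that change. It
is not kernel code: struct fault_vma, check_access() and the two unlock
stubs are hypothetical stand-ins for the real vma/mm machinery.

#include <stdbool.h>
#include <stdio.h>

struct fault_vma { bool vm_write; };

/* Stand-in for access_error(): a write to a non-writable vma is bad. */
static bool check_access(const struct fault_vma *vma, bool is_write)
{
	return !(is_write && !vma->vm_write);
}

static void vma_unlock(void)  { puts("  vma_end_read"); }
static void mmap_unlock(void) { puts("  mmap_read_unlock"); }

/* Old flow: drop the per-VMA lock, retry everything under mmap_lock. */
static int fault_old(const struct fault_vma *vma, bool is_write)
{
	if (!check_access(vma, is_write)) {
		vma_unlock();
		/* fall back: take mmap_lock, find the VMA, check again */
		if (!check_access(vma, is_write)) {	/* same result */
			mmap_unlock();
			return -1;		/* deliver SIGSEGV */
		}
		mmap_unlock();
	}
	return 0;
}

/* New flow: the check already failed under the per-VMA lock, so report
 * the error immediately; no second lookup, no second check. */
static int fault_new(const struct fault_vma *vma, bool is_write)
{
	if (!check_access(vma, is_write)) {
		vma_unlock();
		return -1;			/* deliver SIGSEGV */
	}
	return 0;
}

int main(void)
{
	const struct fault_vma readonly_vma = { .vm_write = false };

	puts("old path (write to read-only vma):");
	printf("  -> %d\n", fault_old(&readonly_vma, true));
	puts("new path (write to read-only vma):");
	printf("  -> %d\n", fault_new(&readonly_vma, true));
	return 0;
}

Both paths return the same -1 for a write to a read-only mapping; the new
one simply never touches the heavier lock.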
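A second detail worth calling out in review: the patch reuses the new mm
argument as a flag for which lock the caller holds. The per-VMA path
passes NULL and the mmap_lock path passes current->mm, so the shared
error helpers can pick the matching unlock. Written out as a hypothetical
helper (fault_unlock() does not exist in the patch; mmap_read_unlock()
and vma_end_read() are the real kernel calls it dispatches between):

/*
 * Hypothetical helper capturing the convention the patch relies on:
 *   mm != NULL  ->  caller holds mmap_read_lock(mm)
 *   mm == NULL  ->  caller holds the per-VMA read lock on vma
 */
static void fault_unlock(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (mm)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
}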