Message ID | 20240402075142.196265-2-wangkefeng.wang@huawei.com
---|---
State | Superseded
Series | arch/mm/fault: accelerate pagefault when badaccess
On Tue, Apr 2, 2024 at 12:53 AM Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> __do_page_fault() only checks vma->flags and calls handle_mm_fault(),
> and it is only called by do_page_fault(), so let's squash it into
> do_page_fault() to clean up the code.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

> ---
>  arch/arm64/mm/fault.c | 27 +++++++--------------------
>  1 file changed, 7 insertions(+), 20 deletions(-)
>
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index 8251e2fea9c7..9bb9f395351a 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -486,25 +486,6 @@ static void do_bad_area(unsigned long far, unsigned long esr,
>  	}
>  }
>
> -#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
> -#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
> -
> -static vm_fault_t __do_page_fault(struct mm_struct *mm,
> -				  struct vm_area_struct *vma, unsigned long addr,
> -				  unsigned int mm_flags, unsigned long vm_flags,
> -				  struct pt_regs *regs)
> -{
> -	/*
> -	 * Ok, we have a good vm_area for this memory access, so we can handle
> -	 * it.
> -	 * Check that the permissions on the VMA allow for the fault which
> -	 * occurred.
> -	 */
> -	if (!(vma->vm_flags & vm_flags))
> -		return VM_FAULT_BADACCESS;
> -	return handle_mm_fault(vma, addr, mm_flags, regs);
> -}
> -
>  static bool is_el0_instruction_abort(unsigned long esr)
>  {
>  	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
> @@ -519,6 +500,9 @@ static bool is_write_abort(unsigned long esr)
>  	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
>  }
>
> +#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
> +#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
> +
>  static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
>  				   struct pt_regs *regs)
>  {
> @@ -617,7 +601,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
>  		goto done;
>  	}
>
> -	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
> +	if (!(vma->vm_flags & vm_flags))
> +		fault = VM_FAULT_BADACCESS;
> +	else
> +		fault = handle_mm_fault(vma, addr, mm_flags, regs);
>
>  	/* Quick path to respond to signals */
>  	if (fault_signal_pending(fault, regs)) {
> --
> 2.27.0
>
On Tue, Apr 02, 2024 at 03:51:36PM +0800, Kefeng Wang wrote:
> __do_page_fault() only checks vma->flags and calls handle_mm_fault(),
> and it is only called by do_page_fault(), so let's squash it into
> do_page_fault() to clean up the code.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
```diff
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 8251e2fea9c7..9bb9f395351a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -486,25 +486,6 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 	}
 }
 
-#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
-
-static vm_fault_t __do_page_fault(struct mm_struct *mm,
-				  struct vm_area_struct *vma, unsigned long addr,
-				  unsigned int mm_flags, unsigned long vm_flags,
-				  struct pt_regs *regs)
-{
-	/*
-	 * Ok, we have a good vm_area for this memory access, so we can handle
-	 * it.
-	 * Check that the permissions on the VMA allow for the fault which
-	 * occurred.
-	 */
-	if (!(vma->vm_flags & vm_flags))
-		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr, mm_flags, regs);
-}
-
 static bool is_el0_instruction_abort(unsigned long esr)
 {
 	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -519,6 +500,9 @@ static bool is_write_abort(unsigned long esr)
 	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
 }
 
+#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
+
 static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 				   struct pt_regs *regs)
 {
@@ -617,7 +601,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		goto done;
 	}
 
-	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr, mm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
```
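To make the shape of the refactoring easy to see outside the kernel tree, here is a minimal standalone C model of the post-patch logic. It is a sketch, not kernel code: the `vm_area_struct` definition, the flag values, and `handle_mm_fault_stub()` below are simplified stand-ins invented for illustration.

```c
#include <stdio.h>

typedef unsigned int vm_fault_t;

#define VM_FAULT_BADACCESS ((vm_fault_t)0x020000)
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL

/* Simplified stand-in for the kernel's vm_area_struct. */
struct vm_area_struct {
	unsigned long vm_flags;	/* permissions granted by this mapping */
};

/* Stand-in for handle_mm_fault(): pretend the fault is resolved. */
static vm_fault_t handle_mm_fault_stub(struct vm_area_struct *vma)
{
	(void)vma;
	return 0;
}

/*
 * Post-patch shape of the fault path: the one-line permission check
 * that used to live in __do_page_fault() now sits inline in the caller.
 */
static vm_fault_t fault_model(struct vm_area_struct *vma, unsigned long vm_flags)
{
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;	/* VMA forbids this access */
	return handle_mm_fault_stub(vma);
}

int main(void)
{
	struct vm_area_struct vma = { .vm_flags = VM_READ };

	/* Write fault against a read-only VMA: reported as a bad access. */
	printf("write: %#x\n", fault_model(&vma, VM_WRITE));
	/* Read fault: delegated to the (stubbed) generic handler. */
	printf("read:  %#x\n", fault_model(&vma, VM_READ));
	return 0;
}
```

Compiled with a stock C compiler, the write case prints the BADACCESS code (0x20000) and the read case prints 0, mirroring the two branches the patch inlines.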
__do_page_fault() only checks vma->flags and calls handle_mm_fault(),
and it is only called by do_page_fault(), so let's squash it into
do_page_fault() to clean up the code.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/mm/fault.c | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)
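For context on why the series title above singles out the badaccess case: a VM_FAULT_BADACCESS result is ultimately reported to userspace as SIGSEGV with si_code SEGV_ACCERR, whereas VM_FAULT_BADMAP becomes SEGV_MAPERR. The standalone sketch below models just that classification step with simplified stand-in types; it is not the arm64 signal-delivery code itself.

```c
#include <signal.h>
#include <stdio.h>

typedef unsigned int vm_fault_t;

#define VM_FAULT_BADMAP    ((vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS ((vm_fault_t)0x020000)

/*
 * Model of the final classification (simplified, covering only the two
 * pseudo fault codes): a permission failure maps to SEGV_ACCERR, a
 * missing mapping to SEGV_MAPERR.
 */
static int segv_si_code(vm_fault_t fault)
{
	return (fault & VM_FAULT_BADACCESS) ? SEGV_ACCERR : SEGV_MAPERR;
}

int main(void)
{
	printf("BADMAP    -> si_code %d (SEGV_MAPERR)\n",
	       segv_si_code(VM_FAULT_BADMAP));
	printf("BADACCESS -> si_code %d (SEGV_ACCERR)\n",
	       segv_si_code(VM_FAULT_BADACCESS));
	return 0;
}
```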