Use new try_vma_locked_page_fault() helper to simplify code.
No functional change intended.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/mm/fault.c | 28 +++++-----------------------
 1 file changed, 5 insertions(+), 23 deletions(-)

@@ -537,6 +537,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
+	struct vm_locked_fault vmlf;

	if (kprobe_page_fault(regs, esr))
return 0;
@@ -587,27 +588,11 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

-#ifdef CONFIG_PER_VMA_LOCK
- if (!(mm_flags & FAULT_FLAG_USER))
- goto lock_mmap;
-
- vma = lock_vma_under_rcu(mm, addr);
- if (!vma)
- goto lock_mmap;
-
- if (!(vma->vm_flags & vm_flags)) {
- vma_end_read(vma);
- goto lock_mmap;
- }
- fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
- if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
- vma_end_read(vma);
-
- if (!(fault & VM_FAULT_RETRY)) {
- count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ VM_LOCKED_FAULT_INIT(vmlf, mm, addr, mm_flags, vm_flags, regs, esr);
+ if (try_vma_locked_page_fault(&vmlf, &fault))
+ goto retry;
+	else if (!(fault & VM_FAULT_RETRY))
goto done;
- }
-	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
@@ -615,9 +600,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
goto no_context;
return 0;
}
-lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
-
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
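For readers following along: try_vma_locked_page_fault() and
VM_LOCKED_FAULT_INIT() are introduced by an earlier patch in this series,
not by this one. A minimal sketch of the contract the call site above
relies on, reconstructed from the open-coded block removed here (the
struct layout and exact definition in the series may differ):

/* Sketch only: reconstructed from the removed code, not the series'
 * literal definition. */
struct vm_locked_fault {
	struct mm_struct *mm;
	unsigned long address;
	unsigned int fault_flags;
	unsigned long vm_flags;
	struct pt_regs *regs;
	unsigned long fault_code;	/* arm64 stashes esr here */
};

/*
 * Try to handle the fault under the per-VMA read lock. Returns 0 with
 * the result in *ret when the locked path ran handle_mm_fault();
 * returns non-zero when the caller must fall back to the mmap_lock
 * path, i.e. the "goto retry" above.
 */
int try_vma_locked_page_fault(struct vm_locked_fault *vmlf, vm_fault_t *ret)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (!(vmlf->fault_flags & FAULT_FLAG_USER))
		return -EINVAL;

	vma = lock_vma_under_rcu(vmlf->mm, vmlf->address);
	if (!vma)
		return -EINVAL;

	/* Same access check the removed open-coded block performed. */
	if (!(vma->vm_flags & vmlf->vm_flags)) {
		vma_end_read(vma);
		return -EINVAL;
	}

	fault = handle_mm_fault(vma, vmlf->address,
				vmlf->fault_flags | FAULT_FLAG_VMA_LOCK,
				vmlf->regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (fault & VM_FAULT_RETRY)
		count_vm_vma_lock_event(VMA_LOCK_RETRY);
	else
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);

	*ret = fault;
	return 0;
}

With !CONFIG_PER_VMA_LOCK the helper presumably reduces to a stub that
returns non-zero, which is what lets the #ifdef/#endif pair and the
lock_mmap label disappear from the arm64 caller.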