Use new try_vma_locked_page_fault() helper to simplify code. No
functional change intended.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/riscv/mm/fault.c | 58 ++++++++++++++++++-------------------------
 1 file changed, 24 insertions(+), 34 deletions(-)

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -215,6 +215,13 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
 	return false;
 }
 
+#ifdef CONFIG_PER_VMA_LOCK
+bool arch_vma_access_error(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return access_error(vmf->fault_code, vma);
+}
+#endif
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -223,17 +230,16 @@ void handle_page_fault(struct pt_regs *regs)
 {
 	struct task_struct *tsk;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
-	unsigned long addr, cause;
-	unsigned int flags = FAULT_FLAG_DEFAULT;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = regs->badaddr;
+	unsigned long cause = regs->cause;
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;
-
-	cause = regs->cause;
-	addr = regs->badaddr;
-
-	tsk = current;
-	mm = tsk->mm;
+	struct vm_fault vmf = {
+		.real_address = addr,
+		.fault_code = cause,
+		.flags = FAULT_FLAG_DEFAULT,
+	};
 
 	if (kprobe_page_fault(regs, cause))
 		return;
@@ -268,7 +274,7 @@ void handle_page_fault(struct pt_regs *regs)
 	}
 
 	if (user_mode(regs))
-		flags |= FAULT_FLAG_USER;
+		vmf.flags |= FAULT_FLAG_USER;
 
 	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
 		if (fixup_exception(regs))
@@ -280,37 +286,21 @@ void handle_page_fault(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
 	if (cause == EXC_STORE_PAGE_FAULT)
-		flags |= FAULT_FLAG_WRITE;
+		vmf.flags |= FAULT_FLAG_WRITE;
 	else if (cause == EXC_INST_PAGE_FAULT)
-		flags |= FAULT_FLAG_INSTRUCTION;
-	if (!(flags & FAULT_FLAG_USER))
-		goto lock_mmap;
-
-	vma = lock_vma_under_rcu(mm, addr);
-	if (!vma)
-		goto lock_mmap;
+		vmf.flags |= FAULT_FLAG_INSTRUCTION;
 
-	if (unlikely(access_error(cause, vma))) {
-		vma_end_read(vma);
-		goto lock_mmap;
-	}
-
-	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
-	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
-		vma_end_read(vma);
-
-	if (!(fault & VM_FAULT_RETRY)) {
-		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+	fault = try_vma_locked_page_fault(&vmf);
+	if (fault == VM_FAULT_NONE)
+		goto retry;
+	if (!(fault & VM_FAULT_RETRY))
 		goto done;
-	}
-	count_vm_vma_lock_event(VMA_LOCK_RETRY);
 
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			no_context(regs, addr);
 		return;
 	}
-lock_mmap:
 
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
@@ -337,7 +327,7 @@ void handle_page_fault(struct pt_regs *regs)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, addr, flags, regs);
+	fault = handle_mm_fault(vma, addr, vmf.flags, regs);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -355,7 +345,7 @@ void handle_page_fault(struct pt_regs *regs)
 		return;
 
 	if (unlikely(fault & VM_FAULT_RETRY)) {
-		flags |= FAULT_FLAG_TRIED;
+		vmf.flags |= FAULT_FLAG_TRIED;
 
 		/*
 		 * No need to mmap_read_unlock(mm) as we would
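
For readers without the rest of the series at hand: try_vma_locked_page_fault()
is introduced by an earlier patch in this series, not here. The following is a
minimal sketch of what it is expected to do, reconstructed from the riscv
per-VMA-lock block removed above; the actual mm/ implementation may differ.
In particular, the vmf->regs member used below to forward pt_regs to
handle_mm_fault() is an assumption of this sketch, since the hunk above only
initializes real_address, fault_code and flags.

/*
 * Sketch only: reconstructed from the arch code this patch removes,
 * not the series' actual generic implementation.
 */
vm_fault_t try_vma_locked_page_fault(struct vm_fault *vmf)
{
	vm_fault_t fault = VM_FAULT_NONE;
	struct vm_area_struct *vma;

	/* The lockless path was only taken for user-mode faults. */
	if (!(vmf->flags & FAULT_FLAG_USER))
		return fault;

	/* Find and read-lock the VMA under RCU, without taking mmap_lock. */
	vma = lock_vma_under_rcu(current->mm, vmf->real_address);
	if (!vma)
		return fault;

	/* The arch hook added above validates the access via vmf->fault_code. */
	if (unlikely(arch_vma_access_error(vma, vmf))) {
		vma_end_read(vma);
		return fault;
	}

	fault = handle_mm_fault(vma, vmf->real_address,
				vmf->flags | FAULT_FLAG_VMA_LOCK,
				vmf->regs /* assumed field, see above */);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (fault & VM_FAULT_RETRY)
		count_vm_vma_lock_event(VMA_LOCK_RETRY);
	else
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);

	return fault;
}

A VM_FAULT_NONE return signals that the lockless attempt was not carried out
(kernel-mode fault, no VMA found, or an access error), which is why the caller
above falls through to the mmap_lock retry: path in exactly the cases that
used to goto lock_mmap.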