--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -56,6 +56,7 @@ config LOONGARCH
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -142,6 +142,13 @@ static inline bool access_error(unsigned int flags, struct pt_regs *regs,
return false;
}

+#ifdef CONFIG_PER_VMA_LOCK
+bool arch_vma_access_error(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ return access_error(vmf->flags, vmf->regs, vmf->real_address, vma);
+}
+#endif
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -151,11 +158,15 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
int si_code = SEGV_MAPERR;
- unsigned int flags = FAULT_FLAG_DEFAULT;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
struct vm_area_struct *vma = NULL;
vm_fault_t fault;
+ struct vm_fault vmf = {
+ .real_address = address,
+ .regs = regs,
+ .flags = FAULT_FLAG_DEFAULT,
+ };

if (kprobe_page_fault(regs, current->thread.trap_nr))
return;
@@ -184,11 +195,24 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;

if (user_mode(regs))
- flags |= FAULT_FLAG_USER;
+ vmf.flags |= FAULT_FLAG_USER;
if (write)
- flags |= FAULT_FLAG_WRITE;
+ vmf.flags |= FAULT_FLAG_WRITE;

perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+ fault = try_vma_locked_page_fault(&vmf);
+ if (fault == VM_FAULT_NONE)
+ goto retry;
+ if (!(fault & VM_FAULT_RETRY))
+ goto done;
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ return;
+ }
+
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
@@ -196,7 +220,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
si_code = SEGV_ACCERR;
- if (access_error(flags, regs, address, vma))
+ if (access_error(vmf.flags, regs, address, vma))
goto bad_area;
/*
@@ -204,7 +228,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags, regs);
+ fault = handle_mm_fault(vma, address, vmf.flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
@@ -217,7 +241,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
return;

if (unlikely(fault & VM_FAULT_RETRY)) {
- flags |= FAULT_FLAG_TRIED;
+ vmf.flags |= FAULT_FLAG_TRIED;

/*
* No need to mmap_read_unlock(mm) as we would
@@ -229,6 +253,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
mmap_read_unlock(mm);

+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
do_out_of_memory(regs, write, address);

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/loongarch/Kconfig    |  1 +
 arch/loongarch/mm/fault.c | 37 +++++++++++++++++++++++++++++++------
 2 files changed, 32 insertions(+), 6 deletions(-)
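For readers tracing the new control flow: try_vma_locked_page_fault() returns
VM_FAULT_NONE when the per-VMA path was not attempted (jump to retry: and take
mmap_lock as before), VM_FAULT_RETRY when it gave up (retry under mmap_lock
after the pending-signal check), and anything else means the fault completed
under the VMA lock alone (jump to done:, skipping mmap_read_unlock()). Below
is a compilable userspace mock of just that ordering, with signal handling
omitted; try_vma_locked_fault(), locked_fault() and VM_FAULT_DONE are
illustrative stand-ins, not the kernel API used in the diff above.

/* Userspace mock of the "per-VMA lock first, mmap_lock fallback" flow. */
#include <stdio.h>

#define VM_FAULT_NONE  0x0000 /* per-VMA lock path not attempted */
#define VM_FAULT_RETRY 0x0400 /* must be retried under mmap_lock */
#define VM_FAULT_DONE  0x0001 /* mock-only: fault fully handled */

/* Stand-in for try_vma_locked_page_fault(). */
static unsigned int try_vma_locked_fault(unsigned long addr)
{
	if (addr & 2)
		return VM_FAULT_NONE;  /* pretend the lockless lookup failed */
	return (addr & 1) ? VM_FAULT_RETRY : VM_FAULT_DONE;
}

/* Stand-in for the existing mmap_lock-based slow path. */
static unsigned int locked_fault(unsigned long addr)
{
	printf("slow path (mmap_lock) handled %#lx\n", addr);
	return VM_FAULT_DONE;
}

static void do_fault(unsigned long addr)
{
	unsigned int fault = try_vma_locked_fault(addr);

	/* Neither "not attempted" nor "retry": handled locklessly. */
	if (fault != VM_FAULT_NONE && !(fault & VM_FAULT_RETRY)) {
		printf("fast path (per-VMA lock) handled %#lx\n", addr);
		return;  /* the kernel's "goto done" */
	}

	/* Fall back to the unchanged mmap_lock-based handling. */
	locked_fault(addr);
}

int main(void)
{
	do_fault(0x1000); /* served under the per-VMA lock */
	do_fault(0x1001); /* VM_FAULT_RETRY: falls back to mmap_lock */
	do_fault(0x1002); /* VM_FAULT_NONE: falls back to mmap_lock */
	return 0;
}

The fallback design keeps correctness independent of the new path: anything
the per-VMA attempt cannot serve is retried under mmap_lock exactly as today,
and arch_vma_access_error() feeds the arch's existing access_error() check
into the generic helper so both paths enforce the same permission rules.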