diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -587,7 +587,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(mm_flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 
@@ -616,7 +615,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		return 0;
 	}
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
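Every arch hunk in this patch strips the same pair of guards from an essentially identical fast path. A condensed sketch of that shared shape follows; the helper name vma_lock_fastpath_sketch is hypothetical, the per-VMA lock calls are the real API, and each handler actually open-codes this with its own access checks and fault_signal_pending() handling on the retry path:

/*
 * Condensed, illustrative sketch only -- not the exact code of any one
 * architecture below.
 */
static vm_fault_t vma_lock_fastpath_sketch(struct mm_struct *mm,
					   unsigned long address,
					   unsigned int flags,
					   struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;			/* kernel-mode faults use mmap_lock */

	vma = lock_vma_under_rcu(mm, address);	/* NULL: no VMA, or lock contended */
	if (!vma)
		goto lock_mmap;

	fault = handle_mm_fault(vma, address,
				flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		return fault;			/* handled without mmap_lock */
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

lock_mmap:
	/* Slow path: take mmap_lock and retry, as in the hunks around this. */
	return VM_FAULT_RETRY;
}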
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -469,7 +469,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (is_exec)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 
@@ -502,7 +501,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 		return user_mode(regs) ? 0 : SIGBUS;
 
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
@@ -552,9 +550,7 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	mmap_read_unlock(current->mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
 
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -283,7 +283,6 @@ void handle_page_fault(struct pt_regs *regs)
 		flags |= FAULT_FLAG_WRITE;
 	else if (cause == EXC_INST_PAGE_FAULT)
 		flags |= FAULT_FLAG_INSTRUCTION;
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 
@@ -311,7 +310,6 @@ void handle_page_fault(struct pt_regs *regs)
 		return;
 	}
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
@@ -368,9 +366,7 @@ void handle_page_fault(struct pt_regs *regs)
 
 	mmap_read_unlock(mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		tsk->thread.bad_cause = cause;
 		mm_fault_error(regs, addr, fault);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -407,7 +407,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 		access = VM_WRITE;
 	if (access == VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 	vma = lock_vma_under_rcu(mm, address);
@@ -433,7 +432,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 		goto out;
 	}
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 	mmap_read_lock(mm);
 
 	gmap = NULL;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1328,7 +1328,6 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 #endif
 
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 
@@ -1359,7 +1358,6 @@ void do_user_addr_fault(struct pt_regs *regs,
 		return;
 	}
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
 	vma = lock_mm_and_find_vma(mm, address, regs);
@@ -1419,9 +1417,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 
 	mmap_read_unlock(mm);
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	if (likely(!(fault & VM_FAULT_ERROR)))
 		return;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -753,6 +753,12 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
 	mmap_assert_locked(vmf->vma->vm_mm);
 }
 
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+		unsigned long address)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_PER_VMA_LOCK */
 
 /*
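The mm.h hunk is what makes the removals above safe: with CONFIG_PER_VMA_LOCK disabled, lock_vma_under_rcu() now exists as a stub that always returns NULL, so the unconditional fast path immediately falls through to lock_mmap and the compiler can discard the dead branch. A minimal userspace sketch of the same stub-out pattern; CONFIG_FASTPATH, try_fastpath(), and struct vma are hypothetical stand-ins, not kernel names:

#include <stdio.h>

struct vma { int id; };

#ifdef CONFIG_FASTPATH
static struct vma the_vma = { .id = 1 };
static inline struct vma *try_fastpath(unsigned long addr)
{
	(void)addr;
	return &the_vma;	/* stand-in for the real lookup */
}
#else
/* Config-off stub: always fails, so the caller needs no #ifdef. */
static inline struct vma *try_fastpath(unsigned long addr)
{
	(void)addr;
	return NULL;
}
#endif

static void handle(unsigned long addr)
{
	struct vma *v = try_fastpath(addr);

	if (!v) {
		/* The only reachable branch when the config is off;
		 * the fast path below is eliminated as dead code. */
		printf("slow path for %#lx\n", addr);
		return;
	}
	printf("fast path via vma %d\n", v->id);
}

int main(void)
{
	handle(0x1000);		/* prints "slow path" unless built -DCONFIG_FASTPATH */
	return 0;
}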