@@ -6089,6 +6089,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int need_wait_lock = 0;
unsigned long haddr = address & huge_page_mask(h);

+ /* TODO: Handle faults under the VMA lock */
+ if (flags & FAULT_FLAG_VMA_LOCK) {
+ vma_end_read(vma);
+ return VM_FAULT_RETRY;
+ }
+
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
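
This bail-out relies on the caller-side convention that VM_FAULT_RETRY from a VMA-locked attempt means "retry under the mmap_lock", with the VMA read lock already released. A minimal sketch of that caller pattern follows, loosely modeled on x86's do_user_addr_fault(); the helper name is hypothetical, and access checks, accounting and error paths are omitted.

/*
 * Hedged sketch, not part of this patch: roughly how an arch fault
 * handler consumes the VM_FAULT_RETRY returned above.  The function
 * name is made up; the flow loosely follows x86's do_user_addr_fault().
 */
static vm_fault_t fault_try_vma_locked(struct mm_struct *mm,
				       unsigned long address,
				       unsigned int flags,
				       struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* caller falls back to the mmap_lock */

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);

	/*
	 * Handlers that bail out, like hugetlb_fault() above, drop the
	 * VMA read lock themselves before returning VM_FAULT_RETRY.
	 */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	return fault;	/* VM_FAULT_RETRY: retry under the mmap_lock */
}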
@@ -5112,10 +5112,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
}

/*
- * By the time we get here, we already hold the mm semaphore
- *
- * The mmap_lock may have been released depending on flags and our
- * return value. See filemap_fault() and __folio_lock_or_retry().
+ * On entry, we hold either the VMA lock or the mmap_lock
+ * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
+ * the result, the mmap_lock is not held on exit. See filemap_fault()
+ * and __folio_lock_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
@@ -5134,6 +5134,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
p4d_t *p4d;
vm_fault_t ret;

+ if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
+ vma_end_read(vma);
+ return VM_FAULT_RETRY;
+ }
+
pgd = pgd_offset(mm, address);
p4d = p4d_alloc(mm, pgd, address);
if (!p4d)
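
For reference, vma_is_anonymous() only checks for the absence of vm_ops (definition quoted below from include/linux/mm.h), which is why file-backed mappings fall back to the mmap_lock here, and why hugetlb, whose VMAs also carry vm_ops, needs its own bail-out in hugetlb_fault() once the check no longer sits in handle_mm_fault().

/*
 * Quoted for reference from include/linux/mm.h (not part of this patch):
 * a VMA is anonymous when it has no vm_operations_struct.  File-backed
 * mappings, including hugetlbfs, therefore fail this test.
 */
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}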
@@ -5361,11 +5366,6 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
goto out;
}

- if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
- vma_end_read(vma);
- return VM_FAULT_RETRY;
- }
-
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
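
Removing the check here is safe because handle_mm_fault() only dispatches to two callees, and after this patch each does its own FAULT_FLAG_VMA_LOCK bail-out: hugetlb_fault() (first hunk) and __handle_mm_fault() (previous hunk). A simplified sketch of that dispatch, paraphrased from mm/memory.c rather than quoted verbatim:

	/*
	 * Simplified sketch of the dispatch inside handle_mm_fault()
	 * (paraphrased, not a verbatim quote).  Both callees now perform
	 * the FAULT_FLAG_VMA_LOCK check removed above.
	 */
	if (unlikely(is_vm_hugetlb_page(vma)))
		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
	else
		ret = __handle_mm_fault(vma, address, flags);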