
[RFC,5/6] mm: Allocate large folios for anonymous memory

Message ID 20230317105802.2634004-6-ryan.roberts@arm.com
State New
Series variable-order, large folios for anonymous memory

Commit Message

Ryan Roberts March 17, 2023, 10:58 a.m. UTC
Add the machinery to determine what order of folio to allocate within
do_anonymous_page() and deal with racing faults to the same region.

TODO: For now, the maximum order is hard-coded to 4. This should probably be
determined per-VMA based on suitable heuristics and adjusted dynamically.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/memory.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 124 insertions(+), 16 deletions(-)

--
2.25.1
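
For illustration, here is a minimal userspace sketch (not part of the patch) of
the alignment and VMA-bounds walk that calc_anonymous_folio_order() performs in
the diff below. The names pick_order, vm_start, vm_end and max_order are
hypothetical; the pte scan and the clamp to PMD order are omitted, so treat
this purely as a worked example of how the candidate address falls out of the
fault address for each order.

/*
 * Userspace sketch of the order selection: for each candidate order,
 * round the fault address down to a naturally aligned block of
 * 2^order pages and reject the block if it crosses the (hypothetical)
 * VMA bounds. The pte emptiness check is omitted here.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int pick_order(unsigned long fault_addr, unsigned long vm_start,
		      unsigned long vm_end, int max_order)
{
	int order;

	for (order = max_order; order > 0; order--) {
		unsigned long size = (1UL << order) * PAGE_SIZE;
		unsigned long addr = fault_addr & ~(size - 1);

		/* Folio must not breach the VMA boundaries. */
		if (addr < vm_start || addr + size > vm_end)
			continue;

		return order;	/* the pte scan would happen here */
	}

	return 0;		/* fall back to a single page */
}

int main(void)
{
	/* Hypothetical VMA: 20 pages starting at 0x7f0000001000. */
	unsigned long vm_start = 0x7f0000001000UL;
	unsigned long vm_end   = vm_start + 20 * PAGE_SIZE;
	unsigned long fault    = vm_start + 3 * PAGE_SIZE;

	printf("chosen order: %d\n", pick_order(fault, vm_start, vm_end, 4));
	return 0;
}

With a 20-page VMA and a fault on its fourth page, orders 4 and 3 are rejected
because the naturally aligned block would start below vm_start, so order 2 is
chosen.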

Patch

diff --git a/mm/memory.c b/mm/memory.c
index c9e09415ee18..3d01eab46d9c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4013,6 +4013,77 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	return ret;
 }

+/*
+ * Returns index of first pte that is not none, or nr if all are none.
+ */
+static int check_all_ptes_none(pte_t *pte, int nr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		if (!pte_none(*pte++))
+			return i;
+	}
+
+	return nr;
+}
+
+static void calc_anonymous_folio_order(struct vm_fault *vmf,
+				       int *order_out,
+				       unsigned long *addr_out)
+{
+	/*
+	 * The aim here is to determine what size of folio we should allocate
+	 * for this fault. Factors include:
+	 * - Folio must be naturally aligned within VA space
+	 * - Folio must not breach boundaries of vma
+	 * - Folio must be fully contained inside one pmd entry
+	 * - Folio must not overlap any non-none ptes
+	 * - Order must not be higher than *order_out upon entry
+	 *
+	 * Note that the caller may or may not choose to lock the pte. If
+	 * unlocked, the calculation should be considered an estimate that will
+	 * need to be validated under the lock.
+	 */
+
+	struct vm_area_struct *vma = vmf->vma;
+	int nr;
+	int order = min(*order_out, PMD_SHIFT - PAGE_SHIFT);
+	unsigned long addr;
+	pte_t *pte;
+	pte_t *first_set = NULL;
+	int ret;
+
+	for (; order > 0; order--) {
+		nr = 1 << order;
+		addr = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
+		pte = vmf->pte - ((vmf->address - addr) >> PAGE_SHIFT);
+
+		/* Check vma bounds. */
+		if (addr < vma->vm_start ||
+		    addr + nr * PAGE_SIZE > vma->vm_end)
+			continue;
+
+		/* All ptes covered by order already known to be none. */
+		if (pte + nr <= first_set)
+			break;
+
+		/* Already found set pte in range covered by order. */
+		if (pte <= first_set)
+			continue;
+
+		/* Need to check if all the ptes are none. */
+		ret = check_all_ptes_none(pte, nr);
+		if (ret == nr)
+			break;
+
+		first_set = pte + ret;
+	}
+
+	*order_out = order;
+	*addr_out = order > 0 ? addr : vmf->address;
+}
+
 /*
  * We enter with non-exclusive mmap_lock (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -4024,6 +4095,9 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	pte_t entry;
+	unsigned long addr;
+	int order = 4; // TODO: Policy for maximum folio order.
+	int pgcount;

 	/* File mapping without ->vm_ops ? */
 	if (vma->vm_flags & VM_SHARED)
@@ -4065,24 +4139,41 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			return handle_userfault(vmf, VM_UFFD_MISSING);
 		}
-		goto setpte;
+		set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
+
+		/* No need to invalidate - it was non-present before */
+		update_mmu_cache(vma, vmf->address, vmf->pte);
+		goto unlock;
 	}

-	/* Allocate our own private page. */
+retry:
+	/*
+	 * Estimate the folio order to allocate. We are not under the ptl here
+	 * so this estimate needs to be re-checked later once we have the lock.
+	 */
+	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+	calc_anonymous_folio_order(vmf, &order, &addr);
+	pte_unmap(vmf->pte);
+
+	/* Allocate our own private folio. */
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	folio = try_vma_alloc_zeroed_movable_folio(vma, vmf->address, 0);
+	folio = try_vma_alloc_zeroed_movable_folio(vma, addr, order);
 	if (!folio)
 		goto oom;

+	/* We may have been granted less than we asked for. */
+	order = folio_order(folio);
+	pgcount = folio_nr_pages(folio);
+
 	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
-	cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
+	folio_throttle_swaprate(folio, GFP_KERNEL);

 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
-	 * preceding stores to the page contents become visible before
-	 * the set_pte_at() write.
+	 * preceding stores to the folio contents become visible before
+	 * the set_ptes() write.
 	 */
 	__folio_mark_uptodate(folio);

@@ -4091,11 +4182,26 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));

-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
-	if (!pte_none(*vmf->pte)) {
-		update_mmu_tlb(vma, vmf->address, vmf->pte);
-		goto release;
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
+
+	/*
+	 * Ensure our estimate above is still correct; we could have raced with
+	 * another thread to service a fault in the region.
+	 */
+	if (check_all_ptes_none(vmf->pte, pgcount) != pgcount) {
+		pte_t *pte = vmf->pte + ((vmf->address - addr) >> PAGE_SHIFT);
+
+		/* If faulting pte was allocated by another, exit early. */
+		if (!pte_none(*pte)) {
+			update_mmu_tlb(vma, vmf->address, pte);
+			goto release;
+		}
+
+		/* Else try again, with a lower order. */
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		folio_put(folio);
+		order--;
+		goto retry;
 	}

 	ret = check_stable_address_space(vma->vm_mm);
@@ -4109,14 +4215,16 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}

-	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	folio_add_new_anon_rmap(folio, vma, vmf->address);
+	folio_ref_add(folio, pgcount - 1);
+
+	add_mm_counter(vma->vm_mm, MM_ANONPAGES, pgcount);
+	folio_add_new_anon_rmap_range(folio, vma, addr);
 	folio_add_lru_vma(folio, vma);
-setpte:
-	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
+
+	set_ptes(vma->vm_mm, addr, vmf->pte, entry, pgcount);

 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, vmf->address, vmf->pte);
+	update_mmu_cache_range(vma, addr, vmf->pte, pgcount);
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
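
To make the race handling above concrete, here is a small userspace model
(again hypothetical, not kernel code): slots stands in for the pte entries,
first_used() plays the role of check_all_ptes_none(), and service_fault()
mimics the recheck-under-lock plus retry-with-lower-order loop. It simplifies
the real path, which re-runs calc_anonymous_folio_order() on each retry, but it
shows the two possible outcomes: back off to a smaller block, or give up if the
faulting pte itself was populated by the racing thread.

/*
 * Userspace model of the revalidation step: if any slot in the
 * candidate range became non-empty while the folio was being
 * allocated, reduce the order and retry, unless the faulting slot
 * itself was taken, in which case abandon the fault.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS 16

static int first_used(const bool *slots, int start, int nr)
{
	for (int i = 0; i < nr; i++)
		if (slots[start + i])
			return i;
	return nr;	/* all empty, like check_all_ptes_none() */
}

static int service_fault(bool *slots, int fault_idx, int order)
{
	while (order >= 0) {
		int nr = 1 << order;
		int start = fault_idx & ~(nr - 1);	/* natural alignment */

		if (first_used(slots, start, nr) == nr) {
			for (int i = 0; i < nr; i++)
				slots[start + i] = true;
			return order;	/* mapped a 2^order block */
		}

		if (slots[fault_idx])
			return -1;	/* raced: someone mapped our slot */

		order--;		/* retry with a smaller block */
	}
	return -1;
}

int main(void)
{
	bool slots[NR_SLOTS] = { false };

	slots[5] = true;	/* simulate a racing fault nearby */
	printf("order used: %d\n", service_fault(slots, 3, 4));
	return 0;
}

Here the racing fault at slot 5 forces the order-4 and order-3 candidates to be
rejected, and the fault is finally serviced with an order-2 block covering
slots 0..3.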