@@ -2173,16 +2173,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
}
EXPORT_SYMBOL(filemap_get_folios);
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
- if (!folio_test_large(folio) || folio_test_hugetlb(folio))
- return false;
- if (index >= max)
- return false;
- return index < folio_next_index(folio) - 1;
-}
-
/**
* filemap_get_folios_contig - Get a batch of contiguous folios
* @mapping: The address_space to search
@@ -3441,10 +3431,10 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
return false;
}
-static struct folio *next_uptodate_page(struct folio *folio,
- struct address_space *mapping,
- struct xa_state *xas, pgoff_t end_pgoff)
+static struct folio *next_uptodate_folio(struct xa_state *xas,
+ struct address_space *mapping, pgoff_t end_pgoff)
{
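+ /*
+ * xas_next_entry() falls back to xas_find() when the xa_state has not
+ * been walked yet, so this one helper now serves both the first and
+ * each subsequent lookup.
+ */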
+ struct folio *folio = xas_next_entry(xas, end_pgoff);
unsigned long max_idx;
do {
@@ -3482,20 +3472,51 @@ static struct folio *next_uptodate_page(struct folio *folio,
return NULL;
}
-static inline struct folio *first_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
+/*
+ * Map the page range [start_page, start_page + nr_pages) of the folio,
+ * where start_page is obtained via folio_page(folio, start).
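+ * The folio's reference count is updated once for the whole batch of
+ * pages mapped here, rather than once per page.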
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+ struct folio *folio, unsigned long start,
+ unsigned long addr, unsigned int nr_pages)
{
- return next_uptodate_page(xas_find(xas, end_pgoff),
- mapping, xas, end_pgoff);
-}
+ vm_fault_t ret = 0;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
+ struct page *page = folio_page(folio, start);
+ unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+ unsigned int ref_count = 0, count = 0;
-static inline struct folio *next_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
-{
- return next_uptodate_page(xas_next_entry(xas, end_pgoff),
- mapping, xas, end_pgoff);
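+ /*
+ * Note: the loop increment advances vmf->pte, page and addr on every
+ * iteration, including the "continue" paths, so a skipped page never
+ * desynchronises the cursors.
+ */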
+ do {
+ if (PageHWPoison(page))
+ continue;
+
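+ /* Each page handled here decays the mmap miss counter in f_ra. */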
+ if (mmap_miss > 0)
+ mmap_miss--;
+
+ /*
+ * NOTE: If there are PTE markers, leave them to be handled in the
+ * specific fault path; their presence prohibits the fault-around
+ * logic.
+ */
+ if (!pte_none(ptep_get(vmf->pte)))
+ continue;
+
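+ /* We're about to handle the fault */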
+ if (vmf->address == addr)
+ ret = VM_FAULT_NOPAGE;
+
+ ref_count++;
+ do_set_pte(vmf, page, addr);
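+ /* no need to invalidate: a not-present page won't be cached */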
+ update_mmu_cache(vma, addr, vmf->pte);
+ } while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+ /* Restore vmf->pte to the first PTE of the mapped range. */
+ vmf->pte -= nr_pages;
+
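+ /* One reference per PTE we installed, added in a single update. */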
+ folio_ref_add(folio, ref_count);
+ WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+ return ret;
}
vm_fault_t filemap_map_pages(struct vm_fault *vmf,
@@ -3508,12 +3529,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long addr;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
- struct page *page;
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
vm_fault_t ret = 0;
+ int nr_pages = 0;
rcu_read_lock();
- folio = first_map_page(mapping, &xas, end_pgoff);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
if (!folio)
goto out;
@@ -3530,17 +3550,13 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
goto out;
}
do {
-again:
- page = folio_file_page(folio, xas.xa_index);
- if (PageHWPoison(page))
- goto unlock;
-
- if (mmap_miss > 0)
- mmap_miss--;
+ unsigned long end;
addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
vmf->pte += xas.xa_index - last_pgoff;
last_pgoff = xas.xa_index;
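+ /* Map up to the end of this folio, but no further than end_pgoff. */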
+ end = folio->index + folio_nr_pages(folio) - 1;
+ nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
/*
* NOTE: If there're PTE markers, we'll leave them to be
@@ -3550,32 +3566,17 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
if (!pte_none(ptep_get(vmf->pte)))
goto unlock;
- /* We're about to handle the fault */
- if (vmf->address == addr)
- ret = VM_FAULT_NOPAGE;
+ ret |= filemap_map_folio_range(vmf, folio,
+ xas.xa_index - folio->index, addr, nr_pages);
- do_set_pte(vmf, page, addr);
- /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, addr, vmf->pte);
- if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
- xas.xa_index++;
- folio_ref_inc(folio);
- goto again;
- }
- folio_unlock(folio);
- continue;
unlock:
- if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
- xas.xa_index++;
- goto again;
- }
folio_unlock(folio);
folio_put(folio);
- } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+ } while (folio);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);