[v2,2/8] mm/hugetlb: convert hugetlbfs_pagecache_present() to folios

Message ID 20230125170537.96973-3-sidhartha.kumar@oracle.com (mailing list archive)
State New
Series convert hugetlb fault functions to folios

Commit Message

Sidhartha Kumar Jan. 25, 2023, 5:05 p.m. UTC
Refactor hugetlbfs_pagecache_present() to avoid getting and dropping
a refcount on a page. Use RCU and page_cache_next_miss() instead.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 mm/hugetlb.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 849206e94742..e506a46b7871 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5651,17 +5651,15 @@  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 static bool hugetlbfs_pagecache_present(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long address)
 {
-	struct address_space *mapping;
-	pgoff_t idx;
-	struct page *page;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	pgoff_t idx = vma_hugecache_offset(h, vma, address);
+	bool present;
 
-	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, address);
+	rcu_read_lock();
+	present = page_cache_next_miss(mapping, idx, 1) != idx;
+	rcu_read_unlock();
 
-	page = find_get_page(mapping, idx);
-	if (page)
-		put_page(page);
-	return page != NULL;
+	return present;
 }
 
 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
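As a side note for readers less familiar with page_cache_next_miss(): it scans the range [index, index + max_scan) and returns the index of the first slot that holds no entry, so a return value different from idx means the slot at idx is populated. Below is a minimal, self-contained userspace sketch of that presence-check pattern, assuming the documented semantics; cache_next_miss(), the slots[] array and the sample indexes are illustrative stand-ins, not the kernel implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the page cache: slots[i] != NULL means index i is present. */
#define NR_SLOTS 8
static void *slots[NR_SLOTS];

/*
 * Illustrative analogue of page_cache_next_miss(): return the first index in
 * [index, index + max_scan) with no entry; if every scanned slot is populated,
 * return a value outside that window (index + max_scan).
 */
static size_t cache_next_miss(size_t index, size_t max_scan)
{
	size_t i;

	for (i = 0; i < max_scan; i++) {
		if (index + i >= NR_SLOTS || !slots[index + i])
			return index + i;
	}
	return index + max_scan;
}

static bool cache_present(size_t idx)
{
	/* Same shape as the patched helper: a miss at idx means "not present". */
	return cache_next_miss(idx, 1) != idx;
}

int main(void)
{
	int dummy;

	slots[3] = &dummy;	/* pretend index 3 is in the cache */

	printf("idx 3 present: %d\n", cache_present(3));	/* prints 1 */
	printf("idx 4 present: %d\n", cache_present(4));	/* prints 0 */
	return 0;
}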