[24/25] mm: Add read_cache_folio and read_mapping_folio

Message ID: 20201216182335.27227-25-willy@infradead.org
State: New, archived
Series: Page folios

Commit Message

Matthew Wilcox Dec. 16, 2020, 6:23 p.m. UTC
Reimplement read_cache_page() as a wrapper around the new read_cache_folio(),
and add read_mapping_folio() as the folio counterpart of read_mapping_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 17 ++++++++-
 mm/filemap.c            | 81 +++++++++++++++++++----------------------
 2 files changed, 53 insertions(+), 45 deletions(-)

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 22f9774d8a83..ae20b6fa46f0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -518,19 +518,32 @@  static inline struct page *grab_cache_page(struct address_space *mapping,
 	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 }
 
-extern struct page * read_cache_page(struct address_space *mapping,
-				pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
+		filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern int read_cache_pages(struct address_space *mapping,
 		struct list_head *pages, filler_t *filler, void *data);
 
+static inline struct page *read_cache_page(struct address_space *mapping,
+				pgoff_t index, filler_t *filler, void *data)
+{
+	struct folio *folio = read_cache_folio(mapping, index, filler, data);
+
+	if (IS_ERR(folio))
+		return &folio->page;
+	return folio_page(folio, index);
+}
+
 static inline struct page *read_mapping_page(struct address_space *mapping,
 				pgoff_t index, void *data)
 {
 	return read_cache_page(mapping, index, NULL, data);
 }
 
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+				pgoff_t index, void *data)
+{
+	return read_cache_folio(mapping, index, NULL, data);
+}
+
 /*
  * Get index of the page with in radix-tree
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
diff --git a/mm/filemap.c b/mm/filemap.c
index a5925450ee13..0131208e45f7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3174,32 +3174,20 @@  EXPORT_SYMBOL(filemap_page_mkwrite);
 EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
 
-static struct page *wait_on_page_read(struct page *page)
-{
-	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
-		if (!PageUptodate(page)) {
-			put_page(page);
-			page = ERR_PTR(-EIO);
-		}
-	}
-	return page;
-}
-
-static struct page *do_read_cache_page(struct address_space *mapping,
+static struct folio *do_read_cache_folio(struct address_space *mapping,
 		pgoff_t index, filler_t filler, void *data, gfp_t gfp)
 {
-	struct page *page;
+	struct folio *folio;
 	int err;
 repeat:
-	page = find_get_page(mapping, index);
-	if (!page) {
-		page = &__page_cache_alloc(gfp, 0)->page;
-		if (!page)
+	folio = find_get_folio(mapping, index);
+	if (!folio) {
+		folio = __page_cache_alloc(gfp, 0);
+		if (!folio)
 			return ERR_PTR(-ENOMEM);
-		err = add_to_page_cache_lru(page, mapping, index, gfp);
+		err = folio_add_to_page_cache(folio, mapping, index, gfp);
 		if (unlikely(err)) {
-			put_page(page);
+			put_folio(folio);
 			if (err == -EEXIST)
 				goto repeat;
 			/* Presumably ENOMEM for xarray node */
@@ -3208,21 +3196,24 @@  static struct page *do_read_cache_page(struct address_space *mapping,
 
 filler:
 		if (filler)
-			err = filler(data, page_folio(page));
+			err = filler(data, folio);
 		else
-			err = mapping->a_ops->readpage(data, page_folio(page));
+			err = mapping->a_ops->readpage(data, folio);
 
 		if (err < 0) {
-			put_page(page);
+			put_folio(folio);
 			return ERR_PTR(err);
 		}
 
-		page = wait_on_page_read(page);
-		if (IS_ERR(page))
-			return page;
+		wait_on_folio_locked(folio);
+		if (!FolioUptodate(folio)) {
+			put_folio(folio);
+			return ERR_PTR(-EIO);
+		}
+
 		goto out;
 	}
-	if (PageUptodate(page))
+	if (FolioUptodate(folio))
 		goto out;
 
 	/*
@@ -3256,23 +3247,23 @@  static struct page *do_read_cache_page(struct address_space *mapping,
 	 * avoid spurious serialisations and wakeups when multiple processes
 	 * wait on the same page for IO to complete.
 	 */
-	wait_on_page_locked(page);
-	if (PageUptodate(page))
+	wait_on_folio_locked(folio);
+	if (FolioUptodate(folio))
 		goto out;
 
 	/* Distinguish between all the cases under the safety of the lock */
-	lock_page(page);
+	lock_folio(folio);
 
 	/* Case c or d, restart the operation */
-	if (!page->mapping) {
-		unlock_page(page);
-		put_page(page);
+	if (!folio->page.mapping) {
+		unlock_folio(folio);
+		put_folio(folio);
 		goto repeat;
 	}
 
 	/* Someone else locked and filled the page in a very small window */
-	if (PageUptodate(page)) {
-		unlock_page(page);
+	if (FolioUptodate(folio)) {
+		unlock_folio(folio);
 		goto out;
 	}
 
@@ -3282,16 +3273,16 @@  static struct page *do_read_cache_page(struct address_space *mapping,
 	 * Clear page error before actual read, PG_error will be
 	 * set again if read page fails.
 	 */
-	ClearPageError(page);
+	ClearFolioError(folio);
 	goto filler;
 
 out:
-	mark_page_accessed(page);
-	return page;
+	mark_folio_accessed(folio);
+	return folio;
 }
 
 /**
- * read_cache_page - read into page cache, fill it if needed
+ * read_cache_folio - read into page cache, fill it if needed
  * @mapping:	the page's address_space
  * @index:	the page index
  * @filler:	function to perform the read
@@ -3304,13 +3295,13 @@  static struct page *do_read_cache_page(struct address_space *mapping,
  *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
-struct page *read_cache_page(struct address_space *mapping, pgoff_t index,
+struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
 		filler_t filler, void *data)
 {
-	return do_read_cache_page(mapping, index, filler, data,
+	return do_read_cache_folio(mapping, index, filler, data,
 			mapping_gfp_mask(mapping));
 }
-EXPORT_SYMBOL(read_cache_page);
+EXPORT_SYMBOL(read_cache_folio);
 
 /**
  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
@@ -3329,7 +3320,11 @@  struct page *read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index,
 				gfp_t gfp)
 {
-	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
+	struct folio *folio = do_read_cache_folio(mapping, index, NULL, NULL,
+									gfp);
+	if (IS_ERR(folio))
+		return &folio->page;
+	return folio_page(folio, index);
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
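
For context, a minimal sketch of how a caller might use the new helper is shown
below. It is not part of the patch and assumes the folio API exactly as named in
the diff above: read_mapping_folio() returns either an uptodate folio or an
ERR_PTR(), and put_folio() drops the reference.

#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller (not part of the patch): read one folio through the
 * new helper, check for failure, then drop the reference when done.  The
 * names read_mapping_folio() and put_folio() are used as they appear in
 * the diff above.
 */
static int example_read_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = read_mapping_folio(mapping, index, NULL);

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* folio is uptodate and referenced here; use its contents. */

	put_folio(folio);
	return 0;
}

Compared with read_mapping_page(), the caller receives and releases the whole
folio rather than a single constituent page, which is why read_cache_page() can
now be a thin wrapper that converts the folio back to a page with folio_page().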