
[4/6] memory-failure: Use a folio in me_huge_page()

Message ID: 20231117161447.2461643-5-willy@infradead.org
State: New
Series: Convert aops->error_remove_page to ->error_remove_folio

Commit Message

Matthew Wilcox Nov. 17, 2023, 4:14 p.m. UTC
This function was already explicitly calling compound_head();
unfortunately the compiler can't know that, so it cannot elide the
redundant compound_head() calls buried in page_mapping(), unlock_page(),
etc.  Switching to a folio does let us elide those calls.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory-failure.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
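
To see why this saves work, here is a minimal, self-contained userspace sketch. The struct layouts and helpers below are toy stand-ins, not the real kernel definitions; only the calling pattern is meant to mirror page_mapping()/folio_mapping() and unlock_page()/folio_unlock(). Each page-based wrapper re-derives the head page internally (the compound_head() cost the commit message refers to), while the folio variants take the head the caller has already looked up once.

/*
 * Toy model: every "compound_head lookup" line printed is one head
 * derivation the compiler could not elide across the wrapper calls.
 */
#include <stdio.h>
#include <stddef.h>

struct folio;

struct page {
	struct folio *head;	/* non-NULL for tail pages of a compound page */
	void *mapping;		/* stand-in for struct address_space *        */
};

struct folio {
	struct page page;	/* the head page is embedded in the folio     */
};

/* Toy page_folio(): re-derives the head page, like compound_head(). */
static struct folio *page_folio(struct page *page)
{
	printf("compound_head lookup\n");
	return page->head ? page->head : (struct folio *)page;
}

/* Folio helpers operate on the already-derived head ... */
static void *folio_mapping(struct folio *folio)
{
	return folio->page.mapping;
}

static void folio_unlock(struct folio *folio)
{
	(void)folio;		/* lock handling elided in this model */
}

/* ... while each page-based wrapper hides another head lookup. */
static void *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}

static void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}

int main(void)
{
	struct folio f = { .page = { .head = NULL, .mapping = NULL } };
	struct page tail = { .head = &f, .mapping = NULL };

	/* Page-based calls: two hidden head lookups. */
	page_mapping(&tail);
	unlock_page(&tail);

	/* Folio-based calls: one explicit lookup, then reused. */
	struct folio *folio = page_folio(&tail);
	folio_mapping(folio);
	folio_unlock(folio);
	return 0;
}

In the kernel around this series, page_mapping() and unlock_page() are thin wrappers that call page_folio() and then the folio variant, which is why doing the conversion once at the top of me_huge_page() removes the repeated hidden lookups.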

Patch

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e73f2047ffcb..d97d247c0224 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1182,25 +1182,25 @@  static int me_swapcache_clean(struct page_state *ps, struct page *p)
  */
 static int me_huge_page(struct page_state *ps, struct page *p)
 {
+	struct folio *folio = page_folio(p);
 	int res;
-	struct page *hpage = compound_head(p);
 	struct address_space *mapping;
 	bool extra_pins = false;
 
-	mapping = page_mapping(hpage);
+	mapping = folio_mapping(folio);
 	if (mapping) {
-		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
+		res = truncate_error_page(&folio->page, page_to_pfn(p), mapping);
 		/* The page is kept in page cache. */
 		extra_pins = true;
-		unlock_page(hpage);
+		folio_unlock(folio);
 	} else {
-		unlock_page(hpage);
+		folio_unlock(folio);
 		/*
 		 * migration entry prevents later access on error hugepage,
 		 * so we can free and dissolve it into buddy to save healthy
 		 * subpages.
 		 */
-		put_page(hpage);
+		folio_put(folio);
 		if (__page_handle_poison(p) >= 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;