
[7/8] mm/memory-failure: Convert hwpoison_user_mappings to take a folio

Message ID: 20240229212036.2160900-8-willy@infradead.org
State: New
Series: Some cleanups for memory-failure

Commit Message

Matthew Wilcox Feb. 29, 2024, 9:20 p.m. UTC
Pass the folio from the callers, and use it throughout instead of hpage.
Saves dozens of calls to compound_head().
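
For context, a minimal sketch (illustrative only, with hypothetical
hwpoison_checks_before()/after() helpers, not code from this patch) of the
cost being removed: each Page*() test on a possibly-compound page re-resolves
the head page via compound_head() internally, while the folio_test_*()
variants rely on the single page_folio() lookup the caller already did.

	#include <linux/mm_types.h>
	#include <linux/page-flags.h>

	static bool hwpoison_checks_before(struct page *p)
	{
		/* Each Page*() test calls compound_head(p) internally. */
		return PageDirty(p) || PageSwapCache(p) || PageMlocked(p);
	}

	static bool hwpoison_checks_after(struct folio *folio)
	{
		/* The head lookup happened once, in the caller's page_folio(). */
		return folio_test_dirty(folio) || folio_test_swapcache(folio) ||
		       folio_test_mlocked(folio);
	}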
---
 mm/memory-failure.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

Comments

Miaohe Lin March 11, 2024, 11:44 a.m. UTC | #1
On 2024/3/1 5:20, Matthew Wilcox (Oracle) wrote:
> Pass the folio from the callers, and use it throughout instead of hpage.
> Saves dozens of calls to compound_head().
> ---
>  mm/memory-failure.c | 30 +++++++++++++++---------------
>  1 file changed, 15 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 74e87a0a792c..56bc83372e30 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1559,24 +1559,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
>   * Do all that is necessary to remove user space mappings. Unmap
>   * the pages and send SIGBUS to the processes if the data was dirty.
>   */
> -static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> -				  int flags, struct page *hpage)
> +static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
> +		unsigned long pfn, int flags)

hwpoison_user_mappings() is called with the folio refcount held, so I think it should be safe to use the folio directly.
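
Roughly, the calling context being relied on looks like this (a simplified
sketch of the memory_failure() path, error handling elided, not the exact
code):

	res = get_hwpoison_page(p, flags);	/* takes a refcount */
	if (res <= 0)
		goto out;
	folio = page_folio(p);
	folio_lock(folio);
	/* The refcount and lock pin the folio across the unmap. */
	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		goto unlock_page;
	}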

>  {
> -	struct folio *folio = page_folio(hpage);
>  	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
>  	struct address_space *mapping;
>  	LIST_HEAD(tokill);
>  	bool unmap_success;
>  	int forcekill;
> -	bool mlocked = PageMlocked(hpage);
> +	bool mlocked = folio_test_mlocked(folio);
>  
>  	/*
>  	 * Here we are interested only in user-mapped pages, so skip any
>  	 * other types of pages.
>  	 */
> -	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
> +	if (folio_test_reserved(folio) || folio_test_slab(folio) ||
> +	    folio_test_pgtable(folio) || folio_test_offline(folio))
>  		return true;
> -	if (!(PageLRU(hpage) || PageHuge(p)))
> +	if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
>  		return true;
>  
>  	/*
> @@ -1586,7 +1586,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  	if (!page_mapped(p))
>  		return true;
>  
> -	if (PageSwapCache(p)) {
> +	if (folio_test_swapcache(folio)) {
>  		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
>  		ttu &= ~TTU_HWPOISON;
>  	}
> @@ -1597,11 +1597,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  	 * XXX: the dirty test could be racy: set_page_dirty() may not always
>  	 * be called inside page lock (it's recommended but not enforced).
>  	 */
> -	mapping = page_mapping(hpage);
> -	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
> +	mapping = folio_mapping(folio);
> +	if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
>  	    mapping_can_writeback(mapping)) {
> -		if (page_mkclean(hpage)) {
> -			SetPageDirty(hpage);
> +		if (folio_mkclean(folio)) {
> +			folio_set_dirty(folio);
>  		} else {
>  			ttu &= ~TTU_HWPOISON;
>  			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
> @@ -1616,7 +1616,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  	 */
>  	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
>  
> -	if (PageHuge(hpage) && !PageAnon(hpage)) {
> +	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
>  		/*
>  		 * For hugetlb pages in shared mappings, try_to_unmap
>  		 * could potentially call huge_pmd_unshare.  Because of
> @@ -1656,7 +1656,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  	 * use a more force-full uncatchable kill to prevent
>  	 * any accesses to the poisoned memory.
>  	 */
> -	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
> +	forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
>  		    !unmap_success;
>  	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
>  
> @@ -2100,7 +2100,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
>  
>  	page_flags = folio->flags;
>  
> -	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
> +	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
>  		folio_unlock(folio);
>  		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
>  	}
> @@ -2367,7 +2367,7 @@ int memory_failure(unsigned long pfn, int flags)
>  	 * Now take care of user space mappings.
>  	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
>  	 */
> -	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
> +	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {

folio should always be equivalent to p in the normal 4k page case.
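
(In the order-0 case, page_folio() does not change the pointer at all:

	/* Order-0 page: there are no tail pages, so the folio is the page. */
	struct folio *folio = page_folio(p);	/* == (struct folio *)p here */
)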

>  		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
>  		goto unlock_page;
>  	}
> 

This patch looks good to me. Thanks.

Acked-by: Miaohe Lin <linmiaohe@huawei.com>