
[v13,2/7] mm: factor helpers for memory_failure_dev_pagemap

Message ID 20220419045045.1664996-3-ruansy.fnst@fujitsu.com (mailing list archive)
State New, archived
Series fsdax: introduce fs query to support reflink

Commit Message

Shiyang Ruan April 19, 2022, 4:50 a.m. UTC
The memory_failure_dev_pagemap code is a bit complex before the RMAP
feature for fsdax is introduced.  So factor out some helper functions
to simplify the code.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
---
 mm/memory-failure.c | 157 ++++++++++++++++++++++++--------------------
 1 file changed, 87 insertions(+), 70 deletions(-)

Comments

HORIGUCHI NAOYA(堀口 直也) April 21, 2022, 6:13 a.m. UTC | #1
On Tue, Apr 19, 2022 at 12:50:40PM +0800, Shiyang Ruan wrote:
> The memory_failure_dev_pagemap code is a bit complex before the RMAP
> feature for fsdax is introduced.  So factor out some helper functions
> to simplify the code.
> 
> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
> Reviewed-by: Darrick J. Wong <djwong@kernel.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Dan Williams <dan.j.williams@intel.com>

Thanks for the refactoring.  As I commented on 0/7, the conflict with
"mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb()"
can be trivially resolved.

A few more comments below ...

> ---
>  mm/memory-failure.c | 157 ++++++++++++++++++++++++--------------------
>  1 file changed, 87 insertions(+), 70 deletions(-)
> 
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index e3fbff5bd467..7c8c047bfdc8 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1498,6 +1498,90 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
>  	return 0;
>  }
> 
> +static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
> +		struct address_space *mapping, pgoff_t index, int flags)
> +{
> +	struct to_kill *tk;
> +	unsigned long size = 0;
> +
> +	list_for_each_entry(tk, to_kill, nd)
> +		if (tk->size_shift)
> +			size = max(size, 1UL << tk->size_shift);
> +
> +	if (size) {
> +		/*
> +		 * Unmap the largest mapping to avoid breaking up device-dax
> +		 * mappings which are constant size. The actual size of the
> +		 * mapping being torn down is communicated in siginfo, see
> +		 * kill_proc()
> +		 */
> +		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
> +
> +		unmap_mapping_range(mapping, start, size, 0);
> +	}
> +
> +	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
> +}
> +
> +static int mf_generic_kill_procs(unsigned long long pfn, int flags,
> +		struct dev_pagemap *pgmap)
> +{
> +	struct page *page = pfn_to_page(pfn);
> +	LIST_HEAD(to_kill);
> +	dax_entry_t cookie;
> +	int rc = 0;
> +
> +	/*
> +	 * Pages instantiated by device-dax (not filesystem-dax)
> +	 * may be compound pages.
> +	 */
> +	page = compound_head(page);
> +
> +	/*
> +	 * Prevent the inode from being freed while we are interrogating
> +	 * the address_space, typically this would be handled by
> +	 * lock_page(), but dax pages do not use the page lock. This
> +	 * also prevents changes to the mapping of this pfn until
> +	 * poison signaling is complete.
> +	 */
> +	cookie = dax_lock_page(page);
> +	if (!cookie)
> +		return -EBUSY;
> +
> +	if (hwpoison_filter(page)) {
> +		rc = -EOPNOTSUPP;
> +		goto unlock;
> +	}
> +
> +	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
> +		/*
> +		 * TODO: Handle HMM pages which may need coordination
> +		 * with device-side memory.
> +		 */
> +		return -EBUSY;

Don't we need to go to dax_unlock_page() as the original code does?

> +	}
> +
> +	/*
> +	 * Use this flag as an indication that the dax page has been
> +	 * remapped UC to prevent speculative consumption of poison.
> +	 */
> +	SetPageHWPoison(page);
> +
> +	/*
> +	 * Unlike System-RAM there is no possibility to swap in a
> +	 * different physical page at a given virtual address, so all
> +	 * userspace consumption of ZONE_DEVICE memory necessitates
> +	 * SIGBUS (i.e. MF_MUST_KILL)
> +	 */
> +	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
> +	collect_procs(page, &to_kill, true);
> +
> +	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
> +unlock:
> +	dax_unlock_page(page, cookie);
> +	return rc;
> +}
> +
>  /*
>   * Called from hugetlb code with hugetlb_lock held.
>   *
> @@ -1644,12 +1728,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
>  		struct dev_pagemap *pgmap)
>  {
>  	struct page *page = pfn_to_page(pfn);
> -	unsigned long size = 0;
> -	struct to_kill *tk;
>  	LIST_HEAD(tokill);

Is this variable unused in this function?

Thanks,
Naoya Horiguchi
Shiyang Ruan April 21, 2022, 8:10 a.m. UTC | #2
On 2022/4/21 14:13, HORIGUCHI NAOYA(堀口 直也) wrote:
> On Tue, Apr 19, 2022 at 12:50:40PM +0800, Shiyang Ruan wrote:
>> The memory_failure_dev_pagemap code is a bit complex before the RMAP
>> feature for fsdax is introduced.  So factor out some helper functions
>> to simplify the code.
>>
>> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
>> Reviewed-by: Darrick J. Wong <djwong@kernel.org>
>> Reviewed-by: Christoph Hellwig <hch@lst.de>
>> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
> 
> Thanks for the refactoring.  As I commented on 0/7, the conflict with
> "mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb()"
> can be trivially resolved.
> 
> A few more comments below ...
> 
>> ---
>>   mm/memory-failure.c | 157 ++++++++++++++++++++++++--------------------
>>   1 file changed, 87 insertions(+), 70 deletions(-)
>>
>> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
>> index e3fbff5bd467..7c8c047bfdc8 100644
>> --- a/mm/memory-failure.c
>> +++ b/mm/memory-failure.c
>> @@ -1498,6 +1498,90 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
>>   	return 0;
>>   }
>>
>> +static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
>> +		struct address_space *mapping, pgoff_t index, int flags)
>> +{
>> +	struct to_kill *tk;
>> +	unsigned long size = 0;
>> +
>> +	list_for_each_entry(tk, to_kill, nd)
>> +		if (tk->size_shift)
>> +			size = max(size, 1UL << tk->size_shift);
>> +
>> +	if (size) {
>> +		/*
>> +		 * Unmap the largest mapping to avoid breaking up device-dax
>> +		 * mappings which are constant size. The actual size of the
>> +		 * mapping being torn down is communicated in siginfo, see
>> +		 * kill_proc()
>> +		 */
>> +		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
>> +
>> +		unmap_mapping_range(mapping, start, size, 0);
>> +	}
>> +
>> +	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
>> +}
>> +
>> +static int mf_generic_kill_procs(unsigned long long pfn, int flags,
>> +		struct dev_pagemap *pgmap)
>> +{
>> +	struct page *page = pfn_to_page(pfn);
>> +	LIST_HEAD(to_kill);
>> +	dax_entry_t cookie;
>> +	int rc = 0;
>> +
>> +	/*
>> +	 * Pages instantiated by device-dax (not filesystem-dax)
>> +	 * may be compound pages.
>> +	 */
>> +	page = compound_head(page);
>> +
>> +	/*
>> +	 * Prevent the inode from being freed while we are interrogating
>> +	 * the address_space, typically this would be handled by
>> +	 * lock_page(), but dax pages do not use the page lock. This
>> +	 * also prevents changes to the mapping of this pfn until
>> +	 * poison signaling is complete.
>> +	 */
>> +	cookie = dax_lock_page(page);
>> +	if (!cookie)
>> +		return -EBUSY;
>> +
>> +	if (hwpoison_filter(page)) {
>> +		rc = -EOPNOTSUPP;
>> +		goto unlock;
>> +	}
>> +
>> +	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
>> +		/*
>> +		 * TODO: Handle HMM pages which may need coordination
>> +		 * with device-side memory.
>> +		 */
>> +		return -EBUSY;
> 
> Don't we need to go to dax_unlock_page() as the original code does?
> 
>> +	}
>> +
>> +	/*
>> +	 * Use this flag as an indication that the dax page has been
>> +	 * remapped UC to prevent speculative consumption of poison.
>> +	 */
>> +	SetPageHWPoison(page);
>> +
>> +	/*
>> +	 * Unlike System-RAM there is no possibility to swap in a
>> +	 * different physical page at a given virtual address, so all
>> +	 * userspace consumption of ZONE_DEVICE memory necessitates
>> +	 * SIGBUS (i.e. MF_MUST_KILL)
>> +	 */
>> +	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
>> +	collect_procs(page, &to_kill, true);
>> +
>> +	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
>> +unlock:
>> +	dax_unlock_page(page, cookie);
>> +	return rc;
>> +}
>> +
>>   /*
>>    * Called from hugetlb code with hugetlb_lock held.
>>    *
>> @@ -1644,12 +1728,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
>>   		struct dev_pagemap *pgmap)
>>   {
>>   	struct page *page = pfn_to_page(pfn);
>> -	unsigned long size = 0;
>> -	struct to_kill *tk;
>>   	LIST_HEAD(tokill);
> 
> Is this variable unused in this function?

Yes, this one and the one above are mistakes I didn't notice when I was
resolving conflicts with the newer -next branch.  I'll fix them in the
next version.


--
Thanks,
Ruan.

> 
> Thanks,
> Naoya Horiguchi
Miaohe Lin April 21, 2022, 8:12 a.m. UTC | #3
On 2022/4/21 14:13, HORIGUCHI NAOYA(堀口 直也) wrote:
> On Tue, Apr 19, 2022 at 12:50:40PM +0800, Shiyang Ruan wrote:
>> The memory_failure_dev_pagemap code is a bit complex before the RMAP
>> feature for fsdax is introduced.  So factor out some helper functions
>> to simplify the code.
>>
>> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
>> Reviewed-by: Darrick J. Wong <djwong@kernel.org>
>> Reviewed-by: Christoph Hellwig <hch@lst.de>
>> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
> 
> Thanks for the refactoring.  As I commented on 0/7, the conflict with
> "mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb()"
> can be trivially resolved.
> 
> A few more comments below ...
> 
>> ---
>>  mm/memory-failure.c | 157 ++++++++++++++++++++++++--------------------
>>  1 file changed, 87 insertions(+), 70 deletions(-)
>>
>> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
>> index e3fbff5bd467..7c8c047bfdc8 100644
>> --- a/mm/memory-failure.c
>> +++ b/mm/memory-failure.c
>> @@ -1498,6 +1498,90 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
>>  	return 0;
>>  }
>>
>> +static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
>> +		struct address_space *mapping, pgoff_t index, int flags)
>> +{
>> +	struct to_kill *tk;
>> +	unsigned long size = 0;
>> +
>> +	list_for_each_entry(tk, to_kill, nd)
>> +		if (tk->size_shift)
>> +			size = max(size, 1UL << tk->size_shift);
>> +
>> +	if (size) {
>> +		/*
>> +		 * Unmap the largest mapping to avoid breaking up device-dax
>> +		 * mappings which are constant size. The actual size of the
>> +		 * mapping being torn down is communicated in siginfo, see
>> +		 * kill_proc()
>> +		 */
>> +		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
>> +
>> +		unmap_mapping_range(mapping, start, size, 0);
>> +	}
>> +
>> +	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
>> +}
>> +
>> +static int mf_generic_kill_procs(unsigned long long pfn, int flags,
>> +		struct dev_pagemap *pgmap)
>> +{
>> +	struct page *page = pfn_to_page(pfn);
>> +	LIST_HEAD(to_kill);
>> +	dax_entry_t cookie;
>> +	int rc = 0;
>> +
>> +	/*
>> +	 * Pages instantiated by device-dax (not filesystem-dax)
>> +	 * may be compound pages.
>> +	 */
>> +	page = compound_head(page);
>> +
>> +	/*
>> +	 * Prevent the inode from being freed while we are interrogating
>> +	 * the address_space, typically this would be handled by
>> +	 * lock_page(), but dax pages do not use the page lock. This
>> +	 * also prevents changes to the mapping of this pfn until
>> +	 * poison signaling is complete.
>> +	 */
>> +	cookie = dax_lock_page(page);
>> +	if (!cookie)
>> +		return -EBUSY;
>> +
>> +	if (hwpoison_filter(page)) {
>> +		rc = -EOPNOTSUPP;
>> +		goto unlock;
>> +	}
>> +
>> +	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
>> +		/*
>> +		 * TODO: Handle HMM pages which may need coordination
>> +		 * with device-side memory.
>> +		 */
>> +		return -EBUSY;
> 
> Don't we need to go to dax_unlock_page() as the original code does?

I think dax_unlock_page is needed too, and please remember to set rc to -EBUSY before bailing out.
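Something like the following minimal sketch, reusing the existing unlock
label (untested, just to illustrate the suggested error path):

	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/*
		 * TODO: Handle HMM pages which may need coordination
		 * with device-side memory.
		 */
		rc = -EBUSY;	/* keep the original error code */
		goto unlock;	/* drop the dax lock before returning */
	}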

> 
>> +	}
>> +
>> +	/*
>> +	 * Use this flag as an indication that the dax page has been
>> +	 * remapped UC to prevent speculative consumption of poison.
>> +	 */
>> +	SetPageHWPoison(page);
>> +
>> +	/*
>> +	 * Unlike System-RAM there is no possibility to swap in a
>> +	 * different physical page at a given virtual address, so all
>> +	 * userspace consumption of ZONE_DEVICE memory necessitates
>> +	 * SIGBUS (i.e. MF_MUST_KILL)
>> +	 */
>> +	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
>> +	collect_procs(page, &to_kill, true);
>> +
>> +	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
>> +unlock:
>> +	dax_unlock_page(page, cookie);
>> +	return rc;
>> +}
>> +
>>  /*
>>   * Called from hugetlb code with hugetlb_lock held.
>>   *
>> @@ -1644,12 +1728,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
>>  		struct dev_pagemap *pgmap)
>>  {
>>  	struct page *page = pfn_to_page(pfn);
>> -	unsigned long size = 0;
>> -	struct to_kill *tk;
>>  	LIST_HEAD(tokill);
> 
> Is this variable unused in this function?

There is already a to_kill in mf_generic_kill_procs, so this one is unneeded. We should remove it.
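A quick sketch of what the local declarations would then reduce to
(assuming nothing else in the function grows to use them):

	struct page *page = pfn_to_page(pfn);	/* still needed for put_page() */
	int rc = -ENXIO;	/* device metadata space is not recoverable */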

> 
> Thanks,
> Naoya Horiguchi
> 

Except for the above nits, the patch looks good to me. Thanks!

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Patch

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e3fbff5bd467..7c8c047bfdc8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1498,6 +1498,90 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
 	return 0;
 }
 
+static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+		struct address_space *mapping, pgoff_t index, int flags)
+{
+	struct to_kill *tk;
+	unsigned long size = 0;
+
+	list_for_each_entry(tk, to_kill, nd)
+		if (tk->size_shift)
+			size = max(size, 1UL << tk->size_shift);
+
+	if (size) {
+		/*
+		 * Unmap the largest mapping to avoid breaking up device-dax
+		 * mappings which are constant size. The actual size of the
+		 * mapping being torn down is communicated in siginfo, see
+		 * kill_proc()
+		 */
+		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
+
+		unmap_mapping_range(mapping, start, size, 0);
+	}
+
+	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
+}
+
+static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+		struct dev_pagemap *pgmap)
+{
+	struct page *page = pfn_to_page(pfn);
+	LIST_HEAD(to_kill);
+	dax_entry_t cookie;
+	int rc = 0;
+
+	/*
+	 * Pages instantiated by device-dax (not filesystem-dax)
+	 * may be compound pages.
+	 */
+	page = compound_head(page);
+
+	/*
+	 * Prevent the inode from being freed while we are interrogating
+	 * the address_space, typically this would be handled by
+	 * lock_page(), but dax pages do not use the page lock. This
+	 * also prevents changes to the mapping of this pfn until
+	 * poison signaling is complete.
+	 */
+	cookie = dax_lock_page(page);
+	if (!cookie)
+		return -EBUSY;
+
+	if (hwpoison_filter(page)) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		/*
+		 * TODO: Handle HMM pages which may need coordination
+		 * with device-side memory.
+		 */
+		return -EBUSY;
+	}
+
+	/*
+	 * Use this flag as an indication that the dax page has been
+	 * remapped UC to prevent speculative consumption of poison.
+	 */
+	SetPageHWPoison(page);
+
+	/*
+	 * Unlike System-RAM there is no possibility to swap in a
+	 * different physical page at a given virtual address, so all
+	 * userspace consumption of ZONE_DEVICE memory necessitates
+	 * SIGBUS (i.e. MF_MUST_KILL)
+	 */
+	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+	collect_procs(page, &to_kill, true);
+
+	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+unlock:
+	dax_unlock_page(page, cookie);
+	return rc;
+}
+
 /*
  * Called from hugetlb code with hugetlb_lock held.
  *
@@ -1644,12 +1728,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 		struct dev_pagemap *pgmap)
 {
 	struct page *page = pfn_to_page(pfn);
-	unsigned long size = 0;
-	struct to_kill *tk;
 	LIST_HEAD(tokill);
-	int rc = -EBUSY;
-	loff_t start;
-	dax_entry_t cookie;
+	int rc = -ENXIO;
 
 	if (flags & MF_COUNT_INCREASED)
 		/*
@@ -1658,73 +1738,10 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 		put_page(page);
 
 	/* device metadata space is not recoverable */
-	if (!pgmap_pfn_valid(pgmap, pfn)) {
-		rc = -ENXIO;
-		goto out;
-	}
-
-	/*
-	 * Pages instantiated by device-dax (not filesystem-dax)
-	 * may be compound pages.
-	 */
-	page = compound_head(page);
-
-	/*
-	 * Prevent the inode from being freed while we are interrogating
-	 * the address_space, typically this would be handled by
-	 * lock_page(), but dax pages do not use the page lock. This
-	 * also prevents changes to the mapping of this pfn until
-	 * poison signaling is complete.
-	 */
-	cookie = dax_lock_page(page);
-	if (!cookie)
+	if (!pgmap_pfn_valid(pgmap, pfn))
 		goto out;
 
-	if (hwpoison_filter(page)) {
-		rc = -EOPNOTSUPP;
-		goto unlock;
-	}
-
-	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-		/*
-		 * TODO: Handle HMM pages which may need coordination
-		 * with device-side memory.
-		 */
-		goto unlock;
-	}
-
-	/*
-	 * Use this flag as an indication that the dax page has been
-	 * remapped UC to prevent speculative consumption of poison.
-	 */
-	SetPageHWPoison(page);
-
-	/*
-	 * Unlike System-RAM there is no possibility to swap in a
-	 * different physical page at a given virtual address, so all
-	 * userspace consumption of ZONE_DEVICE memory necessitates
-	 * SIGBUS (i.e. MF_MUST_KILL)
-	 */
-	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
-	collect_procs(page, &tokill, true);
-
-	list_for_each_entry(tk, &tokill, nd)
-		if (tk->size_shift)
-			size = max(size, 1UL << tk->size_shift);
-	if (size) {
-		/*
-		 * Unmap the largest mapping to avoid breaking up
-		 * device-dax mappings which are constant size. The
-		 * actual size of the mapping being torn down is
-		 * communicated in siginfo, see kill_proc()
-		 */
-		start = (page->index << PAGE_SHIFT) & ~(size - 1);
-		unmap_mapping_range(page->mapping, start, size, 0);
-	}
-	kill_procs(&tokill, true, false, pfn, flags);
-	rc = 0;
-unlock:
-	dax_unlock_page(page, cookie);
+	rc = mf_generic_kill_procs(pfn, flags, pgmap);
 out:
 	/* drop pgmap ref acquired in caller */
 	put_dev_pagemap(pgmap);