
[v2,05/14] mm: Introduce mf_dax_kill_procs() for fsdax case

Message ID 20220603053738.1218681-6-ruansy.fnst@fujitsu.com (mailing list archive)
State New, archived
Series [v2,01/14] dax: Introduce holder for dax_device

Commit Message

Shiyang Ruan June 3, 2022, 5:37 a.m. UTC
This new function is a variant of mf_generic_kill_procs that accepts a
file, offset pair instead of a struct to support multiple files sharing
a DAX mapping.  It is intended to be called by the file systems as part
of the memory_failure handler after the file system performed a reverse
mapping from the storage address to the file and file offset.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
---
 include/linux/mm.h  |  2 +
 mm/memory-failure.c | 96 ++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 88 insertions(+), 10 deletions(-)
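
To illustrate the intended call site, here is a minimal sketch of a
filesystem-side caller once it has reverse-mapped a failed storage range to
(mapping, index, count).  The caller and its name are hypothetical; only the
mf_dax_kill_procs() prototype comes from this patch.

#include <linux/mm.h>

/*
 * Hypothetical example caller, not part of the patch: invoked from a
 * filesystem's memory-failure notification path after the reverse mapping
 * from storage address to file offset has been done.  Passing 0 for
 * mf_flags is enough here because mf_dax_kill_procs() itself adds
 * MF_ACTION_REQUIRED | MF_MUST_KILL.
 */
static int example_fs_notify_media_failure(struct address_space *mapping,
					   pgoff_t index, unsigned long count)
{
	return mf_dax_kill_procs(mapping, index, count, 0);
}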

Comments

Dan Williams Aug. 24, 2022, 9:52 p.m. UTC | #1
Shiyang Ruan wrote:
> [..]

Unfortunately my test suite was only running the "non-destructive" set
of 'ndctl' tests which skipped some of the complex memory-failure cases.
Upon fixing that, bisect flags this commit as the source of the following
crash regression:

 kernel BUG at mm/memory-failure.c:310!
 invalid opcode: 0000 [#1] PREEMPT SMP PTI
 CPU: 26 PID: 1252 Comm: dax-pmd Tainted: G           OE     5.19.0-rc4+ #58
 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
 RIP: 0010:add_to_kill+0x304/0x400
[..]
 Call Trace:
  <TASK>
  collect_procs.part.0+0x2c8/0x470
  memory_failure+0x979/0xf30
  do_madvise.part.0.cold+0x9c/0xd3
  ? lock_is_held_type+0xe3/0x140
  ? find_held_lock+0x2b/0x80
  ? lock_release+0x145/0x2f0
  ? lock_is_held_type+0xe3/0x140
  ? syscall_enter_from_user_mode+0x20/0x70
  __x64_sys_madvise+0x56/0x70
  do_syscall_64+0x3a/0x80
  entry_SYSCALL_64_after_hwframe+0x46/0xb0

This is from running:

  meson test -C build dax-ext4.sh

...from the ndctl repo.

I will take a look, and am posting it here in case I do not find it tonight
so that Ruan can take a look.
HORIGUCHI NAOYA(堀口 直也) Aug. 24, 2022, 11:42 p.m. UTC | #2
On Wed, Aug 24, 2022 at 02:52:51PM -0700, Dan Williams wrote:
> Shiyang Ruan wrote:
> > [..]
> 
> Unfortunately my test suite was only running the "non-destructive" set
> of 'ndctl' tests which skipped some of the complex memory-failure cases.
> Upon fixing that, bisect flags this commit as the source of the following
> crash regression:

Thank you for testing/reporting.

> 
>  kernel BUG at mm/memory-failure.c:310!
>  invalid opcode: 0000 [#1] PREEMPT SMP PTI
>  CPU: 26 PID: 1252 Comm: dax-pmd Tainted: G           OE     5.19.0-rc4+ #58
>  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
>  RIP: 0010:add_to_kill+0x304/0x400
> [..]
>  Call Trace:
>   <TASK>
>   collect_procs.part.0+0x2c8/0x470
>   memory_failure+0x979/0xf30
>   do_madvise.part.0.cold+0x9c/0xd3
>   ? lock_is_held_type+0xe3/0x140
>   ? find_held_lock+0x2b/0x80
>   ? lock_release+0x145/0x2f0
>   ? lock_is_held_type+0xe3/0x140
>   ? syscall_enter_from_user_mode+0x20/0x70
>   __x64_sys_madvise+0x56/0x70
>   do_syscall_64+0x3a/0x80
>   entry_SYSCALL_64_after_hwframe+0x46/0xb0

This stacktrace shows that VM_BUG_ON_VMA() in dev_pagemap_mapping_shift()
was triggered.  I think that BUG_ON is too harsh here because address ==
-EFAULT means that there's no mapping for the address.  The subsequent
code already treats "tk->size_shift == 0" as the "no mapping" case, so
dev_pagemap_mapping_shift() can simply return 0 in such a case?

Could the following diff work for the issue?

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -316,7 +316,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
        pmd_t *pmd;
        pte_t *pte;

-       VM_BUG_ON_VMA(address == -EFAULT, vma);
+       if (address == -EFAULT)
+               return 0;
        pgd = pgd_offset(vma->vm_mm, address);
        if (!pgd_present(*pgd))
                return 0;
@@ -390,7 +391,8 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
        if (tk->addr == -EFAULT) {
                pr_info("Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
-       } else if (tk->size_shift == 0) {
+       }
+       if (tk->size_shift == 0) {
                kfree(tk);
                return;
        }

Thanks,
Naoya Horiguchi

Dan Williams Aug. 25, 2022, 4:33 a.m. UTC | #3
HORIGUCHI NAOYA(堀口 直也) wrote:
> On Wed, Aug 24, 2022 at 02:52:51PM -0700, Dan Williams wrote:
> [..]
> 
> Could the following diff work for the issue?

This passes the "dax-ext4.sh" and "dax-xfs.sh" tests from the ndctl
suite.

It then fails on the "device-dax" test with this signature:

 BUG: kernel NULL pointer dereference, address: 0000000000000010
 #PF: supervisor read access in kernel mode
 #PF: error_code(0x0000) - not-present page
 PGD 8000000205073067 P4D 8000000205073067 PUD 2062b3067 PMD 0 
 Oops: 0000 [#1] PREEMPT SMP PTI
 CPU: 22 PID: 4535 Comm: device-dax Tainted: G           OE    N 6.0.0-rc2+ #59
 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
 RIP: 0010:memory_failure+0x667/0xba0
[..]
 Call Trace:
  <TASK>
  ? _printk+0x58/0x73
  do_madvise.part.0.cold+0xaf/0xc5

Which is:

(gdb) li *(memory_failure+0x667)
0xffffffff813b7f17 is in memory_failure (mm/memory-failure.c:1933).
1928
1929            /*
1930             * Call driver's implementation to handle the memory failure, otherwise
1931             * fall back to generic handler.
1932             */
1933            if (pgmap->ops->memory_failure) {
1934                    rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);


...I think this is just a simple matter of:

@@ -1928,7 +1930,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
         * Call driver's implementation to handle the memory failure, otherwise
         * fall back to generic handler.
         */
-       if (pgmap->ops->memory_failure) {
+       if (pgmap->ops && pgmap->ops->memory_failure) {
                rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
                /*
                 * Fall back to generic handler too if operation is not


...since device-dax does not implement pagemap ops.
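
The same check written out as a standalone helper (a sketch only; the helper
name is made up and simply mirrors the one-line fix above):

#include <linux/memremap.h>

/*
 * Hypothetical helper, equivalent to the fix above: only consult the
 * driver's memory_failure hook when the pagemap actually provides ops.
 * device-dax registers its pagemap without ->ops, which is what the
 * "device-dax" test tripped over.
 */
static bool pgmap_has_memory_failure(const struct dev_pagemap *pgmap)
{
	return pgmap && pgmap->ops && pgmap->ops->memory_failure;
}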

I will see what else pops up and make sure that this regression test always
runs going forward.
Dan Williams Aug. 25, 2022, 5:05 a.m. UTC | #4
Dan Williams wrote:
> [..]
> I will see what else pops up and make sure that this regression test always
> runs going forward.

Ok, that was the last of the regression fallout that I could find.
Dan Williams Aug. 25, 2022, 7:28 p.m. UTC | #5
Dan Williams wrote:
> Dan Williams wrote:
> > HORIGUCHI NAOYA(堀口 直也) wrote:
> > > [..]
> > > Could the following diff work for the issue?
> > 
> > This passes the "dax-ext4.sh" and "dax-xfs.sh" tests from the ndctl
> > suite.

So that diff works to avoid the BUG_ON, but it does not work to handle
the error case. I think the problem comes from:

    vma->vm_file->f_mapping != folio->mapping

...where page_folio(page)->mapping is likely not set up correctly for DAX
pages. This goes back to the broken nature of DAX page reference counting,
which I am fixing now, but this folio association also needs to be fixed up.
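
A rough illustration of the mismatch being described (purely a debugging
sketch based on the statement above; the helper name is made up):

#include <linux/mm.h>
#include <linux/fs.h>

/*
 * Hypothetical sanity check: for a page mapped through a file-backed VMA
 * one would expect the folio's mapping to point back at the file's
 * address_space.  Per the report above, that association is not kept
 * coherent for fsdax pages, so this would return false there.
 */
static bool fsdax_folio_mapping_matches_vma(struct page *page,
					    struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);

	return vma->vm_file &&
	       folio->mapping == vma->vm_file->f_mapping;
}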

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8a96197b9afd..623c2ee8330a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3250,6 +3250,8 @@  enum mf_flags {
 	MF_UNPOISON = 1 << 4,
 	MF_NO_RETRY = 1 << 5,
 };
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+		      unsigned long count, int mf_flags);
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue(unsigned long pfn, int flags);
 extern void memory_failure_queue_kick(int cpu);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a9d93c30a1e4..5d015e1387bd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -301,10 +301,9 @@  void shake_page(struct page *p)
 }
 EXPORT_SYMBOL_GPL(shake_page);
 
-static unsigned long dev_pagemap_mapping_shift(struct page *page,
-		struct vm_area_struct *vma)
+static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
+		unsigned long address)
 {
-	unsigned long address = vma_address(page, vma);
 	unsigned long ret = 0;
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -344,10 +343,14 @@  static unsigned long dev_pagemap_mapping_shift(struct page *page,
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
+ *
+ * Notice: @fsdax_pgoff is used only when @p is an fsdax page.
+ *   In other cases, such as anonymous and file-backed pages, the address to be
+ *   killed can be calculated from @p itself.
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
-		       struct vm_area_struct *vma,
-		       struct list_head *to_kill)
+			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
+			struct list_head *to_kill)
 {
 	struct to_kill *tk;
 
@@ -358,9 +361,15 @@  static void add_to_kill(struct task_struct *tsk, struct page *p,
 	}
 
 	tk->addr = page_address_in_vma(p, vma);
-	if (is_zone_device_page(p))
-		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
-	else
+	if (is_zone_device_page(p)) {
+		/*
+		 * Since page->mapping is not used for fsdax, we need to
+		 * calculate the address based on the vma.
+		 */
+		if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
+	} else
 		tk->size_shift = page_shift(compound_head(p));
 
 	/*
@@ -509,7 +518,7 @@  static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 			if (!page_mapped_in_vma(page, vma))
 				continue;
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, vma, to_kill);
+				add_to_kill(t, page, 0, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -545,13 +554,41 @@  static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			 * to be informed of all such data corruptions.
 			 */
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, vma, to_kill);
+				add_to_kill(t, page, 0, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
 	i_mmap_unlock_read(mapping);
 }
 
+#ifdef CONFIG_FS_DAX
+/*
+ * Collect processes when the error hit a fsdax page.
+ */
+static void collect_procs_fsdax(struct page *page,
+		struct address_space *mapping, pgoff_t pgoff,
+		struct list_head *to_kill)
+{
+	struct vm_area_struct *vma;
+	struct task_struct *tsk;
+
+	i_mmap_lock_read(mapping);
+	read_lock(&tasklist_lock);
+	for_each_process(tsk) {
+		struct task_struct *t = task_early_kill(tsk, true);
+
+		if (!t)
+			continue;
+		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+			if (vma->vm_mm == t->mm)
+				add_to_kill(t, page, pgoff, vma, to_kill);
+		}
+	}
+	read_unlock(&tasklist_lock);
+	i_mmap_unlock_read(mapping);
+}
+#endif /* CONFIG_FS_DAX */
+
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
@@ -1591,6 +1628,45 @@  static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	return rc;
 }
 
+#ifdef CONFIG_FS_DAX
+/**
+ * mf_dax_kill_procs - Collect and kill processes who are using this file range
+ * @mapping:	address_space of the file in use
+ * @index:	start pgoff of the range within the file
+ * @count:	length of the range, in unit of PAGE_SIZE
+ * @mf_flags:	memory failure flags
+ */
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+		unsigned long count, int mf_flags)
+{
+	LIST_HEAD(to_kill);
+	dax_entry_t cookie;
+	struct page *page;
+	size_t end = index + count;
+
+	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+
+	for (; index < end; index++) {
+		page = NULL;
+		cookie = dax_lock_mapping_entry(mapping, index, &page);
+		if (!cookie)
+			return -EBUSY;
+		if (!page)
+			goto unlock;
+
+		SetPageHWPoison(page);
+
+		collect_procs_fsdax(page, mapping, index, &to_kill);
+		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
+				index, mf_flags);
+unlock:
+		dax_unlock_mapping_entry(mapping, index, cookie);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
+#endif /* CONFIG_FS_DAX */
+
 /*
  * Called from hugetlb code with hugetlb_lock held.
  *