--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -373,6 +373,19 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
 
+int dax_clear_poison(struct dax_device *dax_dev, pgoff_t pgoff,
+		size_t nr_pages)
+{
+	if (!dax_alive(dax_dev))
+		return -ENXIO;
+
+	if (!dax_dev->ops->clear_poison)
+		return -EOPNOTSUPP;
+
+	return dax_dev->ops->clear_poison(dax_dev, pgoff, nr_pages);
+}
+EXPORT_SYMBOL_GPL(dax_clear_poison);
+
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
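
For reference, the new helper mirrors dax_zero_page_range(): the caller passes a page-aligned pgoff_t plus a page count, and gets -ENXIO if the dax_device is dead or -EOPNOTSUPP if the backend provides no clear_poison method. Below is a minimal, hypothetical sketch of how a consumer might use it in an error-recovery path; the function name and recovery flow are illustrative only and are not part of this patch.

#include <linux/dax.h>

/* Hypothetical caller (illustration only, not part of this patch). */
static int example_repair_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	int rc = dax_clear_poison(dax_dev, pgoff, nr_pages);

	if (rc == -EOPNOTSUPP)
		return rc;	/* backend cannot repair; caller must fall back */
	if (rc)
		return rc;	/* repair was attempted but failed */

	/* Poison cleared: the range can now be zeroed and reused. */
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
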
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -36,6 +36,11 @@ struct dax_operations {
 			struct iov_iter *);
 	/* zero_page_range: required operation. Zero page range */
 	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+	/*
+	 * clear_poison: clear media errors in the given page-aligned range
+	 * via a vendor-appropriate method. Optional operation.
+	 */
+	int (*clear_poison)(struct dax_device *, pgoff_t, size_t);
 };
 
 extern struct attribute_group dax_attribute_group;
@@ -226,6 +231,7 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 			size_t nr_pages);
+int dax_clear_poison(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
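
Because clear_poison is optional, a backing driver opts in by filling in the new method in its dax_operations. The fragment below is a rough sketch of what that wiring might look like; the foo_* driver, its private structure, and its media-repair routine are hypothetical names used purely for illustration and are not defined by this patch (dax_get_private() and PFN_PHYS() are existing kernel helpers).

/* Hypothetical backend wiring (illustration only, not part of this patch). */
static int foo_dax_clear_poison(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	struct foo_device *foo = dax_get_private(dax_dev);

	/*
	 * Translate the page offset to a device byte offset and ask the
	 * (hypothetical) hardware-specific routine to repair the media.
	 */
	return foo_media_clear(foo, PFN_PHYS(pgoff), nr_pages * PAGE_SIZE);
}

static const struct dax_operations foo_dax_ops = {
	/* other required operations (direct_access, etc.) omitted here */
	.zero_page_range	= foo_dax_zero_page_range,
	.clear_poison		= foo_dax_clear_poison,	/* optional */
};
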
Not all dax backend hardware is capable of clearing poison on the fly, but dax backed by Intel DCPMEM does have that capability. It is desirable to take advantage of it in order to, first, speed up the repair and, second, preserve the contiguity of the backend instead of fragmenting it in search of clean blocks.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
 drivers/dax/super.c | 13 +++++++++++++
 include/linux/dax.h |  6 ++++++
 2 files changed, 19 insertions(+)