@@ -109,6 +109,8 @@ enum dax_device_flags {
DAXDEV_NOCACHE,
/* handle CPU fetch exceptions during reads */
DAXDEV_NOMC,
+ /* flag to indicate device capable of poison recovery */
+ DAXDEV_RECOVERY,
};
/**
@@ -311,6 +313,28 @@ static void dax_destroy_inode(struct inode *inode)
"kill_dax() must be called before final iput()\n");
}
+void set_dax_recovery(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_RECOVERY, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_recovery);
+
+bool dax_recovery_capable(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_RECOVERY, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_recovery_capable);
+
+int dax_prep_recovery(struct dax_device *dax_dev, void **kaddr)
+{
+ if (dax_recovery_capable(dax_dev)) {
+ set_bit(DAXDEV_RECOVERY, (unsigned long *)kaddr);
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(dax_prep_recovery);
+
static const struct super_operations dax_sops = {
.statfs = simple_statfs,
.alloc_inode = dax_alloc_inode,
@@ -487,6 +487,7 @@ static int pmem_attach_disk(struct device *dev,
if (rc)
goto out_cleanup_dax;
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
+ set_dax_recovery(dax_dev);
pmem->dax_dev = dax_dev;
rc = device_add_disk(dev, disk, pmem_attribute_groups);
@@ -40,6 +40,8 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
+void set_dax_recovery(struct dax_device *dax_dev);
+bool dax_recovery_capable(struct dax_device *dax_dev);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -87,6 +89,13 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
{
return !(vma->vm_flags & VM_SYNC);
}
+static inline void set_dax_recovery(struct dax_device *dax_dev)
+{
+}
+static inline bool dax_recovery_capable(struct dax_device *dax_dev)
+{
+ return false;
+}
#endif
void set_dax_nocache(struct dax_device *dax_dev);
@@ -128,6 +137,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
+int dax_prep_recovery(struct dax_device *dax_dev, void **kaddr);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
@@ -155,6 +165,11 @@ static inline dax_entry_t dax_lock_page(struct page *page)
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
+
+static inline int dax_prep_recovery(struct dax_device *dax_dev, void **kaddr)
+{
+ return -EINVAL;
+}
#endif
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
Introduce dax device flag DAXDEV_RECOVERY to indicate a device that is capable of recovering from media poison. For MD raid DAX devices, the capability is allowed for a partial device as opposed to the entire device. The final poison detection and repair rely on the underlying provisioning base drivers. Signed-off-by: Jane Chu <jane.chu@oracle.com> --- drivers/dax/super.c | 24 ++++++++++++++++++++++++ drivers/nvdimm/pmem.c | 1 + include/linux/dax.h | 15 +++++++++++++++ 3 files changed, 40 insertions(+)