@@ -552,6 +552,28 @@ static void *grab_mapping_entry(struct xa_state *xas,
return xa_mk_internal(VM_FAULT_FALLBACK);
}
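+/**
+ * dax_mapping_is_dax - check whether @mapping is a mapped DAX mapping
+ * @mapping: address space to check
+ *
+ * Return: true if @mapping is a DAX mapping with active user mappings,
+ * false otherwise (including when get_user_pages() for dax is disabled).
+ */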
+bool dax_mapping_is_dax(struct address_space *mapping)
+{
+	/*
+	 * In the 'limited' case get_user_pages() for dax is disabled.
+	 */
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+		return false;
+
+	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(dax_mapping_is_dax);
+
/**
* dax_layout_busy_page - find first pinned page in @mapping
* @mapping: address space to scan for a page with ref count > 1
@@ -574,13 +596,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
unsigned int scanned = 0;
struct page *page = NULL;
-	/*
-	 * In the 'limited' case get_user_pages() for dax is disabled.
-	 */
-	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
-		return NULL;
-
-	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+	if (!dax_mapping_is_dax(mapping))
 		return NULL;
/*
@@ -4241,6 +4241,14 @@ int ext4_break_layouts(struct inode *inode)
if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
return -EINVAL;
+	/* Break layout leases, if any, before scanning for busy pages */
+	if (dax_mapping_is_dax(inode->i_mapping)) {
+		int ret = break_layout(inode, true);
+
+		if (ret)
+			return ret;
+	}
+
do {
page = dax_layout_busy_page(inode->i_mapping);
if (!page)
@@ -106,6 +106,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
+bool dax_mapping_is_dax(struct address_space *mapping);
struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
@@ -137,6 +138,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return NULL;
}
+static inline bool dax_mapping_is_dax(struct address_space *mapping)
+{
+	return false;
+}
+
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;