@@ -845,6 +845,35 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
return ret;
}
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ void *entry;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
+
+ /* If end == LLONG_MAX, all pages from start till end of file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end_idx) {
+ if (!xa_is_value(entry))
+ continue;
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ dax_disassociate_entry(entry, mapping, true);
+ xas_store(&xas, NULL);
+ mapping->nrpages -= 1UL << dax_entry_order(entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
+ }
+ xas_unlock_irq(&xas);
+}
+
static int wait_page_idle(struct page *page,
void (cb)(struct inode *),
struct inode *inode)
@@ -871,6 +900,9 @@ int dax_break_mapping(struct inode *inode, loff_t start, loff_t end,
error = wait_page_idle(page, cb, inode);
} while (error == 0);
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, start, end);
+
return error;
}
@@ -2735,6 +2735,12 @@ xfs_mmaplock_two_inodes_and_break_dax_layout(
goto again;
}
+ /*
+ * Normally xfs_break_dax_layouts() would delete the mapping entries
+ * as well, so do that here.
+ */
+ dax_delete_mapping_range(VFS_I(ip2)->i_mapping, 0, LLONG_MAX);
+
return 0;
}
@@ -255,6 +255,8 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
int __must_check dax_break_mapping(struct inode *inode, loff_t start,
@@ -102,6 +102,18 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
}
if (unlikely(dax)) {
+ /*
+ * File systems should already have called
+ * dax_break_mapping() to remove all DAX entries
+ * while holding a lock to prevent establishing new
+ * entries. Therefore we shouldn't find any here.
+ */
+ WARN_ON_ONCE(1);
+
+ /*
+ * Delete the mapping so truncate_pagecache() doesn't
+ * loop forever.
+ */
dax_delete_mapping_entry(mapping, index);
continue;
}
Prior to any truncation operations file systems call dax_break_mapping() to
ensure pages in the range are not undergoing DMA. Later DAX page-cache entries
will be removed by truncate_folio_batch_exceptionals() in the generic
page-cache code.

However this makes it possible for folios to be removed from the page-cache
even though they are still DMA busy if the file system hasn't called
dax_break_mapping(). It also means they can never be waited on in the future
because FS DAX will lose track of them once the page-cache entry has been
deleted.

Instead it is better to delete the FS DAX entry when the file system calls
dax_break_mapping() as part of its truncate operation. This ensures only idle
pages can be removed from the FS DAX page-cache and makes it easy to detect
if a file system hasn't called dax_break_mapping() prior to a truncate
operation.

Signed-off-by: Alistair Popple <apopple@nvidia.com>

---

Ideally I think we would move the whole wait-for-idle logic directly into the
truncate paths. However this is difficult for a few reasons. Each filesystem
needs its own wait callback, although a new address space operation could
address that. More problematic is that the wait-for-idle can fail because the
wait is TASK_INTERRUPTIBLE, but none of the generic truncate paths allow for
failure. So it ends up being easier to continue to let file systems call this
and check that they behave as expected.

---
 fs/dax.c            | 32 ++++++++++++++++++++++++++++++++
 fs/xfs/xfs_inode.c  |  6 ++++++
 include/linux/dax.h |  2 ++
 mm/truncate.c       | 12 ++++++++++++
 4 files changed, 52 insertions(+)
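
For readers unfamiliar with the calling convention this change assumes, the
sketch below (illustrative only, not part of the patch) shows roughly how a
filesystem truncate path would call dax_break_mapping() before
truncate_pagecache(), which is why truncate_folio_batch_exceptionals() should
no longer find any DAX entries. The names example_wait_cb() and
example_setsize() are hypothetical; real filesystems supply their own locking
and wait callbacks (e.g. xfs_wait_dax_page() in XFS).

/* Illustrative sketch only -- not part of this patch. */
static void example_wait_cb(struct inode *inode)
{
	/*
	 * Filesystem-specific wait: typically drop and retake the relevant
	 * locks around a schedule() while the page's refcount drains
	 * (XFS does this in xfs_wait_dax_page()).
	 */
}

static int example_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (IS_DAX(inode)) {
		/*
		 * Waits for DMA to finish and, with this change, also
		 * deletes the now-idle DAX page-cache entries in the
		 * truncated range.
		 */
		error = dax_break_mapping(inode, newsize, LLONG_MAX,
					  example_wait_cb);
		if (error)
			return error;
	}

	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);

	/*
	 * truncate_folio_batch_exceptionals() should find no DAX entries
	 * here; if it does, the new WARN_ON_ONCE() fires.
	 */
	return 0;
}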