Catch cases where truncate encounters pages that are still under active
DMA. This warning is a canary for potential data corruption, as
truncated blocks could be allocated to a new file while the device is
still performing I/O.

Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/dax.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

@@ -437,6 +437,38 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 	return entry;
 }
 
+static unsigned long dax_entry_size(void *entry)
+{
+	if (dax_is_zero_entry(entry))
+		return 0;
+	else if (dax_is_pmd_entry(entry))
+		return HPAGE_SIZE;
+	else
+		return PAGE_SIZE;
+}
+
+static void dax_check_truncate(void *entry)
+{
+	unsigned long pfn = dax_radix_pfn(entry);
+	unsigned long size = dax_entry_size(entry);
+	unsigned long end_pfn;
+
+	if (!size)
+		return;
+	end_pfn = pfn + size / PAGE_SIZE;
+	for (; pfn < end_pfn; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		/*
+		 * devmap pages are idle when their count is 1 and the
+		 * only path that increases their count is
+		 * get_user_pages().
+		 */
+		WARN_ONCE(page_ref_count(page) > 1,
+				"dax-dma truncate collision\n");
+	}
+}
+
 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 					  pgoff_t index, bool trunc)
 {
@@ -452,6 +484,7 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
 		goto out;
+	dax_check_truncate(entry);
 	radix_tree_delete(page_tree, index);
 	mapping->nrexceptional--;
 	ret = 1;
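
A note on the invariant the warning relies on: as the comment in
dax_check_truncate() says, a devmap page is idle at a reference count
of 1, and only get_user_pages() elevates it. Purely for illustration,
here is a sketch of a possible follow-on that would block until
in-flight DMA drains instead of only warning; dax_wait_entry_idle() is
a hypothetical name and is not part of this patch:

/*
 * Hypothetical follow-on, not in this patch: poll until every page
 * backing the entry returns to the idle count of 1, i.e. until any
 * get_user_pages() references taken for in-flight DMA are dropped.
 */
static void dax_wait_entry_idle(void *entry)
{
	unsigned long pfn = dax_radix_pfn(entry);
	unsigned long end_pfn = pfn + dax_entry_size(entry) / PAGE_SIZE;

	/* zero entries have no backing pages; the loop body is skipped */
	for (; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		while (page_ref_count(page) > 1)
			schedule_timeout_uninterruptible(1);
	}
}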
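
Finally, to make the collision concrete, a hedged userspace sketch of
the scenario the WARN_ONCE is meant to catch (the paths below are
placeholders; it assumes root and a file on an fsdax mount). An
O_DIRECT read into a mmap'd DAX file pins the pmem page via
get_user_pages() in the block layer, so a concurrent truncate of the
mapped file hits exactly the dax_check_truncate() case:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder paths: a file on a DAX mount and any block device. */
	const char *dax_file = "/mnt/pmem/victim";
	const char *blk_dev = "/dev/sdb";
	long sz = sysconf(_SC_PAGESIZE);
	int fd = open(dax_file, O_RDWR | O_CREAT, 0644);
	int blk = open(blk_dev, O_RDONLY | O_DIRECT);
	void *buf;

	if (fd < 0 || blk < 0 || ftruncate(fd, sz))
		return 1;

	/* Map the DAX file: page faults hand out pmem pfns directly. */
	buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, sz);	/* fault the page in */

	if (fork() == 0) {
		/*
		 * Child: O_DIRECT reads into the DAX mapping pin the
		 * pmem page via get_user_pages() while each bio is in
		 * flight, holding its refcount above 1.
		 */
		for (int i = 0; i < 1000; i++)
			if (pread(blk, buf, sz, 0) < 0)
				_exit(1);
		_exit(0);
	}

	/*
	 * Parent: truncate the file while the child's i/o may still be
	 * pinning the page; if the timing lines up, the new WARN_ONCE
	 * fires because the page backing the radix entry is not idle.
	 */
	usleep(1000);
	if (ftruncate(fd, 0))
		perror("ftruncate");

	wait(NULL);
	munmap(buf, sz);
	return 0;
}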