--- a/fs/dax.c
+++ b/fs/dax.c
@@ -395,7 +395,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);
- WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
+ WARN_ON_ONCE(trunc && !dax_page_idle(page));
if (dax_mapping_is_cow(page->mapping)) {
/* keep the CoW flag if this page is still shared */
if (page->index-- > 0)
@@ -414,7 +414,7 @@ static struct page *dax_busy_page(void *entry)
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);
- if (page_ref_count(page) > 1)
+ if (!dax_page_idle(page))
return page;
}
return NULL;
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3961,8 +3961,7 @@ int ext4_break_layouts(struct inode *inode)
if (!page)
return 0;
- error = ___wait_var_event(page,
- atomic_read(&page->_refcount) == 1,
+ error = ___wait_var_event(page, dax_page_idle(page),
TASK_INTERRUPTIBLE, 0, 0,
ext4_wait_dax_page(inode));
} while (error == 0);
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -676,9 +676,8 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
return 0;
*retry = true;
- return ___wait_var_event(page, atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- fuse_wait_dax_page(inode));
+ return ___wait_var_event(page, dax_page_idle(page), TASK_INTERRUPTIBLE,
+ 0, 0, fuse_wait_dax_page(inode));
}
/* dmap_end == 0 leads to unmapping of whole file */
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -827,9 +827,8 @@ xfs_break_dax_layouts(
return 0;
*retry = true;
- return ___wait_var_event(page, atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- xfs_wait_dax_page(inode));
+ return ___wait_var_event(page, dax_page_idle(page), TASK_INTERRUPTIBLE,
+ 0, 0, xfs_wait_dax_page(inode));
}
int
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -210,6 +210,15 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
+/*
+ * Document all the code locations that want to know when a dax page is
+ * unreferenced.
+ */
+static inline bool dax_page_idle(struct page *page)
+{
+ return page_ref_count(page) == 1;
+}
+
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
In advance of converting DAX pages to be 0-based, use a new
dax_page_idle() helper to both simplify that future conversion and
document all the kernel locations that are watching for DAX page idle
events.

Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/dax.c            |    4 ++--
 fs/ext4/inode.c     |    3 +--
 fs/fuse/dax.c       |    5 ++---
 fs/xfs/xfs_file.c   |    5 ++---
 include/linux/dax.h |    9 +++++++++
 5 files changed, 16 insertions(+), 10 deletions(-)
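
For reference, every call site converted above follows the same shape: look up a
busy DAX page, then sleep until dax_page_idle() reports it has gone idle. The
minimal sketch below restates that pattern outside the diff context;
fs_break_dax_layouts() and fs_wait_dax_page() are hypothetical stand-ins for the
per-filesystem helpers touched by this patch (ext4_break_layouts(),
__fuse_dax_break_layouts(), xfs_break_dax_layouts()), and the invalidate_lock
dance is an assumption modeled on ext4's wait callback, not something shown in
the hunks themselves.

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

/*
 * Hypothetical wait callback: drop the mapping's invalidate_lock so the
 * busy page can be released, reschedule, then retake the lock before
 * the caller retries (assumed to mirror ext4_wait_dax_page()).
 */
static void fs_wait_dax_page(struct inode *inode)
{
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}

/*
 * Sketch of the shared break-layouts loop after this patch: the only
 * question asked about a busy page is dax_page_idle(), so a later
 * switch to 0-based DAX page references only needs to touch that helper.
 */
static int fs_break_dax_layouts(struct inode *inode)
{
	struct page *page;
	int error = 0;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;

		error = ___wait_var_event(page, dax_page_idle(page),
					  TASK_INTERRUPTIBLE, 0, 0,
					  fs_wait_dax_page(inode));
	} while (error == 0);

	return error;
}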