
[RFC,08/12] netfs: Keep dirty mark for pages with more than one dirty region

Message ID 162687518862.276387.262991356873597293.stgit@warthog.procyon.org.uk (mailing list archive)
State New
Series [RFC,01/12] afs: Sort out symlink reading

Commit Message

David Howells July 21, 2021, 1:46 p.m. UTC
If a page has more than one dirty region overlapping it, then we mustn't
clear the dirty mark when we want to flush one of them.

Make netfs_set_page_writeback() check the adjacent dirty regions to see
whether they overlap the page(s) spanned by the region we're interested in,
and if they do, leave the page marked dirty.
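
To make the overlap test concrete: a page covers the byte range
[page_offset(page), page_offset(page) + thp_size(page)), and the comparisons
in the patch treat each region's dirty range as half-open in the same way.
A minimal sketch of the predicate being applied below (the helper name is
illustrative only, not part of the patch):

	/* Illustrative only: half-open [start, end) overlap test, matching
	 * the comparisons used in netfs_set_page_writeback() below.
	 */
	static bool region_overlaps_page(loff_t r_start, loff_t r_end,
					 loff_t pg_start, loff_t pg_end)
	{
		return r_start < pg_end && r_end > pg_start;
	}

The list walks in the patch encode the same test incrementally: the backward
walk stops once r->dirty.end <= the page's start, and the forward walk stops
once r->dirty.start >= the page's end.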

NOTES:

 (1) Might want to discount the overlapping regions if they're already
     being flushed (in which case they wouldn't normally need to keep the
     dirty bit held); see the sketch after these notes.

 (2) Similarly, the writeback mark should not be cleared if the page is
     still being written back by another, overlapping region.
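
As a possible shape for (1), and assuming the region state enum orders
post-dirty states after NETFS_REGION_IS_DIRTY (no such state is defined in
this patch, so NETFS_REGION_IS_FLUSHING below is hypothetical), the skip
condition in both walks could be tightened from a less-than test to an
exact match:

	/* Hypothetical: skip regions not exactly in the dirty state, so
	 * that regions already being flushed (e.g. a presumed
	 * NETFS_REGION_IS_FLUSHING state) no longer hold the dirty bit.
	 */
	if (r->state != NETFS_REGION_IS_DIRTY)
		continue;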

Signed-off-by: David Howells <dhowells@redhat.com>
---

 fs/netfs/write_back.c |   41 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

Patch

diff --git a/fs/netfs/write_back.c b/fs/netfs/write_back.c
index 9fcb2ac50ebb..5c779cb12345 100644
--- a/fs/netfs/write_back.c
+++ b/fs/netfs/write_back.c
@@ -135,12 +135,47 @@ static int netfs_lock_pages(struct address_space *mapping,
 	return ret;
 }
 
-static int netfs_set_page_writeback(struct page *page)
+static int netfs_set_page_writeback(struct page *page,
+				    struct netfs_i_context *ctx,
+				    struct netfs_write_request *wreq)
 {
+	struct netfs_dirty_region *region = wreq->region, *r;
+	loff_t pos = page_offset(page);
+	bool clear_dirty = true;
+
 	/* Now we need to clear the dirty flags on any page that's not shared
 	 * with any other dirty region.
 	 */
-	if (!clear_page_dirty_for_io(page))
+	spin_lock(&ctx->lock);
+	if (pos < region->dirty.start) {
+		r = region;
+		list_for_each_entry_continue_reverse(r, &ctx->dirty_regions, dirty_link) {
+			if (r->dirty.end <= pos)
+				break;
+			if (r->state < NETFS_REGION_IS_DIRTY)
+				continue;
+			kdebug("keep-dirty-b %lx reg=%x r=%x",
+			       page->index, region->debug_id, r->debug_id);
+			clear_dirty = false;
+		}
+	}
+
+	pos += thp_size(page);
+	if (pos > region->dirty.end) {
+		r = region;
+		list_for_each_entry_continue(r, &ctx->dirty_regions, dirty_link) {
+			if (r->dirty.start >= pos)
+				break;
+			if (r->state < NETFS_REGION_IS_DIRTY)
+				continue;
+			kdebug("keep-dirty-f %lx reg=%x r=%x",
+			       page->index, region->debug_id, r->debug_id);
+			clear_dirty = false;
+		}
+	}
+	spin_unlock(&ctx->lock);
+
+	if (clear_dirty && !clear_page_dirty_for_io(page))
 		BUG();
 
 	/* We set writeback unconditionally because a page may participate in
@@ -225,7 +260,7 @@ static int netfs_begin_write(struct address_space *mapping,
 	trace_netfs_wreq(wreq);
 
 	netfs_iterate_pages(mapping, wreq->first, wreq->last,
-			    netfs_set_page_writeback);
+			    netfs_set_page_writeback, ctx, wreq);
 	netfs_unlock_pages(mapping, wreq->first, wreq->last);
 	iov_iter_xarray(&wreq->source, WRITE, &wreq->mapping->i_pages,
 			wreq->start, wreq->len);
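
The second hunk passes two extra arguments through netfs_iterate_pages();
the matching change to the iterator itself is not shown in this patch. A
hedged sketch of what the updated prototype presumably looks like (the
exact signature is an assumption):

	/* Presumed shape of the updated iterator (not part of this hunk):
	 * the per-page callback now receives the netfs context and the
	 * write request so that netfs_set_page_writeback() can consult
	 * the dirty-region list under ctx->lock.
	 */
	static int netfs_iterate_pages(struct address_space *mapping,
				       pgoff_t first, pgoff_t last,
				       int (*iterator)(struct page *page,
						       struct netfs_i_context *ctx,
						       struct netfs_write_request *wreq),
				       struct netfs_i_context *ctx,
				       struct netfs_write_request *wreq);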