diff mbox series

[04/14] btrfs: raid56: add the interfaces to submit recovery rbio

Message ID 33215d978e2d8423aedde0c79d733204010c2adb.1688368617.git.wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs: scrub: introduce SCRUB_LOGICAL flag | expand

Commit Message

Qu Wenruo July 3, 2023, 7:32 a.m. UTC
The incoming scrub_logical would need to recover raid56 data sectors
with cached pages.

This means we cannot go through the regular btrfs_submit_bio() path, but
must instead take a path similar to raid56_parity_alloc_scrub_rbio().

So this patch adds the following new functions to allow a recovery rbio
to be allocated and submitted outside of the btrfs_submit_bio() path:

- raid56_parity_alloc_recover_rbio()
- raid56_parity_submit_recover_rbio()

This means we can now do a fully cached recovery without reading any
pages from disk.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/raid56.c | 31 +++++++++++++++++++++++++++++++
 fs/btrfs/raid56.h |  3 +++
 2 files changed, 34 insertions(+)
diff mbox series

Patch

diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index b2eb7e60a137..0340220463c6 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2383,6 +2383,31 @@  struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
 	return rbio;
 }
 
+/*
+ * Alloc a recovery rbio out of the regular btrfs_submit_bio() path.
+ *
+ * This allows scrub caller to use cached pages to reduce IO.
+ */
+struct btrfs_raid_bio *raid56_parity_alloc_recover_rbio(struct bio *bio,
+				struct btrfs_io_context *bioc, int mirror_num)
+{
+	struct btrfs_fs_info *fs_info = bioc->fs_info;
+	struct btrfs_raid_bio *rbio;
+
+	rbio = alloc_rbio(fs_info, bioc);
+	if (IS_ERR(rbio))
+		return NULL;
+	/* We should have some sectors that really need to be recovered. */
+	ASSERT(bio->bi_iter.bi_size);
+	bio_list_add(&rbio->bio_list, bio);
+	set_rbio_range_error(rbio, bio);
+	rbio->operation = BTRFS_RBIO_READ_REBUILD;
+	if (mirror_num > 2)
+		set_rbio_raid6_extra_error(rbio, mirror_num);
+
+	return rbio;
+}
+
 /*
  * We just scrub the parity that we have correct data on the same horizontal,
  * so we needn't allocate all pages for all the stripes.
@@ -2771,6 +2796,12 @@  void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
 		start_async_work(rbio, scrub_rbio_work_locked);
 }
 
+void raid56_parity_submit_recover_rbio(struct btrfs_raid_bio *rbio)
+{
+	if (!lock_stripe_add(rbio))
+		start_async_work(rbio, recover_rbio_work_locked);
+}
+
 /*
  * This is for scrub call sites where we already have correct stripe contents.
  * This allows us to avoid reading on-disk stripes again.
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 41354a9f158a..6f146eb86832 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -197,7 +197,10 @@  struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
 				struct btrfs_io_context *bioc,
 				struct btrfs_device *scrub_dev,
 				unsigned long *dbitmap);
+struct btrfs_raid_bio *raid56_parity_alloc_recover_rbio(struct bio *bio,
+				struct btrfs_io_context *bioc, int mirror_num);
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
+void raid56_parity_submit_recover_rbio(struct btrfs_raid_bio *rbio);
 void raid56_parity_cache_pages(struct btrfs_raid_bio *rbio, struct page **pages,
 			       int stripe_num);