[RFC,1/5] btrfs: scrub: make scrub_ctx::stripes dynamically allocated

Message ID 93e32d86ead00d7596072aa2f1fbc59022ebb3ad.1686977659.git.wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: scrub: introduce SCRUB_LOGICAL flag

Commit Message

Qu Wenruo June 17, 2023, 5:07 a.m. UTC
Currently scrub_ctx::stripes is a fixed-size array. This is fine for
most use cases, but later we may want to allocate a larger array for
logical-bytenr-based scrub.

So here we change the member to a dynamically allocated array.

This also changes the lifespan of the member: it now only needs to be
allocated and initialized at the beginning of the scrub_stripe()
function.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 65 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 51 insertions(+), 14 deletions(-)
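
In short, the stripes array now only lives across a single
scrub_stripe() call instead of the whole lifetime of the scrub_ctx. A
condensed sketch of the new entry/exit pattern, pieced together from
the hunks below (function body abbreviated):

	ret = alloc_scrub_stripes(sctx, SCRUB_STRIPES_PER_SCTX);
	if (ret < 0)
		return ret;

	/* ... the existing per-stripe queue/flush logic runs here ... */

	free_scrub_stripes(sctx);

scrub_free_ctx() also releases the array on its own, so a context torn
down early does not leak the stripes.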

Comments

Johannes Thumshirn June 20, 2023, 8:43 a.m. UTC | #1
On 17.06.23 07:08, Qu Wenruo wrote:
> @@ -315,9 +318,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
>  	if (!sctx)
>  		return;
>  
> -	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
> -		release_scrub_stripe(&sctx->stripes[i]);
> -
> +	if (sctx->stripes)
> +		for (i = 0; i < sctx->nr_stripes; i++)
> +			release_scrub_stripe(&sctx->stripes[i]);

I think we usually guard the inner part of an if with {}.
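
That is, with the multi-line body braced in the usual kernel style:

	if (sctx->stripes) {
		for (i = 0; i < sctx->nr_stripes; i++)
			release_scrub_stripe(&sctx->stripes[i]);
	}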

> +	kfree(sctx->stripes);
>  	kfree(sctx);

But it could even be simplified to:

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	if (!sctx)
		return;

	free_scrub_stripes(sctx);
	kfree(sctx);
}
> +static void free_scrub_stripes(struct scrub_ctx *sctx)
> +{

  +	if (!sctx->stripes)
  +		return;

> +	for (int i = 0; i < sctx->nr_stripes; i++)
> +		release_scrub_stripe(&sctx->stripes[i]);
> +	kfree(sctx->stripes);
> +	sctx->nr_stripes = 0;
> +	sctx->stripes = NULL;
> +}
> +

Otherwise, looks good to me.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Patch

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 093823aa8d2c..d83bcc396bb0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -172,7 +172,7 @@  struct scrub_stripe {
 };
 
 struct scrub_ctx {
-	struct scrub_stripe	stripes[SCRUB_STRIPES_PER_SCTX];
+	struct scrub_stripe	*stripes;
 	struct scrub_stripe	*raid56_data_stripes;
 	struct btrfs_fs_info	*fs_info;
 	int			first_free;
@@ -181,6 +181,9 @@  struct scrub_ctx {
 	int			readonly;
 	int			sectors_per_bio;
 
+	/* Number of stripes we have in @stripes. */
+	unsigned int		nr_stripes;
+
 	/* State of IO submission throttling affecting the associated device */
 	ktime_t			throttle_deadline;
 	u64			throttle_sent;
@@ -315,9 +318,10 @@  static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	if (!sctx)
 		return;
 
-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
-		release_scrub_stripe(&sctx->stripes[i]);
-
+	if (sctx->stripes)
+		for (i = 0; i < sctx->nr_stripes; i++)
+			release_scrub_stripe(&sctx->stripes[i]);
+	kfree(sctx->stripes);
 	kfree(sctx);
 }
 
@@ -331,7 +335,6 @@  static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 		struct btrfs_fs_info *fs_info, int is_dev_replace)
 {
 	struct scrub_ctx *sctx;
-	int		i;
 
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
@@ -339,14 +342,6 @@  static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 	refcount_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->fs_info = fs_info;
-	for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
-		int ret;
-
-		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
-		if (ret < 0)
-			goto nomem;
-		sctx->stripes[i].sctx = sctx;
-	}
 	sctx->first_free = 0;
 	atomic_set(&sctx->cancel_req, 0);
 
@@ -1660,6 +1655,7 @@  static int flush_scrub_stripes(struct scrub_ctx *sctx)
 	const int nr_stripes = sctx->cur_stripe;
 	int ret = 0;
 
+	ASSERT(nr_stripes <= sctx->nr_stripes);
 	if (!nr_stripes)
 		return 0;
 
@@ -1754,8 +1750,11 @@  static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
 	struct scrub_stripe *stripe;
 	int ret;
 
+	ASSERT(sctx->stripes);
+	ASSERT(sctx->nr_stripes);
+
 	/* No available slot, submit all stripes and wait for them. */
-	if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
+	if (sctx->cur_stripe >= sctx->nr_stripes) {
 		ret = flush_scrub_stripes(sctx);
 		if (ret < 0)
 			return ret;
@@ -2080,6 +2079,39 @@  static int scrub_simple_stripe(struct scrub_ctx *sctx,
 	return ret;
 }
 
+static void free_scrub_stripes(struct scrub_ctx *sctx)
+{
+	for (int i = 0; i < sctx->nr_stripes; i++)
+		release_scrub_stripe(&sctx->stripes[i]);
+	kfree(sctx->stripes);
+	sctx->nr_stripes = 0;
+	sctx->stripes = NULL;
+}
+
+static int alloc_scrub_stripes(struct scrub_ctx *sctx, int nr_stripes)
+{
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
+	int ret;
+
+	ASSERT(!sctx->stripes);
+	ASSERT(!sctx->nr_stripes);
+	sctx->stripes = kcalloc(nr_stripes, sizeof(struct scrub_stripe),
+				GFP_KERNEL);
+	if (!sctx->stripes)
+		return -ENOMEM;
+	sctx->nr_stripes = nr_stripes;
+	for (int i = 0; i < sctx->nr_stripes; i++) {
+		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
+		if (ret < 0)
+			goto cleanup;
+		sctx->stripes[i].sctx = sctx;
+	}
+	return 0;
+cleanup:
+	free_scrub_stripes(sctx);
+	return -ENOMEM;
+}
+
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct btrfs_block_group *bg,
 					   struct extent_map *em,
@@ -2106,6 +2138,10 @@  static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
 	scrub_blocked_if_needed(fs_info);
 
+	ret = alloc_scrub_stripes(sctx, SCRUB_STRIPES_PER_SCTX);
+	if (ret < 0)
+		return ret;
+
 	if (sctx->is_dev_replace &&
 	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
 		mutex_lock(&sctx->wr_lock);
@@ -2228,6 +2264,7 @@  static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		kfree(sctx->raid56_data_stripes);
 		sctx->raid56_data_stripes = NULL;
 	}
+	free_scrub_stripes(sctx);
 
 	if (sctx->is_dev_replace && ret >= 0) {
 		int ret2;