diff mbox series

[4/8] btrfs: scrub: refactor scrub_find_csum()

Message ID 20201026071115.57225-5-wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs: scrub: support subpage scrub (completely independent version) | expand

Commit Message

Qu Wenruo Oct. 26, 2020, 7:11 a.m. UTC
Function scrub_find_csum() is to locate the csum for bytenr @logical
from sctx->csum_list.

However it lacks a lot of comments explaining things like how the
csum_list is organized and why we need to drop a csum range that is
before us.

Refactor the function by:
- Add more comment explaining the behavior
- Add comment explaining why we need to drop the csum range
- Put the csum copy in the main loop
  This is mostly for the incoming patches to make scrub_find_csum() able
  to find multiple checksums.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 70 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 51 insertions(+), 19 deletions(-)

Comments

Josef Bacik Oct. 26, 2020, 2:39 p.m. UTC | #1
On 10/26/20 3:11 AM, Qu Wenruo wrote:
> Function scrub_find_csum() is to locate the csum for bytenr @logical
> from sctx->csum_list.
> 
> However it lacks a lot of comments explaining things like how the
> csum_list is organized and why we need to drop a csum range that is
> before us.
> 
> Refactor the function by:
> - Add more comment explaining the behavior
> - Add comment explaining why we need to drop the csum range
> - Put the csum copy in the main loop
>    This is mostly for the incoming patches to make scrub_find_csum() able
>    to find multiple checksums.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>   fs/btrfs/scrub.c | 70 +++++++++++++++++++++++++++++++++++-------------
>   1 file changed, 51 insertions(+), 19 deletions(-)
> 
> diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
> index 321d6d457942..0d078393f986 100644
> --- a/fs/btrfs/scrub.c
> +++ b/fs/btrfs/scrub.c
> @@ -2386,37 +2386,69 @@ static void scrub_block_complete(struct scrub_block *sblock)
>   	}
>   }
>   
> +static void drop_csum_range(struct scrub_ctx *sctx,
> +			    struct btrfs_ordered_sum *sum)
> +{
> +	u32 sectorsize = sctx->fs_info->sectorsize;
> +
> +	sctx->stat.csum_discards += sum->len / sectorsize;
> +	list_del(&sum->list);
> +	kfree(sum);
> +}
> +
> +/*
> + * Find the desired csum for range [@logical, @logical + sectorsize), and
> + * store the csum into @csum.
> + *
> + * The search source is sctx->csum_list, which is a pre-populated list
> + * storing bytenr ordered csum ranges.
> + * We're responsible for cleaning up any range that is before @logical.
> + *
> + * Return 0 if there is no csum for the range.
> + * Return 1 if there is csum for the range and copied to @csum.
> + */
>   static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
>   {
> -	struct btrfs_ordered_sum *sum = NULL;
> -	unsigned long index;
> -	unsigned long num_sectors;
> +	u32 sectorsize = sctx->fs_info->sectorsize;
> +	u32 csum_size = sctx->csum_size;
> +	bool found = false;
>   
>   	while (!list_empty(&sctx->csum_list)) {
> +		struct btrfs_ordered_sum *sum = NULL;
> +		unsigned long index;
> +		unsigned long num_sectors;
> +
>   		sum = list_first_entry(&sctx->csum_list,
>   				       struct btrfs_ordered_sum, list);
> +		/* The current csum range is beyond our range, no csum found */
>   		if (sum->bytenr > logical)
> -			return 0;
> -		if (sum->bytenr + sum->len > logical)
>   			break;
>   
> -		++sctx->stat.csum_discards;
> -		list_del(&sum->list);
> -		kfree(sum);
> -		sum = NULL;
> -	}
> -	if (!sum)
> -		return 0;
> +		/*
> +		 * The current sum is before our bytenr, since scrub is
> +		 * always done in bytenr order, the csum will never be used
> +		 * anymore, clean it up so that later calls won't bother the
> +		 * range, and continue searching the next range.
> +		 */
> +		if (sum->bytenr + sum->len <= logical) {
> +			drop_csum_range(sctx, sum);
> +			continue;
> +		}
>   
> -	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
> -	ASSERT(index < UINT_MAX);
> +		/* Now the csum range covers our bytenr, copy the csum */
> +		found = true;
> +		index = div_u64(logical - sum->bytenr, sectorsize);
> +		num_sectors = sum->len / sectorsize;
>   
> -	num_sectors = sum->len / sctx->fs_info->sectorsize;
> -	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
> -	if (index == num_sectors - 1) {
> -		list_del(&sum->list);
> -		kfree(sum);
> +		memcpy(csum, sum->sums + index * csum_size, csum_size);
> +
> +		/* Cleanup the range if we're at the end of the csum range */
> +		if (index == num_sectors - 1)
> +			drop_csum_range(sctx, sum);
> +		break;
>   	}
> +	if (!found)
> +		return 0;
>   	return 1;
>   }

If it's just a bool we're returning, change this to

static bool scrub_find_csum()

and do

return found.

Thanks,

Josef
diff mbox series

Patch

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 321d6d457942..0d078393f986 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2386,37 +2386,69 @@  static void scrub_block_complete(struct scrub_block *sblock)
 	}
 }
 
+static void drop_csum_range(struct scrub_ctx *sctx,
+			    struct btrfs_ordered_sum *sum)
+{
+	u32 sectorsize = sctx->fs_info->sectorsize;
+
+	sctx->stat.csum_discards += sum->len / sectorsize;
+	list_del(&sum->list);
+	kfree(sum);
+}
+
+/*
+ * Find the desired csum for range [@logical, @logical + sectorsize), and
+ * store the csum into @csum.
+ *
+ * The search source is sctx->csum_list, which is a pre-populated list
+ * storing bytenr ordered csum ranges.
+ * We're responsible for cleaning up any range that is before @logical.
+ *
+ * Return 0 if there is no csum for the range.
+ * Return 1 if there is csum for the range and copied to @csum.
+ */
 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
 {
-	struct btrfs_ordered_sum *sum = NULL;
-	unsigned long index;
-	unsigned long num_sectors;
+	u32 sectorsize = sctx->fs_info->sectorsize;
+	u32 csum_size = sctx->csum_size;
+	bool found = false;
 
 	while (!list_empty(&sctx->csum_list)) {
+		struct btrfs_ordered_sum *sum = NULL;
+		unsigned long index;
+		unsigned long num_sectors;
+
 		sum = list_first_entry(&sctx->csum_list,
 				       struct btrfs_ordered_sum, list);
+		/* The current csum range is beyond our range, no csum found */
 		if (sum->bytenr > logical)
-			return 0;
-		if (sum->bytenr + sum->len > logical)
 			break;
 
-		++sctx->stat.csum_discards;
-		list_del(&sum->list);
-		kfree(sum);
-		sum = NULL;
-	}
-	if (!sum)
-		return 0;
+		/*
+		 * The current sum is before our bytenr, since scrub is
+		 * always done in bytenr order, the csum will never be used
+		 * anymore, clean it up so that later calls won't bother the
+		 * range, and continue searching the next range.
+		 */
+		if (sum->bytenr + sum->len <= logical) {
+			drop_csum_range(sctx, sum);
+			continue;
+		}
 
-	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
-	ASSERT(index < UINT_MAX);
+		/* Now the csum range covers our bytenr, copy the csum */
+		found = true;
+		index = div_u64(logical - sum->bytenr, sectorsize);
+		num_sectors = sum->len / sectorsize;
 
-	num_sectors = sum->len / sctx->fs_info->sectorsize;
-	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
-	if (index == num_sectors - 1) {
-		list_del(&sum->list);
-		kfree(sum);
+		memcpy(csum, sum->sums + index * csum_size, csum_size);
+
+		/* Cleanup the range if we're at the end of the csum range */
+		if (index == num_sectors - 1)
+			drop_csum_range(sctx, sum);
+		break;
 	}
+	if (!found)
+		return 0;
 	return 1;
 }