[3/8] btrfs: scrub: use flexible array for scrub_page::csums

Message ID 20201026071115.57225-4-wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: scrub: support subpage scrub (completely independent version)

Commit Message

Qu Wenruo Oct. 26, 2020, 7:11 a.m. UTC
There are several factors affecting how many checksum bytes are needed
for one scrub_page:

- Sector size and page size
  For the subpage case, one page can contain several sectors, thus the
  csum size per page will differ.

- Checksum size
  Btrfs now supports different csum sizes, varying from 4 bytes for
  CRC32 to 32 bytes for SHA256.

So instead of a fixed BTRFS_CSUM_SIZE array, use a flexible array for
scrub_page::csums and determine its size at scrub_page allocation time.
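
To illustrate the flexible array member pattern in isolation, here is a
minimal userspace sketch (all names and sizes are illustrative
stand-ins, not taken from the kernel code; calloc() plays the role of
kzalloc()):

  #include <stdio.h>
  #include <stdlib.h>
  #include <stdint.h>

  /* Illustrative values: 64K pages, 4K sectors, CRC32 csums */
  #define DEMO_PAGE_SIZE  65536u
  #define DEMO_SECTORSIZE  4096u
  #define DEMO_CSUM_SIZE      4u

  struct demo_scrub_page {
  	uint64_t logical;   /* stands in for the fixed members */
  	uint8_t  csums[];   /* flexible array member, sized at alloc time */
  };

  int main(void)
  {
  	size_t nsectors = DEMO_PAGE_SIZE / DEMO_SECTORSIZE;
  	size_t size = sizeof(struct demo_scrub_page) +
  		      nsectors * DEMO_CSUM_SIZE;
  	struct demo_scrub_page *spage = calloc(1, size);

  	if (!spage)
  		return 1;
  	printf("%zu sectors/page -> %zu csum bytes\n",
  	       nsectors, nsectors * (size_t)DEMO_CSUM_SIZE);
  	free(spage);
  	return 0;
  }

The new alloc_scrub_page() helper in the patch below does the same
calculation with the real fs_info values.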

This not only provides the basis for later subpage scrub support, but
also reduces the memory usage for default btrfs on x86_64.
As the default CRC32 csum only uses 4 bytes, we save 28 bytes for each
scrub_page.
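
For example, on x86_64 with 4K pages and 4K sectors there is one csum
slot per page, so CRC32 needs only 4 of the previously fixed 32 bytes
(BTRFS_CSUM_SIZE), saving 32 - 4 = 28 bytes per scrub_page. A subpage
setup with 64K pages and 4K sectors would instead need 16 slots:
16 * 4 = 64 bytes for CRC32, or 16 * 32 = 512 bytes for SHA256.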

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 41 ++++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 11 deletions(-)

Comments

Josef Bacik Oct. 26, 2020, 2:23 p.m. UTC | #1
On 10/26/20 3:11 AM, Qu Wenruo wrote:
> [...]
> @@ -207,6 +212,19 @@ struct full_stripe_lock {
>   	struct mutex mutex;
>   };
>   
> +static struct scrub_page *alloc_scrub_page(struct scrub_ctx *sctx, gfp_t mask)
> +{
> +	u32 sectorsize = sctx->fs_info->sectorsize;
> +	size_t size;
> +
> +	/* No support for multi-page sector size yet */
> +	ASSERT(PAGE_SIZE >= sectorsize && IS_ALIGNED(PAGE_SIZE, sectorsize));

Checkpatch complains about this:

WARNING: Comparisons should place the constant on the right side of the test
#32: FILE: fs/btrfs/scrub.c:221:
+       ASSERT(PAGE_SIZE >= sectorsize && IS_ALIGNED(PAGE_SIZE, sectorsize));
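
A mechanical way to address that while keeping both checks is to swap
the operands so the constant ends up on the right, e.g. (just a
suggestion, a respin may fix it differently):

	ASSERT(sectorsize <= PAGE_SIZE && IS_ALIGNED(PAGE_SIZE, sectorsize));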


Once you fix that you can add

Reviewed-by: Josef Bacik <josef@toxicpanda.com>

Thanks,

Josef

Patch

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 3cb51d1f061f..321d6d457942 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -76,9 +76,14 @@  struct scrub_page {
 		unsigned int	have_csum:1;
 		unsigned int	io_error:1;
 	};
-	u8			csum[BTRFS_CSUM_SIZE];
-
 	struct scrub_recover	*recover;
+
+	/*
+	 * The csums size for the page is determined by page size,
+	 * sector size and csum size.
+	 * Thus the length has to be determined at runtime.
+	 */
+	u8			csums[];
 };
 
 struct scrub_bio {
@@ -207,6 +212,19 @@  struct full_stripe_lock {
 	struct mutex mutex;
 };
 
+static struct scrub_page *alloc_scrub_page(struct scrub_ctx *sctx, gfp_t mask)
+{
+	u32 sectorsize = sctx->fs_info->sectorsize;
+	size_t size;
+
+	/* No support for multi-page sector size yet */
+	ASSERT(PAGE_SIZE >= sectorsize && IS_ALIGNED(PAGE_SIZE, sectorsize));
+
+	size = sizeof(struct scrub_page);
+	size += (PAGE_SIZE / sectorsize) * sctx->csum_size;
+	return kzalloc(size, mask);
+}
+
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
@@ -1330,7 +1348,7 @@  static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			sblock = sblocks_for_recheck + mirror_index;
 			sblock->sctx = sctx;
 
-			spage = kzalloc(sizeof(*spage), GFP_NOFS);
+			spage = alloc_scrub_page(sctx, GFP_NOFS);
 			if (!spage) {
 leave_nomem:
 				spin_lock(&sctx->stat_lock);
@@ -1347,8 +1365,8 @@  static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 			spage->logical = logical;
 			spage->have_csum = have_csum;
 			if (have_csum)
-				memcpy(spage->csum,
-				       original_sblock->pagev[0]->csum,
+				memcpy(spage->csums,
+				       original_sblock->pagev[0]->csums,
 				       sctx->csum_size);
 
 			scrub_stripe_index_and_offset(logical,
@@ -1800,7 +1818,7 @@  static int scrub_checksum_data(struct scrub_block *sblock)
 	crypto_shash_init(shash);
 	crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
 
-	if (memcmp(csum, spage->csum, sctx->csum_size))
+	if (memcmp(csum, spage->csums, sctx->csum_size))
 		sblock->checksum_error = 1;
 
 	return sblock->checksum_error;
@@ -2180,7 +2198,7 @@  static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
+		spage = alloc_scrub_page(sctx, GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2202,7 +2220,7 @@  static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		spage->mirror_num = mirror_num;
 		if (csum) {
 			spage->have_csum = 1;
-			memcpy(spage->csum, csum, sctx->csum_size);
+			memcpy(spage->csums, csum, sctx->csum_size);
 		} else {
 			spage->have_csum = 0;
 		}
@@ -2487,7 +2505,9 @@  static int scrub_pages_for_parity(struct scrub_parity *sparity,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
+		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+
+		spage = alloc_scrub_page(sctx, GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2496,7 +2516,6 @@  static int scrub_pages_for_parity(struct scrub_parity *sparity,
 			scrub_block_put(sblock);
 			return -ENOMEM;
 		}
-		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
 		/* For scrub block */
 		scrub_page_get(spage);
 		sblock->pagev[index] = spage;
@@ -2512,7 +2531,7 @@  static int scrub_pages_for_parity(struct scrub_parity *sparity,
 		spage->mirror_num = mirror_num;
 		if (csum) {
 			spage->have_csum = 1;
-			memcpy(spage->csum, csum, sctx->csum_size);
+			memcpy(spage->csums, csum, sctx->csum_size);
 		} else {
 			spage->have_csum = 0;
 		}