@@ -217,6 +217,33 @@ static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
return sblock;
}
+/* Allocate a new scrub sector and attach it to @sblock */
+static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
+ gfp_t gfp)
+{
+ struct scrub_sector *ssector;
+
+ ssector = kzalloc(sizeof(*ssector), gfp);
+ if (!ssector)
+ return NULL;
+ ssector->page = alloc_page(gfp);
+ if (!ssector->page) {
+ kfree(ssector);
+ return NULL;
+ }
+ atomic_set(&ssector->refs, 1);
+ ssector->sblock = sblock;
+	/* The sector to be added should not be used */
+ ASSERT(sblock->sectorv[sblock->sector_count] == NULL);
+ /* And the sector count should be smaller than the limit */
+ ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
+
+ sblock->sectorv[sblock->sector_count] = ssector;
+ sblock->sector_count++;
+
+ return ssector;
+}
+
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck[]);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
@@ -1336,18 +1363,14 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
sblock = sblocks_for_recheck[mirror_index];
sblock->sctx = sctx;
- ssector = kzalloc(sizeof(*ssector), GFP_NOFS);
+ ssector = alloc_scrub_sector(sblock, GFP_NOFS);
if (!ssector) {
-leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
scrub_put_recover(fs_info, recover);
return -ENOMEM;
}
- scrub_sector_get(ssector);
- sblock->sectorv[sector_index] = ssector;
- ssector->sblock = sblock;
ssector->flags = flags;
ssector->generation = generation;
ssector->logical = logical;
@@ -1376,11 +1399,6 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
physical_for_dev_replace;
/* for missing devices, dev->bdev is NULL */
ssector->mirror_num = mirror_index + 1;
- sblock->sector_count++;
- ssector->page = alloc_page(GFP_NOFS);
- if (!ssector->page)
- goto leave_nomem;
-
scrub_get_recover(recover);
ssector->recover = recover;
}
@@ -2293,19 +2311,14 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
*/
u32 l = min(sectorsize, len);
- ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
+ ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
if (!ssector) {
-leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
scrub_block_put(sblock);
return -ENOMEM;
}
- ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
- scrub_sector_get(ssector);
- sblock->sectorv[index] = ssector;
- ssector->sblock = sblock;
ssector->dev = dev;
ssector->flags = flags;
ssector->generation = gen;
@@ -2319,10 +2332,6 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
} else {
ssector->have_csum = 0;
}
- sblock->sector_count++;
- ssector->page = alloc_page(GFP_KERNEL);
- if (!ssector->page)
- goto leave_nomem;
len -= l;
logical += l;
physical += l;
@@ -2637,23 +2646,18 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
for (index = 0; len > 0; index++) {
struct scrub_sector *ssector;
- ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
+ ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
if (!ssector) {
-leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
scrub_block_put(sblock);
return -ENOMEM;
}
- ASSERT(index < SCRUB_MAX_SECTORS_PER_BLOCK);
- /* For scrub block */
- scrub_sector_get(ssector);
sblock->sectorv[index] = ssector;
/* For scrub parity */
scrub_sector_get(ssector);
list_add_tail(&ssector->list, &sparity->ssectors);
- ssector->sblock = sblock;
ssector->dev = dev;
ssector->flags = flags;
ssector->generation = gen;
@@ -2666,11 +2670,6 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
} else {
ssector->have_csum = 0;
}
- sblock->sector_count++;
- ssector->page = alloc_page(GFP_KERNEL);
- if (!ssector->page)
- goto leave_nomem;
-
/* Iterate over the stripe range in sectorsize steps */
len -= sectorsize;
The allocation and initialization are shared by 3 call sites, and we're
going to change the initialization of some members in the upcoming
patches.

So extract the allocation and initialization of scrub_sector into a
helper, alloc_scrub_sector(), which will do the following work:

- Allocate the memory for scrub_sector

- Allocate a page for scrub_sector::page

- Initialize scrub_sector::refs to 1

- Attach the allocated scrub_sector to scrub_block
  The attachment is bidirectional, which means scrub_block::sectorv[]
  will be updated and scrub_sector::sblock will also be updated.

- Update scrub_block::sector_count and do an extra sanity check on it

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 61 ++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 31 deletions(-)
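
A sketch of the resulting call-site pattern (not part of the patch
itself): after this change a caller only invokes alloc_scrub_sector() and
fills in the per-sector members, while the memory allocation, the page
allocation, the refcount init and the bidirectional attachment all live in
the helper.  The function example_add_sector() and its parameter list
below are made up for illustration; only alloc_scrub_sector(), the
malloc_errors accounting and scrub_block_put() are taken from the diff
above.

/*
 * Hypothetical call site, mirroring the error handling of the three
 * converted call sites.  Illustration only, not part of the patch.
 */
static int example_add_sector(struct scrub_ctx *sctx,
			      struct scrub_block *sblock,
			      struct btrfs_device *dev, u64 flags, u64 gen)
{
	struct scrub_sector *ssector;

	/* Replaces the old kzalloc + alloc_page + get + attach sequence */
	ssector = alloc_scrub_sector(sblock, GFP_KERNEL);
	if (!ssector) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		/* Putting the block releases the sectors already attached */
		scrub_block_put(sblock);
		return -ENOMEM;
	}

	/* Only the members that differ per call site remain here */
	ssector->dev = dev;
	ssector->flags = flags;
	ssector->generation = gen;
	return 0;
}

Note the refcount convention this relies on: the helper initializes
scrub_sector::refs to 1, and that reference is the one held via
scrub_block::sectorv[].  This is why scrub_sectors_for_parity() above
still takes one extra scrub_sector_get() for the sparity list, but no
longer the "For scrub block" reference.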