
[2/3] xfs_repair: simplify free space btree calculations in init_freespace_cursors

Message ID 159370362331.3579756.9359456822795462355.stgit@magnolia (mailing list archive)
State Accepted
Series xfs_repair: more fixes

Commit Message

Darrick J. Wong July 2, 2020, 3:27 p.m. UTC
From: Darrick J. Wong <darrick.wong@oracle.com>

Add a summary variable to the bulkload structure so that we can track
the number of blocks that have been reserved for a particular (btree)
bulkload operation.  Doing so enables us to simplify the logic in
init_freespace_cursors that deals with figuring out how many more blocks
we need to fill the bnobt/cntbt properly.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 repair/agbtree.c  |   33 +++++++++++++++++----------------
 repair/bulkload.c |    2 ++
 repair/bulkload.h |    3 +++
 3 files changed, 22 insertions(+), 16 deletions(-)
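
In short, the reworked loop keeps a running total of reserved blocks and compares it against the latest geometry estimate, topping up only the shortfall. Below is a small standalone C sketch of that deficit arithmetic; the names fake_rebuild and block_delta are illustrative stand-ins for the real repair/agbtree.c types and helpers, and the numbers are made up for the example.

#include <stdio.h>

/*
 * Simplified stand-in types: nr_reserved mirrors newbt.nr_reserved (blocks
 * already reserved for the bulk load) and nr_blocks mirrors bload.nr_blocks
 * (the latest estimate of how many blocks the btree will need).
 */
struct fake_rebuild {
	unsigned int	nr_reserved;
	unsigned int	nr_blocks;
};

/* Surplus (>= 0) or shortfall (< 0), in blocks. */
static int block_delta(const struct fake_rebuild *btr)
{
	/* The cast avoids unsigned wraparound when the reservation is short. */
	return (int)btr->nr_reserved - (int)btr->nr_blocks;
}

int main(void)
{
	struct fake_rebuild bno = { .nr_reserved = 10, .nr_blocks = 12 };
	struct fake_rebuild cnt = { .nr_reserved = 10, .nr_blocks = 9 };
	int delta_bno = block_delta(&bno);
	int delta_cnt = block_delta(&cnt);

	if (delta_bno >= 0 && delta_cnt >= 0) {
		/* Both btrees fit in what was reserved; report the surplus. */
		printf("done, %d extra blocks\n", delta_bno + delta_cnt);
	} else {
		/* Top up only the btree that came up short, then re-estimate. */
		if (delta_bno < 0)
			printf("reserve %d more bnobt blocks\n", -delta_bno);
		if (delta_cnt < 0)
			printf("reserve %d more cntbt blocks\n", -delta_cnt);
	}
	return 0;
}

In the patch itself this check sits inside the do/while loop in init_freespace_cursors(); once both deltas are non-negative the combined surplus is reported back through *extra_blocks.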

Comments

Allison Henderson July 6, 2020, 10:53 p.m. UTC | #1
On 7/2/20 8:27 AM, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Add a summary variable to the bulkload structure so that we can track
> the number of blocks that have been reserved for a particular (btree)
> bulkload operation.  Doing so enables us to simplify the logic in
> init_freespace_cursors that deals with figuring out how many more blocks
> we need to fill the bnobt/cntbt properly.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Alrighty, looks good
Reviewed-by: Allison Collins <allison.henderson@oracle.com>

Brian Foster July 7, 2020, 12:58 p.m. UTC | #2
On Thu, Jul 02, 2020 at 08:27:03AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Add a summary variable to the bulkload structure so that we can track
> the number of blocks that have been reserved for a particular (btree)
> bulkload operation.  Doing so enables us to simplify the logic in
> init_freespace_cursors that deals with figuring out how many more blocks
> we need to fill the bnobt/cntbt properly.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---

Nice simplification:

Reviewed-by: Brian Foster <bfoster@redhat.com>

Christoph Hellwig July 8, 2020, 6:40 a.m. UTC | #3
On Thu, Jul 02, 2020 at 08:27:03AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Add a summary variable to the bulkload structure so that we can track
> the number of blocks that have been reserved for a particular (btree)
> bulkload operation.  Doing so enables us to simplify the logic in
> init_freespace_cursors that deals with figuring out how many more blocks
> we need to fill the bnobt/cntbt properly.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/repair/agbtree.c b/repair/agbtree.c
index 339b1489..de8015ec 100644
--- a/repair/agbtree.c
+++ b/repair/agbtree.c
@@ -217,8 +217,6 @@  init_freespace_cursors(
 	struct bt_rebuild	*btr_bno,
 	struct bt_rebuild	*btr_cnt)
 {
-	unsigned int		bno_blocks;
-	unsigned int		cnt_blocks;
 	int			error;
 
 	init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_bno);
@@ -244,9 +242,7 @@  init_freespace_cursors(
 	 */
 	do {
 		unsigned int	num_freeblocks;
-
-		bno_blocks = btr_bno->bload.nr_blocks;
-		cnt_blocks = btr_cnt->bload.nr_blocks;
+		int		delta_bno, delta_cnt;
 
 		/* Compute how many bnobt blocks we'll need. */
 		error = -libxfs_btree_bload_compute_geometry(btr_bno->cur,
@@ -262,25 +258,30 @@  _("Unable to compute free space by block btree geometry, error %d.\n"), -error);
 			do_error(
 _("Unable to compute free space by length btree geometry, error %d.\n"), -error);
 
+		/*
+		 * Compute the deficit between the number of blocks reserved
+		 * and the number of blocks we think we need for the btree.
+		 */
+		delta_bno = (int)btr_bno->newbt.nr_reserved -
+				 btr_bno->bload.nr_blocks;
+		delta_cnt = (int)btr_cnt->newbt.nr_reserved -
+				 btr_cnt->bload.nr_blocks;
+
 		/* We don't need any more blocks, so we're done. */
-		if (bno_blocks >= btr_bno->bload.nr_blocks &&
-		    cnt_blocks >= btr_cnt->bload.nr_blocks)
+		if (delta_bno >= 0 && delta_cnt >= 0) {
+			*extra_blocks = delta_bno + delta_cnt;
 			break;
+		}
 
 		/* Allocate however many more blocks we need this time. */
-		if (bno_blocks < btr_bno->bload.nr_blocks)
-			reserve_btblocks(sc->mp, agno, btr_bno,
-					btr_bno->bload.nr_blocks - bno_blocks);
-		if (cnt_blocks < btr_cnt->bload.nr_blocks)
-			reserve_btblocks(sc->mp, agno, btr_cnt,
-					btr_cnt->bload.nr_blocks - cnt_blocks);
+		if (delta_bno < 0)
+			reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno);
+		if (delta_cnt < 0)
+			reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt);
 
 		/* Ok, now how many free space records do we have? */
 		*nr_extents = count_bno_extents_blocks(agno, &num_freeblocks);
 	} while (1);
-
-	*extra_blocks = (bno_blocks - btr_bno->bload.nr_blocks) +
-			(cnt_blocks - btr_cnt->bload.nr_blocks);
 }
 
 /* Rebuild the free space btrees. */
diff --git a/repair/bulkload.c b/repair/bulkload.c
index 81d67e62..8dd0a0c3 100644
--- a/repair/bulkload.c
+++ b/repair/bulkload.c
@@ -40,6 +40,8 @@  bulkload_add_blocks(
 	resv->len = len;
 	resv->used = 0;
 	list_add_tail(&resv->list, &bkl->resv_list);
+	bkl->nr_reserved += len;
+
 	return 0;
 }
 
diff --git a/repair/bulkload.h b/repair/bulkload.h
index 01f67279..a84e99b8 100644
--- a/repair/bulkload.h
+++ b/repair/bulkload.h
@@ -41,6 +41,9 @@  struct bulkload {
 
 	/* The last reservation we allocated from. */
 	struct bulkload_resv	*last_resv;
+
+	/* Number of blocks reserved via resv_list. */
+	unsigned int		nr_reserved;
 };
 
 #define for_each_bulkload_reservation(bkl, resv, n)	\
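
For completeness, a minimal sketch of how the new counter stays in sync with the reservation list; fake_bulkload, fake_resv and fake_add_blocks are simplified stand-ins for the structures and the bulkload_add_blocks() change in repair/bulkload.[ch], not the real definitions.

#include <stddef.h>

struct fake_resv {
	unsigned int		len;	/* blocks covered by this reservation */
	struct fake_resv	*next;
};

struct fake_bulkload {
	struct fake_resv	*resv_list;	/* stand-in for the real list head */
	unsigned int		nr_reserved;	/* running sum of resv->len */
};

/*
 * Every reservation appended to the list also bumps the running total, so
 * callers can compare one counter against the geometry estimate instead of
 * re-walking the list.
 */
static void fake_add_blocks(struct fake_bulkload *bkl, struct fake_resv *resv,
			    unsigned int len)
{
	resv->len = len;
	resv->next = bkl->resv_list;
	bkl->resv_list = resv;
	bkl->nr_reserved += len;
}

int main(void)
{
	struct fake_bulkload bkl = { .resv_list = NULL, .nr_reserved = 0 };
	struct fake_resv r1, r2;

	fake_add_blocks(&bkl, &r1, 4);
	fake_add_blocks(&bkl, &r2, 3);
	/* bkl.nr_reserved is now 7, matching the sum of the listed lengths. */
	return 0;
}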