
[03/14] xfs: dynamically allocate btree scrub context structure

Message ID 163192856634.416199.12496831484611764326.stgit@magnolia (mailing list archive)
State Superseded, archived
Series xfs: support dynamic btree cursor height

Commit Message

Darrick J. Wong Sept. 18, 2021, 1:29 a.m. UTC
From: Darrick J. Wong <djwong@kernel.org>

Reorganize struct xchk_btree so that we can dynamically size the context
structure to fit the type of btree cursor that we have.  This will
enable us to use memory more efficiently once we start adding very tall
btree types.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/xfs/scrub/btree.c |   38 +++++++++++++++++---------------------
 fs/xfs/scrub/btree.h |   16 +++++++++++++---
 2 files changed, 30 insertions(+), 24 deletions(-)

Comments

Chandan Babu R Sept. 20, 2021, 9:53 a.m. UTC | #1
On 18 Sep 2021 at 06:59, Darrick J. Wong wrote:
> From: Darrick J. Wong <djwong@kernel.org>
>
> Reorganize struct xchk_btree so that we can dynamically size the context
> structure to fit the type of btree cursor that we have.  This will
> enable us to use memory more efficiently once we start adding very tall
> btree types.

The changes look good to me from the perspective of functional correctness.

Reviewed-by: Chandan Babu R <chandan.babu@oracle.com>

>
> Signed-off-by: Darrick J. Wong <djwong@kernel.org>
> ---
>  fs/xfs/scrub/btree.c |   38 +++++++++++++++++---------------------
>  fs/xfs/scrub/btree.h |   16 +++++++++++++---
>  2 files changed, 30 insertions(+), 24 deletions(-)
>
>
> diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
> index 26dcb4691e31..7b7762ae22e5 100644
> --- a/fs/xfs/scrub/btree.c
> +++ b/fs/xfs/scrub/btree.c
> @@ -141,9 +141,10 @@ xchk_btree_rec(
>  	trace_xchk_btree_rec(bs->sc, cur, 0);
>  
>  	/* If this isn't the first record, are they in order? */
> -	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
> +	if (bs->levels[0].has_lastkey &&
> +	    !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
>  		xchk_btree_set_corrupt(bs->sc, cur, 0);
> -	bs->firstrec = false;
> +	bs->levels[0].has_lastkey = true;
>  	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
>  
>  	if (cur->bc_nlevels == 1)
> @@ -188,11 +189,11 @@ xchk_btree_key(
>  	trace_xchk_btree_key(bs->sc, cur, level);
>  
>  	/* If this isn't the first key, are they in order? */
> -	if (!bs->firstkey[level] &&
> -	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
> +	if (bs->levels[level].has_lastkey &&
> +	    !cur->bc_ops->keys_inorder(cur, &bs->levels[level].lastkey, key))
>  		xchk_btree_set_corrupt(bs->sc, cur, level);
> -	bs->firstkey[level] = false;
> -	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
> +	bs->levels[level].has_lastkey = true;
> +	memcpy(&bs->levels[level].lastkey, key, cur->bc_ops->key_len);
>  
>  	if (level + 1 >= cur->bc_nlevels)
>  		return;
> @@ -632,38 +633,33 @@ xchk_btree(
>  	union xfs_btree_ptr		*pp;
>  	union xfs_btree_rec		*recp;
>  	struct xfs_btree_block		*block;
> -	int				level;
>  	struct xfs_buf			*bp;
>  	struct check_owner		*co;
>  	struct check_owner		*n;
> -	int				i;
> +	size_t				cur_sz;
> +	int				level;
>  	int				error = 0;
>  
>  	/*
>  	 * Allocate the btree scrub context from the heap, because this
> -	 * structure can get rather large.
> +	 * structure can get rather large.  Don't let a caller feed us a
> +	 * totally absurd size.
>  	 */
> -	bs = kmem_zalloc(sizeof(struct xchk_btree), KM_NOFS | KM_MAYFAIL);
> +	cur_sz = xchk_btree_sizeof(cur->bc_nlevels);
> +	if (cur_sz > PAGE_SIZE) {
> +		xchk_btree_set_corrupt(sc, cur, 0);
> +		return 0;
> +	}
> +	bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL);
>  	if (!bs)
>  		return -ENOMEM;
>  	bs->cur = cur;
>  	bs->scrub_rec = scrub_fn;
>  	bs->oinfo = oinfo;
> -	bs->firstrec = true;
>  	bs->private = private;
>  	bs->sc = sc;
> -
> -	/* Initialize scrub state */
> -	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
> -		bs->firstkey[i] = true;
>  	INIT_LIST_HEAD(&bs->to_check);
>  
> -	/* Don't try to check a tree with a height we can't handle. */
> -	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
> -		xchk_btree_set_corrupt(sc, cur, 0);
> -		goto out;
> -	}
> -
>  	/*
>  	 * Load the root of the btree.  The helper function absorbs
>  	 * error codes for us.
> diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
> index d5c0b0cbc505..7f8c54d8020e 100644
> --- a/fs/xfs/scrub/btree.h
> +++ b/fs/xfs/scrub/btree.h
> @@ -29,6 +29,11 @@ typedef int (*xchk_btree_rec_fn)(
>  	struct xchk_btree		*bs,
>  	const union xfs_btree_rec	*rec);
>  
> +struct xchk_btree_levels {
> +	union xfs_btree_key		lastkey;
> +	bool				has_lastkey;
> +};
> +
>  struct xchk_btree {
>  	/* caller-provided scrub state */
>  	struct xfs_scrub		*sc;
> @@ -39,12 +44,17 @@ struct xchk_btree {
>  
>  	/* internal scrub state */
>  	union xfs_btree_rec		lastrec;
> -	bool				firstrec;
> -	union xfs_btree_key		lastkey[XFS_BTREE_MAXLEVELS];
> -	bool				firstkey[XFS_BTREE_MAXLEVELS];
>  	struct list_head		to_check;
> +	struct xchk_btree_levels	levels[];
>  };
>  
> +static inline size_t
> +xchk_btree_sizeof(unsigned int levels)
> +{
> +	return sizeof(struct xchk_btree) +
> +				(levels * sizeof(struct xchk_btree_levels));
> +}
> +
>  int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
>  		xchk_btree_rec_fn scrub_fn, const struct xfs_owner_info *oinfo,
>  		void *private);
Christoph Hellwig Sept. 21, 2021, 8:43 a.m. UTC | #2
On Fri, Sep 17, 2021 at 06:29:26PM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <djwong@kernel.org>
> 
> Reorganize struct xchk_btree so that we can dynamically size the context
> structure to fit the type of btree cursor that we have.  This will
> enable us to use memory more efficiently once we start adding very tall
> btree types.

So bs->levels[0].has_lastkey replaces bs->firstkey?  Can you explain
a bit more how this works for someone not too familiar with the scrub
code.

> +static inline size_t
> +xchk_btree_sizeof(unsigned int levels)
> +{
> +	return sizeof(struct xchk_btree) +
> +				(levels * sizeof(struct xchk_btree_levels));

This should probably use struct_size().
Darrick J. Wong Sept. 22, 2021, 4:17 p.m. UTC | #3
On Tue, Sep 21, 2021 at 09:43:18AM +0100, Christoph Hellwig wrote:
> On Fri, Sep 17, 2021 at 06:29:26PM -0700, Darrick J. Wong wrote:
> > From: Darrick J. Wong <djwong@kernel.org>
> > 
> > Reorganize struct xchk_btree so that we can dynamically size the context
> > structure to fit the type of btree cursor that we have.  This will
> > enable us to use memory more efficiently once we start adding very tall
> > btree types.
> 
> So bs->levels[0].has_lastkey replaces bs->firstkey?  Can you explain
> a bit more how this works for someone not too familiar with the scrub
> code.

For each record and key that the btree scrubber encounters, it needs to
know if it should call ->{recs,keys}_inorder to check the ordering of
each item in the btree block.

Hmm.  Come to think of it, we could use "cur->bc_ptrs[level] > 0"
instead of tracking it separately.  Ok, that'll become a separate
cleanup patch to reduce memory further.  Good question!

> > +static inline size_t
> > +xchk_btree_sizeof(unsigned int levels)
> > +{
> > +	return sizeof(struct xchk_btree) +
> > +				(levels * sizeof(struct xchk_btree_levels));
> 
> This should probably use struct_size().

Assuming the macro is ok with being passed a typed null pointer:

	return struct_size((struct xchk_btree *)NULL, levels, nr_levels);

Then ok.
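
A minimal sketch of the helper rewritten that way (untested; the
parameter is renamed to nr_levels here only so it doesn't shadow the
levels[] flex-array member, and the cast null pointer is safe because
struct_size() only applies sizeof() to its arguments, assuming
<linux/overflow.h> is reachable from scrub/btree.h):

	static inline size_t
	xchk_btree_sizeof(unsigned int nr_levels)
	{
		/* sizeof(struct xchk_btree) + nr_levels * sizeof(levels[0]) */
		return struct_size((struct xchk_btree *)NULL, levels, nr_levels);
	}

struct_size() also saturates to SIZE_MAX on multiply/add overflow, so
the PAGE_SIZE sanity check in xchk_btree() would still reject absurd
level counts.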

--D

Patch

diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 26dcb4691e31..7b7762ae22e5 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -141,9 +141,10 @@  xchk_btree_rec(
 	trace_xchk_btree_rec(bs->sc, cur, 0);
 
 	/* If this isn't the first record, are they in order? */
-	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
+	if (bs->levels[0].has_lastkey &&
+	    !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
 		xchk_btree_set_corrupt(bs->sc, cur, 0);
-	bs->firstrec = false;
+	bs->levels[0].has_lastkey = true;
 	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
 
 	if (cur->bc_nlevels == 1)
@@ -188,11 +189,11 @@  xchk_btree_key(
 	trace_xchk_btree_key(bs->sc, cur, level);
 
 	/* If this isn't the first key, are they in order? */
-	if (!bs->firstkey[level] &&
-	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
+	if (bs->levels[level].has_lastkey &&
+	    !cur->bc_ops->keys_inorder(cur, &bs->levels[level].lastkey, key))
 		xchk_btree_set_corrupt(bs->sc, cur, level);
-	bs->firstkey[level] = false;
-	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
+	bs->levels[level].has_lastkey = true;
+	memcpy(&bs->levels[level].lastkey, key, cur->bc_ops->key_len);
 
 	if (level + 1 >= cur->bc_nlevels)
 		return;
@@ -632,38 +633,33 @@  xchk_btree(
 	union xfs_btree_ptr		*pp;
 	union xfs_btree_rec		*recp;
 	struct xfs_btree_block		*block;
-	int				level;
 	struct xfs_buf			*bp;
 	struct check_owner		*co;
 	struct check_owner		*n;
-	int				i;
+	size_t				cur_sz;
+	int				level;
 	int				error = 0;
 
 	/*
 	 * Allocate the btree scrub context from the heap, because this
-	 * structure can get rather large.
+	 * structure can get rather large.  Don't let a caller feed us a
+	 * totally absurd size.
 	 */
-	bs = kmem_zalloc(sizeof(struct xchk_btree), KM_NOFS | KM_MAYFAIL);
+	cur_sz = xchk_btree_sizeof(cur->bc_nlevels);
+	if (cur_sz > PAGE_SIZE) {
+		xchk_btree_set_corrupt(sc, cur, 0);
+		return 0;
+	}
+	bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL);
 	if (!bs)
 		return -ENOMEM;
 	bs->cur = cur;
 	bs->scrub_rec = scrub_fn;
 	bs->oinfo = oinfo;
-	bs->firstrec = true;
 	bs->private = private;
 	bs->sc = sc;
-
-	/* Initialize scrub state */
-	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
-		bs->firstkey[i] = true;
 	INIT_LIST_HEAD(&bs->to_check);
 
-	/* Don't try to check a tree with a height we can't handle. */
-	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
-		xchk_btree_set_corrupt(sc, cur, 0);
-		goto out;
-	}
-
 	/*
 	 * Load the root of the btree.  The helper function absorbs
 	 * error codes for us.
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index d5c0b0cbc505..7f8c54d8020e 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -29,6 +29,11 @@  typedef int (*xchk_btree_rec_fn)(
 	struct xchk_btree		*bs,
 	const union xfs_btree_rec	*rec);
 
+struct xchk_btree_levels {
+	union xfs_btree_key		lastkey;
+	bool				has_lastkey;
+};
+
 struct xchk_btree {
 	/* caller-provided scrub state */
 	struct xfs_scrub		*sc;
@@ -39,12 +44,17 @@  struct xchk_btree {
 
 	/* internal scrub state */
 	union xfs_btree_rec		lastrec;
-	bool				firstrec;
-	union xfs_btree_key		lastkey[XFS_BTREE_MAXLEVELS];
-	bool				firstkey[XFS_BTREE_MAXLEVELS];
 	struct list_head		to_check;
+	struct xchk_btree_levels	levels[];
 };
 
+static inline size_t
+xchk_btree_sizeof(unsigned int levels)
+{
+	return sizeof(struct xchk_btree) +
+				(levels * sizeof(struct xchk_btree_levels));
+}
+
 int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
 		xchk_btree_rec_fn scrub_fn, const struct xfs_owner_info *oinfo,
 		void *private);