
[03/19] xfs: refactor log recovery item dispatch for pass2 readahead functions

Message ID 158752118180.2140829.13805128567366066982.stgit@magnolia (mailing list archive)
State Superseded
Series xfs: refactor log recovery

Commit Message

Darrick J. Wong April 22, 2020, 2:06 a.m. UTC
From: Darrick J. Wong <darrick.wong@oracle.com>

Move the pass2 readahead code into the per-item source code files and use
the dispatch function to call them.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/xfs/libxfs/xfs_log_recover.h |   19 ++++++
 fs/xfs/xfs_buf_item.c           |   18 +++++
 fs/xfs/xfs_dquot_item.c         |   39 ++++++++++++
 fs/xfs/xfs_inode_item.c         |   28 ++++++++
 fs/xfs/xfs_log_recover.c        |  129 +--------------------------------------
 5 files changed, 108 insertions(+), 125 deletions(-)

Comments

Christoph Hellwig April 25, 2020, 6:19 p.m. UTC | #1
On Tue, Apr 21, 2020 at 07:06:21PM -0700, Darrick J. Wong wrote:
>  typedef enum xlog_recover_reorder (*xlog_recover_reorder_fn)(
>  		struct xlog_recover_item *item);
> +typedef void (*xlog_recover_ra_pass2_fn)(struct xlog *log,
> +		struct xlog_recover_item *item);

Same comment for this one - or the other two following.

> +/*
> + * This structure is used during recovery to record the buf log items which
> + * have been canceled and should not be replayed.
> + */
> +struct xfs_buf_cancel {
> +	xfs_daddr_t		bc_blkno;
> +	uint			bc_len;
> +	int			bc_refcount;
> +	struct list_head	bc_list;
> +};
> +
> +struct xfs_buf_cancel *xlog_peek_buffer_cancelled(struct xlog *log,
> +		xfs_daddr_t blkno, uint len, unsigned short flags);

None of the callers moved in this patch even needs the xfs_buf_cancel
structure, it just uses the return value as a boolean.  I think they
all should be switched to use xlog_check_buffer_cancelled instead, which
means that struct xfs_buf_cancel and xlog_peek_buffer_cancelled can stay
private in xfs_log_recover.c (and later xfs_buf_item.c).

> +STATIC void
> +xlog_recover_buffer_ra_pass2(
> +	struct xlog                     *log,
> +	struct xlog_recover_item        *item)
> +{
> +	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
> +	struct xfs_mount		*mp = log->l_mp;
> +
> +	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
> +			buf_f->blf_len, buf_f->blf_flags)) {
> +		return;
> +	}
> +
> +	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
> +				buf_f->blf_len, NULL);

Why not:

	if (!xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
				buf_f->blf_len, NULL);
	}

> +STATIC void
> +xlog_recover_dquot_ra_pass2(
> +	struct xlog			*log,
> +	struct xlog_recover_item	*item)
> +{
> +	struct xfs_mount	*mp = log->l_mp;
> +	struct xfs_disk_dquot	*recddq;
> +	struct xfs_dq_logformat	*dq_f;
> +	uint			type;
> +	int			len;
> +
> +
> +	if (mp->m_qflags == 0)

Double empty line above.

> +	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
> +		return;
> +
> +	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
> +			  &xfs_dquot_buf_ra_ops);

Same comment as above, no real need for the early return here.

> +	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
> +		return;
> +
> +	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
> +				ilfp->ilf_len, &xfs_inode_buf_ra_ops);

Here again.

> -	unsigned short			flags)
> +	unsigned short		flags)

Spurious whitespace change.

>  		case XLOG_RECOVER_PASS2:
> -			xlog_recover_ra_pass2(log, item);
> +			if (item->ri_type && item->ri_type->ra_pass2_fn)
> +				item->ri_type->ra_pass2_fn(log, item);

Shouldn't we ensure early on that we always have a valid ->ri_type?
Darrick J. Wong April 28, 2020, 8:54 p.m. UTC | #2
On Sat, Apr 25, 2020 at 11:19:29AM -0700, Christoph Hellwig wrote:
> On Tue, Apr 21, 2020 at 07:06:21PM -0700, Darrick J. Wong wrote:
> >  typedef enum xlog_recover_reorder (*xlog_recover_reorder_fn)(
> >  		struct xlog_recover_item *item);
> > +typedef void (*xlog_recover_ra_pass2_fn)(struct xlog *log,
> > +		struct xlog_recover_item *item);
> 
> Same comment for this one - or the other two following.

<nod> I'll skip the typedefs.
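
(For context, skipping the typedefs just means the ops struct spells out the
function pointer types directly; a minimal sketch of the shape being discussed,
not the actual hunk from the next revision:)

	struct xlog_recover_item_type {
		/* Sort key and optional per-item sort hook. */
		enum xlog_recover_reorder	reorder;
		enum xlog_recover_reorder	(*reorder_fn)(
				struct xlog_recover_item *item);

		/* Start readahead for pass2, if provided. */
		void				(*ra_pass2_fn)(struct xlog *log,
				struct xlog_recover_item *item);
	};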

> > +/*
> > + * This structure is used during recovery to record the buf log items which
> > + * have been canceled and should not be replayed.
> > + */
> > +struct xfs_buf_cancel {
> > +	xfs_daddr_t		bc_blkno;
> > +	uint			bc_len;
> > +	int			bc_refcount;
> > +	struct list_head	bc_list;
> > +};
> > +
> > +struct xfs_buf_cancel *xlog_peek_buffer_cancelled(struct xlog *log,
> > +		xfs_daddr_t blkno, uint len, unsigned short flags);
> 
> None of the callers moved in this patch even needs the xfs_buf_cancel
> structure, it just uses the return value as a boolean.  I think they
> all should be switched to use xlog_check_buffer_cancelled instead, which
> means that struct xfs_buf_cancel and xlog_peek_buffer_cancelled can stay
> private in xfs_log_recover.c (and later xfs_buf_item.c).

They now all use your new xlog_buf_readahead function.
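
(A minimal sketch of what such a helper could look like, assuming
xlog_check_buffer_cancelled keeps reporting a boolean result as suggested
above; the signature in the next revision may differ:)

	void
	xlog_buf_readahead(
		struct xlog		*log,
		xfs_daddr_t		blkno,
		uint			len,
		const struct xfs_buf_ops *ops)
	{
		/* Skip readahead for buffers whose replay was cancelled. */
		if (!xlog_check_buffer_cancelled(log, blkno, len, 0))
			xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
	}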

> > +STATIC void
> > +xlog_recover_buffer_ra_pass2(
> > +	struct xlog                     *log,
> > +	struct xlog_recover_item        *item)
> > +{
> > +	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
> > +	struct xfs_mount		*mp = log->l_mp;
> > +
> > +	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
> > +			buf_f->blf_len, buf_f->blf_flags)) {
> > +		return;
> > +	}
> > +
> > +	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
> > +				buf_f->blf_len, NULL);
> 
> Why not:
> 
> 	if (!xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
> 			buf_f->blf_len, buf_f->blf_flags)) {
> 		xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
> 				buf_f->blf_len, NULL);
> 	}
> 
> > +STATIC void
> > +xlog_recover_dquot_ra_pass2(
> > +	struct xlog			*log,
> > +	struct xlog_recover_item	*item)
> > +{
> > +	struct xfs_mount	*mp = log->l_mp;
> > +	struct xfs_disk_dquot	*recddq;
> > +	struct xfs_dq_logformat	*dq_f;
> > +	uint			type;
> > +	int			len;
> > +
> > +
> > +	if (mp->m_qflags == 0)
> 
> Double empty line above.
> 
> > +	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
> > +		return;
> > +
> > +	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
> > +			  &xfs_dquot_buf_ra_ops);
> 
> Same comment as above, no real need for the early return here.
> 
> > +	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
> > +		return;
> > +
> > +	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
> > +				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
> 
> Here again.

Changed to xlog_buf_readahead.
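
(With a helper along those lines, each readahead call site collapses to a
single call, e.g. for the inode case quoted above:)

	xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
			&xfs_inode_buf_ra_ops);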

> 
> > -	unsigned short			flags)
> > +	unsigned short		flags)
> 
> Spurious whitespace change.
> 
> >  		case XLOG_RECOVER_PASS2:
> > -			xlog_recover_ra_pass2(log, item);
> > +			if (item->ri_type && item->ri_type->ra_pass2_fn)
> > +				item->ri_type->ra_pass2_fn(log, item);
> 
> Shouldn't we ensure early on that we always have a valid ->ri_type?

Item sorting will bail out on unrecognized log item types, in which case
we will discard the transaction (and all its items) and abort the mount,
right?  That should suffice to ensure that we always have a valid
ri_type long before we get to starting readahead for pass 2.

--D
Christoph Hellwig April 29, 2020, 6:07 a.m. UTC | #3
On Tue, Apr 28, 2020 at 01:54:24PM -0700, Darrick J. Wong wrote:
> > > +			if (item->ri_type && item->ri_type->ra_pass2_fn)
> > > +				item->ri_type->ra_pass2_fn(log, item);
> > 
> > Shouldn't we ensure early on that we always have a valid ->ri_type?
> 
> Item sorting will bail out on unrecognized log item types, in which case
> we will discard the transaction (and all its items) and abort the mount,
> right?  That should suffice to ensure that we always have a valid
> ri_type long before we get to starting readahead for pass 2.

Yes, I think so.
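
(For illustration, the early validation agreed on here would sit wherever the
item type is resolved during sorting; a rough, hypothetical sketch using a
made-up lookup helper, not the series' actual code:)

	/* Hypothetical: resolve the ops table entry once, reject unknown types. */
	item->ri_type = xlog_find_item_type(ITEM_TYPE(item));
	if (!item->ri_type) {
		xfs_warn(log->l_mp, "unrecognized log item type 0x%x",
				ITEM_TYPE(item));
		return -EFSCORRUPTED;
	}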

Patch

diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 60a6afb93049..010fc22e5fdf 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -23,6 +23,8 @@  enum xlog_recover_reorder {
 
 typedef enum xlog_recover_reorder (*xlog_recover_reorder_fn)(
 		struct xlog_recover_item *item);
+typedef void (*xlog_recover_ra_pass2_fn)(struct xlog *log,
+		struct xlog_recover_item *item);
 
 struct xlog_recover_item_type {
 	/*
@@ -34,6 +36,9 @@  struct xlog_recover_item_type {
 	 */
 	enum xlog_recover_reorder	reorder;
 	xlog_recover_reorder_fn		reorder_fn;
+
+	/* Start readahead for pass2, if provided. */
+	xlog_recover_ra_pass2_fn	ra_pass2_fn;
 };
 
 extern const struct xlog_recover_item_type xlog_icreate_item_type;
@@ -88,4 +93,18 @@  struct xlog_recover {
 #define	XLOG_RECOVER_PASS1	1
 #define	XLOG_RECOVER_PASS2	2
 
+/*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+	xfs_daddr_t		bc_blkno;
+	uint			bc_len;
+	int			bc_refcount;
+	struct list_head	bc_list;
+};
+
+struct xfs_buf_cancel *xlog_peek_buffer_cancelled(struct xlog *log,
+		xfs_daddr_t blkno, uint len, unsigned short flags);
+
 #endif	/* __XFS_LOG_RECOVER_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index bf7480e18889..1ad514cc501c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1302,6 +1302,24 @@  xlog_buf_reorder_fn(
 	return XLOG_REORDER_BUFFER_LIST;
 }
 
+STATIC void
+xlog_recover_buffer_ra_pass2(
+	struct xlog                     *log,
+	struct xlog_recover_item        *item)
+{
+	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
+	struct xfs_mount		*mp = log->l_mp;
+
+	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
+			buf_f->blf_len, buf_f->blf_flags)) {
+		return;
+	}
+
+	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
+				buf_f->blf_len, NULL);
+}
+
 const struct xlog_recover_item_type xlog_buf_item_type = {
 	.reorder_fn		= xlog_buf_reorder_fn,
+	.ra_pass2_fn		= xlog_recover_buffer_ra_pass2,
 };
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 2558586b4d45..d4c794abf900 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -386,8 +386,47 @@  xfs_qm_qoff_logitem_init(
 	return qf;
 }
 
+STATIC void
+xlog_recover_dquot_ra_pass2(
+	struct xlog			*log,
+	struct xlog_recover_item	*item)
+{
+	struct xfs_mount	*mp = log->l_mp;
+	struct xfs_disk_dquot	*recddq;
+	struct xfs_dq_logformat	*dq_f;
+	uint			type;
+	int			len;
+
+
+	if (mp->m_qflags == 0)
+		return;
+
+	recddq = item->ri_buf[1].i_addr;
+	if (recddq == NULL)
+		return;
+	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
+		return;
+
+	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
+	ASSERT(type);
+	if (log->l_quotaoffs_flag & type)
+		return;
+
+	dq_f = item->ri_buf[0].i_addr;
+	ASSERT(dq_f);
+	ASSERT(dq_f->qlf_len == 1);
+
+	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
+	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
+		return;
+
+	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
+			  &xfs_dquot_buf_ra_ops);
+}
+
 const struct xlog_recover_item_type xlog_dquot_item_type = {
 	.reorder		= XLOG_REORDER_INODE_LIST,
+	.ra_pass2_fn		= xlog_recover_dquot_ra_pass2,
 };
 
 const struct xlog_recover_item_type xlog_quotaoff_item_type = {
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index b04e9c5330b7..bb0fe064aa1b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -846,6 +846,34 @@  xfs_inode_item_format_convert(
 	return 0;
 }
 
+STATIC void
+xlog_recover_inode_ra_pass2(
+	struct xlog                     *log,
+	struct xlog_recover_item        *item)
+{
+	struct xfs_inode_log_format	ilf_buf;
+	struct xfs_inode_log_format	*ilfp;
+	struct xfs_mount		*mp = log->l_mp;
+	int			error;
+
+	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
+		ilfp = item->ri_buf[0].i_addr;
+	} else {
+		ilfp = &ilf_buf;
+		memset(ilfp, 0, sizeof(*ilfp));
+		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
+		if (error)
+			return;
+	}
+
+	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
+		return;
+
+	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
+				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
+}
+
 const struct xlog_recover_item_type xlog_inode_item_type = {
 	.reorder		= XLOG_REORDER_INODE_LIST,
+	.ra_pass2_fn		= xlog_recover_inode_ra_pass2,
 };
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index e7a9f899f657..9ee8eb9b93a2 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -55,17 +55,6 @@  STATIC int
 xlog_do_recovery_pass(
         struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
 
-/*
- * This structure is used during recovery to record the buf log items which
- * have been canceled and should not be replayed.
- */
-struct xfs_buf_cancel {
-	xfs_daddr_t		bc_blkno;
-	uint			bc_len;
-	int			bc_refcount;
-	struct list_head	bc_list;
-};
-
 /*
  * Sector aligned buffer routines for buffer create/read/write/access
  */
@@ -2009,12 +1998,12 @@  xlog_recover_buffer_pass1(
  * entry in the buffer cancel record table. If it is, return the cancel
  * buffer structure to the caller.
  */
-STATIC struct xfs_buf_cancel *
+struct xfs_buf_cancel *
 xlog_peek_buffer_cancelled(
 	struct xlog		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
-	unsigned short			flags)
+	unsigned short		flags)
 {
 	struct list_head	*bucket;
 	struct xfs_buf_cancel	*bcp;
@@ -3900,117 +3889,6 @@  xlog_recover_do_icreate_pass2(
 				     length, be32_to_cpu(icl->icl_gen));
 }
 
-STATIC void
-xlog_recover_buffer_ra_pass2(
-	struct xlog                     *log,
-	struct xlog_recover_item        *item)
-{
-	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
-	struct xfs_mount		*mp = log->l_mp;
-
-	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
-			buf_f->blf_len, buf_f->blf_flags)) {
-		return;
-	}
-
-	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
-				buf_f->blf_len, NULL);
-}
-
-STATIC void
-xlog_recover_inode_ra_pass2(
-	struct xlog                     *log,
-	struct xlog_recover_item        *item)
-{
-	struct xfs_inode_log_format	ilf_buf;
-	struct xfs_inode_log_format	*ilfp;
-	struct xfs_mount		*mp = log->l_mp;
-	int			error;
-
-	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
-		ilfp = item->ri_buf[0].i_addr;
-	} else {
-		ilfp = &ilf_buf;
-		memset(ilfp, 0, sizeof(*ilfp));
-		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
-		if (error)
-			return;
-	}
-
-	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
-		return;
-
-	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
-				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
-}
-
-STATIC void
-xlog_recover_dquot_ra_pass2(
-	struct xlog			*log,
-	struct xlog_recover_item	*item)
-{
-	struct xfs_mount	*mp = log->l_mp;
-	struct xfs_disk_dquot	*recddq;
-	struct xfs_dq_logformat	*dq_f;
-	uint			type;
-	int			len;
-
-
-	if (mp->m_qflags == 0)
-		return;
-
-	recddq = item->ri_buf[1].i_addr;
-	if (recddq == NULL)
-		return;
-	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
-		return;
-
-	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
-	ASSERT(type);
-	if (log->l_quotaoffs_flag & type)
-		return;
-
-	dq_f = item->ri_buf[0].i_addr;
-	ASSERT(dq_f);
-	ASSERT(dq_f->qlf_len == 1);
-
-	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
-	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
-		return;
-
-	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
-			  &xfs_dquot_buf_ra_ops);
-}
-
-STATIC void
-xlog_recover_ra_pass2(
-	struct xlog			*log,
-	struct xlog_recover_item	*item)
-{
-	switch (ITEM_TYPE(item)) {
-	case XFS_LI_BUF:
-		xlog_recover_buffer_ra_pass2(log, item);
-		break;
-	case XFS_LI_INODE:
-		xlog_recover_inode_ra_pass2(log, item);
-		break;
-	case XFS_LI_DQUOT:
-		xlog_recover_dquot_ra_pass2(log, item);
-		break;
-	case XFS_LI_EFI:
-	case XFS_LI_EFD:
-	case XFS_LI_QUOTAOFF:
-	case XFS_LI_RUI:
-	case XFS_LI_RUD:
-	case XFS_LI_CUI:
-	case XFS_LI_CUD:
-	case XFS_LI_BUI:
-	case XFS_LI_BUD:
-	default:
-		break;
-	}
-}
-
 STATIC int
 xlog_recover_commit_pass1(
 	struct xlog			*log,
@@ -4147,7 +4025,8 @@  xlog_recover_commit_trans(
 			error = xlog_recover_commit_pass1(log, trans, item);
 			break;
 		case XLOG_RECOVER_PASS2:
-			xlog_recover_ra_pass2(log, item);
+			if (item->ri_type && item->ri_type->ra_pass2_fn)
+				item->ri_type->ra_pass2_fn(log, item);
 			list_move_tail(&item->ri_list, &ra_list);
 			items_queued++;
 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {