
[02/10] libfrog: support vectored scrub

Message ID 171988123165.2012546.18193316836899391961.stgit@frogsfrogsfrogs (mailing list archive)
State New
Series: [01/10] man: document vectored scrub mode

Commit Message

Darrick J. Wong July 2, 2024, 1:22 a.m. UTC
From: Darrick J. Wong <djwong@kernel.org>

Enhance libfrog to support performing vectored metadata scrub: wrap the
XFS_IOC_SCRUBV_METADATA ioctl in a struct xfrog_scrubv helper API, and
fall back to looping over the single-item xfrog_scrub_metadata call on
kernels that do not support the vectored ioctl.
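
For illustration, a caller might queue several scrubbers against one
inode and let libfrog pick between the vectored ioctl and the
one-at-a-time fallback.  This is only a sketch: it assumes an already
opened struct xfs_fd (xfd) and an inode number/generation pair
(ino/gen), reuses the XFS_SCRUB_TYPE_INODE, XFS_SCRUB_TYPE_BMBTD, and
XFS_SCRUB_OFLAG_CORRUPT constants from the existing scrub UAPI, and
report() is a placeholder for whatever the caller does with each
result:

	struct xfrog_scrubv	scrubv = { };	/* zeroed, as required */
	struct xfs_scrub_vec	*v;
	unsigned int		i;
	int			error;

	xfrog_scrubv_init(&scrubv);
	scrubv.head.svh_ino = ino;
	scrubv.head.svh_gen = gen;

	/* Check the inode core record... */
	v = xfrog_scrubv_next_vector(&scrubv);
	v->sv_type = XFS_SCRUB_TYPE_INODE;

	/* ...stop here if the inode core was corrupt... */
	v = xfrog_scrubv_next_vector(&scrubv);
	v->sv_type = XFS_SCRUB_TYPE_BARRIER;
	v->sv_flags = XFS_SCRUB_OFLAG_CORRUPT;

	/* ...otherwise also check the data fork mappings. */
	v = xfrog_scrubv_next_vector(&scrubv);
	v->sv_type = XFS_SCRUB_TYPE_BMBTD;

	error = xfrog_scrubv_metadata(&xfd, &scrubv);
	if (error)
		return error;

	/* Examine the per-vector return codes and out-flags. */
	foreach_xfrog_scrubv_vec(&scrubv, i, v)
		report(v->sv_type, v->sv_ret, v->sv_flags);

(xfrog_scrubv_next_vector returns NULL once XFROG_SCRUBV_MAX_VECTORS
vectors have been queued; the three vectors above are well within that
limit, so the sketch omits the check.)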

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 libfrog/fsgeom.h |    6 ++
 libfrog/scrub.c  |  137 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 libfrog/scrub.h  |   35 ++++++++++++++
 3 files changed, 178 insertions(+)

Comments

Christoph Hellwig July 2, 2024, 6:56 a.m. UTC | #1
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/libfrog/fsgeom.h b/libfrog/fsgeom.h
index f327dc7d70d0..df2ca2a408e7 100644
--- a/libfrog/fsgeom.h
+++ b/libfrog/fsgeom.h
@@ -50,6 +50,12 @@  struct xfs_fd {
 /* Only use v5 bulkstat/inumbers ioctls. */
 #define XFROG_FLAG_BULKSTAT_FORCE_V5	(1 << 1)
 
+/* Only use the older one-at-a-time scrub ioctl. */
+#define XFROG_FLAG_SCRUB_FORCE_SINGLE	(1 << 2)
+
+/* Only use the vectored scrub ioctl. */
+#define XFROG_FLAG_SCRUB_FORCE_VECTOR	(1 << 3)
+
 /* Static initializers */
 #define XFS_FD_INIT(_fd)	{ .fd = (_fd), }
 #define XFS_FD_INIT_EMPTY	XFS_FD_INIT(-1)
diff --git a/libfrog/scrub.c b/libfrog/scrub.c
index a2146e228f5b..e233c0f9c8e1 100644
--- a/libfrog/scrub.c
+++ b/libfrog/scrub.c
@@ -171,3 +171,140 @@  xfrog_scrub_metadata(
 
 	return 0;
 }
+
+/* Decide if there have been any scrub failures up to this point. */
+static inline int
+xfrog_scrubv_check_barrier(
+	const struct xfs_scrub_vec	*vectors,
+	const struct xfs_scrub_vec	*stop_vec)
+{
+	const struct xfs_scrub_vec	*v;
+	__u32				failmask;
+
+	failmask = stop_vec->sv_flags & XFS_SCRUB_FLAGS_OUT;
+
+	for (v = vectors; v < stop_vec; v++) {
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER)
+			continue;
+
+		/*
+		 * Runtime errors count as a previous failure, except the ones
+		 * used to ask userspace to retry.
+		 */
+		switch (v->sv_ret) {
+		case -EBUSY:
+		case -ENOENT:
+		case -EUSERS:
+		case 0:
+			break;
+		default:
+			return -ECANCELED;
+		}
+
+		/*
+		 * If any of the out-flags on the scrub vector match the mask
+		 * that was set on the barrier vector, that's a previous fail.
+		 */
+		if (v->sv_flags & failmask)
+			return -ECANCELED;
+	}
+
+	return 0;
+}
+
+static int
+xfrog_scrubv_fallback(
+	struct xfs_fd			*xfd,
+	struct xfrog_scrubv		*scrubv)
+{
+	struct xfs_scrub_vec		*vectors = scrubv->vectors;
+	struct xfs_scrub_vec		*v;
+	unsigned int			i;
+
+	if (scrubv->head.svh_flags & ~XFS_SCRUB_VEC_FLAGS_ALL)
+		return -EINVAL;
+
+	foreach_xfrog_scrubv_vec(scrubv, i, v) {
+		if (v->sv_reserved)
+			return -EINVAL;
+
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER &&
+		    (v->sv_flags & ~XFS_SCRUB_FLAGS_OUT))
+			return -EINVAL;
+	}
+
+	/* Run all the scrubbers. */
+	foreach_xfrog_scrubv_vec(scrubv, i, v) {
+		struct xfs_scrub_metadata	sm = {
+			.sm_type		= v->sv_type,
+			.sm_flags		= v->sv_flags,
+			.sm_ino			= scrubv->head.svh_ino,
+			.sm_gen			= scrubv->head.svh_gen,
+			.sm_agno		= scrubv->head.svh_agno,
+		};
+		struct timespec	tv;
+
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER) {
+			v->sv_ret = xfrog_scrubv_check_barrier(vectors, v);
+			if (v->sv_ret)
+				break;
+			continue;
+		}
+
+		v->sv_ret = xfrog_scrub_metadata(xfd, &sm);
+		v->sv_flags = sm.sm_flags;
+
+		if (scrubv->head.svh_rest_us) {
+			tv.tv_sec = 0;
+			tv.tv_nsec = scrubv->head.svh_rest_us * 1000;
+			nanosleep(&tv, NULL);
+		}
+	}
+
+	return 0;
+}
+
+/* Invoke the vectored scrub ioctl. */
+static int
+xfrog_scrubv_call(
+	struct xfs_fd			*xfd,
+	struct xfs_scrub_vec_head	*vhead)
+{
+	int				ret;
+
+	ret = ioctl(xfd->fd, XFS_IOC_SCRUBV_METADATA, vhead);
+	if (ret)
+		return -errno;
+
+	return 0;
+}
+
+/* Invoke the vectored scrub ioctl.  Returns zero or negative error code. */
+int
+xfrog_scrubv_metadata(
+	struct xfs_fd			*xfd,
+	struct xfrog_scrubv		*scrubv)
+{
+	int				error = 0;
+
+	if (scrubv->head.svh_nr > XFROG_SCRUBV_MAX_VECTORS)
+		return -EINVAL;
+
+	if (xfd->flags & XFROG_FLAG_SCRUB_FORCE_SINGLE)
+		goto try_single;
+
+	error = xfrog_scrubv_call(xfd, &scrubv->head);
+	if (error == 0 || (xfd->flags & XFROG_FLAG_SCRUB_FORCE_VECTOR))
+		return error;
+
+	/* If the vectored scrub ioctl wasn't found, force single mode. */
+	switch (error) {
+	case -EOPNOTSUPP:
+	case -ENOTTY:
+		xfd->flags |= XFROG_FLAG_SCRUB_FORCE_SINGLE;
+		break;
+	}
+
+try_single:
+	return xfrog_scrubv_fallback(xfd, scrubv);
+}
diff --git a/libfrog/scrub.h b/libfrog/scrub.h
index 27230c62f71a..b564c0d7bd0f 100644
--- a/libfrog/scrub.h
+++ b/libfrog/scrub.h
@@ -28,4 +28,39 @@  extern const struct xfrog_scrub_descr xfrog_scrubbers[XFS_SCRUB_TYPE_NR];
 
 int xfrog_scrub_metadata(struct xfs_fd *xfd, struct xfs_scrub_metadata *meta);
 
+/*
+ * Allow enough space to call all scrub types with a barrier between each.
+ * This is overkill for every caller in xfsprogs.
+ */
+#define XFROG_SCRUBV_MAX_VECTORS	(XFS_SCRUB_TYPE_NR * 2)
+
+struct xfrog_scrubv {
+	struct xfs_scrub_vec_head	head;
+	struct xfs_scrub_vec		vectors[XFROG_SCRUBV_MAX_VECTORS];
+};
+
+/* Initialize a scrubv structure; callers must have zeroed @scrubv. */
+static inline void
+xfrog_scrubv_init(struct xfrog_scrubv *scrubv)
+{
+	scrubv->head.svh_vectors = (uintptr_t)scrubv->vectors;
+}
+
+/* Return the next free vector from the scrubv structure. */
+static inline struct xfs_scrub_vec *
+xfrog_scrubv_next_vector(struct xfrog_scrubv *scrubv)
+{
+	if (scrubv->head.svh_nr >= XFROG_SCRUBV_MAX_VECTORS)
+		return NULL;
+
+	return &scrubv->vectors[scrubv->head.svh_nr++];
+}
+
+#define foreach_xfrog_scrubv_vec(scrubv, i, vec) \
+	for ((i) = 0, (vec) = (scrubv)->vectors; \
+	     (i) < (scrubv)->head.svh_nr; \
+	     (i)++, (vec)++)
+
+int xfrog_scrubv_metadata(struct xfs_fd *xfd, struct xfrog_scrubv *scrubv);
+
 #endif	/* __LIBFROG_SCRUB_H__ */