@@ -58,6 +58,12 @@ struct xfs_fd {
 /* Only use FIEXCHANGE_RANGE for file data exchanges. */
 #define XFROG_FLAG_FORCE_FIEXCHANGE (1 << 3)
+/* Only call the kernel with the older one-at-a-time scrub ioctl. */
+#define XFROG_FLAG_SCRUB_FORCE_SINGLE (1 << 4)
+
+/* Only call the kernel with the vectored scrub ioctl; no fallback. */
+#define XFROG_FLAG_SCRUB_FORCE_VECTOR (1 << 5)
+
 /* Static initializers */
 #define XFS_FD_INIT(_fd) { .fd = (_fd), }
 #define XFS_FD_INIT_EMPTY XFS_FD_INIT(-1)
@@ -186,3 +186,127 @@ xfrog_scrub_metadata(
return 0;
}
+
+/* Return -ECANCELED if any scrub vector before this barrier failed. */
+static inline int
+xfrog_scrubv_previous_failures(
+	struct xfs_scrub_vec_head	*vhead,
+	struct xfs_scrub_vec		*barrier_vec)
+{
+	struct xfs_scrub_vec		*v;
+	__u32				failmask;
+
+	failmask = barrier_vec->sv_flags & XFS_SCRUB_FLAGS_OUT;
+	for (v = vhead->svh_vecs; v < barrier_vec; v++) {
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER)
+			continue;
+
+		/*
+		 * Runtime errors count as a previous failure, except for the
+		 * codes that merely ask userspace to retry the call.
+		 */
+		if (v->sv_ret && v->sv_ret != -EBUSY && v->sv_ret != -ENOENT &&
+		    v->sv_ret != -EUSERS)
+			return -ECANCELED;
+
+		/*
+		 * If any of the out-flags on the scrub vector match the mask
+		 * that was set on the barrier vector, that's a previous fail.
+		 */
+		if (v->sv_flags & failmask)
+			return -ECANCELED;
+	}
+
+	return 0;
+}
+
+static int
+xfrog_scrubv_fallback(
+	struct xfs_fd			*xfd,
+	struct xfs_scrub_vec_head	*vhead)
+{
+	struct xfs_scrub_vec		*v;
+	unsigned int			i;
+
+	if (vhead->svh_flags & ~XFS_SCRUB_VEC_FLAGS_ALL)
+		return -EINVAL;
+	for (i = 0, v = vhead->svh_vecs; i < vhead->svh_nr; i++, v++) {
+		if (v->sv_reserved)
+			return -EINVAL;
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER &&
+		    (v->sv_flags & ~XFS_SCRUB_FLAGS_OUT))
+			return -EINVAL;
+	}
+
+	/* Run all the scrubbers. */
+	for (i = 0, v = vhead->svh_vecs; i < vhead->svh_nr; i++, v++) {
+		struct xfs_scrub_metadata	sm = {
+			.sm_type		= v->sv_type,
+			.sm_flags		= v->sv_flags,
+			.sm_ino			= vhead->svh_ino,
+			.sm_gen			= vhead->svh_gen,
+			.sm_agno		= vhead->svh_agno,
+		};
+		struct timespec	tv;
+
+		if (v->sv_type == XFS_SCRUB_TYPE_BARRIER) {
+			v->sv_ret = xfrog_scrubv_previous_failures(vhead, v);
+			if (v->sv_ret)
+				break;
+			continue;
+		}
+
+		v->sv_ret = xfrog_scrub_metadata(xfd, &sm);
+		v->sv_flags = sm.sm_flags;
+
+		if (vhead->svh_rest_us) {
+			tv.tv_sec = vhead->svh_rest_us / 1000000; /* whole seconds */
+			tv.tv_nsec = (vhead->svh_rest_us % 1000000) * 1000; /* keep tv_nsec < 1e9 */
+			nanosleep(&tv, NULL);
+		}
+	}
+
+	return 0;
+}
+
+/* Invoke the vectored scrub ioctl; returns 0 or a negative errno. */
+static int
+xfrog_scrubv_call(
+	struct xfs_fd			*xfd,
+	struct xfs_scrub_vec_head	*vhead)
+{
+	int				ret;
+
+	ret = ioctl(xfd->fd, XFS_IOC_SCRUBV_METADATA, vhead);
+	if (ret)
+		return -errno;
+
+	return 0;
+}
+
+/* Run a vectored scrub, with single-call fallback. Returns zero or negative error code. */
+int
+xfrog_scrubv_metadata(
+	struct xfs_fd			*xfd,
+	struct xfs_scrub_vec_head	*vhead)
+{
+	int				error = 0;
+
+	if (xfd->flags & XFROG_FLAG_SCRUB_FORCE_SINGLE)
+		goto try_single;
+
+	error = xfrog_scrubv_call(xfd, vhead);
+	if (error == 0 || (xfd->flags & XFROG_FLAG_SCRUB_FORCE_VECTOR))
+		return error;
+
+	/* Kernel doesn't know the vectored ioctl; latch single mode. */
+	switch (error) {
+	case -EOPNOTSUPP:
+	case -ENOTTY:
+		xfd->flags |= XFROG_FLAG_SCRUB_FORCE_SINGLE;
+		break;
+	}
+
+try_single:
+	return xfrog_scrubv_fallback(xfd, vhead);
+}
@@ -28,5 +28,6 @@ struct xfrog_scrub_descr {
extern const struct xfrog_scrub_descr xfrog_scrubbers[XFS_SCRUB_TYPE_NR];
int xfrog_scrub_metadata(struct xfs_fd *xfd, struct xfs_scrub_metadata *meta);
+int xfrog_scrubv_metadata(struct xfs_fd *xfd, struct xfs_scrub_vec_head *vhead);
#endif /* __LIBFROG_SCRUB_H__ */