@@ -99,21 +99,12 @@ scan_ag_metadata(
snprintf(descr, DESCR_BUFSZ, _("AG %u"), agno);
/*
- * First we scrub and fix the AG headers, because we need
- * them to work well enough to check the AG btrees.
+ * First we scrub and fix the AG headers, because we need them to work
+ * well enough to check the AG btrees. Then scrub the AG btrees.
*/
scrub_item_schedule_group(&sri, XFROG_SCRUB_GROUP_AGHEADER);
- ret = scrub_item_check(ctx, &sri);
- if (ret)
- goto err;
-
- /* Repair header damage. */
- ret = repair_item_corruption(ctx, &sri);
- if (ret)
- goto err;
-
- /* Now scrub the AG btrees. */
scrub_item_schedule_group(&sri, XFROG_SCRUB_GROUP_PERAG);
+
ret = scrub_item_check(ctx, &sri);
if (ret)
goto err;
@@ -145,25 +145,11 @@ scrub_inode(
/* Scrub the inode. */
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_INODE);
- error = scrub_item_check_file(ctx, &sri, fd);
- if (error)
- goto out;
-
- error = try_inode_repair(ictx, &sri, fd);
- if (error)
- goto out;
/* Scrub all block mappings. */
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_BMBTD);
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_BMBTA);
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_BMBTC);
- error = scrub_item_check_file(ctx, &sri, fd);
- if (error)
- goto out;
-
- error = try_inode_repair(ictx, &sri, fd);
- if (error)
- goto out;
/* Check everything accessible via file mapping. */
if (S_ISLNK(bstat->bs_mode))
@@ -173,11 +159,12 @@ scrub_inode(
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_XATTR);
scrub_item_schedule(&sri, XFS_SCRUB_TYPE_PARENT);
+
+ /* Try to check and repair the file while it's open. */
error = scrub_item_check_file(ctx, &sri, fd);
if (error)
goto out;
- /* Try to repair the file while it's open. */
error = try_inode_repair(ictx, &sri, fd);
if (error)
goto out;
@@ -323,6 +323,7 @@ repair_call_kernel(
struct scrubv_head bh = { };
struct xfs_scrub_vec *v;
unsigned int scrub_type;
+ bool need_barrier = false;
int error;
assert(!debug_tweak_on("XFS_SCRUB_NO_KERNEL"));
@@ -338,6 +339,11 @@ repair_call_kernel(
repair_flags))
continue;
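+ /*
+ * An earlier repair in this vector is a dependency of the items
+ * that follow; emit a barrier so the kernel stops here if that
+ * repair did not succeed.
+ */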
+ if (need_barrier) {
+ scrub_vhead_add_barrier(&bh);
+ need_barrier = false;
+ }
+
scrub_vhead_add(&bh, sri, scrub_type, true);
if (sri->sri_state[scrub_type] & SCRUB_ITEM_NEEDSREPAIR)
@@ -350,6 +356,17 @@ repair_call_kernel(
dbg_printf("repair %s flags %xh tries %u\n", descr_render(&dsc),
sri->sri_state[scrub_type],
sri->sri_tries[scrub_type]);
+
+ /*
+ * One of the other scrub types depends on this one. Set us up
+ * to add a repair barrier if we decide to schedule a repair
+ * after this one. If XRM_FINAL_WARNING is set, this is our
+ * last chance to fix things, so we skip the barriers and just
+ * let everything run.
+ */
+ if (!(repair_flags & XRM_FINAL_WARNING) &&
+ (sri->sri_state[scrub_type] & SCRUB_ITEM_BARRIER))
+ need_barrier = true;
}
error = -xfrog_scrubv_metadata(xfdp, &bh.head);
@@ -357,6 +374,16 @@ repair_call_kernel(
return error;
foreach_bighead_vec(&bh, v) {
+ /* Deal with barriers separately. */
+ if (v->sv_type == XFS_SCRUB_TYPE_BARRIER) {
+ /*
+ * -ECANCELED means the kernel stopped here; everything
+ * after this barrier remains scheduled for a later retry.
+ */
+ if (v->sv_ret == -ECANCELED)
+ return 0;
+ if (v->sv_ret)
+ return -v->sv_ret;
+ continue;
+ }
+
error = repair_epilogue(ctx, &dsc, sri, repair_flags, v);
if (error)
return error;
@@ -445,7 +472,8 @@ repair_item_boost_priorities(
* bits are left untouched to force a rescan in phase 4.
*/
#define MUSTFIX_STATES (SCRUB_ITEM_CORRUPT | \
- SCRUB_ITEM_BOOST_REPAIR)
+ SCRUB_ITEM_BOOST_REPAIR | \
+ SCRUB_ITEM_BARRIER)
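+/* Keep barrier state so mustfix repairs preserve dependency ordering. */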
/*
* Figure out which AG metadata must be fixed before we can move on
* to the inode scan.
@@ -730,7 +758,7 @@ repair_item_class(
return 0;
if (ctx->mode == SCRUB_MODE_PREEN && !(repair_mask & SCRUB_ITEM_PREEN))
return 0;
- if (!scrub_item_schedule_work(sri, repair_mask))
+ if (!scrub_item_schedule_work(sri, repair_mask, repair_deps))
return 0;
/*
@@ -24,6 +24,35 @@
/* Online scrub and repair wrappers. */
+/*
+ * Bitmap showing the correctness dependencies between scrub types for
+ * checks; repairs use the separate repair_deps table. Dependencies cannot
+ * cross scrub groups.
+ */
+#define DEP(x) (1U << (x))
+static const unsigned int scrub_deps[XFS_SCRUB_TYPE_NR] = {
+ [XFS_SCRUB_TYPE_AGF] = DEP(XFS_SCRUB_TYPE_SB),
+ [XFS_SCRUB_TYPE_AGFL] = DEP(XFS_SCRUB_TYPE_SB) |
+ DEP(XFS_SCRUB_TYPE_AGF),
+ [XFS_SCRUB_TYPE_AGI] = DEP(XFS_SCRUB_TYPE_SB),
+ [XFS_SCRUB_TYPE_BNOBT] = DEP(XFS_SCRUB_TYPE_AGF),
+ [XFS_SCRUB_TYPE_CNTBT] = DEP(XFS_SCRUB_TYPE_AGF),
+ [XFS_SCRUB_TYPE_INOBT] = DEP(XFS_SCRUB_TYPE_AGI),
+ [XFS_SCRUB_TYPE_FINOBT] = DEP(XFS_SCRUB_TYPE_AGI),
+ [XFS_SCRUB_TYPE_RMAPBT] = DEP(XFS_SCRUB_TYPE_AGF),
+ [XFS_SCRUB_TYPE_REFCNTBT] = DEP(XFS_SCRUB_TYPE_AGF),
+ [XFS_SCRUB_TYPE_BMBTD] = DEP(XFS_SCRUB_TYPE_INODE),
+ [XFS_SCRUB_TYPE_BMBTA] = DEP(XFS_SCRUB_TYPE_INODE),
+ [XFS_SCRUB_TYPE_BMBTC] = DEP(XFS_SCRUB_TYPE_INODE),
+ [XFS_SCRUB_TYPE_DIR] = DEP(XFS_SCRUB_TYPE_BMBTD),
+ [XFS_SCRUB_TYPE_XATTR] = DEP(XFS_SCRUB_TYPE_BMBTA),
+ [XFS_SCRUB_TYPE_SYMLINK] = DEP(XFS_SCRUB_TYPE_BMBTD),
+ [XFS_SCRUB_TYPE_PARENT] = DEP(XFS_SCRUB_TYPE_BMBTD),
+ [XFS_SCRUB_TYPE_QUOTACHECK] = DEP(XFS_SCRUB_TYPE_UQUOTA) |
+ DEP(XFS_SCRUB_TYPE_GQUOTA) |
+ DEP(XFS_SCRUB_TYPE_PQUOTA),
+};
+#undef DEP
+
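+/*
+ * For example, scheduling checks of all the AG headers and AG btrees in a
+ * single scrub_item yields the vector [SB, barrier, AGF, barrier, AGFL,
+ * AGI, barrier, BNOBT, CNTBT, INOBT, FINOBT, RMAPBT, REFCNTBT]. A barrier
+ * is emitted only between a dependency and the next scheduled item; a type
+ * whose dependencies are already covered by an earlier barrier (the AGI
+ * here, or the btrees after BNOBT) needs no barrier of its own.
+ */
+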
/* Describe the current state of a vectored scrub. */
int
format_scrubv_descr(
@@ -251,6 +280,21 @@ scrub_vhead_add(
bighead->i = v - vhead->svh_vecs;
}
+/* Add a barrier to the scrub vector. */
+void
+scrub_vhead_add_barrier(
+ struct scrubv_head *bighead)
+{
+ struct xfs_scrub_vec_head *vhead = &bighead->head;
+ struct xfs_scrub_vec *v;
+
+ v = &vhead->svh_vecs[vhead->svh_nr++];
+ v->sv_type = XFS_SCRUB_TYPE_BARRIER;
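+ /*
+ * These flags act as a trip mask: the kernel stops at this
+ * barrier if a previous scrub in the same call reported any
+ * of them.
+ */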
+ v->sv_flags = XFS_SCRUB_OFLAG_CORRUPT | XFS_SCRUB_OFLAG_XFAIL |
+ XFS_SCRUB_OFLAG_XCORRUPT | XFS_SCRUB_OFLAG_INCOMPLETE;
+ bighead->i = v - vhead->svh_vecs;
+}
+
/* Do a read-only check of some metadata. */
static int
scrub_call_kernel(
@@ -262,6 +306,7 @@ scrub_call_kernel(
struct scrubv_head bh = { };
struct xfs_scrub_vec *v;
unsigned int scrub_type;
+ bool need_barrier = false;
int error;
assert(!debug_tweak_on("XFS_SCRUB_NO_KERNEL"));
@@ -272,8 +317,17 @@ scrub_call_kernel(
foreach_scrub_type(scrub_type) {
if (!(sri->sri_state[scrub_type] & SCRUB_ITEM_NEEDSCHECK))
continue;
+
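+ /*
+ * An earlier check in this vector is a dependency of the items
+ * that follow; emit a barrier so the kernel stops here if that
+ * check found problems.
+ */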
+ if (need_barrier) {
+ scrub_vhead_add_barrier(&bh);
+ need_barrier = false;
+ }
+
scrub_vhead_add(&bh, sri, scrub_type, false);
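+
+ /*
+ * One of the other scheduled scrub types depends on this one,
+ * so emit a barrier before the next item we add to the vector.
+ */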
+ if (sri->sri_state[scrub_type] & SCRUB_ITEM_BARRIER)
+ need_barrier = true;
+
dbg_printf("check %s flags %xh tries %u\n", descr_render(&dsc),
sri->sri_state[scrub_type],
sri->sri_tries[scrub_type]);
@@ -284,6 +338,16 @@ scrub_call_kernel(
return error;
foreach_bighead_vec(&bh, v) {
+ /* Deal with barriers separately. */
+ if (v->sv_type == XFS_SCRUB_TYPE_BARRIER) {
+ /*
+ * -ECANCELED means the kernel stopped here; everything
+ * after this barrier remains scheduled for a later retry.
+ */
+ if (v->sv_ret == -ECANCELED)
+ return 0;
+ if (v->sv_ret)
+ return -v->sv_ret;
+ continue;
+ }
+
error = scrub_epilogue(ctx, &dsc, sri, v);
if (error)
return error;
@@ -378,15 +442,25 @@ scrub_item_call_kernel_again(
bool
scrub_item_schedule_work(
struct scrub_item *sri,
- uint8_t state_flags)
+ uint8_t state_flags,
+ const unsigned int *schedule_deps)
{
unsigned int scrub_type;
unsigned int nr = 0;
foreach_scrub_type(scrub_type) {
+ unsigned int j;
+
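+ /* Clear barrier state left over from a previous scheduling pass. */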
+ sri->sri_state[scrub_type] &= ~SCRUB_ITEM_BARRIER;
+
if (!(sri->sri_state[scrub_type] & state_flags))
continue;
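+
+ /*
+ * Mark this type's dependencies so that a barrier gets emitted
+ * after each of them in the scrub vector.
+ */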
+ foreach_scrub_type(j) {
+ if (schedule_deps[scrub_type] & (1U << j))
+ sri->sri_state[j] |= SCRUB_ITEM_BARRIER;
+ }
+
sri->sri_tries[scrub_type] = SCRUB_ITEM_MAX_RETRIES;
nr++;
}
@@ -406,7 +480,7 @@ scrub_item_check_file(
struct xfs_fd *xfdp = &ctx->mnt;
int error = 0;
- if (!scrub_item_schedule_work(sri, SCRUB_ITEM_NEEDSCHECK))
+ if (!scrub_item_schedule_work(sri, SCRUB_ITEM_NEEDSCHECK, scrub_deps))
return 0;
/*
@@ -630,6 +704,9 @@ check_scrubv(
{
struct xfs_scrub_vec_head head = { };
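+ /* Debug knob: force each scrub request to be issued separately. */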
+ if (debug_tweak_on("XFS_SCRUB_FORCE_SINGLE"))
+ ctx->mnt.flags |= XFROG_FLAG_SCRUB_FORCE_SINGLE;
+
/* We set the fallback flag if this doesn't work. */
xfrog_scrubv_metadata(&ctx->mnt, &head);
}
@@ -30,6 +30,9 @@ enum xfrog_scrub_group;
/* This scrub type needs to be checked. */
#define SCRUB_ITEM_NEEDSCHECK (1 << 5)
+/* A barrier must be placed in the scrub vector after this type. */
+#define SCRUB_ITEM_BARRIER (1 << 6)
+
/* All of the state flags that we need to prioritize repair work. */
#define SCRUB_ITEM_REPAIR_ANY (SCRUB_ITEM_CORRUPT | \
SCRUB_ITEM_PREEN | \
@@ -135,6 +138,20 @@ scrub_item_check(struct scrub_ctx *ctx, struct scrub_item *sri)
return scrub_item_check_file(ctx, sri, -1);
}
+/* Count the number of metadata objects still needing a scrub. */
+static inline unsigned int
+scrub_item_count_needscheck(
+ const struct scrub_item *sri)
+{
+ unsigned int ret = 0;
+ unsigned int i;
+
+ foreach_scrub_type(i)
+ if (sri->sri_state[i] & SCRUB_ITEM_NEEDSCHECK)
+ ret++;
+ return ret;
+}
+
void scrub_report_preen_triggers(struct scrub_ctx *ctx);
bool can_scrub_fs_metadata(struct scrub_ctx *ctx);
@@ -27,6 +27,7 @@ void scrub_item_to_vhead(struct scrubv_head *bighead,
const struct scrub_item *sri);
void scrub_vhead_add(struct scrubv_head *bighead, const struct scrub_item *sri,
unsigned int scrub_type, bool repair);
+void scrub_vhead_add_barrier(struct scrubv_head *bighead);
int format_scrubv_descr(struct scrub_ctx *ctx, char *buf, size_t buflen,
void *where);
@@ -123,6 +124,7 @@ scrub_item_schedule_retry(struct scrub_item *sri, unsigned int scrub_type)
bool scrub_item_call_kernel_again(struct scrub_item *sri, uint8_t work_mask,
const struct scrub_item *old);
-bool scrub_item_schedule_work(struct scrub_item *sri, uint8_t state_flags);
+bool scrub_item_schedule_work(struct scrub_item *sri, uint8_t state_flags,
+ const unsigned int *schedule_deps);
#endif /* XFS_SCRUB_SCRUB_PRIVATE_H_ */