@@ -476,6 +476,11 @@ xfs_scrub_agf(
/* AGFL */
+struct xfs_scrub_agfl_info {
+ unsigned int nr_entries;
+ xfs_agblock_t *entries;
+};
+
/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
@@ -484,20 +489,39 @@ xfs_scrub_agfl_block(
void *priv)
{
struct xfs_mount *mp = sc->mp;
+ struct xfs_scrub_agfl_info *sai = priv;
xfs_agnumber_t agno = sc->sa.agno;
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (xfs_verify_agbno(mp, agno, agbno))
+ sai->entries[sai->nr_entries++] = agbno;
+ else
xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
return 0;
}
+static int
+xfs_scrub_agblock_cmp(
+ const void *pa,
+ const void *pb)
+{
+ const xfs_agblock_t *a = pa;
+ const xfs_agblock_t *b = pb;
+
+ return (*a > *b) - (*a < *b);
+}
+
/* Scrub the AGFL. */
int
xfs_scrub_agfl(
struct xfs_scrub_context *sc)
{
+ struct xfs_scrub_agfl_info sai = {0};
+ struct xfs_agf *agf;
+ size_t sz;
xfs_agnumber_t agno;
+ unsigned int agflcount;
+ unsigned int i;
int error;
agno = sc->sa.agno = sc->sm->sm_agno;
@@ -508,8 +532,33 @@ xfs_scrub_agfl(
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
+ /* Allocate buffer to ensure uniqueness of AGFL entries. */
+ agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ agflcount = be32_to_cpu(agf->agf_flcount);
+ if (agflcount > sc->mp->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) {
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ goto out;
+ }
+ sz = sizeof(xfs_agblock_t) * agflcount;
+ sai.entries = kmem_zalloc(sz, KM_NOFS);
+
/* Check the blocks in the AGFL. */
- return xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, NULL);
+ error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
+ if (error)
+ goto out_free;
+
+ /* Sort entries, check for duplicates. */
+ sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
+ xfs_scrub_agblock_cmp, NULL);
+ for (i = 1; i < sai.nr_entries; i++) {
+ if (sai.entries[i] == sai.entries[i - 1]) {
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ break;
+ }
+ }
+
+out_free:
+ kmem_free(sai.entries);
out:
return error;
}