From patchwork Thu Jul 27 22:21:40 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13330855 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CA2B2C0015E for ; Thu, 27 Jul 2023 22:21:47 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230333AbjG0WVr (ORCPT ); Thu, 27 Jul 2023 18:21:47 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43180 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229499AbjG0WVq (ORCPT ); Thu, 27 Jul 2023 18:21:46 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id AF656187 for ; Thu, 27 Jul 2023 15:21:41 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 4491B61F50 for ; Thu, 27 Jul 2023 22:21:41 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A66A5C433CC; Thu, 27 Jul 2023 22:21:40 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496500; bh=lJz26EQjmNymtQiJtUzRLYxUq0yEMsSiAoZkYkDgorw=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=WSxf+AbaxeF8i+Bg/4HoRUzB18O0fLMZz23+9C4X1zLzq2KQPjf26Fxjp0FBScE4e vVzCv2qxhe4tFhb50TDr7leijQm0WTRYff1PAb+bD4BV/P7FzBbiniYBqcv1F8igMZ JS+Y6086XMmvVv9B0pyAAGjvFn+GXvBlWbHs90YEjQczWBoRssbA5pb4FJeoE2HvgY tARk86hH7a+Pn0kLsgQ7rwsuEZnZ+oSueGBFNZHXg098KNhO3YmbN24BuyEkigVkb+ U8yTUIlqBY74MQ8XjeWvC0hiBM5pizaiuf23L3BjsTgh/IUG0W5zhqacH1vZJZ1roQ xNyfsFAQTaJ0w== Date: Thu, 27 Jul 2023 15:21:40 -0700 Subject: [PATCH 1/9] xfs: cull repair code that will never get used From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622743.921010.3781302624859700238.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong These two functions date from the era when I thought that we could rebuild btrees by creating an alternate root and adding records one by one. In other words, they predate the btree bulk loader. They're not necessary now, so remove them. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/repair.c | 83 ------------------------------------------------- fs/xfs/scrub/repair.h | 6 ---- fs/xfs/scrub/trace.h | 22 ------------- 3 files changed, 111 deletions(-) diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index ac6d8803e660c..eedb3863b4efd 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -297,89 +297,6 @@ xrep_calc_ag_resblks( return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz)); } -/* Allocate a block in an AG. 
*/ -int -xrep_alloc_ag_block( - struct xfs_scrub *sc, - const struct xfs_owner_info *oinfo, - xfs_fsblock_t *fsbno, - enum xfs_ag_resv_type resv) -{ - struct xfs_alloc_arg args = {0}; - xfs_agblock_t bno; - int error; - - switch (resv) { - case XFS_AG_RESV_AGFL: - case XFS_AG_RESV_RMAPBT: - error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp, - sc->sa.agf_bp, &bno, 1); - if (error) - return error; - if (bno == NULLAGBLOCK) - return -ENOSPC; - xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false); - *fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno); - if (resv == XFS_AG_RESV_RMAPBT) - xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno); - return 0; - default: - break; - } - - args.tp = sc->tp; - args.mp = sc->mp; - args.pag = sc->sa.pag; - args.oinfo = *oinfo; - args.minlen = 1; - args.maxlen = 1; - args.prod = 1; - args.resv = resv; - - error = xfs_alloc_vextent_this_ag(&args, sc->sa.pag->pag_agno); - if (error) - return error; - if (args.fsbno == NULLFSBLOCK) - return -ENOSPC; - ASSERT(args.len == 1); - *fsbno = args.fsbno; - - return 0; -} - -/* Initialize a new AG btree root block with zero entries. */ -int -xrep_init_btblock( - struct xfs_scrub *sc, - xfs_fsblock_t fsb, - struct xfs_buf **bpp, - xfs_btnum_t btnum, - const struct xfs_buf_ops *ops) -{ - struct xfs_trans *tp = sc->tp; - struct xfs_mount *mp = sc->mp; - struct xfs_buf *bp; - int error; - - trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb), - XFS_FSB_TO_AGBNO(mp, fsb), btnum); - - ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno); - error = xfs_trans_get_buf(tp, mp->m_ddev_targp, - XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0, - &bp); - if (error) - return error; - xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); - xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno); - xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF); - xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1); - bp->b_ops = ops; - *bpp = bp; - - return 0; -} - /* * Reconstructing per-AG Btrees * diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index dce791c679eeb..fdccad54936f5 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -23,12 +23,6 @@ int xrep_roll_ag_trans(struct xfs_scrub *sc); bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks, enum xfs_ag_resv_type type); xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc); -int xrep_alloc_ag_block(struct xfs_scrub *sc, - const struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno, - enum xfs_ag_resv_type resv); -int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb, - struct xfs_buf **bpp, xfs_btnum_t btnum, - const struct xfs_buf_ops *ops); struct xbitmap; struct xagb_bitmap; diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index b3894daeb86a9..9c8c7dd0f2622 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -827,28 +827,6 @@ TRACE_EVENT(xrep_refcount_extent_fn, __entry->refcount) ) -TRACE_EVENT(xrep_init_btblock, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, - xfs_btnum_t btnum), - TP_ARGS(mp, agno, agbno, btnum), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(uint32_t, btnum) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->btnum = btnum; - ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x btree %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS)) -) 
TRACE_EVENT(xrep_findroot_block, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, uint32_t magic, uint16_t level), From patchwork Thu Jul 27 22:21:55 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13330862 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D470EC00528 for ; Thu, 27 Jul 2023 22:22:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230504AbjG0WWB (ORCPT ); Thu, 27 Jul 2023 18:22:01 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43262 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229499AbjG0WV7 (ORCPT ); Thu, 27 Jul 2023 18:21:59 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 67861187 for ; Thu, 27 Jul 2023 15:21:57 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id E297C61F6A for ; Thu, 27 Jul 2023 22:21:56 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 4BE9AC433C8; Thu, 27 Jul 2023 22:21:56 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496516; bh=B9eHh7Hgs/lAPd8qbHSi4H+k9/0RJWhCrBFv4/Ps8iM=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=daGUuB5pKRoALIX3rdPeknIexDO3Gmp2n+7i5M0LuKJl/xnMKflZU42msQ48Xcoc7 bzTyGV9VKK/XTU4V4BloZITUnC0Bbwpn5+dAui3hLweYwaq9PTpDgOzk2CGP+nGLNi 4Zw/qoakKLPtBhFuW3AZyJ12LwaL/RR7c0McaM7xSOoPbc6mEQsshXAK5zKJh1RIvc s78LZAkLcWFSFpdNLXwJno6pzfJBL+f+tdThIw+lxJQY3bHNDaC/6EFUvmcIK+aHry mCjDq3crIfPReFPnJ9XLaQdVJmsrjLhoYbTCCNWlQei8B80AXChC8a5TJbTkWvMnmK sVCyt//DKpPJA== Date: Thu, 27 Jul 2023 15:21:55 -0700 Subject: [PATCH 2/9] xfs: move the post-repair block reaping code to a separate file From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622757.921010.2331752124094558744.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Reaping blocks after a repair is a complicated affair involving a lot of rmap btree lookups and figuring out if we're going to unmap or free old metadata blocks that might be crosslinked. Eventually, we will need to be able to reap per-AG metadata blocks, bmbt blocks from inode forks, garbage CoW staging extents, and (even later) blocks from btrees rooted in inodes. This results in a lot of reaping code, so we might as well split that off while it's easy. Signed-off-by: Darrick J. 
Wong --- fs/xfs/Makefile | 1 fs/xfs/scrub/agheader_repair.c | 1 fs/xfs/scrub/reap.c | 268 ++++++++++++++++++++++++++++++++++++++++ fs/xfs/scrub/reap.h | 13 ++ fs/xfs/scrub/repair.c | 232 ----------------------------------- fs/xfs/scrub/repair.h | 2 6 files changed, 283 insertions(+), 234 deletions(-) create mode 100644 fs/xfs/scrub/reap.c create mode 100644 fs/xfs/scrub/reap.h diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 16e4eb4312304..0a5cebb9802b1 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -173,6 +173,7 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y) xfs-y += $(addprefix scrub/, \ agheader_repair.o \ + reap.o \ repair.o \ ) endif diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index bbaa65422c4ff..c902a5dee57f5 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -26,6 +26,7 @@ #include "scrub/trace.h" #include "scrub/repair.h" #include "scrub/bitmap.h" +#include "scrub/reap.h" /* Superblock */ diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c new file mode 100644 index 0000000000000..774dd8a12b2a6 --- /dev/null +++ b/fs/xfs/scrub/reap.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022-2023 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_refcount_btree.h" +#include "xfs_extent_busy.h" +#include "xfs_ag.h" +#include "xfs_ag_resv.h" +#include "xfs_quota.h" +#include "xfs_qm.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" +#include "scrub/reap.h" + +/* + * Disposal of Blocks from Old Metadata + * + * Now that we've constructed a new btree to replace the damaged one, we want + * to dispose of the blocks that (we think) the old btree was using. + * Previously, we used the rmapbt to collect the extents (bitmap) with the + * rmap owner corresponding to the tree we rebuilt, collected extents for any + * blocks with the same rmap owner that are owned by another data structure + * (sublist), and subtracted sublist from bitmap. In theory the extents + * remaining in bitmap are the old btree's blocks. + * + * Unfortunately, it's possible that the btree was crosslinked with other + * blocks on disk. The rmap data can tell us if there are multiple owners, so + * if the rmapbt says there is an owner of this block other than @oinfo, then + * the block is crosslinked. Remove the reverse mapping and continue. + * + * If there is one rmap record, we can free the block, which removes the + * reverse mapping but doesn't add the block to the free space. Our repair + * strategy is to hope the other metadata objects crosslinked on this block + * will be rebuilt (atop different blocks), thereby removing all the cross + * links. + * + * If there are no rmap records at all, we also free the block. If the btree + * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't + * supposed to be a rmap record and everything is ok. 
For other btrees there + * had to have been an rmap entry for the block to have ended up on @bitmap, + * so if it's gone now there's something wrong and the fs will shut down. + * + * Note: If there are multiple rmap records with only the same rmap owner as + * the btree we're trying to rebuild and the block is indeed owned by another + * data structure with the same rmap owner, then the block will be in sublist + * and therefore doesn't need disposal. If there are multiple rmap records + * with only the same rmap owner but the block is not owned by something with + * the same rmap owner, the block will be freed. + * + * The caller is responsible for locking the AG headers for the entire rebuild + * operation so that nothing else can sneak in and change the AG state while + * we're not looking. We also assume that the caller already invalidated any + * buffers associated with @bitmap. + */ + +static int +xrep_invalidate_block( + uint64_t fsbno, + void *priv) +{ + struct xfs_scrub *sc = priv; + struct xfs_buf *bp; + int error; + + /* Skip AG headers and post-EOFS blocks */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + return 0; + + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); + if (error) + return 0; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + return 0; +} + +/* + * Invalidate buffers for per-AG btree blocks we're dumping. This function + * is not intended for use with file data repairs; we have bunmapi for that. + */ +int +xrep_invalidate_blocks( + struct xfs_scrub *sc, + struct xbitmap *bitmap) +{ + /* + * For each block in each extent, see if there's an incore buffer for + * exactly that block; if so, invalidate it. The buffer cache only + * lets us look for one buffer at a time, so we have to look one block + * at a time. Avoid invalidating AG headers and post-EOFS blocks + * because we never own those; and if we can't TRYLOCK the buffer we + * assume it's owned by someone else. + */ + return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); +} + +/* Information about reaping extents after a repair. */ +struct xrep_reap_state { + struct xfs_scrub *sc; + + /* Reverse mapping owner and metadata reservation type. */ + const struct xfs_owner_info *oinfo; + enum xfs_ag_resv_type resv; +}; + +/* + * Put a block back on the AGFL. + */ +STATIC int +xrep_put_freelist( + struct xfs_scrub *sc, + xfs_agblock_t agbno) +{ + struct xfs_buf *agfl_bp; + int error; + + /* Make sure there's space on the freelist. */ + error = xrep_fix_freelist(sc, true); + if (error) + return error; + + /* + * Since we're "freeing" a lost block onto the AGFL, we have to + * create an rmap for the block prior to merging it or else other + * parts will break. + */ + error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, + &XFS_RMAP_OINFO_AG); + if (error) + return error; + + /* Put the block on the AGFL. */ + error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); + if (error) + return error; + + error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp, + agfl_bp, agbno, 0); + if (error) + return error; + xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, + XFS_EXTENT_BUSY_SKIP_DISCARD); + + return 0; +} + +/* Dispose of a single block. 
*/ +STATIC int +xrep_reap_block( + uint64_t fsbno, + void *priv) +{ + struct xrep_reap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; + struct xfs_btree_cur *cur; + struct xfs_buf *agf_bp = NULL; + xfs_agblock_t agbno; + bool has_other_rmap; + int error; + + ASSERT(sc->ip != NULL || + XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + trace_xrep_dispose_btree_extent(sc->mp, + XFS_FSB_TO_AGNO(sc->mp, fsbno), + XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); + + agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + + /* + * If we are repairing per-inode metadata, we need to read in the AGF + * buffer. Otherwise, we're repairing a per-AG structure, so reuse + * the AGF buffer that the setup functions already grabbed. + */ + if (sc->ip) { + error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); + if (error) + return error; + } else { + agf_bp = sc->sa.agf_bp; + } + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); + + /* Can we find any other rmappings? */ + error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, + &has_other_rmap); + xfs_btree_del_cursor(cur, error); + if (error) + goto out_free; + + /* + * If there are other rmappings, this block is cross linked and must + * not be freed. Remove the reverse mapping and move on. Otherwise, + * we were the only owner of the block, so free the extent, which will + * also remove the rmap. + * + * XXX: XFS doesn't support detecting the case where a single block + * metadata structure is crosslinked with a multi-block structure + * because the buffer cache doesn't detect aliasing problems, so we + * can't fix 100% of crosslinking problems (yet). The verifiers will + * blow on writeout, the filesystem will shut down, and the admin gets + * to run xfs_repair. + */ + if (has_other_rmap) + error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, + 1, rs->oinfo); + else if (rs->resv == XFS_AG_RESV_AGFL) + error = xrep_put_freelist(sc, agbno); + else + error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, + rs->resv); + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + if (error) + return error; + + if (sc->ip) + return xfs_trans_roll_inode(&sc->tp, sc->ip); + return xrep_roll_ag_trans(sc); + +out_free: + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + return error; +} + +/* Dispose of every block of every extent in the bitmap. */ +int +xrep_reap_extents( + struct xfs_scrub *sc, + struct xbitmap *bitmap, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type) +{ + struct xrep_reap_state rs = { + .sc = sc, + .oinfo = oinfo, + .resv = type, + }; + + ASSERT(xfs_has_rmapbt(sc->mp)); + + return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); +} diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h new file mode 100644 index 0000000000000..85c8d8a5fe389 --- /dev/null +++ b/fs/xfs/scrub/reap.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022-2023 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_REAP_H__ +#define __XFS_SCRUB_REAP_H__ + +int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *bitmap, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type); + +#endif /* __XFS_SCRUB_REAP_H__ */ diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index eedb3863b4efd..a3eddfcb42fc1 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -321,91 +321,8 @@ xrep_calc_ag_resblks( * sublist. 
As with the other btrees we subtract sublist from bitmap, and the * result (since the rmapbt lives in the free space) are the blocks from the * old rmapbt. - * - * Disposal of Blocks from Old per-AG Btrees - * - * Now that we've constructed a new btree to replace the damaged one, we want - * to dispose of the blocks that (we think) the old btree was using. - * Previously, we used the rmapbt to collect the extents (bitmap) with the - * rmap owner corresponding to the tree we rebuilt, collected extents for any - * blocks with the same rmap owner that are owned by another data structure - * (sublist), and subtracted sublist from bitmap. In theory the extents - * remaining in bitmap are the old btree's blocks. - * - * Unfortunately, it's possible that the btree was crosslinked with other - * blocks on disk. The rmap data can tell us if there are multiple owners, so - * if the rmapbt says there is an owner of this block other than @oinfo, then - * the block is crosslinked. Remove the reverse mapping and continue. - * - * If there is one rmap record, we can free the block, which removes the - * reverse mapping but doesn't add the block to the free space. Our repair - * strategy is to hope the other metadata objects crosslinked on this block - * will be rebuilt (atop different blocks), thereby removing all the cross - * links. - * - * If there are no rmap records at all, we also free the block. If the btree - * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't - * supposed to be a rmap record and everything is ok. For other btrees there - * had to have been an rmap entry for the block to have ended up on @bitmap, - * so if it's gone now there's something wrong and the fs will shut down. - * - * Note: If there are multiple rmap records with only the same rmap owner as - * the btree we're trying to rebuild and the block is indeed owned by another - * data structure with the same rmap owner, then the block will be in sublist - * and therefore doesn't need disposal. If there are multiple rmap records - * with only the same rmap owner but the block is not owned by something with - * the same rmap owner, the block will be freed. - * - * The caller is responsible for locking the AG headers for the entire rebuild - * operation so that nothing else can sneak in and change the AG state while - * we're not looking. We also assume that the caller already invalidated any - * buffers associated with @bitmap. */ -static int -xrep_invalidate_block( - uint64_t fsbno, - void *priv) -{ - struct xfs_scrub *sc = priv; - struct xfs_buf *bp; - int error; - - /* Skip AG headers and post-EOFS blocks */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) - return 0; - - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); - if (error) - return 0; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); - return 0; -} - -/* - * Invalidate buffers for per-AG btree blocks we're dumping. This function - * is not intended for use with file data repairs; we have bunmapi for that. - */ -int -xrep_invalidate_blocks( - struct xfs_scrub *sc, - struct xbitmap *bitmap) -{ - /* - * For each block in each extent, see if there's an incore buffer for - * exactly that block; if so, invalidate it. The buffer cache only - * lets us look for one buffer at a time, so we have to look one block - * at a time. 
Avoid invalidating AG headers and post-EOFS blocks - * because we never own those; and if we can't TRYLOCK the buffer we - * assume it's owned by someone else. - */ - return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); -} - /* Ensure the freelist is the correct size. */ int xrep_fix_freelist( @@ -424,155 +341,6 @@ xrep_fix_freelist( can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK); } -/* Information about reaping extents after a repair. */ -struct xrep_reap_state { - struct xfs_scrub *sc; - - /* Reverse mapping owner and metadata reservation type. */ - const struct xfs_owner_info *oinfo; - enum xfs_ag_resv_type resv; -}; - -/* - * Put a block back on the AGFL. - */ -STATIC int -xrep_put_freelist( - struct xfs_scrub *sc, - xfs_agblock_t agbno) -{ - struct xfs_buf *agfl_bp; - int error; - - /* Make sure there's space on the freelist. */ - error = xrep_fix_freelist(sc, true); - if (error) - return error; - - /* - * Since we're "freeing" a lost block onto the AGFL, we have to - * create an rmap for the block prior to merging it or else other - * parts will break. - */ - error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, - &XFS_RMAP_OINFO_AG); - if (error) - return error; - - /* Put the block on the AGFL. */ - error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); - if (error) - return error; - - error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp, - agfl_bp, agbno, 0); - if (error) - return error; - xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, - XFS_EXTENT_BUSY_SKIP_DISCARD); - - return 0; -} - -/* Dispose of a single block. */ -STATIC int -xrep_reap_block( - uint64_t fsbno, - void *priv) -{ - struct xrep_reap_state *rs = priv; - struct xfs_scrub *sc = rs->sc; - struct xfs_btree_cur *cur; - struct xfs_buf *agf_bp = NULL; - xfs_agblock_t agbno; - bool has_other_rmap; - int error; - - ASSERT(sc->ip != NULL || - XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - trace_xrep_dispose_btree_extent(sc->mp, - XFS_FSB_TO_AGNO(sc->mp, fsbno), - XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); - - agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - - /* - * If we are repairing per-inode metadata, we need to read in the AGF - * buffer. Otherwise, we're repairing a per-AG structure, so reuse - * the AGF buffer that the setup functions already grabbed. - */ - if (sc->ip) { - error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); - if (error) - return error; - } else { - agf_bp = sc->sa.agf_bp; - } - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); - - /* Can we find any other rmappings? */ - error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, - &has_other_rmap); - xfs_btree_del_cursor(cur, error); - if (error) - goto out_free; - - /* - * If there are other rmappings, this block is cross linked and must - * not be freed. Remove the reverse mapping and move on. Otherwise, - * we were the only owner of the block, so free the extent, which will - * also remove the rmap. - * - * XXX: XFS doesn't support detecting the case where a single block - * metadata structure is crosslinked with a multi-block structure - * because the buffer cache doesn't detect aliasing problems, so we - * can't fix 100% of crosslinking problems (yet). The verifiers will - * blow on writeout, the filesystem will shut down, and the admin gets - * to run xfs_repair. 
- */ - if (has_other_rmap) - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - else if (rs->resv == XFS_AG_RESV_AGFL) - error = xrep_put_freelist(sc, agbno); - else - error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, - rs->resv); - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - if (error) - return error; - - if (sc->ip) - return xfs_trans_roll_inode(&sc->tp, sc->ip); - return xrep_roll_ag_trans(sc); - -out_free: - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - return error; -} - -/* Dispose of every block of every extent in the bitmap. */ -int -xrep_reap_extents( - struct xfs_scrub *sc, - struct xbitmap *bitmap, - const struct xfs_owner_info *oinfo, - enum xfs_ag_resv_type type) -{ - struct xrep_reap_state rs = { - .sc = sc, - .oinfo = oinfo, - .resv = type, - }; - - ASSERT(xfs_has_rmapbt(sc->mp)); - - return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); -} - /* * Finding per-AG Btree Roots for AGF/AGI Reconstruction * diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index fdccad54936f5..601caa70f8703 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -29,8 +29,6 @@ struct xagb_bitmap; int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink); int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist); -int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist, - const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); struct xrep_find_ag_btree { /* in: rmap owner of the btree we're looking for */ From patchwork Thu Jul 27 22:22:11 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13330863 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A4BE9EB64DD for ; Thu, 27 Jul 2023 22:22:19 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231500AbjG0WWS (ORCPT ); Thu, 27 Jul 2023 18:22:18 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43610 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230407AbjG0WWR (ORCPT ); Thu, 27 Jul 2023 18:22:17 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 10B16187 for ; Thu, 27 Jul 2023 15:22:13 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 980A661F50 for ; Thu, 27 Jul 2023 22:22:12 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id F3D2EC433C7; Thu, 27 Jul 2023 22:22:11 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496532; bh=PB/elfiq8eiu4VqnIUPvUyu7reLdWofmSR9+Di2ws10=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=B5B1dh/+uJ7eAAGZJzUIKb1rZ4jCjuWiUIEMDrYvyvNUEtFvcyu1TN2TqyOhhZL4e wvtUlEEL/iHvxyCA4IrqIKx1by4T/jrBMa6o1hroU6kzXRgdDw55aONCnQ+S5k7PtM VHmwEO23abKtZy+9JDYzyRlJiILaXVMGZt9bE73SvoSb4kP2dTMG3Od1wm3QNVOBTv iylWlWBSRUg5j5MjLcaRvqz2/zYDuErAAWnjkMWGQ6QG1LRUGa2BCyQh0lJWpqX1PY 
/WoQcgvnPACmBRYZZAa8gfyinyQsahc9PlG1onuR1rV16aXziP48/96+16N0vI4q2Z mMHX5WcZ+Q0Kg== Date: Thu, 27 Jul 2023 15:22:11 -0700 Subject: [PATCH 3/9] xfs: only invalidate blocks if we're going to free them From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622772.921010.16279660343912714127.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong When we're discarding old btree blocks after a repair, only invalidate the buffers for the ones that we're freeing -- if the metadata was crosslinked with another data structure, we don't want to touch it. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 96 +++++++++++++++++++++---------------------------- fs/xfs/scrub/repair.h | 1 - 2 files changed, 42 insertions(+), 55 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 774dd8a12b2a6..b332b0e8e2594 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -70,54 +70,10 @@ * * The caller is responsible for locking the AG headers for the entire rebuild * operation so that nothing else can sneak in and change the AG state while - * we're not looking. We also assume that the caller already invalidated any - * buffers associated with @bitmap. + * we're not looking. We must also invalidate any buffers associated with + * @bitmap. */ -static int -xrep_invalidate_block( - uint64_t fsbno, - void *priv) -{ - struct xfs_scrub *sc = priv; - struct xfs_buf *bp; - int error; - - /* Skip AG headers and post-EOFS blocks */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) - return 0; - - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); - if (error) - return 0; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); - return 0; -} - -/* - * Invalidate buffers for per-AG btree blocks we're dumping. This function - * is not intended for use with file data repairs; we have bunmapi for that. - */ -int -xrep_invalidate_blocks( - struct xfs_scrub *sc, - struct xbitmap *bitmap) -{ - /* - * For each block in each extent, see if there's an incore buffer for - * exactly that block; if so, invalidate it. The buffer cache only - * lets us look for one buffer at a time, so we have to look one block - * at a time. Avoid invalidating AG headers and post-EOFS blocks - * because we never own those; and if we can't TRYLOCK the buffer we - * assume it's owned by someone else. - */ - return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); -} - /* Information about reaping extents after a repair. */ struct xrep_reap_state { struct xfs_scrub *sc; @@ -127,9 +83,7 @@ struct xrep_reap_state { enum xfs_ag_resv_type resv; }; -/* - * Put a block back on the AGFL. - */ +/* Put a block back on the AGFL. */ STATIC int xrep_put_freelist( struct xfs_scrub *sc, @@ -168,6 +122,37 @@ xrep_put_freelist( return 0; } +/* Try to invalidate the incore buffer for a block that we're about to free. */ +STATIC void +xrep_block_reap_binval( + struct xfs_scrub *sc, + xfs_fsblock_t fsbno) +{ + struct xfs_buf *bp = NULL; + int error; + + /* + * If there's an incore buffer for exactly this block, invalidate it. + * Avoid invalidating AG headers and post-EOFS blocks because we never + * own those. 
+ */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + return; + + /* + * We assume that the lack of any other known owners means that the + * buffer can be locked without risk of deadlocking. + */ + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), 0, &bp); + if (error) + return; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); +} + /* Dispose of a single block. */ STATIC int xrep_reap_block( @@ -225,14 +210,17 @@ xrep_reap_block( * blow on writeout, the filesystem will shut down, and the admin gets * to run xfs_repair. */ - if (has_other_rmap) - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - else if (rs->resv == XFS_AG_RESV_AGFL) + if (has_other_rmap) { + error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, 1, + rs->oinfo); + } else if (rs->resv == XFS_AG_RESV_AGFL) { + xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); - else + } else { + xrep_block_reap_binval(sc, fsbno); error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, rs->resv); + } if (agf_bp != sc->sa.agf_bp) xfs_trans_brelse(sc->tp, agf_bp); if (error) diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index 601caa70f8703..e01d63a4a93b4 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -28,7 +28,6 @@ struct xbitmap; struct xagb_bitmap; int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink); -int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist); struct xrep_find_ag_btree { /* in: rmap owner of the btree we're looking for */ From patchwork Thu Jul 27 22:22:27 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13330864 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1A3E9EB64DD for ; Thu, 27 Jul 2023 22:22:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230270AbjG0WWa (ORCPT ); Thu, 27 Jul 2023 18:22:30 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43718 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229499AbjG0WW3 (ORCPT ); Thu, 27 Jul 2023 18:22:29 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A22522D5E for ; Thu, 27 Jul 2023 15:22:28 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 3B6C161F6A for ; Thu, 27 Jul 2023 22:22:28 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 95E3CC433C8; Thu, 27 Jul 2023 22:22:27 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496547; bh=DjKvtcv1t6Hldv9itAGDM62bjgDMJg+I7bmheIURERM=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=hhmDte5KlwxdJIIXr+yBl5/dIEc9yNaUBb9jS2JS8M7GnFfLQj1ZiDjq2F/nGI6Pt Iqmja743N2OqqVID/31lJomTsDghqDAX0rEILBKiQ/GQ8isSnJQrDYHs5kShR0qak7 DjogxheKEHsi7AyduyB/eb2b2NAsm8wTVFMDgUKYAHbfezP2eFgoZ9ZF6zIkr1MJKM RnmS8CgyPmE6QRT+8u7XtquHCBfH8jBz0W2qsK+GrvmJJ26lQcfAsYXXHY3JCVMkXU 
hcAYbBEDzdfz0zKQ7pvTq7/vfG4qc7yhs2GqhexkSqBSqT3UWpjYbL29rGYeyUltlu AzARpnhYRcv7Q== Date: Thu, 27 Jul 2023 15:22:27 -0700 Subject: [PATCH 4/9] xfs: only allow reaping of per-AG blocks in xrep_reap_extents From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622786.921010.15243435568993927889.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Now that we've refactored btree cursors to require the caller to pass in a perag structure, there are numerous problems in xrep_reap_extents if it's being called to reap extents for an inode metadata repair. We don't have any repair functions that can do that, so drop the support for now. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 45 +++++++++++++-------------------------------- 1 file changed, 13 insertions(+), 32 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index b332b0e8e2594..bc180171d0cb7 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -162,40 +162,30 @@ xrep_reap_block( struct xrep_reap_state *rs = priv; struct xfs_scrub *sc = rs->sc; struct xfs_btree_cur *cur; - struct xfs_buf *agf_bp = NULL; + xfs_agnumber_t agno; xfs_agblock_t agbno; bool has_other_rmap; int error; - ASSERT(sc->ip != NULL || - XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - trace_xrep_dispose_btree_extent(sc->mp, - XFS_FSB_TO_AGNO(sc->mp, fsbno), - XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); - + agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - /* - * If we are repairing per-inode metadata, we need to read in the AGF - * buffer. Otherwise, we're repairing a per-AG structure, so reuse - * the AGF buffer that the setup functions already grabbed. - */ - if (sc->ip) { - error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); - if (error) - return error; - } else { - agf_bp = sc->sa.agf_bp; + trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1); + + /* We don't support reaping file extents yet. */ + if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { + ASSERT(0); + return -EFSCORRUPTED; } - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); + + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, sc->sa.pag); /* Can we find any other rmappings? */ error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, &has_other_rmap); xfs_btree_del_cursor(cur, error); if (error) - goto out_free; + return error; /* * If there are other rmappings, this block is cross linked and must @@ -211,8 +201,8 @@ xrep_reap_block( * to run xfs_repair. 
*/ if (has_other_rmap) { - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, 1, - rs->oinfo); + error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, + 1, rs->oinfo); } else if (rs->resv == XFS_AG_RESV_AGFL) { xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); @@ -221,19 +211,10 @@ xrep_reap_block( error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, rs->resv); } - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); if (error) return error; - if (sc->ip) - return xfs_trans_roll_inode(&sc->tp, sc->ip); return xrep_roll_ag_trans(sc); - -out_free: - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - return error; } /* Dispose of every block of every extent in the bitmap. */ From patchwork Thu Jul 27 22:22:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13330865 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4AEEDC0015E for ; Thu, 27 Jul 2023 22:22:47 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229499AbjG0WWq (ORCPT ); Thu, 27 Jul 2023 18:22:46 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43808 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230037AbjG0WWp (ORCPT ); Thu, 27 Jul 2023 18:22:45 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 4D24CF0 for ; Thu, 27 Jul 2023 15:22:44 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id DE42161EBC for ; Thu, 27 Jul 2023 22:22:43 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 46614C433C8; Thu, 27 Jul 2023 22:22:43 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496563; bh=t0hmGmkPwk5IJ2TYVvuG0sTEJzmE/A+wDMuIPKH4RWA=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=h3upYgUyAn+t35vFQzit1pUAgD1T2KlkYSVHrhzxq0CrWAMBprLsodxr2t+bmlRUE LjNggFIUbSfAtFPDglsJisGjFVgoXjwTfWBQKtnt11dmhoqiP/0UnlW9HwOmgpUBM5 +auYrzcTm9haMuFmQcz+QwmkcQX0KBPo/SjD71pfB5PsByNfyNw0Fq6WjymsTSa7GH O3r6hukv4yJXLDGURnbh9mZChAUZuMIFpn6ILVpq/9aRQT5qOwbLerUM/F0Hwf1lvZ 1pregRdtN7WKNTXhEkW/43TgCYvlORsYxuck8izKwFKcpOUQkOrnXEcd1/ymP3dIJF wo0a71yRajbTQ== Date: Thu, 27 Jul 2023 15:22:42 -0700 Subject: [PATCH 5/9] xfs: use deferred frees to reap old btree blocks From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622801.921010.8309169117326130251.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Use deferred frees (EFIs) to reap the blocks of a btree that we just replaced. 
This helps us to shrink the window in which those old blocks could be lost due to a system crash, though we try to flush the EFIs every few hundred blocks so that we don't also overflow the transaction reservations during and after we commit the new btree. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index bc180171d0cb7..9b0373dde7ab1 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -26,6 +26,7 @@ #include "xfs_ag_resv.h" #include "xfs_quota.h" #include "xfs_qm.h" +#include "xfs_bmap.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -81,6 +82,9 @@ struct xrep_reap_state { /* Reverse mapping owner and metadata reservation type. */ const struct xfs_owner_info *oinfo; enum xfs_ag_resv_type resv; + + /* Number of deferred reaps attached to the current transaction. */ + unsigned int deferred; }; /* Put a block back on the AGFL. */ @@ -165,6 +169,7 @@ xrep_reap_block( xfs_agnumber_t agno; xfs_agblock_t agbno; bool has_other_rmap; + bool need_roll = true; int error; agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); @@ -207,13 +212,25 @@ xrep_reap_block( xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); } else { + /* + * Use deferred frees to get rid of the old btree blocks to try + * to minimize the window in which we could crash and lose the + * old blocks. However, we still need to roll the transaction + * every 100 or so EFIs so that we don't exceed the log + * reservation. + */ xrep_block_reap_binval(sc, fsbno); - error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, - rs->resv); + error = __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, + rs->resv, true); + if (error) + return error; + rs->deferred++; + need_roll = rs->deferred > 100; } - if (error) + if (error || !need_roll) return error; + rs->deferred = 0; return xrep_roll_ag_trans(sc); } @@ -230,8 +247,13 @@ xrep_reap_extents( .oinfo = oinfo, .resv = type, }; + int error; ASSERT(xfs_has_rmapbt(sc->mp)); - return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); + error = xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); + if (error || rs.deferred == 0) + return error; + + return xrep_roll_ag_trans(sc); } From patchwork Thu Jul 27 22:22:58 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13330866 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id E11D7EB64DD for ; Thu, 27 Jul 2023 22:23:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231206AbjG0WXE (ORCPT ); Thu, 27 Jul 2023 18:23:04 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43848 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232483AbjG0WXB (ORCPT ); Thu, 27 Jul 2023 18:23:01 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EFB57F3 for ; Thu, 27 Jul 2023 15:22:59 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 8E2C261F6A for ; Thu, 27 Jul 2023 22:22:59 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id E9C56C433C8; Thu, 27 Jul 2023 22:22:58 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496579; bh=iDTsVz2Dhguj/vhZP/pcsDSj9o7jhjZ+We69T8X3n9s=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=ngqj8j/zyCWuONZt+dTdpe4wESUWGxcGjahaElZ53sVO/I3miQ6iYYP/Z7ymbUpqq vqpsL7yqNBE4HrY1/EtRnaNzFd86WpMwfqHiF+AC8k/w3EpRUUuIQDr8TB0CkDH1Hx GiAUK/maVKRGWZzMREq1WXOT1J5GhFX1CwkiMVIsBxCdstRpQCTSCGINpPRck+AXHL 053oW1/CRp9dJiKBgM/dkn/vNSD7Scw+43jXHnvByMojoqueI482gDrH3T9vMkmpVu eJq6Iec1h4/Th8vD3aB9Ps9d7lT+HjuK3jQuuarhgtdm23dHUzlNOZ50g9llMvGIZV KtiWkHbkKZrQw== Date: Thu, 27 Jul 2023 15:22:58 -0700 Subject: [PATCH 6/9] xfs: rearrange xrep_reap_block to make future code flow easier From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622816.921010.17902296776901601216.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Rearrange the logic inside xrep_reap_block to make it more obvious that crosslinked metadata blocks are handled differently. Add a couple of tracepoints so that we can tell what's going on at the end of a btree rebuild operation. Signed-off-by: Darrick J. 
Wong --- fs/xfs/scrub/agheader_repair.c | 6 +++--- fs/xfs/scrub/reap.c | 19 ++++++++++++++----- fs/xfs/scrub/trace.h | 17 ++++++++--------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index c902a5dee57f5..b8d28cfec2866 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -646,13 +646,13 @@ xrep_agfl_fill( xfs_fsblock_t fsbno = start; int error; + trace_xrep_agfl_insert(sc->sa.pag, XFS_FSB_TO_AGBNO(sc->mp, start), + len); + while (fsbno < start + len && af->fl_off < af->flcount) af->agfl_bno[af->fl_off++] = cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++)); - trace_xrep_agfl_insert(sc->mp, sc->sa.pag->pag_agno, - XFS_FSB_TO_AGBNO(sc->mp, start), len); - error = xbitmap_set(&af->used_extents, start, fsbno - 1); if (error) return error; diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 9b0373dde7ab1..847c6f8361021 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -175,8 +175,6 @@ xrep_reap_block( agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1); - /* We don't support reaping file extents yet. */ if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { ASSERT(0); @@ -206,10 +204,21 @@ xrep_reap_block( * to run xfs_repair. */ if (has_other_rmap) { + trace_xrep_dispose_unmap_extent(sc->sa.pag, agbno, 1); + error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, rs->oinfo); - } else if (rs->resv == XFS_AG_RESV_AGFL) { - xrep_block_reap_binval(sc, fsbno); + if (error) + return error; + + goto roll_out; + } + + trace_xrep_dispose_free_extent(sc->sa.pag, agbno, 1); + + xrep_block_reap_binval(sc, fsbno); + + if (rs->resv == XFS_AG_RESV_AGFL) { error = xrep_put_freelist(sc, agbno); } else { /* @@ -219,7 +228,6 @@ xrep_reap_block( * every 100 or so EFIs so that we don't exceed the log * reservation. 
*/ - xrep_block_reap_binval(sc, fsbno); error = __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, rs->resv, true); if (error) @@ -230,6 +238,7 @@ xrep_reap_block( if (error || !need_roll) return error; +roll_out: rs->deferred = 0; return xrep_roll_ag_trans(sc); } diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 9c8c7dd0f2622..71bfab3d2d290 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -729,9 +729,8 @@ TRACE_EVENT(xchk_refcount_incorrect, #if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) DECLARE_EVENT_CLASS(xrep_extent_class, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(mp, agno, agbno, len), + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(pag, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) @@ -739,8 +738,8 @@ DECLARE_EVENT_CLASS(xrep_extent_class, __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; + __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->agno = pag->pag_agno; __entry->agbno = agbno; __entry->len = len; ), @@ -752,10 +751,10 @@ DECLARE_EVENT_CLASS(xrep_extent_class, ); #define DEFINE_REPAIR_EXTENT_EVENT(name) \ DEFINE_EVENT(xrep_extent_class, name, \ - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ - xfs_agblock_t agbno, xfs_extlen_t len), \ - TP_ARGS(mp, agno, agbno, len)) -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_btree_extent); + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(pag, agbno, len)) +DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_unmap_extent); +DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_free_extent); DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert); DECLARE_EVENT_CLASS(xrep_rmap_class, From patchwork Thu Jul 27 22:23:14 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13330867 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 2F601EB64DD for ; Thu, 27 Jul 2023 22:23:19 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230037AbjG0WXS (ORCPT ); Thu, 27 Jul 2023 18:23:18 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43898 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231531AbjG0WXR (ORCPT ); Thu, 27 Jul 2023 18:23:17 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id BE19C2D71 for ; Thu, 27 Jul 2023 15:23:15 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 3BB9161F3E for ; Thu, 27 Jul 2023 22:23:15 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 99034C433C7; Thu, 27 Jul 2023 22:23:14 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496594; bh=fkA+szUV8WYI0DHJozqKUtn2WJqEdjPGs068xUDJv5c=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=r03/2QDaDm/sGtMnrBkd+3m9t93sdKzxp02V0aa8zicwZDa5UcDujS57viNKSVVAo ylLSPKQkzM8F4HgYKI2HWSWXDAGKuHP5OBJxa5p3y39pQGq4kpwe04LlLZaTDZ3eXZ BOfGosjakRhsWuOU3dCWYdGP1oLo/vvuNxRJR+6ss+qKlVKiZvyQ8quSeINBDkeoDR wdu9mLMEYL2MO2LNv7KgQDRzlHozXdczc+wDVcgdONNR4d26/Y0EWaF7cX+/YUGuJO XMdQGC2gVdnDiBFDk+PlrfH5CD6VPrLQ5ca7AFwRaARhAGpiJSX3aFaS7lpZCdD34/ z3+iaG6wD1ukQ== Date: Thu, 27 Jul 2023 15:23:14 -0700 Subject: [PATCH 7/9] xfs: allow scanning ranges of the buffer cache for live buffers From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622831.921010.410578297835897388.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong After an online repair, we need to invalidate buffers representing the blocks from the old metadata that we're replacing. It's possible that parts of a tree that were previously cached in memory are no longer accessible due to media failure or other corruption on interior nodes, so repair figures out the old blocks from the reverse mapping data and scans the buffer cache directly. In other words, online fsck needs to find all the live (i.e. non-stale) buffers for a range of fsblocks so that it can invalidate them. Unfortunately, the current buffer cache code triggers asserts if the rhashtable lookup finds a non-stale buffer of a different length than the key we searched for. For regular operation this is desirable, but for this repair procedure, we don't care since we're going to forcibly stale the buffer anyway. Add an internal lookup flag to avoid the assert. Skip buffers that are already XBF_STALE. Signed-off-by: Darrick J. 
Wong --- fs/xfs/scrub/reap.c | 2 +- fs/xfs/xfs_buf.c | 9 ++++++++- fs/xfs/xfs_buf.h | 13 +++++++++++++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 847c6f8361021..df13a9e0fe86a 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -149,7 +149,7 @@ xrep_block_reap_binval( */ error = xfs_buf_incore(sc->mp->m_ddev_targp, XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), 0, &bp); + XFS_FSB_TO_BB(sc->mp, 1), XBF_LIVESCAN, &bp); if (error) return; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 15d1e5a7c2d34..fa392c43ba166 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -481,7 +481,8 @@ _xfs_buf_obj_cmp( * reallocating a busy extent. Skip this buffer and * continue searching for an exact match. */ - ASSERT(bp->b_flags & XBF_STALE); + if (!(map->bm_flags & XBM_LIVESCAN)) + ASSERT(bp->b_flags & XBF_STALE); return 1; } return 0; @@ -559,6 +560,10 @@ xfs_buf_find_lock( * intact here. */ if (bp->b_flags & XBF_STALE) { + if (flags & XBF_LIVESCAN) { + xfs_buf_unlock(bp); + return -ENOENT; + } ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); bp->b_flags &= _XBF_KMEM | _XBF_PAGES; bp->b_ops = NULL; @@ -682,6 +687,8 @@ xfs_buf_get_map( int error; int i; + if (flags & XBF_LIVESCAN) + cmap.bm_flags |= XBM_LIVESCAN; for (i = 0; i < nmaps; i++) cmap.bm_len += map[i].bm_len; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 549c60942208b..df8f47953bb4e 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -44,6 +44,11 @@ struct xfs_buf; #define _XBF_DELWRI_Q (1u << 22)/* buffer on a delwri queue */ /* flags used only as arguments to access routines */ +/* + * Online fsck is scanning the buffer cache for live buffers. Do not warn + * about length mismatches during lookups and do not return stale buffers. + */ +#define XBF_LIVESCAN (1u << 28) #define XBF_INCORE (1u << 29)/* lookup only, return if found in cache */ #define XBF_TRYLOCK (1u << 30)/* lock requested, but do not wait */ #define XBF_UNMAPPED (1u << 31)/* do not map the buffer */ @@ -67,6 +72,7 @@ typedef unsigned int xfs_buf_flags_t; { _XBF_KMEM, "KMEM" }, \ { _XBF_DELWRI_Q, "DELWRI_Q" }, \ /* The following interface flags should never be set */ \ + { XBF_LIVESCAN, "LIVESCAN" }, \ { XBF_INCORE, "INCORE" }, \ { XBF_TRYLOCK, "TRYLOCK" }, \ { XBF_UNMAPPED, "UNMAPPED" } @@ -114,8 +120,15 @@ typedef struct xfs_buftarg { struct xfs_buf_map { xfs_daddr_t bm_bn; /* block number for I/O */ int bm_len; /* size of I/O */ + unsigned int bm_flags; }; +/* + * Online fsck is scanning the buffer cache for live buffers. Do not warn + * about length mismatches during lookups and do not return stale buffers. + */ +#define XBM_LIVESCAN (1U << 0) + #define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; From patchwork Thu Jul 27 22:23:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13330868 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 690E7C0015E for ; Thu, 27 Jul 2023 22:23:35 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231590AbjG0WXe (ORCPT ); Thu, 27 Jul 2023 18:23:34 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43972 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229712AbjG0WXd (ORCPT ); Thu, 27 Jul 2023 18:23:33 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 5EAD12D4D for ; Thu, 27 Jul 2023 15:23:31 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id D99F761F6A for ; Thu, 27 Jul 2023 22:23:30 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3F0D8C433C8; Thu, 27 Jul 2023 22:23:30 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496610; bh=a90DKJW5kL90Sl+vmjB2bhf1PzEKzsCEM51z97NqtUI=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=B2K0s3V6uQC4CqARtKyrOT8OOSx4yg3yjf9dQJv/etksUbLjv4ExbOVZlSdFcrG/S mtmImFNTdxnufYZOCpJBlfJ5hgWUNkoGM/RUfwHTVrlC/r4sjOrs6+i+cB38zJ9e6A NUNZsNNTR8Ql+Co4Nfu1+qfz0H+FSDdycpHcWJR13TF/5chnzuFh/+385l3Oph8xM6 TcNXhZjymH0zmxqGA7iV2SQf1uY6BE99KXKF775Cl/fR9zDtErAH/HBpqxJvr1PNyp qySh0DqqiQyyjdcngse50ncedu9fdxCZO3APPaCfrBMbIE3h1okPkNNaCChQxCdGDh kf1x2B30hueJg== Date: Thu, 27 Jul 2023 15:23:29 -0700 Subject: [PATCH 8/9] xfs: reap large AG metadata extents when possible From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622845.921010.286166239346686392.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong When we're freeing extents that have been set in a bitmap, break the bitmap extent into multiple sub-extents organized by fate, and reap the extents. This enables us to dispose of old resources more efficiently than doing them block by block. While we're at it, rename the reaping functions to make it clear that they're reaping per-AG extents. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/agheader_repair.c | 2 fs/xfs/scrub/bitmap.c | 37 ---- fs/xfs/scrub/bitmap.h | 4 fs/xfs/scrub/reap.c | 399 ++++++++++++++++++++++++++++++++-------- fs/xfs/scrub/reap.h | 2 fs/xfs/scrub/repair.c | 51 +++++ fs/xfs/scrub/repair.h | 1 fs/xfs/scrub/trace.h | 37 ++++ 8 files changed, 407 insertions(+), 126 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index b8d28cfec2866..9ad2987ed6e5a 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -775,7 +775,7 @@ xrep_agfl( goto err; /* Dump any AGFL overflow. 
*/ - error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, + error = xrep_reap_ag_metadata(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, XFS_AG_RESV_AGFL); err: xbitmap_destroy(&agfl_extents); diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 0c959be396eab..d926d708f2956 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -385,43 +385,6 @@ xbitmap_walk( return error; } -struct xbitmap_walk_bits { - xbitmap_walk_bits_fn fn; - void *priv; -}; - -/* Walk all the bits in a run. */ -static int -xbitmap_walk_bits_in_run( - uint64_t start, - uint64_t len, - void *priv) -{ - struct xbitmap_walk_bits *wb = priv; - uint64_t i; - int error = 0; - - for (i = start; i < start + len; i++) { - error = wb->fn(i, wb->priv); - if (error) - break; - } - - return error; -} - -/* Call a function for every set bit in this bitmap. */ -int -xbitmap_walk_bits( - struct xbitmap *bitmap, - xbitmap_walk_bits_fn fn, - void *priv) -{ - struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv}; - - return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb); -} - /* Does this bitmap have no bits set at all? */ bool xbitmap_empty( diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 84981724ecafd..a3ad564d94b7f 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -33,10 +33,6 @@ typedef int (*xbitmap_walk_fn)(uint64_t start, uint64_t len, void *priv); int xbitmap_walk(struct xbitmap *bitmap, xbitmap_walk_fn fn, void *priv); -typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv); -int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn, - void *priv); - bool xbitmap_empty(struct xbitmap *bitmap); bool xbitmap_test(struct xbitmap *bitmap, uint64_t start, uint64_t *len); diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index df13a9e0fe86a..f62f00f500540 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -27,6 +27,10 @@ #include "xfs_quota.h" #include "xfs_qm.h" #include "xfs_bmap.h" +#include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_remote.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -76,20 +80,29 @@ */ /* Information about reaping extents after a repair. */ -struct xrep_reap_state { +struct xreap_state { struct xfs_scrub *sc; /* Reverse mapping owner and metadata reservation type. */ const struct xfs_owner_info *oinfo; enum xfs_ag_resv_type resv; + /* If true, roll the transaction before reaping the next extent. */ + bool force_roll; + /* Number of deferred reaps attached to the current transaction. */ unsigned int deferred; + + /* Number of invalidated buffers logged to the current transaction. */ + unsigned int invalidated; + + /* Number of deferred reaps queued during the whole reap sequence. */ + unsigned long long total_deferred; }; /* Put a block back on the AGFL. */ STATIC int -xrep_put_freelist( +xreap_put_freelist( struct xfs_scrub *sc, xfs_agblock_t agbno) { @@ -126,69 +139,227 @@ xrep_put_freelist( return 0; } -/* Try to invalidate the incore buffer for a block that we're about to free. */ +/* Are there any uncommitted reap operations? */ +static inline bool xreap_dirty(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->deferred) + return true; + if (rs->invalidated) + return true; + if (rs->total_deferred) + return true; + return false; +} + +#define XREAP_MAX_DEFERRED (128) +#define XREAP_MAX_BINVAL (2048) + +/* + * Decide if we want to roll the transaction after reaping an extent. 
We don't + * want to overrun the transaction reservation, so we prohibit more than + * 128 EFIs per transaction. For the same reason, we limit the number + * of buffer invalidations to 2048. + */ +static inline bool xreap_want_roll(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->deferred > XREAP_MAX_DEFERRED) + return true; + if (rs->invalidated > XREAP_MAX_BINVAL) + return true; + return false; +} + +static inline void xreap_reset(struct xreap_state *rs) +{ + rs->total_deferred += rs->deferred; + rs->deferred = 0; + rs->invalidated = 0; + rs->force_roll = false; +} + +#define XREAP_MAX_DEFER_CHAIN (2048) + +/* + * Decide if we want to finish the deferred ops that are attached to the scrub + * transaction. We don't want to queue huge chains of deferred ops because + * that can consume a lot of log space and kernel memory. Hence we trigger a + * xfs_defer_finish if there are more than 2048 deferred reap operations or the + * caller did some real work. + */ +static inline bool +xreap_want_defer_finish(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->total_deferred > XREAP_MAX_DEFER_CHAIN) + return true; + return false; +} + +static inline void xreap_defer_finish_reset(struct xreap_state *rs) +{ + rs->total_deferred = 0; + rs->deferred = 0; + rs->invalidated = 0; + rs->force_roll = false; +} + +/* Try to invalidate the incore buffers for an extent that we're freeing. */ STATIC void -xrep_block_reap_binval( - struct xfs_scrub *sc, - xfs_fsblock_t fsbno) +xreap_agextent_binval( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_extlen_t *aglenp) { - struct xfs_buf *bp = NULL; - int error; + struct xfs_scrub *sc = rs->sc; + struct xfs_perag *pag = sc->sa.pag; + struct xfs_mount *mp = sc->mp; + xfs_agnumber_t agno = sc->sa.pag->pag_agno; + xfs_agblock_t agbno_next = agbno + *aglenp; + xfs_agblock_t bno = agbno; /* - * If there's an incore buffer for exactly this block, invalidate it. * Avoid invalidating AG headers and post-EOFS blocks because we never * own those. */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) + if (!xfs_verify_agbno(pag, agbno) || + !xfs_verify_agbno(pag, agbno_next - 1)) return; /* - * We assume that the lack of any other known owners means that the - * buffer can be locked without risk of deadlocking. + * If there are incore buffers for these blocks, invalidate them. We + * assume that the lack of any other known owners means that the buffer + * can be locked without risk of deadlocking. The buffer cache cannot + * detect aliasing, so employ nested loops to scan for incore buffers + * of any plausible size. */ - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_LIVESCAN, &bp); - if (error) - return; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); + while (bno < agbno_next) { + xfs_agblock_t fsbcount; + xfs_agblock_t max_fsbs; + + /* + * Max buffer size is the max remote xattr buffer size, which + * is one fs block larger than 64k. 
+ */ + max_fsbs = min_t(xfs_agblock_t, agbno_next - bno, + xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX)); + + for (fsbcount = 1; fsbcount < max_fsbs; fsbcount++) { + struct xfs_buf *bp = NULL; + xfs_daddr_t daddr; + int error; + + daddr = XFS_AGB_TO_DADDR(mp, agno, bno); + error = xfs_buf_incore(mp->m_ddev_targp, daddr, + XFS_FSB_TO_BB(mp, fsbcount), + XBF_LIVESCAN, &bp); + if (error) + continue; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + rs->invalidated++; + + /* + * Stop invalidating if we've hit the limit; we should + * still have enough reservation left to free however + * far we've gotten. + */ + if (rs->invalidated > XREAP_MAX_BINVAL) { + *aglenp -= agbno_next - bno; + goto out; + } + } + + bno++; + } + +out: + trace_xreap_agextent_binval(sc->sa.pag, agbno, *aglenp); } -/* Dispose of a single block. */ +/* + * Figure out the longest run of blocks that we can dispose of with a single + * call. Cross-linked blocks should have their reverse mappings removed, but + * single-owner extents can be freed. AGFL blocks can only be put back one at + * a time. + */ STATIC int -xrep_reap_block( - uint64_t fsbno, - void *priv) +xreap_agextent_select( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_agblock_t agbno_next, + bool *crosslinked, + xfs_extlen_t *aglenp) { - struct xrep_reap_state *rs = priv; - struct xfs_scrub *sc = rs->sc; - struct xfs_btree_cur *cur; - xfs_agnumber_t agno; - xfs_agblock_t agbno; - bool has_other_rmap; - bool need_roll = true; - int error; - - agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); - agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - - /* We don't support reaping file extents yet. */ - if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { - ASSERT(0); - return -EFSCORRUPTED; - } - - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, sc->sa.pag); - - /* Can we find any other rmappings? */ + struct xfs_scrub *sc = rs->sc; + struct xfs_btree_cur *cur; + xfs_agblock_t bno = agbno + 1; + xfs_extlen_t len = 1; + int error; + + /* + * Determine if there are any other rmap records covering the first + * block of this extent. If so, the block is crosslinked. + */ + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, + sc->sa.pag); error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, - &has_other_rmap); + crosslinked); + if (error) + goto out_cur; + + /* AGFL blocks can only be deal with one at a time. */ + if (rs->resv == XFS_AG_RESV_AGFL) + goto out_found; + + /* + * Figure out how many of the subsequent blocks have the same crosslink + * status. + */ + while (bno < agbno_next) { + bool also_crosslinked; + + error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo, + &also_crosslinked); + if (error) + goto out_cur; + + if (*crosslinked != also_crosslinked) + break; + + len++; + bno++; + } + +out_found: + *aglenp = len; + trace_xreap_agextent_select(sc->sa.pag, agbno, len, *crosslinked); +out_cur: xfs_btree_del_cursor(cur, error); - if (error) - return error; + return error; +} + +/* + * Dispose of as much of the beginning of this AG extent as possible. The + * number of blocks disposed of will be returned in @aglenp. 
+ */ +STATIC int +xreap_agextent_iter( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_extlen_t *aglenp, + bool crosslinked) +{ + struct xfs_scrub *sc = rs->sc; + xfs_fsblock_t fsbno; + int error = 0; + + fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, agbno); /* * If there are other rmappings, this block is cross linked and must @@ -203,55 +374,117 @@ xrep_reap_block( * blow on writeout, the filesystem will shut down, and the admin gets * to run xfs_repair. */ - if (has_other_rmap) { - trace_xrep_dispose_unmap_extent(sc->sa.pag, agbno, 1); + if (crosslinked) { + trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp); - error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - if (error) - return error; - - goto roll_out; + rs->force_roll = true; + return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, + *aglenp, rs->oinfo); } - trace_xrep_dispose_free_extent(sc->sa.pag, agbno, 1); + trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp); - xrep_block_reap_binval(sc, fsbno); + /* + * Invalidate as many buffers as we can, starting at agbno. If this + * function sets *aglenp to zero, the transaction is full of logged + * buffer invalidations, so we need to return early so that we can + * roll and retry. + */ + xreap_agextent_binval(rs, agbno, aglenp); + if (*aglenp == 0) { + ASSERT(xreap_want_roll(rs)); + return 0; + } + /* Put blocks back on the AGFL one at a time. */ if (rs->resv == XFS_AG_RESV_AGFL) { - error = xrep_put_freelist(sc, agbno); - } else { - /* - * Use deferred frees to get rid of the old btree blocks to try - * to minimize the window in which we could crash and lose the - * old blocks. However, we still need to roll the transaction - * every 100 or so EFIs so that we don't exceed the log - * reservation. - */ - error = __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, - rs->resv, true); + ASSERT(*aglenp == 1); + error = xreap_put_freelist(sc, agbno); if (error) return error; - rs->deferred++; - need_roll = rs->deferred > 100; + + rs->force_roll = true; + return 0; } - if (error || !need_roll) + + /* + * Use deferred frees to get rid of the old btree blocks to try to + * minimize the window in which we could crash and lose the old blocks. + */ + error = __xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo, + rs->resv, true); + if (error) return error; -roll_out: - rs->deferred = 0; - return xrep_roll_ag_trans(sc); + rs->deferred++; + return 0; } -/* Dispose of every block of every extent in the bitmap. */ +/* + * Break an AG metadata extent into sub-extents by fate (crosslinked, not + * crosslinked), and dispose of each sub-extent separately. 
+ */ +STATIC int +xreap_agmeta_extent( + uint64_t fsbno, + uint64_t len, + void *priv) +{ + struct xreap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; + xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); + xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + xfs_agblock_t agbno_next = agbno + len; + int error = 0; + + ASSERT(len <= XFS_MAX_BMBT_EXTLEN); + ASSERT(sc->ip == NULL); + + if (agno != sc->sa.pag->pag_agno) { + ASSERT(sc->sa.pag->pag_agno == agno); + return -EFSCORRUPTED; + } + + while (agbno < agbno_next) { + xfs_extlen_t aglen; + bool crosslinked; + + error = xreap_agextent_select(rs, agbno, agbno_next, + &crosslinked, &aglen); + if (error) + return error; + + error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked); + if (error) + return error; + + if (xreap_want_defer_finish(rs)) { + error = xrep_defer_finish(sc); + if (error) + return error; + xreap_defer_finish_reset(rs); + } else if (xreap_want_roll(rs)) { + error = xrep_roll_ag_trans(sc); + if (error) + return error; + xreap_reset(rs); + } + + agbno += aglen; + } + + return 0; +} + +/* Dispose of every block of every AG metadata extent in the bitmap. */ int -xrep_reap_extents( +xrep_reap_ag_metadata( struct xfs_scrub *sc, struct xbitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { - struct xrep_reap_state rs = { + struct xreap_state rs = { .sc = sc, .oinfo = oinfo, .resv = type, @@ -259,10 +492,14 @@ xrep_reap_extents( int error; ASSERT(xfs_has_rmapbt(sc->mp)); + ASSERT(sc->ip == NULL); - error = xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); - if (error || rs.deferred == 0) + error = xbitmap_walk(bitmap, xreap_agmeta_extent, &rs); + if (error) return error; - return xrep_roll_ag_trans(sc); + if (xreap_dirty(&rs)) + return xrep_defer_finish(sc); + + return 0; } diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h index 85c8d8a5fe389..7f234abfa78d1 100644 --- a/fs/xfs/scrub/reap.h +++ b/fs/xfs/scrub/reap.h @@ -6,7 +6,7 @@ #ifndef __XFS_SCRUB_REAP_H__ #define __XFS_SCRUB_REAP_H__ -int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *bitmap, +int xrep_reap_ag_metadata(struct xfs_scrub *sc, struct xbitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index a3eddfcb42fc1..83a1b1437a4fa 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -26,6 +26,7 @@ #include "xfs_ag_resv.h" #include "xfs_quota.h" #include "xfs_qm.h" +#include "xfs_defer.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -166,6 +167,56 @@ xrep_roll_ag_trans( return 0; } +/* Finish all deferred work attached to the repair transaction. */ +int +xrep_defer_finish( + struct xfs_scrub *sc) +{ + int error; + + /* + * Keep the AG header buffers locked while we complete deferred work + * items. Ensure that both AG buffers are dirty and held when we roll + * the transaction so that they move forward in the log without losing + * the bli (and hence the bli type) when the transaction commits. + * + * Normal code would never hold clean buffers across a roll, but repair + * needs both buffers to maintain a total lock on the AG. + */ + if (sc->sa.agi_bp) { + xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM); + xfs_trans_bhold(sc->tp, sc->sa.agi_bp); + } + + if (sc->sa.agf_bp) { + xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM); + xfs_trans_bhold(sc->tp, sc->sa.agf_bp); + } + + /* + * Finish all deferred work items. 
We still hold the AG header buffers + * locked regardless of whether or not that succeeds. On failure, the + * buffers will be released during teardown on our way out of the + * kernel. If successful, join the buffers to the new transaction + * and move on. + */ + error = xfs_defer_finish(&sc->tp); + if (error) + return error; + + /* + * Release the hold that we set above because defer_finish won't do + * that for us. The defer roll code redirties held buffers after each + * roll, so the AG header buffers should be ready for logging. + */ + if (sc->sa.agi_bp) + xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp); + if (sc->sa.agf_bp) + xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp); + + return 0; +} + /* * Does the given AG have enough space to rebuild a btree? Neither AG * reservation can be critical, and we must have enough space (factoring diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index e01d63a4a93b4..dc89164d10a63 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -20,6 +20,7 @@ static inline int xrep_notsupported(struct xfs_scrub *sc) int xrep_attempt(struct xfs_scrub *sc); void xrep_failure(struct xfs_mount *mp); int xrep_roll_ag_trans(struct xfs_scrub *sc); +int xrep_defer_finish(struct xfs_scrub *sc); bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks, enum xfs_ag_resv_type type); xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc); diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 71bfab3d2d290..73cf1002bd94a 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -753,10 +753,43 @@ DECLARE_EVENT_CLASS(xrep_extent_class, DEFINE_EVENT(xrep_extent_class, name, \ TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), \ TP_ARGS(pag, agbno, len)) -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_unmap_extent); -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_free_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_unmap_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_free_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_agextent_binval); DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert); +DECLARE_EVENT_CLASS(xrep_reap_find_class, + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, + bool crosslinked), + TP_ARGS(pag, agbno, len, crosslinked), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(bool, crosslinked) + ), + TP_fast_assign( + __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->agno = pag->pag_agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->crosslinked = crosslinked; + ), + TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x crosslinked %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->crosslinked ? 1 : 0) +); +#define DEFINE_REPAIR_REAP_FIND_EVENT(name) \ +DEFINE_EVENT(xrep_reap_find_class, name, \ + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, \ + bool crosslinked), \ + TP_ARGS(pag, agbno, len, crosslinked)) +DEFINE_REPAIR_REAP_FIND_EVENT(xreap_agextent_select); + DECLARE_EVENT_CLASS(xrep_rmap_class, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t len, From patchwork Thu Jul 27 22:23:45 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13330869 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 871B1EB64DD for ; Thu, 27 Jul 2023 22:23:50 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229712AbjG0WXt (ORCPT ); Thu, 27 Jul 2023 18:23:49 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:44050 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229514AbjG0WXs (ORCPT ); Thu, 27 Jul 2023 18:23:48 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0274D2D4D for ; Thu, 27 Jul 2023 15:23:47 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 84AB961F6A for ; Thu, 27 Jul 2023 22:23:46 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id DFBF6C433C8; Thu, 27 Jul 2023 22:23:45 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1690496626; bh=YxE58km5ErH+HB0fwEL904TthTMO7THmP+sIRfg8qCI=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=IWS2kxXQEVboJPnv6FlyyRzsriynBNJQmJDsjxITSI0BS2UPZvXKjUDCFDYVSFSMk GPA/R3AtOWlfEfrnKY03uSWvt0Ky+3JM3bnkdib9I6o1J8pTRLevptOo2RgUtrVx4I KO4iGH8nCYUkSljPbKCxhemXiencM1+iMO1LeMnMzLnRQQNo5Jo5OG6EYwhUZhYHsD nNbQ+gOtQpGzMp8XpXQRXndqN9XLhT5jRhqUhyMkoXiJEDpp76TfjUIAS972Cn0CGJ 7sNb+fTHkVgKg0/99NrQFmAX9k4Cke+5bFf866ZRXWivrn0sjZlp/AlcIVVp1PthHC /zzhZPx/4tRNA== Date: Thu, 27 Jul 2023 15:23:45 -0700 Subject: [PATCH 9/9] xfs: use per-AG bitmaps to reap unused AG metadata blocks during repair From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <169049622861.921010.5756435166223444650.stgit@frogsfrogsfrogs> In-Reply-To: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> References: <169049622719.921010.16542808514375882520.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong The AGFL repair code uses a series of bitmaps to figure out where there are OWN_AG blocks that are not claimed by the free space and rmap btrees. These blocks become the new AGFL, and any overflow is reaped. The bitmaps current track xfs_fsblock_t even though we already know the AG number. In the last patch, we introduced a new bitmap "type" for tracking xfs_agblock_t extents. Port the reaping code and the AGFL repair to use this new type, which makes it very obvious what we're tracking. This also eliminates a bunch of unnecessary agblock <-> fsblock conversions. Signed-off-by: Darrick J. 
Wong --- fs/xfs/scrub/agheader_repair.c | 74 ++++++++++++++++++---------------------- fs/xfs/scrub/bitmap.c | 41 ++-------------------- fs/xfs/scrub/bitmap.h | 6 +-- fs/xfs/scrub/reap.c | 14 ++------ fs/xfs/scrub/reap.h | 5 +-- 5 files changed, 45 insertions(+), 95 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index 9ad2987ed6e5a..9e99486b5f20b 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -445,13 +445,13 @@ xrep_agf( struct xrep_agfl { /* Bitmap of alleged AGFL blocks that we're not going to add. */ - struct xbitmap crossed; + struct xagb_bitmap crossed; /* Bitmap of other OWN_AG metadata blocks. */ - struct xbitmap agmetablocks; + struct xagb_bitmap agmetablocks; /* Bitmap of free space. */ - struct xbitmap *freesp; + struct xagb_bitmap *freesp; /* rmapbt cursor for finding crosslinked blocks */ struct xfs_btree_cur *rmap_cur; @@ -467,7 +467,6 @@ xrep_agfl_walk_rmap( void *priv) { struct xrep_agfl *ra = priv; - xfs_fsblock_t fsb; int error = 0; if (xchk_should_terminate(ra->sc, &error)) @@ -475,14 +474,13 @@ xrep_agfl_walk_rmap( /* Record all the OWN_AG blocks. */ if (rec->rm_owner == XFS_RMAP_OWN_AG) { - fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, - rec->rm_startblock); - error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount); + error = xagb_bitmap_set(ra->freesp, rec->rm_startblock, + rec->rm_blockcount); if (error) return error; } - return xbitmap_set_btcur_path(&ra->agmetablocks, cur); + return xagb_bitmap_set_btcur_path(&ra->agmetablocks, cur); } /* Strike out the blocks that are cross-linked according to the rmapbt. */ @@ -493,12 +491,10 @@ xrep_agfl_check_extent( void *priv) { struct xrep_agfl *ra = priv; - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(ra->sc->mp, start); + xfs_agblock_t agbno = start; xfs_agblock_t last_agbno = agbno + len - 1; int error; - ASSERT(XFS_FSB_TO_AGNO(ra->sc->mp, start) == ra->sc->sa.pag->pag_agno); - while (agbno <= last_agbno) { bool other_owners; @@ -508,7 +504,7 @@ xrep_agfl_check_extent( return error; if (other_owners) { - error = xbitmap_set(&ra->crossed, agbno, 1); + error = xagb_bitmap_set(&ra->crossed, agbno, 1); if (error) return error; } @@ -534,7 +530,7 @@ STATIC int xrep_agfl_collect_blocks( struct xfs_scrub *sc, struct xfs_buf *agf_bp, - struct xbitmap *agfl_extents, + struct xagb_bitmap *agfl_extents, xfs_agblock_t *flcount) { struct xrep_agfl ra; @@ -544,8 +540,8 @@ xrep_agfl_collect_blocks( ra.sc = sc; ra.freesp = agfl_extents; - xbitmap_init(&ra.agmetablocks); - xbitmap_init(&ra.crossed); + xagb_bitmap_init(&ra.agmetablocks); + xagb_bitmap_init(&ra.crossed); /* Find all space used by the free space btrees & rmapbt. */ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); @@ -557,7 +553,7 @@ xrep_agfl_collect_blocks( /* Find all blocks currently being used by the bnobt. */ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag, XFS_BTNUM_BNO); - error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur); xfs_btree_del_cursor(cur, error); if (error) goto out_bmp; @@ -565,7 +561,7 @@ xrep_agfl_collect_blocks( /* Find all blocks currently being used by the cntbt. 
*/ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag, XFS_BTNUM_CNT); - error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur); xfs_btree_del_cursor(cur, error); if (error) goto out_bmp; @@ -574,17 +570,17 @@ xrep_agfl_collect_blocks( * Drop the freesp meta blocks that are in use by btrees. * The remaining blocks /should/ be AGFL blocks. */ - error = xbitmap_disunion(agfl_extents, &ra.agmetablocks); + error = xagb_bitmap_disunion(agfl_extents, &ra.agmetablocks); if (error) goto out_bmp; /* Strike out the blocks that are cross-linked. */ ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); - error = xbitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra); + error = xagb_bitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra); xfs_btree_del_cursor(ra.rmap_cur, error); if (error) goto out_bmp; - error = xbitmap_disunion(agfl_extents, &ra.crossed); + error = xagb_bitmap_disunion(agfl_extents, &ra.crossed); if (error) goto out_bmp; @@ -592,12 +588,12 @@ xrep_agfl_collect_blocks( * Calculate the new AGFL size. If we found more blocks than fit in * the AGFL we'll free them later. */ - *flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents), + *flcount = min_t(uint64_t, xagb_bitmap_hweight(agfl_extents), xfs_agfl_size(mp)); out_bmp: - xbitmap_destroy(&ra.crossed); - xbitmap_destroy(&ra.agmetablocks); + xagb_bitmap_destroy(&ra.crossed); + xagb_bitmap_destroy(&ra.agmetablocks); return error; } @@ -627,7 +623,7 @@ xrep_agfl_update_agf( } struct xrep_agfl_fill { - struct xbitmap used_extents; + struct xagb_bitmap used_extents; struct xfs_scrub *sc; __be32 *agfl_bno; xfs_agblock_t flcount; @@ -643,17 +639,15 @@ xrep_agfl_fill( { struct xrep_agfl_fill *af = priv; struct xfs_scrub *sc = af->sc; - xfs_fsblock_t fsbno = start; + xfs_agblock_t agbno = start; int error; - trace_xrep_agfl_insert(sc->sa.pag, XFS_FSB_TO_AGBNO(sc->mp, start), - len); + trace_xrep_agfl_insert(sc->sa.pag, agbno, len); - while (fsbno < start + len && af->fl_off < af->flcount) - af->agfl_bno[af->fl_off++] = - cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++)); + while (agbno < start + len && af->fl_off < af->flcount) + af->agfl_bno[af->fl_off++] = cpu_to_be32(agbno++); - error = xbitmap_set(&af->used_extents, start, fsbno - 1); + error = xagb_bitmap_set(&af->used_extents, start, agbno - 1); if (error) return error; @@ -668,7 +662,7 @@ STATIC int xrep_agfl_init_header( struct xfs_scrub *sc, struct xfs_buf *agfl_bp, - struct xbitmap *agfl_extents, + struct xagb_bitmap *agfl_extents, xfs_agblock_t flcount) { struct xrep_agfl_fill af = { @@ -696,17 +690,17 @@ xrep_agfl_init_header( * blocks than fit in the AGFL, they will be freed in a subsequent * step. */ - xbitmap_init(&af.used_extents); + xagb_bitmap_init(&af.used_extents); af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp), - xbitmap_walk(agfl_extents, xrep_agfl_fill, &af); - error = xbitmap_disunion(agfl_extents, &af.used_extents); + xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af); + error = xagb_bitmap_disunion(agfl_extents, &af.used_extents); if (error) return error; /* Write new AGFL to disk. 
*/ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1); - xbitmap_destroy(&af.used_extents); + xagb_bitmap_destroy(&af.used_extents); return 0; } @@ -715,7 +709,7 @@ int xrep_agfl( struct xfs_scrub *sc) { - struct xbitmap agfl_extents; + struct xagb_bitmap agfl_extents; struct xfs_mount *mp = sc->mp; struct xfs_buf *agf_bp; struct xfs_buf *agfl_bp; @@ -726,7 +720,7 @@ xrep_agfl( if (!xfs_has_rmapbt(mp)) return -EOPNOTSUPP; - xbitmap_init(&agfl_extents); + xagb_bitmap_init(&agfl_extents); /* * Read the AGF so that we can query the rmapbt. We hope that there's @@ -775,10 +769,10 @@ xrep_agfl( goto err; /* Dump any AGFL overflow. */ - error = xrep_reap_ag_metadata(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, + error = xrep_reap_agblocks(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, XFS_AG_RESV_AGFL); err: - xbitmap_destroy(&agfl_extents); + xagb_bitmap_destroy(&agfl_extents); return error; } diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index d926d708f2956..e0c89a9a0ca07 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -301,21 +301,15 @@ xagb_bitmap_set_btblocks( * blocks going from the leaf towards the root. */ int -xbitmap_set_btcur_path( - struct xbitmap *bitmap, +xagb_bitmap_set_btcur_path( + struct xagb_bitmap *bitmap, struct xfs_btree_cur *cur) { - struct xfs_buf *bp; - xfs_fsblock_t fsb; int i; int error; for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) { - xfs_btree_get_block(cur, i, &bp); - if (!bp) - continue; - fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); - error = xbitmap_set(bitmap, fsb, 1); + error = xagb_bitmap_visit_btblock(cur, i, bitmap); if (error) return error; } @@ -323,35 +317,6 @@ xbitmap_set_btcur_path( return 0; } -/* Collect a btree's block in the bitmap. */ -STATIC int -xbitmap_collect_btblock( - struct xfs_btree_cur *cur, - int level, - void *priv) -{ - struct xbitmap *bitmap = priv; - struct xfs_buf *bp; - xfs_fsblock_t fsbno; - - xfs_btree_get_block(cur, level, &bp); - if (!bp) - return 0; - - fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); - return xbitmap_set(bitmap, fsbno, 1); -} - -/* Walk the btree and mark the bitmap wherever a btree block is found. */ -int -xbitmap_set_btblocks( - struct xbitmap *bitmap, - struct xfs_btree_cur *cur) -{ - return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, - XFS_BTREE_VISIT_ALL, bitmap); -} - /* How many bits are set in this bitmap? 
*/ uint64_t xbitmap_hweight( diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index a3ad564d94b7f..4fe58bad67345 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -16,10 +16,6 @@ void xbitmap_destroy(struct xbitmap *bitmap); int xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub); -int xbitmap_set_btcur_path(struct xbitmap *bitmap, - struct xfs_btree_cur *cur); -int xbitmap_set_btblocks(struct xbitmap *bitmap, - struct xfs_btree_cur *cur); uint64_t xbitmap_hweight(struct xbitmap *bitmap); /* @@ -106,5 +102,7 @@ static inline int xagb_bitmap_walk(struct xagb_bitmap *bitmap, int xagb_bitmap_set_btblocks(struct xagb_bitmap *bitmap, struct xfs_btree_cur *cur); +int xagb_bitmap_set_btcur_path(struct xagb_bitmap *bitmap, + struct xfs_btree_cur *cur); #endif /* __XFS_SCRUB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index f62f00f500540..a33a9bc5a1bea 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -432,19 +432,13 @@ xreap_agmeta_extent( { struct xreap_state *rs = priv; struct xfs_scrub *sc = rs->sc; - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + xfs_agblock_t agbno = fsbno; xfs_agblock_t agbno_next = agbno + len; int error = 0; ASSERT(len <= XFS_MAX_BMBT_EXTLEN); ASSERT(sc->ip == NULL); - if (agno != sc->sa.pag->pag_agno) { - ASSERT(sc->sa.pag->pag_agno == agno); - return -EFSCORRUPTED; - } - while (agbno < agbno_next) { xfs_extlen_t aglen; bool crosslinked; @@ -478,9 +472,9 @@ xreap_agmeta_extent( /* Dispose of every block of every AG metadata extent in the bitmap. */ int -xrep_reap_ag_metadata( +xrep_reap_agblocks( struct xfs_scrub *sc, - struct xbitmap *bitmap, + struct xagb_bitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { @@ -494,7 +488,7 @@ xrep_reap_ag_metadata( ASSERT(xfs_has_rmapbt(sc->mp)); ASSERT(sc->ip == NULL); - error = xbitmap_walk(bitmap, xreap_agmeta_extent, &rs); + error = xagb_bitmap_walk(bitmap, xreap_agmeta_extent, &rs); if (error) return error; diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h index 7f234abfa78d1..fe24626af1649 100644 --- a/fs/xfs/scrub/reap.h +++ b/fs/xfs/scrub/reap.h @@ -6,8 +6,7 @@ #ifndef __XFS_SCRUB_REAP_H__ #define __XFS_SCRUB_REAP_H__ -int xrep_reap_ag_metadata(struct xfs_scrub *sc, struct xbitmap *bitmap, - const struct xfs_owner_info *oinfo, - enum xfs_ag_resv_type type); +int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap, + const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); #endif /* __XFS_SCRUB_REAP_H__ */
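
[Illustration, not part of the posted series] A minimal sketch of how a caller can use the XBF_LIVESCAN lookup mode from patch 7 to stale out a single cached block. The function name xreap_example_binval_one is hypothetical; the buffer-cache and transaction calls are the ones the reap code above already uses, and the single-block buffer length is an assumption made for brevity (the real code probes multiple plausible lengths).

/*
 * Sketch only: invalidate any live, non-stale buffer caching one fs block.
 * XBF_LIVESCAN suppresses the length-mismatch assert in the rhashtable
 * compare function and makes the lookup skip buffers that are already
 * XBF_STALE, so this is safe to call on blocks of unknown provenance.
 */
STATIC void
xreap_example_binval_one(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsbno)
{
	struct xfs_buf		*bp = NULL;
	int			error;

	error = xfs_buf_incore(sc->mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(sc->mp, fsbno),
			XFS_FSB_TO_BB(sc->mp, 1), XBF_LIVESCAN, &bp);
	if (error)
		return;		/* no live buffer found; nothing to do */

	/* Join the buffer to the repair transaction and invalidate it. */
	xfs_trans_bjoin(sc->tp, bp);
	xfs_trans_binval(sc->tp, bp);
}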
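[Illustration, not part of the posted series] A minimal sketch of how a repair function might hand a per-AG bitmap of doomed blocks to the xrep_reap_agblocks() helper that patches 8 and 9 introduce. The function name, its parameters, and the XFS_RMAP_OINFO_AG / XFS_AG_RESV_NONE pairing are assumptions for the example; the bitmap and reap calls are the ones added by the series. Per the asserts in xrep_reap_agblocks(), this only applies to AG-oriented scrubbers (sc->ip == NULL) on filesystems with the rmapbt enabled.

/*
 * Sketch only: free a run of per-AG blocks that a rebuilt structure no
 * longer references.  A real caller would populate the bitmap from rmap
 * data the way xrep_agfl() does, rather than from a single extent.
 */
STATIC int
xrep_example_reap_old_blocks(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	struct xagb_bitmap	doomed;
	int			error;

	xagb_bitmap_init(&doomed);

	/* Mark the old per-AG blocks that we want to dispose of. */
	error = xagb_bitmap_set(&doomed, agbno, len);
	if (error)
		goto out;

	/*
	 * Invalidate any live buffers for these blocks and free (or unmap)
	 * them, rolling the transaction or finishing deferred work as the
	 * reap state machine sees fit.
	 */
	error = xrep_reap_agblocks(sc, &doomed, &XFS_RMAP_OINFO_AG,
			XFS_AG_RESV_NONE);
out:
	xagb_bitmap_destroy(&doomed);
	return error;
}

Compared with the old block-at-a-time xrep_reap_extents() path, the batched interface lets the reap code invalidate whole runs of buffers and queue multi-block deferred frees, rolling only when the EFI or buffer-invalidation limits are reached.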