From patchwork Fri May 26 00:43:14 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255868 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D8C66C77B7A for ; Fri, 26 May 2023 00:43:22 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229999AbjEZAnW (ORCPT ); Thu, 25 May 2023 20:43:22 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:46706 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233384AbjEZAnV (ORCPT ); Thu, 25 May 2023 20:43:21 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 305FA194 for ; Thu, 25 May 2023 17:43:16 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id BB25A64BE8 for ; Fri, 26 May 2023 00:43:15 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 28D0EC433EF; Fri, 26 May 2023 00:43:15 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061795; bh=sRXGazUoHSC5lasAnH8OXBxLKX7jRPxDysfvTo3npK8=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=FLOpZs6Uz+AHzRMl5g7eGFEruXuwzmQ5yhkNPpQKsHhis7MVEF2kQrSR1tsFt3Qe6 s73BrhUDftxrVAc+E7ABkB8BnbOvOGOCN/XHjXGc2avXWH/tdEAwIbtLuC0yy4OyWP cQIj1k1Jk/JH0dVZg7LhaU0aSImWCmxhQc/ifezZBmoW6v62sQ8Yl2B2ngs45iZT2O Sc8P6ywQ3LcoM5nv2lPQJh0O8rL4V3vFQ1Ss+dlD5txpx0OjXsHLIKeWyCC2VgYkzr 3woBqhEwRWRsukfQTLyvVEy3zteUUq2eNHcRZiGIZXGR3IqbAZFfW3K9LzDfpYJ7MX QljfYZ72jlRtg== Date: Thu, 25 May 2023 17:43:14 -0700 Subject: [PATCH 1/9] xfs: cull repair code that will never get used From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055631.3728180.16921290558396159405.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong These two functions date from the era when I thought that we could rebuild btrees by creating an alternate root and adding records one by one. In other words, they predate the btree bulk loader. They're not necessary now, so remove them. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/repair.c | 83 ------------------------------------------------- fs/xfs/scrub/repair.h | 6 ---- fs/xfs/scrub/trace.h | 22 ------------- 3 files changed, 111 deletions(-) diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index ac6d8803e660..eedb3863b4ef 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -297,89 +297,6 @@ xrep_calc_ag_resblks( return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz)); } -/* Allocate a block in an AG. 
*/ -int -xrep_alloc_ag_block( - struct xfs_scrub *sc, - const struct xfs_owner_info *oinfo, - xfs_fsblock_t *fsbno, - enum xfs_ag_resv_type resv) -{ - struct xfs_alloc_arg args = {0}; - xfs_agblock_t bno; - int error; - - switch (resv) { - case XFS_AG_RESV_AGFL: - case XFS_AG_RESV_RMAPBT: - error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp, - sc->sa.agf_bp, &bno, 1); - if (error) - return error; - if (bno == NULLAGBLOCK) - return -ENOSPC; - xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false); - *fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno); - if (resv == XFS_AG_RESV_RMAPBT) - xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno); - return 0; - default: - break; - } - - args.tp = sc->tp; - args.mp = sc->mp; - args.pag = sc->sa.pag; - args.oinfo = *oinfo; - args.minlen = 1; - args.maxlen = 1; - args.prod = 1; - args.resv = resv; - - error = xfs_alloc_vextent_this_ag(&args, sc->sa.pag->pag_agno); - if (error) - return error; - if (args.fsbno == NULLFSBLOCK) - return -ENOSPC; - ASSERT(args.len == 1); - *fsbno = args.fsbno; - - return 0; -} - -/* Initialize a new AG btree root block with zero entries. */ -int -xrep_init_btblock( - struct xfs_scrub *sc, - xfs_fsblock_t fsb, - struct xfs_buf **bpp, - xfs_btnum_t btnum, - const struct xfs_buf_ops *ops) -{ - struct xfs_trans *tp = sc->tp; - struct xfs_mount *mp = sc->mp; - struct xfs_buf *bp; - int error; - - trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb), - XFS_FSB_TO_AGBNO(mp, fsb), btnum); - - ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno); - error = xfs_trans_get_buf(tp, mp->m_ddev_targp, - XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0, - &bp); - if (error) - return error; - xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); - xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno); - xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF); - xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1); - bp->b_ops = ops; - *bpp = bp; - - return 0; -} - /* * Reconstructing per-AG Btrees * diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index dce791c679ee..fdccad54936f 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -23,12 +23,6 @@ int xrep_roll_ag_trans(struct xfs_scrub *sc); bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks, enum xfs_ag_resv_type type); xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc); -int xrep_alloc_ag_block(struct xfs_scrub *sc, - const struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno, - enum xfs_ag_resv_type resv); -int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb, - struct xfs_buf **bpp, xfs_btnum_t btnum, - const struct xfs_buf_ops *ops); struct xbitmap; struct xagb_bitmap; diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index b3894daeb86a..9c8c7dd0f262 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -827,28 +827,6 @@ TRACE_EVENT(xrep_refcount_extent_fn, __entry->refcount) ) -TRACE_EVENT(xrep_init_btblock, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, - xfs_btnum_t btnum), - TP_ARGS(mp, agno, agbno, btnum), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(uint32_t, btnum) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->btnum = btnum; - ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x btree %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS)) -) 
TRACE_EVENT(xrep_findroot_block, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, uint32_t magic, uint16_t level), From patchwork Fri May 26 00:43:30 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255869 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CABF1C77B7E for ; Fri, 26 May 2023 00:43:35 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229942AbjEZAne (ORCPT ); Thu, 25 May 2023 20:43:34 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:46790 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232547AbjEZAne (ORCPT ); Thu, 25 May 2023 20:43:34 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id C1CDB12E for ; Thu, 25 May 2023 17:43:31 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 51BDE63A6B for ; Fri, 26 May 2023 00:43:31 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id B4CB8C433EF; Fri, 26 May 2023 00:43:30 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061810; bh=DVT9uKkPHLlul3IQYM57rSIvrmUqBfjxMI0QfbHISUM=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=bi4ZHcUaVckct/vD9NBQXOS//wg4A1tukeSJAOrFDMRNYKduaUhD1LzECObEVcQqx ttrS634wddAIz6cVwD4RF76Q3UtvtgigXKa8pFcBEeoUNfdwC1DBZNYNrY+ozboA0a Omk+DOlqQI9JwylhPu+NqzfO8QLDx/zSxhx6nYr6dEC6bL14xqiL4Nercz9GyzZ/8+ QwMIeL7ThW4iTwV+1STGudX/umkKp0MpRAGjyf1W0tI0u1Aj4Rrv0ETvoS7AK8PT7Q Wf7lDgJBz7dBs/EoTIKwPloQOxgTAYEY1n5XaPdaxrMWT30jOooqdOYrxPibf+UZ4r XXgHVqh7p16fA== Date: Thu, 25 May 2023 17:43:30 -0700 Subject: [PATCH 2/9] xfs: move the post-repair block reaping code to a separate file From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055645.3728180.4825705637972537659.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Reaping blocks after a repair is a complicated affair involving a lot of rmap btree lookups and figuring out if we're going to unmap or free old metadata blocks that might be crosslinked. Eventually, we will need to be able to reap per-AG metadata blocks, bmbt blocks from inode forks, garbage CoW staging extents, and (even later) blocks from btrees rooted in inodes. This results in a lot of reaping code, so we might as well split that off while it's easy. Signed-off-by: Darrick J. 
Wong --- fs/xfs/Makefile | 1 fs/xfs/scrub/agheader_repair.c | 1 fs/xfs/scrub/reap.c | 268 ++++++++++++++++++++++++++++++++++++++++ fs/xfs/scrub/reap.h | 13 ++ fs/xfs/scrub/repair.c | 232 ----------------------------------- fs/xfs/scrub/repair.h | 2 6 files changed, 283 insertions(+), 234 deletions(-) create mode 100644 fs/xfs/scrub/reap.c create mode 100644 fs/xfs/scrub/reap.h diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 16e4eb431230..0a5cebb9802b 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -173,6 +173,7 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y) xfs-y += $(addprefix scrub/, \ agheader_repair.o \ + reap.o \ repair.o \ ) endif diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index bbaa65422c4f..c902a5dee57f 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -26,6 +26,7 @@ #include "scrub/trace.h" #include "scrub/repair.h" #include "scrub/bitmap.h" +#include "scrub/reap.h" /* Superblock */ diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c new file mode 100644 index 000000000000..774dd8a12b2a --- /dev/null +++ b/fs/xfs/scrub/reap.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022-2023 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_refcount_btree.h" +#include "xfs_extent_busy.h" +#include "xfs_ag.h" +#include "xfs_ag_resv.h" +#include "xfs_quota.h" +#include "xfs_qm.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" +#include "scrub/reap.h" + +/* + * Disposal of Blocks from Old Metadata + * + * Now that we've constructed a new btree to replace the damaged one, we want + * to dispose of the blocks that (we think) the old btree was using. + * Previously, we used the rmapbt to collect the extents (bitmap) with the + * rmap owner corresponding to the tree we rebuilt, collected extents for any + * blocks with the same rmap owner that are owned by another data structure + * (sublist), and subtracted sublist from bitmap. In theory the extents + * remaining in bitmap are the old btree's blocks. + * + * Unfortunately, it's possible that the btree was crosslinked with other + * blocks on disk. The rmap data can tell us if there are multiple owners, so + * if the rmapbt says there is an owner of this block other than @oinfo, then + * the block is crosslinked. Remove the reverse mapping and continue. + * + * If there is one rmap record, we can free the block, which removes the + * reverse mapping but doesn't add the block to the free space. Our repair + * strategy is to hope the other metadata objects crosslinked on this block + * will be rebuilt (atop different blocks), thereby removing all the cross + * links. + * + * If there are no rmap records at all, we also free the block. If the btree + * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't + * supposed to be a rmap record and everything is ok. 
For other btrees there + * had to have been an rmap entry for the block to have ended up on @bitmap, + * so if it's gone now there's something wrong and the fs will shut down. + * + * Note: If there are multiple rmap records with only the same rmap owner as + * the btree we're trying to rebuild and the block is indeed owned by another + * data structure with the same rmap owner, then the block will be in sublist + * and therefore doesn't need disposal. If there are multiple rmap records + * with only the same rmap owner but the block is not owned by something with + * the same rmap owner, the block will be freed. + * + * The caller is responsible for locking the AG headers for the entire rebuild + * operation so that nothing else can sneak in and change the AG state while + * we're not looking. We also assume that the caller already invalidated any + * buffers associated with @bitmap. + */ + +static int +xrep_invalidate_block( + uint64_t fsbno, + void *priv) +{ + struct xfs_scrub *sc = priv; + struct xfs_buf *bp; + int error; + + /* Skip AG headers and post-EOFS blocks */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + return 0; + + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); + if (error) + return 0; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + return 0; +} + +/* + * Invalidate buffers for per-AG btree blocks we're dumping. This function + * is not intended for use with file data repairs; we have bunmapi for that. + */ +int +xrep_invalidate_blocks( + struct xfs_scrub *sc, + struct xbitmap *bitmap) +{ + /* + * For each block in each extent, see if there's an incore buffer for + * exactly that block; if so, invalidate it. The buffer cache only + * lets us look for one buffer at a time, so we have to look one block + * at a time. Avoid invalidating AG headers and post-EOFS blocks + * because we never own those; and if we can't TRYLOCK the buffer we + * assume it's owned by someone else. + */ + return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); +} + +/* Information about reaping extents after a repair. */ +struct xrep_reap_state { + struct xfs_scrub *sc; + + /* Reverse mapping owner and metadata reservation type. */ + const struct xfs_owner_info *oinfo; + enum xfs_ag_resv_type resv; +}; + +/* + * Put a block back on the AGFL. + */ +STATIC int +xrep_put_freelist( + struct xfs_scrub *sc, + xfs_agblock_t agbno) +{ + struct xfs_buf *agfl_bp; + int error; + + /* Make sure there's space on the freelist. */ + error = xrep_fix_freelist(sc, true); + if (error) + return error; + + /* + * Since we're "freeing" a lost block onto the AGFL, we have to + * create an rmap for the block prior to merging it or else other + * parts will break. + */ + error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, + &XFS_RMAP_OINFO_AG); + if (error) + return error; + + /* Put the block on the AGFL. */ + error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); + if (error) + return error; + + error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp, + agfl_bp, agbno, 0); + if (error) + return error; + xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, + XFS_EXTENT_BUSY_SKIP_DISCARD); + + return 0; +} + +/* Dispose of a single block. 
*/ +STATIC int +xrep_reap_block( + uint64_t fsbno, + void *priv) +{ + struct xrep_reap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; + struct xfs_btree_cur *cur; + struct xfs_buf *agf_bp = NULL; + xfs_agblock_t agbno; + bool has_other_rmap; + int error; + + ASSERT(sc->ip != NULL || + XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + trace_xrep_dispose_btree_extent(sc->mp, + XFS_FSB_TO_AGNO(sc->mp, fsbno), + XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); + + agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + + /* + * If we are repairing per-inode metadata, we need to read in the AGF + * buffer. Otherwise, we're repairing a per-AG structure, so reuse + * the AGF buffer that the setup functions already grabbed. + */ + if (sc->ip) { + error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); + if (error) + return error; + } else { + agf_bp = sc->sa.agf_bp; + } + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); + + /* Can we find any other rmappings? */ + error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, + &has_other_rmap); + xfs_btree_del_cursor(cur, error); + if (error) + goto out_free; + + /* + * If there are other rmappings, this block is cross linked and must + * not be freed. Remove the reverse mapping and move on. Otherwise, + * we were the only owner of the block, so free the extent, which will + * also remove the rmap. + * + * XXX: XFS doesn't support detecting the case where a single block + * metadata structure is crosslinked with a multi-block structure + * because the buffer cache doesn't detect aliasing problems, so we + * can't fix 100% of crosslinking problems (yet). The verifiers will + * blow on writeout, the filesystem will shut down, and the admin gets + * to run xfs_repair. + */ + if (has_other_rmap) + error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, + 1, rs->oinfo); + else if (rs->resv == XFS_AG_RESV_AGFL) + error = xrep_put_freelist(sc, agbno); + else + error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, + rs->resv); + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + if (error) + return error; + + if (sc->ip) + return xfs_trans_roll_inode(&sc->tp, sc->ip); + return xrep_roll_ag_trans(sc); + +out_free: + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + return error; +} + +/* Dispose of every block of every extent in the bitmap. */ +int +xrep_reap_extents( + struct xfs_scrub *sc, + struct xbitmap *bitmap, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type) +{ + struct xrep_reap_state rs = { + .sc = sc, + .oinfo = oinfo, + .resv = type, + }; + + ASSERT(xfs_has_rmapbt(sc->mp)); + + return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); +} diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h new file mode 100644 index 000000000000..85c8d8a5fe38 --- /dev/null +++ b/fs/xfs/scrub/reap.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022-2023 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_REAP_H__ +#define __XFS_SCRUB_REAP_H__ + +int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *bitmap, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type); + +#endif /* __XFS_SCRUB_REAP_H__ */ diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index eedb3863b4ef..a3eddfcb42fc 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -321,91 +321,8 @@ xrep_calc_ag_resblks( * sublist. 
As with the other btrees we subtract sublist from bitmap, and the * result (since the rmapbt lives in the free space) are the blocks from the * old rmapbt. - * - * Disposal of Blocks from Old per-AG Btrees - * - * Now that we've constructed a new btree to replace the damaged one, we want - * to dispose of the blocks that (we think) the old btree was using. - * Previously, we used the rmapbt to collect the extents (bitmap) with the - * rmap owner corresponding to the tree we rebuilt, collected extents for any - * blocks with the same rmap owner that are owned by another data structure - * (sublist), and subtracted sublist from bitmap. In theory the extents - * remaining in bitmap are the old btree's blocks. - * - * Unfortunately, it's possible that the btree was crosslinked with other - * blocks on disk. The rmap data can tell us if there are multiple owners, so - * if the rmapbt says there is an owner of this block other than @oinfo, then - * the block is crosslinked. Remove the reverse mapping and continue. - * - * If there is one rmap record, we can free the block, which removes the - * reverse mapping but doesn't add the block to the free space. Our repair - * strategy is to hope the other metadata objects crosslinked on this block - * will be rebuilt (atop different blocks), thereby removing all the cross - * links. - * - * If there are no rmap records at all, we also free the block. If the btree - * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't - * supposed to be a rmap record and everything is ok. For other btrees there - * had to have been an rmap entry for the block to have ended up on @bitmap, - * so if it's gone now there's something wrong and the fs will shut down. - * - * Note: If there are multiple rmap records with only the same rmap owner as - * the btree we're trying to rebuild and the block is indeed owned by another - * data structure with the same rmap owner, then the block will be in sublist - * and therefore doesn't need disposal. If there are multiple rmap records - * with only the same rmap owner but the block is not owned by something with - * the same rmap owner, the block will be freed. - * - * The caller is responsible for locking the AG headers for the entire rebuild - * operation so that nothing else can sneak in and change the AG state while - * we're not looking. We also assume that the caller already invalidated any - * buffers associated with @bitmap. */ -static int -xrep_invalidate_block( - uint64_t fsbno, - void *priv) -{ - struct xfs_scrub *sc = priv; - struct xfs_buf *bp; - int error; - - /* Skip AG headers and post-EOFS blocks */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) - return 0; - - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); - if (error) - return 0; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); - return 0; -} - -/* - * Invalidate buffers for per-AG btree blocks we're dumping. This function - * is not intended for use with file data repairs; we have bunmapi for that. - */ -int -xrep_invalidate_blocks( - struct xfs_scrub *sc, - struct xbitmap *bitmap) -{ - /* - * For each block in each extent, see if there's an incore buffer for - * exactly that block; if so, invalidate it. The buffer cache only - * lets us look for one buffer at a time, so we have to look one block - * at a time. 
Avoid invalidating AG headers and post-EOFS blocks - * because we never own those; and if we can't TRYLOCK the buffer we - * assume it's owned by someone else. - */ - return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); -} - /* Ensure the freelist is the correct size. */ int xrep_fix_freelist( @@ -424,155 +341,6 @@ xrep_fix_freelist( can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK); } -/* Information about reaping extents after a repair. */ -struct xrep_reap_state { - struct xfs_scrub *sc; - - /* Reverse mapping owner and metadata reservation type. */ - const struct xfs_owner_info *oinfo; - enum xfs_ag_resv_type resv; -}; - -/* - * Put a block back on the AGFL. - */ -STATIC int -xrep_put_freelist( - struct xfs_scrub *sc, - xfs_agblock_t agbno) -{ - struct xfs_buf *agfl_bp; - int error; - - /* Make sure there's space on the freelist. */ - error = xrep_fix_freelist(sc, true); - if (error) - return error; - - /* - * Since we're "freeing" a lost block onto the AGFL, we have to - * create an rmap for the block prior to merging it or else other - * parts will break. - */ - error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, - &XFS_RMAP_OINFO_AG); - if (error) - return error; - - /* Put the block on the AGFL. */ - error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); - if (error) - return error; - - error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp, - agfl_bp, agbno, 0); - if (error) - return error; - xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, - XFS_EXTENT_BUSY_SKIP_DISCARD); - - return 0; -} - -/* Dispose of a single block. */ -STATIC int -xrep_reap_block( - uint64_t fsbno, - void *priv) -{ - struct xrep_reap_state *rs = priv; - struct xfs_scrub *sc = rs->sc; - struct xfs_btree_cur *cur; - struct xfs_buf *agf_bp = NULL; - xfs_agblock_t agbno; - bool has_other_rmap; - int error; - - ASSERT(sc->ip != NULL || - XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - trace_xrep_dispose_btree_extent(sc->mp, - XFS_FSB_TO_AGNO(sc->mp, fsbno), - XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); - - agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - - /* - * If we are repairing per-inode metadata, we need to read in the AGF - * buffer. Otherwise, we're repairing a per-AG structure, so reuse - * the AGF buffer that the setup functions already grabbed. - */ - if (sc->ip) { - error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); - if (error) - return error; - } else { - agf_bp = sc->sa.agf_bp; - } - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); - - /* Can we find any other rmappings? */ - error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, - &has_other_rmap); - xfs_btree_del_cursor(cur, error); - if (error) - goto out_free; - - /* - * If there are other rmappings, this block is cross linked and must - * not be freed. Remove the reverse mapping and move on. Otherwise, - * we were the only owner of the block, so free the extent, which will - * also remove the rmap. - * - * XXX: XFS doesn't support detecting the case where a single block - * metadata structure is crosslinked with a multi-block structure - * because the buffer cache doesn't detect aliasing problems, so we - * can't fix 100% of crosslinking problems (yet). The verifiers will - * blow on writeout, the filesystem will shut down, and the admin gets - * to run xfs_repair. 
- */ - if (has_other_rmap) - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - else if (rs->resv == XFS_AG_RESV_AGFL) - error = xrep_put_freelist(sc, agbno); - else - error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, - rs->resv); - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - if (error) - return error; - - if (sc->ip) - return xfs_trans_roll_inode(&sc->tp, sc->ip); - return xrep_roll_ag_trans(sc); - -out_free: - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - return error; -} - -/* Dispose of every block of every extent in the bitmap. */ -int -xrep_reap_extents( - struct xfs_scrub *sc, - struct xbitmap *bitmap, - const struct xfs_owner_info *oinfo, - enum xfs_ag_resv_type type) -{ - struct xrep_reap_state rs = { - .sc = sc, - .oinfo = oinfo, - .resv = type, - }; - - ASSERT(xfs_has_rmapbt(sc->mp)); - - return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); -} - /* * Finding per-AG Btree Roots for AGF/AGI Reconstruction * diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index fdccad54936f..601caa70f870 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -29,8 +29,6 @@ struct xagb_bitmap; int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink); int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist); -int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist, - const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); struct xrep_find_ag_btree { /* in: rmap owner of the btree we're looking for */ From patchwork Fri May 26 00:43:45 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255870 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id F1B96C77B7E for ; Fri, 26 May 2023 00:43:49 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232713AbjEZAnt (ORCPT ); Thu, 25 May 2023 20:43:49 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:46848 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232547AbjEZAns (ORCPT ); Thu, 25 May 2023 20:43:48 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 5097D12E for ; Thu, 25 May 2023 17:43:47 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id E0E1D63A6B for ; Fri, 26 May 2023 00:43:46 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 51488C433D2; Fri, 26 May 2023 00:43:46 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061826; bh=Eg5a5AoxMbHJq4hCkQ8GU5CGP2+b7xRlY8Bpnca4638=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=sznZSQolJ+QCTKTWJHR5/jZ04N/Zm6KKVxoSNT+RyzhZcj+r5uD5OtMsTkjuiJro/ HfSoNk03yW9eqtBdkogwFtNW53kQFDqQ8C+BlJSrJ75ghL107CiE2yqnSrnQnrYs44 il0JhU9/uhF9NPM+gkt1+K+Lg3uYtEQleq/H67q1Nf3SniyCFTcVDNG2qR361hCX7q 0WalJnG81OYATgwJqIkTDaNQp6KO2OXp1ohNwBsg+Dc3t01iiTREznZruG2gnv3xEb I/bSBSaK5p0FO3LUZAvdks9k4sIPHlus/VbQnzpPfvV6rW7d5+DinYx0yCdhGqHFBZ QYTZF1r+FBQwA== Date: Thu, 25 May 2023 17:43:45 -0700 Subject: 
[PATCH 3/9] xfs: only invalidate blocks if we're going to free them From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055660.3728180.10913177215578833888.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong When we're discarding old btree blocks after a repair, only invalidate the buffers for the ones that we're freeing -- if the metadata was crosslinked with another data structure, we don't want to touch it. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 96 +++++++++++++++++++++---------------------------- fs/xfs/scrub/repair.h | 1 - 2 files changed, 42 insertions(+), 55 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 774dd8a12b2a..b332b0e8e259 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -70,54 +70,10 @@ * * The caller is responsible for locking the AG headers for the entire rebuild * operation so that nothing else can sneak in and change the AG state while - * we're not looking. We also assume that the caller already invalidated any - * buffers associated with @bitmap. + * we're not looking. We must also invalidate any buffers associated with + * @bitmap. */ -static int -xrep_invalidate_block( - uint64_t fsbno, - void *priv) -{ - struct xfs_scrub *sc = priv; - struct xfs_buf *bp; - int error; - - /* Skip AG headers and post-EOFS blocks */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) - return 0; - - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); - if (error) - return 0; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); - return 0; -} - -/* - * Invalidate buffers for per-AG btree blocks we're dumping. This function - * is not intended for use with file data repairs; we have bunmapi for that. - */ -int -xrep_invalidate_blocks( - struct xfs_scrub *sc, - struct xbitmap *bitmap) -{ - /* - * For each block in each extent, see if there's an incore buffer for - * exactly that block; if so, invalidate it. The buffer cache only - * lets us look for one buffer at a time, so we have to look one block - * at a time. Avoid invalidating AG headers and post-EOFS blocks - * because we never own those; and if we can't TRYLOCK the buffer we - * assume it's owned by someone else. - */ - return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); -} - /* Information about reaping extents after a repair. */ struct xrep_reap_state { struct xfs_scrub *sc; @@ -127,9 +83,7 @@ struct xrep_reap_state { enum xfs_ag_resv_type resv; }; -/* - * Put a block back on the AGFL. - */ +/* Put a block back on the AGFL. */ STATIC int xrep_put_freelist( struct xfs_scrub *sc, @@ -168,6 +122,37 @@ xrep_put_freelist( return 0; } +/* Try to invalidate the incore buffer for a block that we're about to free. */ +STATIC void +xrep_block_reap_binval( + struct xfs_scrub *sc, + xfs_fsblock_t fsbno) +{ + struct xfs_buf *bp = NULL; + int error; + + /* + * If there's an incore buffer for exactly this block, invalidate it. + * Avoid invalidating AG headers and post-EOFS blocks because we never + * own those. + */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + return; + + /* + * We assume that the lack of any other known owners means that the + * buffer can be locked without risk of deadlocking. 
+ */ + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), 0, &bp); + if (error) + return; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); +} + /* Dispose of a single block. */ STATIC int xrep_reap_block( @@ -225,14 +210,17 @@ xrep_reap_block( * blow on writeout, the filesystem will shut down, and the admin gets * to run xfs_repair. */ - if (has_other_rmap) - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - else if (rs->resv == XFS_AG_RESV_AGFL) + if (has_other_rmap) { + error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, 1, + rs->oinfo); + } else if (rs->resv == XFS_AG_RESV_AGFL) { + xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); - else + } else { + xrep_block_reap_binval(sc, fsbno); error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, rs->resv); + } if (agf_bp != sc->sa.agf_bp) xfs_trans_brelse(sc->tp, agf_bp); if (error) diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index 601caa70f870..e01d63a4a93b 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -28,7 +28,6 @@ struct xbitmap; struct xagb_bitmap; int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink); -int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist); struct xrep_find_ag_btree { /* in: rmap owner of the btree we're looking for */ From patchwork Fri May 26 00:44:01 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255871 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 24220C77B7E for ; Fri, 26 May 2023 00:44:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233104AbjEZAoE (ORCPT ); Thu, 25 May 2023 20:44:04 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:46882 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232547AbjEZAoD (ORCPT ); Thu, 25 May 2023 20:44:03 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 00FEF12E for ; Thu, 25 May 2023 17:44:02 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 8BEFC616EF for ; Fri, 26 May 2023 00:44:02 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id ED5EEC433EF; Fri, 26 May 2023 00:44:01 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061842; bh=mDJalDCepRk2uXidX8eOdnYfn2Eel0GSGjaiMeEyYE4=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=MHiZ02C9Mc9xH+KvbwftwCLkqYQuhnhxySRuhqpP43fjePhoZrHfl8af9VAqmeuHh f6yslT/I7z4ynywu849IZN/lrORXMan/BE0wBAeBorqIr5uCnT99cwSF6q9PW2woKc FaxlZ4NoDFNFyS33pLhXyef/pzxstw2MX6GxrnOcHKH4mZ60BBto2KVNXaBdeuMFZQ Qe3T5UyRavAYkErObTJ7Fvre4QWSpLg6ZqQV3OEg8R044yALIRazRYXmq/AlK8A5N0 De8fLBHy3PbrEItuEJCrDNiDs2XDTCSvW4L4yCmG0FcZtnxGaLZPNiLKl4jSodPQNu 9615KcdhfcWPw== Date: Thu, 25 May 2023 17:44:01 -0700 Subject: [PATCH 4/9] xfs: only allow reaping of per-AG blocks in xrep_reap_extents From: "Darrick J. 
Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055675.3728180.51096833426731588.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Now that we've refactored btree cursors to require the caller to pass in a perag structure, there are numerous problems in xrep_reap_extents if it's being called to reap extents for an inode metadata repair. We don't have any repair functions that can do that, so drop the support for now. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 45 +++++++++++++-------------------------------- 1 file changed, 13 insertions(+), 32 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index b332b0e8e259..bc180171d0cb 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -162,40 +162,30 @@ xrep_reap_block( struct xrep_reap_state *rs = priv; struct xfs_scrub *sc = rs->sc; struct xfs_btree_cur *cur; - struct xfs_buf *agf_bp = NULL; + xfs_agnumber_t agno; xfs_agblock_t agbno; bool has_other_rmap; int error; - ASSERT(sc->ip != NULL || - XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - trace_xrep_dispose_btree_extent(sc->mp, - XFS_FSB_TO_AGNO(sc->mp, fsbno), - XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); - + agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - /* - * If we are repairing per-inode metadata, we need to read in the AGF - * buffer. Otherwise, we're repairing a per-AG structure, so reuse - * the AGF buffer that the setup functions already grabbed. - */ - if (sc->ip) { - error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); - if (error) - return error; - } else { - agf_bp = sc->sa.agf_bp; + trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1); + + /* We don't support reaping file extents yet. */ + if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { + ASSERT(0); + return -EFSCORRUPTED; } - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); + + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, sc->sa.pag); /* Can we find any other rmappings? */ error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, &has_other_rmap); xfs_btree_del_cursor(cur, error); if (error) - goto out_free; + return error; /* * If there are other rmappings, this block is cross linked and must @@ -211,8 +201,8 @@ xrep_reap_block( * to run xfs_repair. */ if (has_other_rmap) { - error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, 1, - rs->oinfo); + error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, + 1, rs->oinfo); } else if (rs->resv == XFS_AG_RESV_AGFL) { xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); @@ -221,19 +211,10 @@ xrep_reap_block( error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, rs->resv); } - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); if (error) return error; - if (sc->ip) - return xfs_trans_roll_inode(&sc->tp, sc->ip); return xrep_roll_ag_trans(sc); - -out_free: - if (agf_bp != sc->sa.agf_bp) - xfs_trans_brelse(sc->tp, agf_bp); - return error; } /* Dispose of every block of every extent in the bitmap. 
*/ From patchwork Fri May 26 00:44:17 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255872 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 94E07C77B7A for ; Fri, 26 May 2023 00:44:21 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233318AbjEZAoU (ORCPT ); Thu, 25 May 2023 20:44:20 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:46936 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232547AbjEZAoT (ORCPT ); Thu, 25 May 2023 20:44:19 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 8590012E for ; Thu, 25 May 2023 17:44:18 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 1C5B963A6B for ; Fri, 26 May 2023 00:44:18 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 8560FC433EF; Fri, 26 May 2023 00:44:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061857; bh=VPYlxp2OwhsreJ/5RuUutByrmdgRfdeIpMRtwnQ7c4I=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=nUY7SHKXTjtGUcLEEIMJlGCsgO7Uf5I7zPSXxOIkv90Ew+SlX5I3rxVZuR6iOyY6v cPiDNXixLS8II8yJUukiwnC8P0ILtkmz7ljqy+epziI1VxZLY2Wkqqj8Ce0ZLvCShw KAwfRTZdLU/WWAlfmkTOSBMkQTp0rInZ8ki/24coqPW+Xao1KyG6msAK934KPLhqfX FoNR4pKVOP9MaMo9EyQ051d0AunyVbBs4S3q4YmTKSXd8CMuK9B5V9WD+7MITvTeiv U8L5giLRRNFMG8qLsHjKGUef5jzp7LF5hst4pX6uOz8I6+Sq4Z8TnOA7mWrACPtJWL bVF+4Y7JrIIXQ== Date: Thu, 25 May 2023 17:44:17 -0700 Subject: [PATCH 5/9] xfs: use deferred frees to reap old btree blocks From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055689.3728180.5922242663769047903.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Use deferred frees (EFIs) to reap the blocks of a btree that we just replaced. This helps us to shrink the window in which those old blocks could be lost due to a system crash, though we try to flush the EFIs every few hundred blocks so that we don't also overflow the transaction reservations during and after we commit the new btree. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/reap.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index bc180171d0cb..e9839f3905e7 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -26,6 +26,7 @@ #include "xfs_ag_resv.h" #include "xfs_quota.h" #include "xfs_qm.h" +#include "xfs_bmap.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -81,6 +82,9 @@ struct xrep_reap_state { /* Reverse mapping owner and metadata reservation type. */ const struct xfs_owner_info *oinfo; enum xfs_ag_resv_type resv; + + /* Number of deferred reaps attached to the current transaction. 
*/ + unsigned int deferred; }; /* Put a block back on the AGFL. */ @@ -165,6 +169,7 @@ xrep_reap_block( xfs_agnumber_t agno; xfs_agblock_t agbno; bool has_other_rmap; + bool need_roll = true; int error; agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); @@ -207,13 +212,22 @@ xrep_reap_block( xrep_block_reap_binval(sc, fsbno); error = xrep_put_freelist(sc, agbno); } else { + /* + * Use deferred frees to get rid of the old btree blocks to try + * to minimize the window in which we could crash and lose the + * old blocks. However, we still need to roll the transaction + * every 100 or so EFIs so that we don't exceed the log + * reservation. + */ xrep_block_reap_binval(sc, fsbno); - error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, - rs->resv); + __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, true); + rs->deferred++; + need_roll = rs->deferred > 100; } - if (error) + if (error || !need_roll) return error; + rs->deferred = 0; return xrep_roll_ag_trans(sc); } @@ -230,8 +244,13 @@ xrep_reap_extents( .oinfo = oinfo, .resv = type, }; + int error; ASSERT(xfs_has_rmapbt(sc->mp)); - return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); + error = xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); + if (error || rs.deferred == 0) + return error; + + return xrep_roll_ag_trans(sc); } From patchwork Fri May 26 00:44:32 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13255873 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 2D43AC77B7A for ; Fri, 26 May 2023 00:44:37 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233384AbjEZAof (ORCPT ); Thu, 25 May 2023 20:44:35 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:47004 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229885AbjEZAof (ORCPT ); Thu, 25 May 2023 20:44:35 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [IPv6:2604:1380:4641:c500::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 2677612E for ; Thu, 25 May 2023 17:44:34 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id AF87A64BE0 for ; Fri, 26 May 2023 00:44:33 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1A2C4C433EF; Fri, 26 May 2023 00:44:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061873; bh=LgKaEv3yQEsA29q5yHD5QVtJ/H7Zwqtb7O1kgqO9I9A=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=DRPqFjMovEm0IW5ghfQfAC1TCrHLY5ZlLBo5epgQgLhMj0RSdV/ZD+9z/4+QDYFHd uBtvsNOALpuGtfQBRQUHTddsxJjp4EWgBEohqcqzoTbMaJBB5wyJpBgy1Igt5CtkIH PRVH3sB6BFfzoiAgw1JmppdvkyN0dNqibDB+u8r+mNmsnkR5f1xO9Zl7bX+nNr05pi CuOirmPk1PjG3IJYDII8EbRGIJJeYuocZiVR9M/tUJYxdcjWXyuQY15ob6ifJz8/Z1 BIadR0/1J+O4aStLj5VZPOvurhxnH0yLjwUjIkfJCvU8P5O71A2jPikqEmLHBs9kNT wGfojt+rgEjNA== Date: Thu, 25 May 2023 17:44:32 -0700 Subject: [PATCH 6/9] xfs: rearrange xrep_reap_block to make future code flow easier From: "Darrick J. 
Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055703.3728180.7446040903727951043.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong Rearrange the logic inside xrep_reap_block to make it more obvious that crosslinked metadata blocks are handled differently. Add a couple of tracepoints so that we can tell what's going on at the end of a btree rebuild operation. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/agheader_repair.c | 6 +++--- fs/xfs/scrub/reap.c | 19 ++++++++++++++----- fs/xfs/scrub/trace.h | 17 ++++++++--------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index c902a5dee57f..b8d28cfec286 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -646,13 +646,13 @@ xrep_agfl_fill( xfs_fsblock_t fsbno = start; int error; + trace_xrep_agfl_insert(sc->sa.pag, XFS_FSB_TO_AGBNO(sc->mp, start), + len); + while (fsbno < start + len && af->fl_off < af->flcount) af->agfl_bno[af->fl_off++] = cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++)); - trace_xrep_agfl_insert(sc->mp, sc->sa.pag->pag_agno, - XFS_FSB_TO_AGBNO(sc->mp, start), len); - error = xbitmap_set(&af->used_extents, start, fsbno - 1); if (error) return error; diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index e9839f3905e7..30e61315feb0 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -175,8 +175,6 @@ xrep_reap_block( agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1); - /* We don't support reaping file extents yet. */ if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { ASSERT(0); @@ -206,10 +204,21 @@ xrep_reap_block( * to run xfs_repair. */ if (has_other_rmap) { + trace_xrep_dispose_unmap_extent(sc->sa.pag, agbno, 1); + error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, rs->oinfo); - } else if (rs->resv == XFS_AG_RESV_AGFL) { - xrep_block_reap_binval(sc, fsbno); + if (error) + return error; + + goto roll_out; + } + + trace_xrep_dispose_free_extent(sc->sa.pag, agbno, 1); + + xrep_block_reap_binval(sc, fsbno); + + if (rs->resv == XFS_AG_RESV_AGFL) { error = xrep_put_freelist(sc, agbno); } else { /* @@ -219,7 +228,6 @@ xrep_reap_block( * every 100 or so EFIs so that we don't exceed the log * reservation. 
*/ - xrep_block_reap_binval(sc, fsbno); __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, true); rs->deferred++; need_roll = rs->deferred > 100; @@ -227,6 +235,7 @@ xrep_reap_block( if (error || !need_roll) return error; +roll_out: rs->deferred = 0; return xrep_roll_ag_trans(sc); } diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 9c8c7dd0f262..71bfab3d2d29 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -729,9 +729,8 @@ TRACE_EVENT(xchk_refcount_incorrect, #if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) DECLARE_EVENT_CLASS(xrep_extent_class, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(mp, agno, agbno, len), + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(pag, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) @@ -739,8 +738,8 @@ DECLARE_EVENT_CLASS(xrep_extent_class, __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; + __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->agno = pag->pag_agno; __entry->agbno = agbno; __entry->len = len; ), @@ -752,10 +751,10 @@ DECLARE_EVENT_CLASS(xrep_extent_class, ); #define DEFINE_REPAIR_EXTENT_EVENT(name) \ DEFINE_EVENT(xrep_extent_class, name, \ - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ - xfs_agblock_t agbno, xfs_extlen_t len), \ - TP_ARGS(mp, agno, agbno, len)) -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_btree_extent); + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(pag, agbno, len)) +DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_unmap_extent); +DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_free_extent); DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert); DECLARE_EVENT_CLASS(xrep_rmap_class, From patchwork Fri May 26 00:44:48 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13255881 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7A8FDC77B7E for ; Fri, 26 May 2023 00:45:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235302AbjEZApA (ORCPT ); Thu, 25 May 2023 20:45:00 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:47040 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S239896AbjEZAov (ORCPT ); Thu, 25 May 2023 20:44:51 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id B0CD1195 for ; Thu, 25 May 2023 17:44:49 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 4DF58616EF for ; Fri, 26 May 2023 00:44:49 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id AC32BC433D2; Fri, 26 May 2023 00:44:48 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061888; bh=qX63S54CCFrmm1d+RbL713OSMzj8rEsvf4I5QDUwFhw=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=DLkiCl0ZBnTW1rupDsFLj/XH/wR7zY83vjLu2bKOGt8sPfrLlgaAtHRSlXdYuU+LT rRpUj+wqhctC+B94WJfg+9J9MraJU9MfHeY6yfni9wJzKpMIvB40v8ILTiNuikb75G c/ONWKg0nEEkBJQriVKe6de/HA2YfCf4RKFRvj0g5cDB6zVEnGMROH5KV1ygObFg1Q Gb9FIzsxWrZ6V3imascdQGRygL3bPGyAnnC/wdivcSh+rsUz0myrUsb//9Bwdp74Vw jDKWSG6WQKPTxyLOOapnp/00wfkCmgjzGnauhhexsG2IF9d9QMzRke6kjh5GOPbU0O 8DfFt/ZWkCRDg== Date: Thu, 25 May 2023 17:44:48 -0700 Subject: [PATCH 7/9] xfs: ignore stale buffers when scanning the buffer cache From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055718.3728180.15781485173918712234.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong After an online repair, we need to invalidate buffers representing the blocks from the old metadata that we're replacing. It's possible that parts of a tree that were previously cached in memory are no longer accessible due to media failure or other corruption on interior nodes, so repair figures out the old blocks from the reverse mapping data and scans the buffer cache directly. Unfortunately, the current buffer cache code triggers asserts if the rhashtable lookup finds a non-stale buffer of a different length than the key we searched for. For regular operation this is desirable, but for this repair procedure, we don't care since we're going to forcibly stale the buffer anyway. Add an internal lookup flag to avoid the assert. Signed-off-by: Darrick J. 
Wong --- fs/xfs/scrub/reap.c | 2 +- fs/xfs/xfs_buf.c | 5 ++++- fs/xfs/xfs_buf.h | 10 ++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 30e61315feb0..ca75c22481d2 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -149,7 +149,7 @@ xrep_block_reap_binval( */ error = xfs_buf_incore(sc->mp->m_ddev_targp, XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), 0, &bp); + XFS_FSB_TO_BB(sc->mp, 1), XBF_BCACHE_SCAN, &bp); if (error) return; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 15d1e5a7c2d3..b31e6d09a056 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -481,7 +481,8 @@ _xfs_buf_obj_cmp( * reallocating a busy extent. Skip this buffer and * continue searching for an exact match. */ - ASSERT(bp->b_flags & XBF_STALE); + if (!(map->bm_flags & XBM_IGNORE_LENGTH_MISMATCH)) + ASSERT(bp->b_flags & XBF_STALE); return 1; } return 0; @@ -682,6 +683,8 @@ xfs_buf_get_map( int error; int i; + if (flags & XBF_BCACHE_SCAN) + cmap.bm_flags |= XBM_IGNORE_LENGTH_MISMATCH; for (i = 0; i < nmaps; i++) cmap.bm_len += map[i].bm_len; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 549c60942208..d6e8c3bab9f6 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -44,6 +44,11 @@ struct xfs_buf; #define _XBF_DELWRI_Q (1u << 22)/* buffer on a delwri queue */ /* flags used only as arguments to access routines */ +/* + * We're scanning the buffer cache; do not warn about lookup mismatches. + * Only online repair should use this. + */ +#define XBF_BCACHE_SCAN (1u << 28) #define XBF_INCORE (1u << 29)/* lookup only, return if found in cache */ #define XBF_TRYLOCK (1u << 30)/* lock requested, but do not wait */ #define XBF_UNMAPPED (1u << 31)/* do not map the buffer */ @@ -67,6 +72,7 @@ typedef unsigned int xfs_buf_flags_t; { _XBF_KMEM, "KMEM" }, \ { _XBF_DELWRI_Q, "DELWRI_Q" }, \ /* The following interface flags should never be set */ \ + { XBF_BCACHE_SCAN, "BCACHE_SCAN" }, \ { XBF_INCORE, "INCORE" }, \ { XBF_TRYLOCK, "TRYLOCK" }, \ { XBF_UNMAPPED, "UNMAPPED" } @@ -114,8 +120,12 @@ typedef struct xfs_buftarg { struct xfs_buf_map { xfs_daddr_t bm_bn; /* block number for I/O */ int bm_len; /* size of I/O */ + unsigned int bm_flags; }; +/* Don't complain about live buffers with the wrong length during lookup. */ +#define XBM_IGNORE_LENGTH_MISMATCH (1U << 0) + #define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; From patchwork Fri May 26 00:45:03 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13255882 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C8473C77B7E for ; Fri, 26 May 2023 00:45:09 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S239896AbjEZApI (ORCPT ); Thu, 25 May 2023 20:45:08 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:47104 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S241885AbjEZApI (ORCPT ); Thu, 25 May 2023 20:45:08 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 6624A194 for ; Thu, 25 May 2023 17:45:05 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id DBEEA60AAD for ; Fri, 26 May 2023 00:45:04 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 41359C433D2; Fri, 26 May 2023 00:45:04 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061904; bh=UL0Pn4eTySpLfN6hZfrZTlvgVjZwwsiLawlZDpf9ChQ=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=BpCsqsCTqJQGBUTBl4XbOu8dymR6O6FN/1f2lMWXNqqa8cjJ4TlwMxFcVAqV7e8sr WH5DIOu1WF2zMliqUUo8IvkrUOlw4gt2X8Hz0UXQUsNF0q7BlGuQYgg03U1D0WGKud 1JTfQAydteJ6tMLNHWy2mLkngJe4Dnf9Ua/WlHcCB2dDwZZKVv9lvgCHmOWxU5L5m7 dDuRoVP/T2rEII01Cc3fegZA4nACHbCPYg1Gi2bWQR3xirm9B/4+ZQ1zOOCPNdwYSt macRqtxAEfNdmHHq4S41GxMp60Y9PbSzZUf34C+tY0xamIyaxBeTabR9XK6dzdRToa W0xfkVogkuwHA== Date: Thu, 25 May 2023 17:45:03 -0700 Subject: [PATCH 8/9] xfs: reap large AG metadata extents when possible From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055733.3728180.3134566782464969180.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong When we're freeing extents that have been set in a bitmap, break the bitmap extent into multiple sub-extents organized by fate, and reap the extents. This enables us to dispose of old resources more efficiently than doing them block by block. While we're at it, rename the reaping functions to make it clear that they're reaping per-AG extents. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/agheader_repair.c | 2 fs/xfs/scrub/bitmap.c | 37 ---- fs/xfs/scrub/bitmap.h | 4 fs/xfs/scrub/reap.c | 382 ++++++++++++++++++++++++++++++++-------- fs/xfs/scrub/reap.h | 2 fs/xfs/scrub/repair.c | 51 +++++ fs/xfs/scrub/repair.h | 1 fs/xfs/scrub/trace.h | 37 ++++ 8 files changed, 395 insertions(+), 121 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index b8d28cfec286..9ad2987ed6e5 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -775,7 +775,7 @@ xrep_agfl( goto err; /* Dump any AGFL overflow. 
*/ - error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, + error = xrep_reap_ag_metadata(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, XFS_AG_RESV_AGFL); err: xbitmap_destroy(&agfl_extents); diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 0c959be396ea..d926d708f295 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -385,43 +385,6 @@ xbitmap_walk( return error; } -struct xbitmap_walk_bits { - xbitmap_walk_bits_fn fn; - void *priv; -}; - -/* Walk all the bits in a run. */ -static int -xbitmap_walk_bits_in_run( - uint64_t start, - uint64_t len, - void *priv) -{ - struct xbitmap_walk_bits *wb = priv; - uint64_t i; - int error = 0; - - for (i = start; i < start + len; i++) { - error = wb->fn(i, wb->priv); - if (error) - break; - } - - return error; -} - -/* Call a function for every set bit in this bitmap. */ -int -xbitmap_walk_bits( - struct xbitmap *bitmap, - xbitmap_walk_bits_fn fn, - void *priv) -{ - struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv}; - - return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb); -} - /* Does this bitmap have no bits set at all? */ bool xbitmap_empty( diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 84981724ecaf..a3ad564d94b7 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -33,10 +33,6 @@ typedef int (*xbitmap_walk_fn)(uint64_t start, uint64_t len, void *priv); int xbitmap_walk(struct xbitmap *bitmap, xbitmap_walk_fn fn, void *priv); -typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv); -int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn, - void *priv); - bool xbitmap_empty(struct xbitmap *bitmap); bool xbitmap_test(struct xbitmap *bitmap, uint64_t start, uint64_t *len); diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index ca75c22481d2..4f57b428ffe2 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -27,6 +27,10 @@ #include "xfs_quota.h" #include "xfs_qm.h" #include "xfs_bmap.h" +#include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_remote.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -76,20 +80,29 @@ */ /* Information about reaping extents after a repair. */ -struct xrep_reap_state { +struct xreap_state { struct xfs_scrub *sc; /* Reverse mapping owner and metadata reservation type. */ const struct xfs_owner_info *oinfo; enum xfs_ag_resv_type resv; + /* If true, roll the transaction before reaping the next extent. */ + bool force_roll; + /* Number of deferred reaps attached to the current transaction. */ unsigned int deferred; + + /* Number of invalidated buffers logged to the current transaction. */ + unsigned int invalidated; + + /* Number of deferred reaps queued during the whole reap sequence. */ + unsigned long long total_deferred; }; /* Put a block back on the AGFL. */ STATIC int -xrep_put_freelist( +xreap_put_freelist( struct xfs_scrub *sc, xfs_agblock_t agbno) { @@ -126,69 +139,162 @@ xrep_put_freelist( return 0; } -/* Try to invalidate the incore buffer for a block that we're about to free. */ +/* Are there any uncommitted reap operations? */ +static inline bool xreap_dirty(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->deferred) + return true; + if (rs->invalidated) + return true; + if (rs->total_deferred) + return true; + return false; +} + +#define XREAP_MAX_DEFERRED (128) +#define XREAP_MAX_BINVAL (2048) + +/* + * Decide if we want to roll the transaction after reaping an extent. 
We don't + * want to overrun the transaction reservation, so we prohibit more than + * 128 EFIs per transaction. For the same reason, we limit the number + * of buffer invalidations to 2048. + */ +static inline bool xreap_want_roll(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->deferred > XREAP_MAX_DEFERRED) + return true; + if (rs->invalidated > XREAP_MAX_BINVAL) + return true; + return false; +} + +static inline void xreap_reset(struct xreap_state *rs) +{ + rs->total_deferred += rs->deferred; + rs->deferred = 0; + rs->invalidated = 0; + rs->force_roll = false; +} + +#define XREAP_MAX_DEFER_CHAIN (2048) + +/* + * Decide if we want to finish the deferred ops that are attached to the scrub + * transaction. We don't want to queue huge chains of deferred ops because + * that can consume a lot of log space and kernel memory. Hence we trigger a + * xfs_defer_finish if there are more than 2048 deferred reap operations or the + * caller did some real work. + */ +static inline bool +xreap_want_defer_finish(const struct xreap_state *rs) +{ + if (rs->force_roll) + return true; + if (rs->total_deferred > XREAP_MAX_DEFER_CHAIN) + return true; + return false; +} + +static inline void xreap_defer_finish_reset(struct xreap_state *rs) +{ + rs->total_deferred = 0; + rs->deferred = 0; + rs->invalidated = 0; + rs->force_roll = false; +} + +/* Try to invalidate the incore buffers for an extent that we're freeing. */ STATIC void -xrep_block_reap_binval( - struct xfs_scrub *sc, - xfs_fsblock_t fsbno) +xreap_agextent_binval( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_extlen_t *aglenp) { - struct xfs_buf *bp = NULL; - int error; + struct xfs_scrub *sc = rs->sc; + struct xfs_perag *pag = sc->sa.pag; + struct xfs_mount *mp = sc->mp; + xfs_agnumber_t agno = sc->sa.pag->pag_agno; + xfs_agblock_t agbno_next = agbno + *aglenp; + xfs_agblock_t bno = agbno; /* - * If there's an incore buffer for exactly this block, invalidate it. * Avoid invalidating AG headers and post-EOFS blocks because we never * own those. */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) + if (!xfs_verify_agbno(pag, agbno) || + !xfs_verify_agbno(pag, agbno_next - 1)) return; /* - * We assume that the lack of any other known owners means that the - * buffer can be locked without risk of deadlocking. + * If there are incore buffers for these blocks, invalidate them. We + * assume that the lack of any other known owners means that the buffer + * can be locked without risk of deadlocking. The buffer cache cannot + * detect aliasing, so employ nested loops to scan for incore buffers + * of any plausible size. */ - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_BCACHE_SCAN, &bp); - if (error) - return; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); + while (bno < agbno_next) { + xfs_agblock_t fsbcount; + xfs_agblock_t max_fsbs; + + /* + * Max buffer size is the max remote xattr buffer size, which + * is one fs block larger than 64k. 
+ */ + max_fsbs = min_t(xfs_agblock_t, agbno_next - bno, + xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX)); + + for (fsbcount = 1; fsbcount < max_fsbs; fsbcount++) { + struct xfs_buf *bp = NULL; + xfs_daddr_t daddr; + int error; + + daddr = XFS_AGB_TO_DADDR(mp, agno, bno); + error = xfs_buf_incore(mp->m_ddev_targp, daddr, + XFS_FSB_TO_BB(mp, fsbcount), + XBF_BCACHE_SCAN, &bp); + if (error) + continue; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + rs->invalidated++; + + /* + * Stop invalidating if we've hit the limit; we should + * still have enough reservation left to free however + * far we've gotten. + */ + if (rs->invalidated > XREAP_MAX_BINVAL) { + *aglenp -= agbno_next - bno; + goto out; + } + } + + bno++; + } + +out: + trace_xreap_agextent_binval(sc->sa.pag, agbno, *aglenp); } -/* Dispose of a single block. */ +/* Dispose of a single AG extent. */ STATIC int -xrep_reap_block( - uint64_t fsbno, - void *priv) +xreap_agextent( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_extlen_t *aglenp, + bool crosslinked) { - struct xrep_reap_state *rs = priv; - struct xfs_scrub *sc = rs->sc; - struct xfs_btree_cur *cur; - xfs_agnumber_t agno; - xfs_agblock_t agbno; - bool has_other_rmap; - bool need_roll = true; - int error; + struct xfs_scrub *sc = rs->sc; + xfs_fsblock_t fsbno; + int error = 0; - agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); - agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); - - /* We don't support reaping file extents yet. */ - if (sc->ip != NULL || sc->sa.pag->pag_agno != agno) { - ASSERT(0); - return -EFSCORRUPTED; - } - - cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, sc->sa.pag); - - /* Can we find any other rmappings? */ - error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, - &has_other_rmap); - xfs_btree_del_cursor(cur, error); - if (error) - return error; + fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, agbno); /* * If there are other rmappings, this block is cross linked and must @@ -203,52 +309,172 @@ xrep_reap_block( * blow on writeout, the filesystem will shut down, and the admin gets * to run xfs_repair. */ - if (has_other_rmap) { - trace_xrep_dispose_unmap_extent(sc->sa.pag, agbno, 1); + if (crosslinked) { + trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp); - error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, - 1, rs->oinfo); - if (error) - return error; - - goto roll_out; + rs->force_roll = true; + return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, + *aglenp, rs->oinfo); } - trace_xrep_dispose_free_extent(sc->sa.pag, agbno, 1); + trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp); - xrep_block_reap_binval(sc, fsbno); + /* + * Invalidate as many buffers as we can, starting at agbno. If this + * function sets *aglenp to zero, the transaction is full of logged + * buffer invalidations, so we need to return early so that we can + * roll and retry. + */ + xreap_agextent_binval(rs, agbno, aglenp); + if (*aglenp == 0) { + ASSERT(xreap_want_roll(rs)); + return 0; + } if (rs->resv == XFS_AG_RESV_AGFL) { - error = xrep_put_freelist(sc, agbno); + ASSERT(*aglenp == 1); + error = xreap_put_freelist(sc, agbno); + rs->force_roll = true; } else { /* * Use deferred frees to get rid of the old btree blocks to try * to minimize the window in which we could crash and lose the - * old blocks. However, we still need to roll the transaction - * every 100 or so EFIs so that we don't exceed the log - * reservation. + * old blocks. 
*/ - __xfs_free_extent_later(sc->tp, fsbno, 1, rs->oinfo, true); + __xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo, true); rs->deferred++; - need_roll = rs->deferred > 100; } - if (error || !need_roll) - return error; -roll_out: - rs->deferred = 0; - return xrep_roll_ag_trans(sc); + return error; } -/* Dispose of every block of every extent in the bitmap. */ +/* + * Figure out the longest run of blocks that we can dispose of with a single + * call. Cross-linked blocks should have their reverse mappings removed, but + * single-owner extents can be freed. AGFL blocks can only be put back one at + * a time. + */ +STATIC int +xreap_agextent_select( + struct xreap_state *rs, + xfs_agblock_t agbno, + xfs_agblock_t agbno_next, + bool *crosslinked, + xfs_extlen_t *aglenp) +{ + struct xfs_scrub *sc = rs->sc; + struct xfs_btree_cur *cur; + xfs_agblock_t bno = agbno + 1; + xfs_extlen_t len = 1; + int error; + + /* + * Determine if there are any other rmap records covering the first + * block of this extent. If so, the block is crosslinked. + */ + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, + sc->sa.pag); + error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, + crosslinked); + if (error) + goto out_cur; + + /* AGFL blocks can only be dealt with one at a time. */ + if (rs->resv == XFS_AG_RESV_AGFL) + goto out_found; + + /* + * Figure out how many of the subsequent blocks have the same crosslink + * status. + */ + while (bno < agbno_next) { + bool also_crosslinked; + + error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo, + &also_crosslinked); + if (error) + goto out_cur; + + if (*crosslinked != also_crosslinked) + break; + + len++; + bno++; + } + +out_found: + *aglenp = len; + trace_xreap_agextent_select(sc->sa.pag, agbno, len, *crosslinked); +out_cur: + xfs_btree_del_cursor(cur, error); + return error; +} + +/* + * Break an AG metadata extent into sub-extents by fate (crosslinked, not + * crosslinked), and dispose of each sub-extent separately. + */ +STATIC int +xreap_agmeta_extent( + uint64_t fsbno, + uint64_t len, + void *priv) +{ + struct xreap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; + xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); + xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + xfs_agblock_t agbno_next = agbno + len; + int error = 0; + + ASSERT(len <= XFS_MAX_BMBT_EXTLEN); + ASSERT(sc->ip == NULL); + + if (agno != sc->sa.pag->pag_agno) { + ASSERT(sc->sa.pag->pag_agno == agno); + return -EFSCORRUPTED; + } + + while (agbno < agbno_next) { + xfs_extlen_t aglen; + bool crosslinked; + + error = xreap_agextent_select(rs, agbno, agbno_next, + &crosslinked, &aglen); + if (error) + return error; + + error = xreap_agextent(rs, agbno, &aglen, crosslinked); + if (error) + return error; + + if (xreap_want_defer_finish(rs)) { + error = xrep_defer_finish(sc); + if (error) + return error; + xreap_defer_finish_reset(rs); + } else if (xreap_want_roll(rs)) { + error = xrep_roll_ag_trans(sc); + if (error) + return error; + xreap_reset(rs); + } + + agbno += aglen; + } + + return 0; +} + +/* Dispose of every block of every AG metadata extent in the bitmap.
*/ int -xrep_reap_extents( +xrep_reap_ag_metadata( struct xfs_scrub *sc, struct xbitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { - struct xrep_reap_state rs = { + struct xreap_state rs = { .sc = sc, .oinfo = oinfo, .resv = type, @@ -256,10 +482,14 @@ xrep_reap_extents( int error; ASSERT(xfs_has_rmapbt(sc->mp)); + ASSERT(sc->ip == NULL); - error = xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); - if (error || rs.deferred == 0) + error = xbitmap_walk(bitmap, xreap_agmeta_extent, &rs); + if (error) return error; - return xrep_roll_ag_trans(sc); + if (xreap_dirty(&rs)) + return xrep_defer_finish(sc); + + return 0; } diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h index 85c8d8a5fe38..7f234abfa78d 100644 --- a/fs/xfs/scrub/reap.h +++ b/fs/xfs/scrub/reap.h @@ -6,7 +6,7 @@ #ifndef __XFS_SCRUB_REAP_H__ #define __XFS_SCRUB_REAP_H__ -int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *bitmap, +int xrep_reap_ag_metadata(struct xfs_scrub *sc, struct xbitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index a3eddfcb42fc..83a1b1437a4f 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -26,6 +26,7 @@ #include "xfs_ag_resv.h" #include "xfs_quota.h" #include "xfs_qm.h" +#include "xfs_defer.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -166,6 +167,56 @@ xrep_roll_ag_trans( return 0; } +/* Finish all deferred work attached to the repair transaction. */ +int +xrep_defer_finish( + struct xfs_scrub *sc) +{ + int error; + + /* + * Keep the AG header buffers locked while we complete deferred work + * items. Ensure that both AG buffers are dirty and held when we roll + * the transaction so that they move forward in the log without losing + * the bli (and hence the bli type) when the transaction commits. + * + * Normal code would never hold clean buffers across a roll, but repair + * needs both buffers to maintain a total lock on the AG. + */ + if (sc->sa.agi_bp) { + xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM); + xfs_trans_bhold(sc->tp, sc->sa.agi_bp); + } + + if (sc->sa.agf_bp) { + xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM); + xfs_trans_bhold(sc->tp, sc->sa.agf_bp); + } + + /* + * Finish all deferred work items. We still hold the AG header buffers + * locked regardless of whether or not that succeeds. On failure, the + * buffers will be released during teardown on our way out of the + * kernel. If successful, join the buffers to the new transaction + * and move on. + */ + error = xfs_defer_finish(&sc->tp); + if (error) + return error; + + /* + * Release the hold that we set above because defer_finish won't do + * that for us. The defer roll code redirties held buffers after each + * roll, so the AG header buffers should be ready for logging. + */ + if (sc->sa.agi_bp) + xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp); + if (sc->sa.agf_bp) + xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp); + + return 0; +} + /* * Does the given AG have enough space to rebuild a btree? 
Neither AG * reservation can be critical, and we must have enough space (factoring diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index e01d63a4a93b..dc89164d10a6 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -20,6 +20,7 @@ static inline int xrep_notsupported(struct xfs_scrub *sc) int xrep_attempt(struct xfs_scrub *sc); void xrep_failure(struct xfs_mount *mp); int xrep_roll_ag_trans(struct xfs_scrub *sc); +int xrep_defer_finish(struct xfs_scrub *sc); bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks, enum xfs_ag_resv_type type); xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc); diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 71bfab3d2d29..73cf1002bd94 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -753,10 +753,43 @@ DECLARE_EVENT_CLASS(xrep_extent_class, DEFINE_EVENT(xrep_extent_class, name, \ TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), \ TP_ARGS(pag, agbno, len)) -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_unmap_extent); -DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_free_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_unmap_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_free_extent); +DEFINE_REPAIR_EXTENT_EVENT(xreap_agextent_binval); DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert); +DECLARE_EVENT_CLASS(xrep_reap_find_class, + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, + bool crosslinked), + TP_ARGS(pag, agbno, len, crosslinked), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(bool, crosslinked) + ), + TP_fast_assign( + __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->agno = pag->pag_agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->crosslinked = crosslinked; + ), + TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x crosslinked %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->crosslinked ? 1 : 0) +); +#define DEFINE_REPAIR_REAP_FIND_EVENT(name) \ +DEFINE_EVENT(xrep_reap_find_class, name, \ + TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, \ + bool crosslinked), \ + TP_ARGS(pag, agbno, len, crosslinked)) +DEFINE_REPAIR_REAP_FIND_EVENT(xreap_agextent_select); + DECLARE_EVENT_CLASS(xrep_rmap_class, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t len, From patchwork Fri May 26 00:45:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13255883 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 96533C77B7E for ; Fri, 26 May 2023 00:45:25 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232561AbjEZApY (ORCPT ); Thu, 25 May 2023 20:45:24 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:47164 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S240779AbjEZApX (ORCPT ); Thu, 25 May 2023 20:45:23 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0A151194 for ; Thu, 25 May 2023 17:45:21 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 8919363A6B for ; Fri, 26 May 2023 00:45:20 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id E8E85C433EF; Fri, 26 May 2023 00:45:19 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1685061920; bh=YqwTdA+I0uX9Ena+WkHvJPt+V61eZW72fhcXjCEtLl0=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=hBE6y7ms/jhNbNgFFSy4p5AARaYRC+vaCxu6i0pilRmpud+eGCZYz4gWmSdWCmAm2 fZ47r1IBqGzuYAoCdrvx60qVAdVMjK5z8VBryWP4oQ+M3A2cg8rM3XZq30oDsI1r0r ijshGDsSglRIIwUVbxwtfgh66lKlnh4K9JNhaYBZUxB/pboxM5O14KFeiuPTw0DdSd /+RBa7vx9ERtJAGpXYDexLawJyzrF/SrH9M/v2GORB+lmvEpQNM0suM8vQMndGXx4s nvOwcSNS07cFKoi5QeP/xHAjcU7qIpNSaBpqBX5+pnPE4D/d5LK/XHIldSbgPSSGqd FZmqv1XqiiqaQ== Date: Thu, 25 May 2023 17:45:19 -0700 Subject: [PATCH 9/9] xfs: use per-AG bitmaps to reap unused AG metadata blocks during repair From: "Darrick J. Wong" To: djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <168506055748.3728180.3343593199301204602.stgit@frogsfrogsfrogs> In-Reply-To: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> References: <168506055606.3728180.16225214578338421625.stgit@frogsfrogsfrogs> User-Agent: StGit/0.19 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-xfs@vger.kernel.org From: Darrick J. Wong The AGFL repair code uses a series of bitmaps to figure out where there are OWN_AG blocks that are not claimed by the free space and rmap btrees. These blocks become the new AGFL, and any overflow is reaped. The bitmaps currently track xfs_fsblock_t even though we already know the AG number. In the last patch, we introduced a new bitmap "type" for tracking xfs_agblock_t extents. Port the reaping code and the AGFL repair to use this new type, which makes it very obvious what we're tracking. This also eliminates a bunch of unnecessary agblock <-> fsblock conversions. Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/agheader_repair.c | 74 ++++++++++++++++++---------------------- fs/xfs/scrub/bitmap.c | 41 ++-------------------- fs/xfs/scrub/bitmap.h | 6 +-- fs/xfs/scrub/reap.c | 14 ++------ fs/xfs/scrub/reap.h | 5 +-- 5 files changed, 45 insertions(+), 95 deletions(-) diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index 9ad2987ed6e5..9e99486b5f20 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -445,13 +445,13 @@ xrep_agf( struct xrep_agfl { /* Bitmap of alleged AGFL blocks that we're not going to add.
*/ - struct xbitmap crossed; + struct xagb_bitmap crossed; /* Bitmap of other OWN_AG metadata blocks. */ - struct xbitmap agmetablocks; + struct xagb_bitmap agmetablocks; /* Bitmap of free space. */ - struct xbitmap *freesp; + struct xagb_bitmap *freesp; /* rmapbt cursor for finding crosslinked blocks */ struct xfs_btree_cur *rmap_cur; @@ -467,7 +467,6 @@ xrep_agfl_walk_rmap( void *priv) { struct xrep_agfl *ra = priv; - xfs_fsblock_t fsb; int error = 0; if (xchk_should_terminate(ra->sc, &error)) @@ -475,14 +474,13 @@ xrep_agfl_walk_rmap( /* Record all the OWN_AG blocks. */ if (rec->rm_owner == XFS_RMAP_OWN_AG) { - fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, - rec->rm_startblock); - error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount); + error = xagb_bitmap_set(ra->freesp, rec->rm_startblock, + rec->rm_blockcount); if (error) return error; } - return xbitmap_set_btcur_path(&ra->agmetablocks, cur); + return xagb_bitmap_set_btcur_path(&ra->agmetablocks, cur); } /* Strike out the blocks that are cross-linked according to the rmapbt. */ @@ -493,12 +491,10 @@ xrep_agfl_check_extent( void *priv) { struct xrep_agfl *ra = priv; - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(ra->sc->mp, start); + xfs_agblock_t agbno = start; xfs_agblock_t last_agbno = agbno + len - 1; int error; - ASSERT(XFS_FSB_TO_AGNO(ra->sc->mp, start) == ra->sc->sa.pag->pag_agno); - while (agbno <= last_agbno) { bool other_owners; @@ -508,7 +504,7 @@ xrep_agfl_check_extent( return error; if (other_owners) { - error = xbitmap_set(&ra->crossed, agbno, 1); + error = xagb_bitmap_set(&ra->crossed, agbno, 1); if (error) return error; } @@ -534,7 +530,7 @@ STATIC int xrep_agfl_collect_blocks( struct xfs_scrub *sc, struct xfs_buf *agf_bp, - struct xbitmap *agfl_extents, + struct xagb_bitmap *agfl_extents, xfs_agblock_t *flcount) { struct xrep_agfl ra; @@ -544,8 +540,8 @@ xrep_agfl_collect_blocks( ra.sc = sc; ra.freesp = agfl_extents; - xbitmap_init(&ra.agmetablocks); - xbitmap_init(&ra.crossed); + xagb_bitmap_init(&ra.agmetablocks); + xagb_bitmap_init(&ra.crossed); /* Find all space used by the free space btrees & rmapbt. */ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); @@ -557,7 +553,7 @@ xrep_agfl_collect_blocks( /* Find all blocks currently being used by the bnobt. */ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag, XFS_BTNUM_BNO); - error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur); xfs_btree_del_cursor(cur, error); if (error) goto out_bmp; @@ -565,7 +561,7 @@ xrep_agfl_collect_blocks( /* Find all blocks currently being used by the cntbt. */ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag, XFS_BTNUM_CNT); - error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur); xfs_btree_del_cursor(cur, error); if (error) goto out_bmp; @@ -574,17 +570,17 @@ xrep_agfl_collect_blocks( * Drop the freesp meta blocks that are in use by btrees. * The remaining blocks /should/ be AGFL blocks. */ - error = xbitmap_disunion(agfl_extents, &ra.agmetablocks); + error = xagb_bitmap_disunion(agfl_extents, &ra.agmetablocks); if (error) goto out_bmp; /* Strike out the blocks that are cross-linked. 
*/ ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); - error = xbitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra); + error = xagb_bitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra); xfs_btree_del_cursor(ra.rmap_cur, error); if (error) goto out_bmp; - error = xbitmap_disunion(agfl_extents, &ra.crossed); + error = xagb_bitmap_disunion(agfl_extents, &ra.crossed); if (error) goto out_bmp; @@ -592,12 +588,12 @@ xrep_agfl_collect_blocks( * Calculate the new AGFL size. If we found more blocks than fit in * the AGFL we'll free them later. */ - *flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents), + *flcount = min_t(uint64_t, xagb_bitmap_hweight(agfl_extents), xfs_agfl_size(mp)); out_bmp: - xbitmap_destroy(&ra.crossed); - xbitmap_destroy(&ra.agmetablocks); + xagb_bitmap_destroy(&ra.crossed); + xagb_bitmap_destroy(&ra.agmetablocks); return error; } @@ -627,7 +623,7 @@ xrep_agfl_update_agf( } struct xrep_agfl_fill { - struct xbitmap used_extents; + struct xagb_bitmap used_extents; struct xfs_scrub *sc; __be32 *agfl_bno; xfs_agblock_t flcount; @@ -643,17 +639,15 @@ xrep_agfl_fill( { struct xrep_agfl_fill *af = priv; struct xfs_scrub *sc = af->sc; - xfs_fsblock_t fsbno = start; + xfs_agblock_t agbno = start; int error; - trace_xrep_agfl_insert(sc->sa.pag, XFS_FSB_TO_AGBNO(sc->mp, start), - len); + trace_xrep_agfl_insert(sc->sa.pag, agbno, len); - while (fsbno < start + len && af->fl_off < af->flcount) - af->agfl_bno[af->fl_off++] = - cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++)); + while (agbno < start + len && af->fl_off < af->flcount) + af->agfl_bno[af->fl_off++] = cpu_to_be32(agbno++); - error = xbitmap_set(&af->used_extents, start, fsbno - 1); + error = xagb_bitmap_set(&af->used_extents, start, agbno - 1); if (error) return error; @@ -668,7 +662,7 @@ STATIC int xrep_agfl_init_header( struct xfs_scrub *sc, struct xfs_buf *agfl_bp, - struct xbitmap *agfl_extents, + struct xagb_bitmap *agfl_extents, xfs_agblock_t flcount) { struct xrep_agfl_fill af = { @@ -696,17 +690,17 @@ xrep_agfl_init_header( * blocks than fit in the AGFL, they will be freed in a subsequent * step. */ - xbitmap_init(&af.used_extents); + xagb_bitmap_init(&af.used_extents); af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp), - xbitmap_walk(agfl_extents, xrep_agfl_fill, &af); - error = xbitmap_disunion(agfl_extents, &af.used_extents); + xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af); + error = xagb_bitmap_disunion(agfl_extents, &af.used_extents); if (error) return error; /* Write new AGFL to disk. */ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1); - xbitmap_destroy(&af.used_extents); + xagb_bitmap_destroy(&af.used_extents); return 0; } @@ -715,7 +709,7 @@ int xrep_agfl( struct xfs_scrub *sc) { - struct xbitmap agfl_extents; + struct xagb_bitmap agfl_extents; struct xfs_mount *mp = sc->mp; struct xfs_buf *agf_bp; struct xfs_buf *agfl_bp; @@ -726,7 +720,7 @@ xrep_agfl( if (!xfs_has_rmapbt(mp)) return -EOPNOTSUPP; - xbitmap_init(&agfl_extents); + xagb_bitmap_init(&agfl_extents); /* * Read the AGF so that we can query the rmapbt. We hope that there's @@ -775,10 +769,10 @@ xrep_agfl( goto err; /* Dump any AGFL overflow. 
*/ - error = xrep_reap_ag_metadata(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, + error = xrep_reap_agblocks(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, XFS_AG_RESV_AGFL); err: - xbitmap_destroy(&agfl_extents); + xagb_bitmap_destroy(&agfl_extents); return error; } diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index d926d708f295..e0c89a9a0ca0 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -301,21 +301,15 @@ xagb_bitmap_set_btblocks( * blocks going from the leaf towards the root. */ int -xbitmap_set_btcur_path( - struct xbitmap *bitmap, +xagb_bitmap_set_btcur_path( + struct xagb_bitmap *bitmap, struct xfs_btree_cur *cur) { - struct xfs_buf *bp; - xfs_fsblock_t fsb; int i; int error; for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) { - xfs_btree_get_block(cur, i, &bp); - if (!bp) - continue; - fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); - error = xbitmap_set(bitmap, fsb, 1); + error = xagb_bitmap_visit_btblock(cur, i, bitmap); if (error) return error; } @@ -323,35 +317,6 @@ xbitmap_set_btcur_path( return 0; } -/* Collect a btree's block in the bitmap. */ -STATIC int -xbitmap_collect_btblock( - struct xfs_btree_cur *cur, - int level, - void *priv) -{ - struct xbitmap *bitmap = priv; - struct xfs_buf *bp; - xfs_fsblock_t fsbno; - - xfs_btree_get_block(cur, level, &bp); - if (!bp) - return 0; - - fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); - return xbitmap_set(bitmap, fsbno, 1); -} - -/* Walk the btree and mark the bitmap wherever a btree block is found. */ -int -xbitmap_set_btblocks( - struct xbitmap *bitmap, - struct xfs_btree_cur *cur) -{ - return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, - XFS_BTREE_VISIT_ALL, bitmap); -} - /* How many bits are set in this bitmap? */ uint64_t xbitmap_hweight( diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index a3ad564d94b7..4fe58bad6734 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -16,10 +16,6 @@ void xbitmap_destroy(struct xbitmap *bitmap); int xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub); -int xbitmap_set_btcur_path(struct xbitmap *bitmap, - struct xfs_btree_cur *cur); -int xbitmap_set_btblocks(struct xbitmap *bitmap, - struct xfs_btree_cur *cur); uint64_t xbitmap_hweight(struct xbitmap *bitmap); /* @@ -106,5 +102,7 @@ static inline int xagb_bitmap_walk(struct xagb_bitmap *bitmap, int xagb_bitmap_set_btblocks(struct xagb_bitmap *bitmap, struct xfs_btree_cur *cur); +int xagb_bitmap_set_btcur_path(struct xagb_bitmap *bitmap, + struct xfs_btree_cur *cur); #endif /* __XFS_SCRUB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 4f57b428ffe2..6d1d731c4019 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -422,19 +422,13 @@ xreap_agmeta_extent( { struct xreap_state *rs = priv; struct xfs_scrub *sc = rs->sc; - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + xfs_agblock_t agbno = fsbno; xfs_agblock_t agbno_next = agbno + len; int error = 0; ASSERT(len <= XFS_MAX_BMBT_EXTLEN); ASSERT(sc->ip == NULL); - if (agno != sc->sa.pag->pag_agno) { - ASSERT(sc->sa.pag->pag_agno == agno); - return -EFSCORRUPTED; - } - while (agbno < agbno_next) { xfs_extlen_t aglen; bool crosslinked; @@ -468,9 +462,9 @@ xreap_agmeta_extent( /* Dispose of every block of every AG metadata extent in the bitmap. 
*/ int -xrep_reap_ag_metadata( +xrep_reap_agblocks( struct xfs_scrub *sc, - struct xbitmap *bitmap, + struct xagb_bitmap *bitmap, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { @@ -484,7 +478,7 @@ xrep_reap_ag_metadata( ASSERT(xfs_has_rmapbt(sc->mp)); ASSERT(sc->ip == NULL); - error = xbitmap_walk(bitmap, xreap_agmeta_extent, &rs); + error = xagb_bitmap_walk(bitmap, xreap_agmeta_extent, &rs); if (error) return error; diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h index 7f234abfa78d..fe24626af164 100644 --- a/fs/xfs/scrub/reap.h +++ b/fs/xfs/scrub/reap.h @@ -6,8 +6,7 @@ #ifndef __XFS_SCRUB_REAP_H__ #define __XFS_SCRUB_REAP_H__ -int xrep_reap_ag_metadata(struct xfs_scrub *sc, struct xbitmap *bitmap, - const struct xfs_owner_info *oinfo, - enum xfs_ag_resv_type type); +int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap, + const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); #endif /* __XFS_SCRUB_REAP_H__ */