From patchwork Tue Nov 5 22:12:43 2024
X-Patchwork-Submitter: "Darrick J. Wong"
X-Patchwork-Id: 13863586
Date: Tue, 05 Nov 2024 14:12:43 -0800
Subject: [PATCH 01/16] xfs: factor out a xfs_iwalk_args helper
From: "Darrick J. Wong"
To: cem@kernel.org, djwong@kernel.org
Cc: linux-xfs@vger.kernel.org
Message-ID: <173084395281.1869491.10407906599387519773.stgit@frogsfrogsfrogs>
In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs>
References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs>

From: Christoph Hellwig

Add a helper to share more code between xfs_iwalk and xfs_inobt_walk, and at the same time do away with the extra flags indirection so that everyone uses the same names for the same flags when using the common iwalk code.

Signed-off-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
Signed-off-by: Darrick J. Wong
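To see how the unified flag is meant to be consumed, here is a rough sketch of a hypothetical caller (invented names; prototypes abbreviated from xfs_iwalk.h, so treat the details as illustrative rather than as part of this patch). It counts the allocated inodes in the AG that contains @startino:

/* Hypothetical xfs_iwalk_fn callback: bump a counter per allocated inode. */
static int
xfs_example_count_inode(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	uint64_t		*count = data;

	(*count)++;
	return 0;
}

/* Hypothetical helper: walk only the AG containing @startino. */
static int
xfs_example_count_ag_inodes(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		startino,
	uint64_t		*count)
{
	*count = 0;

	/*
	 * XFS_IWALK_SAME_AG stops the walk at the end of startino's AG;
	 * without it the walk continues through the remaining AGs.  The 0
	 * is the inode_records prefetch hint, which xfs_iwalk_prefetch()
	 * clamps to a sane batch size.
	 */
	return xfs_iwalk(mp, tp, startino, XFS_IWALK_SAME_AG,
			xfs_example_count_inode, 0, count);
}

With this patch both xfs_iwalk() and xfs_inobt_walk() funnel into the new xfs_iwalk_args() helper, so the AG-advance and XFS_IWALK_SAME_AG handling exercised by a caller like the one above lives in one place instead of being duplicated.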
--- fs/xfs/xfs_iwalk.c | 83 ++++++++++++++++++++-------------------------------- fs/xfs/xfs_iwalk.h | 7 +--- 2 files changed, 33 insertions(+), 57 deletions(-) diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index d4ef7485e8f740..a89ae2aef7c445 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -534,6 +534,35 @@ xfs_iwalk_prefetch( return max(inobt_records, 2U); } +static int +xfs_iwalk_args( + struct xfs_iwalk_ag *iwag, + unsigned int flags) +{ + struct xfs_mount *mp = iwag->mp; + xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, iwag->startino); + int error; + + ASSERT(agno < mp->m_sb.sb_agcount); + ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL)); + + error = xfs_iwalk_alloc(iwag); + if (error) + return error; + + for_each_perag_from(mp, agno, iwag->pag) { + error = xfs_iwalk_ag(iwag); + if (error || (flags & XFS_IWALK_SAME_AG)) { + xfs_perag_rele(iwag->pag); + break; + } + iwag->startino = XFS_AGINO_TO_INO(mp, agno + 1, 0); + } + + xfs_iwalk_free(iwag); + return error; +} + /* * Walk all inodes in the filesystem starting from @startino. The @iwalk_fn * will be called for each allocated inode, being passed the inode's number and @@ -562,32 +591,8 @@ xfs_iwalk( .pwork = XFS_PWORK_SINGLE_THREADED, .lastino = NULLFSINO, }; - struct xfs_perag *pag; - xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino); - int error; - ASSERT(agno < mp->m_sb.sb_agcount); - ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL)); - - error = xfs_iwalk_alloc(&iwag); - if (error) - return error; - - for_each_perag_from(mp, agno, pag) { - iwag.pag = pag; - error = xfs_iwalk_ag(&iwag); - if (error) - break; - iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0); - if (flags & XFS_INOBT_WALK_SAME_AG) - break; - iwag.pag = NULL; - } - - if (iwag.pag) - xfs_perag_rele(pag); - xfs_iwalk_free(&iwag); - return error; + return xfs_iwalk_args(&iwag, flags); } /* Run per-thread iwalk work. */ @@ -673,7 +678,7 @@ xfs_iwalk_threaded( iwag->lastino = NULLFSINO; xfs_pwork_queue(&pctl, &iwag->pwork); startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0); - if (flags & XFS_INOBT_WALK_SAME_AG) + if (flags & XFS_IWALK_SAME_AG) break; } if (pag) @@ -747,30 +752,6 @@ xfs_inobt_walk( .pwork = XFS_PWORK_SINGLE_THREADED, .lastino = NULLFSINO, }; - struct xfs_perag *pag; - xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino); - int error; - ASSERT(agno < mp->m_sb.sb_agcount); - ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL)); - - error = xfs_iwalk_alloc(&iwag); - if (error) - return error; - - for_each_perag_from(mp, agno, pag) { - iwag.pag = pag; - error = xfs_iwalk_ag(&iwag); - if (error) - break; - iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0); - if (flags & XFS_INOBT_WALK_SAME_AG) - break; - iwag.pag = NULL; - } - - if (iwag.pag) - xfs_perag_rele(pag); - xfs_iwalk_free(&iwag); - return error; + return xfs_iwalk_args(&iwag, flags); } diff --git a/fs/xfs/xfs_iwalk.h b/fs/xfs/xfs_iwalk.h index 83699089755ebb..17a5a2c6debb15 100644 --- a/fs/xfs/xfs_iwalk.h +++ b/fs/xfs/xfs_iwalk.h @@ -25,7 +25,7 @@ int xfs_iwalk_threaded(struct xfs_mount *mp, xfs_ino_t startino, unsigned int flags, xfs_iwalk_fn iwalk_fn, unsigned int inode_records, bool poll, void *data); -/* Only iterate inodes within the same AG as @startino. */ +/* Only iterate within the same AG as @startino. 
*/ #define XFS_IWALK_SAME_AG (1U << 0) #define XFS_IWALK_FLAGS_ALL (XFS_IWALK_SAME_AG) @@ -41,9 +41,4 @@ int xfs_inobt_walk(struct xfs_mount *mp, struct xfs_trans *tp, xfs_inobt_walk_fn inobt_walk_fn, unsigned int inobt_records, void *data); -/* Only iterate inobt records within the same AG as @startino. */ -#define XFS_INOBT_WALK_SAME_AG (XFS_IWALK_SAME_AG) - -#define XFS_INOBT_WALK_FLAGS_ALL (XFS_INOBT_WALK_SAME_AG) - #endif /* __XFS_IWALK_H__ */

From patchwork Tue Nov 5 22:12:59 2024
X-Patchwork-Submitter: "Darrick J. Wong"
X-Patchwork-Id: 13863587
Date: Tue, 05 Nov 2024 14:12:59 -0800
Subject: [PATCH 02/16] xfs: factor out a generic xfs_group structure
From: "Darrick J. Wong"
To: cem@kernel.org, djwong@kernel.org
Cc: linux-xfs@vger.kernel.org
Message-ID: <173084395298.1869491.6713005639255694654.stgit@frogsfrogsfrogs>
In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs>
References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs>

From: Christoph Hellwig

Split the lookup and refcount handling of struct xfs_perag into an embedded xfs_group structure that can be reused for the upcoming realtime groups. It will be extended with more features later.
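As a rough illustration of that embedding (simplified userspace C with invented names, not the code added by this patch), the generic half owns the reference counting while container_of() recovers the type-specific container:

/* Toy model of the xfs_group-in-xfs_perag embedding pattern. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ex_group {			/* generic per-group state */
	unsigned int	gno;		/* group number */
	int		refcount;	/* passive reference count */
};

struct ex_perag {			/* AG-specific container */
	struct ex_group	pag_group;	/* embedded generic part */
	unsigned long	pagf_freeblks;	/* AG-only state */
};

/* Generic code manipulates only struct ex_group... */
static void ex_group_hold(struct ex_group *xg)
{
	xg->refcount++;
}

/* ...and AG code converts back to the containing structure. */
static struct ex_perag *to_ex_perag(struct ex_group *xg)
{
	return container_of(xg, struct ex_perag, pag_group);
}

int main(void)
{
	struct ex_perag pag = {
		.pag_group = { .gno = 0, .refcount = 1 },
		.pagf_freeblks = 128,
	};
	struct ex_group *xg = &pag.pag_group;

	ex_group_hold(xg);
	assert(to_ex_perag(xg) == &pag);
	printf("group %u: refcount %d, freeblks %lu\n",
			xg->gno, xg->refcount, to_ex_perag(xg)->pagf_freeblks);
	return 0;
}

In the patch itself the generic half is struct xfs_group in the new xfs_group.{c,h} (with RCU-protected xarray lookup and separate passive/active reference counts), struct xfs_perag embeds it as pag_group, and accessors such as to_perag(), pag_agno() and pag_mount() replace the old direct pag_mount/pag_agno field accesses throughout the diff below.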
Note that the xg_type field will only need a single bit even with realtime group support. For now it fills a hole, but it might be worth folding it into another field if we can use this space better.

Signed-off-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
Signed-off-by: Darrick J. Wong
--- fs/xfs/Makefile | 1 fs/xfs/libxfs/xfs_ag.c | 139 ++++++------------------------ fs/xfs/libxfs/xfs_ag.h | 81 ++++++++++++++--- fs/xfs/libxfs/xfs_ag_resv.c | 19 ++-- fs/xfs/libxfs/xfs_alloc.c | 39 ++++---- fs/xfs/libxfs/xfs_alloc_btree.c | 2 fs/xfs/libxfs/xfs_bmap.c | 2 fs/xfs/libxfs/xfs_btree.c | 6 + fs/xfs/libxfs/xfs_group.c | 169 ++++++++++++++++++++++++++++++++++++ fs/xfs/libxfs/xfs_group.h | 41 +++++++++ fs/xfs/libxfs/xfs_ialloc.c | 40 ++++----- fs/xfs/libxfs/xfs_ialloc_btree.c | 16 ++- fs/xfs/libxfs/xfs_refcount.c | 4 - fs/xfs/libxfs/xfs_refcount_btree.c | 8 +- fs/xfs/libxfs/xfs_rmap.c | 4 - fs/xfs/libxfs/xfs_rmap_btree.c | 7 + fs/xfs/libxfs/xfs_sb.c | 6 + fs/xfs/libxfs/xfs_types.h | 8 ++ fs/xfs/scrub/agheader_repair.c | 22 ++--- fs/xfs/scrub/alloc_repair.c | 2 fs/xfs/scrub/common.h | 3 - fs/xfs/scrub/ialloc_repair.c | 2 fs/xfs/scrub/iscan.c | 4 - fs/xfs/scrub/newbt.c | 8 +- fs/xfs/scrub/repair.c | 4 - fs/xfs/scrub/rmap.c | 2 fs/xfs/scrub/rmap_repair.c | 10 +- fs/xfs/scrub/trace.h | 82 +++++++++-------- fs/xfs/xfs_discard.c | 4 - fs/xfs/xfs_extent_busy.c | 4 - fs/xfs/xfs_extfree_item.c | 2 fs/xfs/xfs_filestream.c | 8 +- fs/xfs/xfs_fsmap.c | 12 +-- fs/xfs/xfs_icache.c | 55 ++++-------- fs/xfs/xfs_inode.c | 6 + fs/xfs/xfs_iwalk.c | 6 + fs/xfs/xfs_log_recover.c | 6 + fs/xfs/xfs_mount.h | 6 + fs/xfs/xfs_refcount_item.c | 2 fs/xfs/xfs_reflink.c | 2 fs/xfs/xfs_rmap_item.c | 2 fs/xfs/xfs_super.c | 11 +- fs/xfs/xfs_trace.c | 1 fs/xfs/xfs_trace.h | 149 ++++++++++++++++++++------------ 44 files changed, 608 insertions(+), 399 deletions(-) create mode 100644 fs/xfs/libxfs/xfs_group.c create mode 100644 fs/xfs/libxfs/xfs_group.h diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index dd692619bed580..94cb8ca9f9da77 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -14,6 +14,7 @@ xfs-y += xfs_trace.o # build the libxfs code first xfs-y += $(addprefix libxfs/, \ + xfs_group.o \ xfs_ag.o \ xfs_alloc.o \ xfs_alloc_btree.o \ diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index d51e88a4e7e283..9ea20e9cf0d4e5 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -30,85 +30,7 @@ #include "xfs_trace.h" #include "xfs_inode.h" #include "xfs_icache.h" - - -/* - * Passive reference counting access wrappers to the perag structures. If the - * per-ag structure is to be freed, the freeing code is responsible for cleaning - * up objects with passive references before freeing the structure. This is - * things like cached buffers. - */ -struct xfs_perag * -xfs_perag_get( - struct xfs_mount *mp, - xfs_agnumber_t agno) -{ - struct xfs_perag *pag; - - rcu_read_lock(); - pag = xa_load(&mp->m_perags, agno); - if (pag) { - trace_xfs_perag_get(pag, _RET_IP_); - ASSERT(atomic_read(&pag->pag_ref) >= 0); - atomic_inc(&pag->pag_ref); - } - rcu_read_unlock(); - return pag; -} - -/* Get a passive reference to the given perag. 
*/ -struct xfs_perag * -xfs_perag_hold( - struct xfs_perag *pag) -{ - ASSERT(atomic_read(&pag->pag_ref) > 0 || - atomic_read(&pag->pag_active_ref) > 0); - - trace_xfs_perag_hold(pag, _RET_IP_); - atomic_inc(&pag->pag_ref); - return pag; -} - -void -xfs_perag_put( - struct xfs_perag *pag) -{ - trace_xfs_perag_put(pag, _RET_IP_); - ASSERT(atomic_read(&pag->pag_ref) > 0); - atomic_dec(&pag->pag_ref); -} - -/* - * Active references for perag structures. This is for short term access to the - * per ag structures for walking trees or accessing state. If an AG is being - * shrunk or is offline, then this will fail to find that AG and return NULL - * instead. - */ -struct xfs_perag * -xfs_perag_grab( - struct xfs_mount *mp, - xfs_agnumber_t agno) -{ - struct xfs_perag *pag; - - rcu_read_lock(); - pag = xa_load(&mp->m_perags, agno); - if (pag) { - trace_xfs_perag_grab(pag, _RET_IP_); - if (!atomic_inc_not_zero(&pag->pag_active_ref)) - pag = NULL; - } - rcu_read_unlock(); - return pag; -} - -void -xfs_perag_rele( - struct xfs_perag *pag) -{ - trace_xfs_perag_rele(pag, _RET_IP_); - atomic_dec(&pag->pag_active_ref); -} +#include "xfs_group.h" /* * xfs_initialize_perag_data @@ -183,6 +105,19 @@ xfs_initialize_perag_data( return error; } +static void +xfs_perag_uninit( + struct xfs_group *xg) +{ +#ifdef __KERNEL__ + struct xfs_perag *pag = to_perag(xg); + + xfs_defer_drain_free(&pag->pag_intents_drain); + cancel_delayed_work_sync(&pag->pag_blockgc_work); + xfs_buf_cache_destroy(&pag->pag_bcache); +#endif +} + /* * Free up the per-ag resources within the specified AG range. */ @@ -195,22 +130,8 @@ xfs_free_perag_range( { xfs_agnumber_t agno; - for (agno = first_agno; agno < end_agno; agno++) { - struct xfs_perag *pag = xa_erase(&mp->m_perags, agno); - - ASSERT(pag); - XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0); - xfs_defer_drain_free(&pag->pag_intents_drain); - - cancel_delayed_work_sync(&pag->pag_blockgc_work); - xfs_buf_cache_destroy(&pag->pag_bcache); - - /* drop the mount's active reference */ - xfs_perag_rele(pag); - XFS_IS_CORRUPT(pag->pag_mount, - atomic_read(&pag->pag_active_ref) != 0); - kfree_rcu_mightsleep(pag); - } + for (agno = first_agno; agno < end_agno; agno++) + xfs_group_free(mp, agno, XG_TYPE_AG, xfs_perag_uninit); } /* Find the size of the AG, in blocks. */ @@ -332,16 +253,9 @@ xfs_perag_alloc( __xfs_agino_range(mp, pag->block_count, &pag->agino_min, &pag->agino_max); - pag->pag_agno = index; - pag->pag_mount = mp; - /* Active ref owned by mount indicates AG is online. 
*/ - atomic_set(&pag->pag_active_ref, 1); - - error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL); - if (error) { - WARN_ON_ONCE(error == -EBUSY); + error = xfs_group_insert(mp, pag_group(pag), index, XG_TYPE_AG); + if (error) goto out_buf_cache_destroy; - } return 0; @@ -833,7 +747,7 @@ xfs_ag_shrink_space( struct xfs_trans **tpp, xfs_extlen_t delta) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_alloc_arg args = { .tp = *tpp, .mp = mp, @@ -850,7 +764,7 @@ xfs_ag_shrink_space( xfs_agblock_t aglen; int error, err2; - ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1); + ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1); error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp); if (error) return error; @@ -947,8 +861,8 @@ xfs_ag_shrink_space( /* Update perag geometry */ pag->block_count -= delta; - __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min, - &pag->agino_max); + __xfs_agino_range(mp, pag->block_count, &pag->agino_min, + &pag->agino_max); xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH); xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH); @@ -973,12 +887,13 @@ xfs_ag_extend_space( struct xfs_trans *tp, xfs_extlen_t len) { + struct xfs_mount *mp = pag_mount(pag); struct xfs_buf *bp; struct xfs_agi *agi; struct xfs_agf *agf; int error; - ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1); + ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1); error = xfs_ialloc_read_agi(pag, tp, 0, &bp); if (error) @@ -1018,8 +933,8 @@ xfs_ag_extend_space( /* Update perag geometry */ pag->block_count = be32_to_cpu(agf->agf_length); - __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min, - &pag->agino_max); + __xfs_agino_range(mp, pag->block_count, &pag->agino_min, + &pag->agino_max); return 0; } @@ -1046,7 +961,7 @@ xfs_ag_get_geometry( /* Fill out form. */ memset(ageo, 0, sizeof(*ageo)); - ageo->ag_number = pag->pag_agno; + ageo->ag_number = pag_agno(pag); agi = agi_bp->b_addr; ageo->ag_icount = be32_to_cpu(agi->agi_count); diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 8787823ae37f9f..69b934ad2c4aad 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -7,6 +7,8 @@ #ifndef __LIBXFS_AG_H #define __LIBXFS_AG_H 1 +#include "xfs_group.h" + struct xfs_mount; struct xfs_trans; struct xfs_perag; @@ -30,10 +32,7 @@ struct xfs_ag_resv { * performance of allocation group selection. */ struct xfs_perag { - struct xfs_mount *pag_mount; /* owner filesystem */ - xfs_agnumber_t pag_agno; /* AG this structure belongs to */ - atomic_t pag_ref; /* passive reference count */ - atomic_t pag_active_ref; /* active reference count */ + struct xfs_group pag_group; unsigned long pag_opstate; uint8_t pagf_bno_level; /* # of levels in bno btree */ uint8_t pagf_cnt_level; /* # of levels in cnt btree */ @@ -121,6 +120,26 @@ struct xfs_perag { #endif /* __KERNEL__ */ }; +static inline struct xfs_perag *to_perag(struct xfs_group *xg) +{ + return container_of(xg, struct xfs_perag, pag_group); +} + +static inline struct xfs_group *pag_group(struct xfs_perag *pag) +{ + return &pag->pag_group; +} + +static inline struct xfs_mount *pag_mount(const struct xfs_perag *pag) +{ + return pag->pag_group.xg_mount; +} + +static inline xfs_agnumber_t pag_agno(const struct xfs_perag *pag) +{ + return pag->pag_group.xg_gno; +} + /* * Per-AG operational state. These are atomic flag bits. 
*/ @@ -151,13 +170,43 @@ int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno); int xfs_update_last_ag_size(struct xfs_mount *mp, xfs_agnumber_t prev_agcount); /* Passive AG references */ -struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); -struct xfs_perag *xfs_perag_hold(struct xfs_perag *pag); -void xfs_perag_put(struct xfs_perag *pag); +static inline struct xfs_perag * +xfs_perag_get( + struct xfs_mount *mp, + xfs_agnumber_t agno) +{ + return to_perag(xfs_group_get(mp, agno, XG_TYPE_AG)); +} + +static inline struct xfs_perag * +xfs_perag_hold( + struct xfs_perag *pag) +{ + return to_perag(xfs_group_hold(pag_group(pag))); +} + +static inline void +xfs_perag_put( + struct xfs_perag *pag) +{ + xfs_group_put(pag_group(pag)); +} /* Active AG references */ -struct xfs_perag *xfs_perag_grab(struct xfs_mount *, xfs_agnumber_t); -void xfs_perag_rele(struct xfs_perag *pag); +static inline struct xfs_perag * +xfs_perag_grab( + struct xfs_mount *mp, + xfs_agnumber_t agno) +{ + return to_perag(xfs_group_grab(mp, agno, XG_TYPE_AG)); +} + +static inline void +xfs_perag_rele( + struct xfs_perag *pag) +{ + xfs_group_rele(pag_group(pag)); +} /* * Per-ag geometry infomation and validation @@ -233,9 +282,9 @@ xfs_perag_next( xfs_agnumber_t *agno, xfs_agnumber_t end_agno) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); - *agno = pag->pag_agno + 1; + *agno = pag_agno(pag) + 1; xfs_perag_rele(pag); while (*agno <= end_agno) { pag = xfs_perag_grab(mp, *agno); @@ -266,9 +315,9 @@ xfs_perag_next_wrap( xfs_agnumber_t restart_agno, xfs_agnumber_t wrap_agno) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); - *agno = pag->pag_agno + 1; + *agno = pag_agno(pag) + 1; xfs_perag_rele(pag); while (*agno != stop_agno) { if (*agno >= wrap_agno) { @@ -335,7 +384,7 @@ xfs_agbno_to_fsb( struct xfs_perag *pag, xfs_agblock_t agbno) { - return XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno, agbno); + return XFS_AGB_TO_FSB(pag_mount(pag), pag_agno(pag), agbno); } static inline xfs_daddr_t @@ -343,7 +392,7 @@ xfs_agbno_to_daddr( struct xfs_perag *pag, xfs_agblock_t agbno) { - return XFS_AGB_TO_DADDR(pag->pag_mount, pag->pag_agno, agbno); + return XFS_AGB_TO_DADDR(pag_mount(pag), pag_agno(pag), agbno); } static inline xfs_ino_t @@ -351,7 +400,7 @@ xfs_agino_to_ino( struct xfs_perag *pag, xfs_agino_t agino) { - return XFS_AGINO_TO_INO(pag->pag_mount, pag->pag_agno, agino); + return XFS_AGINO_TO_INO(pag_mount(pag), pag_agno(pag), agino); } #endif /* __LIBXFS_AG_H */ diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index 4b1bd7cc7ba28c..f5d853089019f0 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -70,6 +70,7 @@ xfs_ag_resv_critical( struct xfs_perag *pag, enum xfs_ag_resv_type type) { + struct xfs_mount *mp = pag_mount(pag); xfs_extlen_t avail; xfs_extlen_t orig; @@ -92,8 +93,8 @@ xfs_ag_resv_critical( /* Critically low if less than 10% or max btree height remains. 
*/ return XFS_TEST_ERROR(avail < orig / 10 || - avail < pag->pag_mount->m_agbtree_maxlevels, - pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL); + avail < mp->m_agbtree_maxlevels, + mp, XFS_ERRTAG_AG_RESV_CRITICAL); } /* @@ -137,8 +138,8 @@ __xfs_ag_resv_free( trace_xfs_ag_resv_free(pag, type, 0); resv = xfs_perag_resv(pag, type); - if (pag->pag_agno == 0) - pag->pag_mount->m_ag_max_usable += resv->ar_asked; + if (pag_agno(pag) == 0) + pag_mount(pag)->m_ag_max_usable += resv->ar_asked; /* * RMAPBT blocks come from the AGFL and AGFL blocks are always * considered "free", so whatever was reserved at mount time must be @@ -148,7 +149,7 @@ __xfs_ag_resv_free( oldresv = resv->ar_orig_reserved; else oldresv = resv->ar_reserved; - xfs_add_fdblocks(pag->pag_mount, oldresv); + xfs_add_fdblocks(pag_mount(pag), oldresv); resv->ar_reserved = 0; resv->ar_asked = 0; resv->ar_orig_reserved = 0; @@ -170,7 +171,7 @@ __xfs_ag_resv_init( xfs_extlen_t ask, xfs_extlen_t used) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_ag_resv *resv; int error; xfs_extlen_t hidden_space; @@ -209,7 +210,7 @@ __xfs_ag_resv_init( trace_xfs_ag_resv_init_error(pag, error, _RET_IP_); xfs_warn(mp, "Per-AG reservation for AG %u failed. Filesystem may run out of space.", - pag->pag_agno); + pag_agno(pag)); return error; } @@ -219,7 +220,7 @@ __xfs_ag_resv_init( * counter, we only make the adjustment for AG 0. This assumes that * there aren't any AGs hungrier for per-AG reservation than AG 0. */ - if (pag->pag_agno == 0) + if (pag_agno(pag) == 0) mp->m_ag_max_usable -= ask; resv = xfs_perag_resv(pag, type); @@ -237,7 +238,7 @@ xfs_ag_resv_init( struct xfs_perag *pag, struct xfs_trans *tp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); xfs_extlen_t ask; xfs_extlen_t used; int error = 0, error2; diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index cd5c44a75cd138..bfe7b4321c47ae 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -275,7 +275,7 @@ xfs_alloc_complain_bad_rec( xfs_warn(mp, "%sbt record corruption in AG %d detected at %pS!", - cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa); + cur->bc_ops->name, pag_agno(cur->bc_ag.pag), fa); xfs_warn(mp, "start block 0x%x block count 0x%x", irec->ar_startblock, irec->ar_blockcount); @@ -799,7 +799,7 @@ xfs_agfl_verify( * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ - if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) + if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != pag_agno((bp->b_pag))) return __this_address; for (i = 0; i < xfs_agfl_size(mp); i++) { @@ -879,13 +879,12 @@ xfs_alloc_read_agfl( struct xfs_trans *tp, struct xfs_buf **bpp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_buf *bp; int error; - error = xfs_trans_read_buf( - mp, tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)), + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGFL_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); @@ -2428,7 +2427,7 @@ xfs_alloc_longest_free_extent( * reservations and AGFL rules in place, we can return this extent. 
*/ if (pag->pagf_longest > delta) - return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable, + return min_t(xfs_extlen_t, pag_mount(pag)->m_ag_max_usable, pag->pagf_longest - delta); /* Otherwise, let the caller try for 1 block if there's space. */ @@ -2611,7 +2610,7 @@ xfs_agfl_reset( xfs_warn(mp, "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. " "Please unmount and run xfs_repair.", - pag->pag_agno, pag->pagf_flcount); + pag_agno(pag), pag->pagf_flcount); agf->agf_flfirst = 0; agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1); @@ -3188,7 +3187,7 @@ xfs_validate_ag_length( * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ - if (bp->b_pag && seqno != bp->b_pag->pag_agno) + if (bp->b_pag && seqno != pag_agno(bp->b_pag)) return __this_address; /* @@ -3357,13 +3356,13 @@ xfs_read_agf( int flags, struct xfs_buf **agfbpp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); int error; trace_xfs_read_agf(pag); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)), + XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGF_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops); if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF); @@ -3386,6 +3385,7 @@ xfs_alloc_read_agf( int flags, struct xfs_buf **agfbpp) { + struct xfs_mount *mp = pag_mount(pag); struct xfs_buf *agfbp; struct xfs_agf *agf; int error; @@ -3412,7 +3412,7 @@ xfs_alloc_read_agf( pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level); pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level); pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level); - if (xfs_agfl_needs_reset(pag->pag_mount, agf)) + if (xfs_agfl_needs_reset(mp, agf)) set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate); else clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate); @@ -3425,16 +3425,15 @@ xfs_alloc_read_agf( * counter only tracks non-root blocks. 
*/ allocbt_blks = pag->pagf_btreeblks; - if (xfs_has_rmapbt(pag->pag_mount)) + if (xfs_has_rmapbt(mp)) allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1; if (allocbt_blks > 0) - atomic64_add(allocbt_blks, - &pag->pag_mount->m_allocbt_blks); + atomic64_add(allocbt_blks, &mp->m_allocbt_blks); set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate); } #ifdef DEBUG - else if (!xfs_is_shutdown(pag->pag_mount)) { + else if (!xfs_is_shutdown(mp)) { ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks)); ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); @@ -3652,7 +3651,7 @@ xfs_alloc_vextent_this_ag( int error; ASSERT(args->pag != NULL); - ASSERT(args->pag->pag_agno == agno); + ASSERT(pag_agno(args->pag) == agno); args->agno = agno; args->agbno = 0; @@ -3865,7 +3864,7 @@ xfs_alloc_vextent_exact_bno( int error; ASSERT(args->pag != NULL); - ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target)); + ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target)); args->agno = XFS_FSB_TO_AGNO(mp, target); args->agbno = XFS_FSB_TO_AGBNO(mp, target); @@ -3904,7 +3903,7 @@ xfs_alloc_vextent_near_bno( int error; if (!needs_perag) - ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target)); + ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target)); args->agno = XFS_FSB_TO_AGNO(mp, target); args->agbno = XFS_FSB_TO_AGBNO(mp, target); @@ -3941,7 +3940,7 @@ xfs_free_extent_fix_freelist( memset(&args, 0, sizeof(struct xfs_alloc_arg)); args.tp = tp; args.mp = tp->t_mountp; - args.agno = pag->pag_agno; + args.agno = pag_agno(pag); args.pag = pag; /* diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index 5175d0b4d32e48..88e1545ed4c9dc 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -178,7 +178,7 @@ xfs_allocbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno)); + ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); if (xfs_btree_is_bno(cur->bc_ops)) ptr->s = agf->agf_bno_root; diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 36dd08d132931d..5eda036cf9bfa5 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3280,7 +3280,7 @@ xfs_bmap_longest_free_extent( } longest = xfs_alloc_longest_free_extent(pag, - xfs_alloc_min_freelist(pag->pag_mount, pag), + xfs_alloc_min_freelist(pag_mount(pag), pag), xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); if (*blen < longest) *blen = longest; diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 804a1c96941127..9a13dbf5f54a33 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -372,7 +372,7 @@ xfs_btree_check_ptr( case XFS_BTREE_TYPE_AG: xfs_err(cur->bc_mp, "AG %u: Corrupt %sbt pointer at level %d index %d.", - cur->bc_ag.pag->pag_agno, cur->bc_ops->name, + pag_agno(cur->bc_ag.pag), cur->bc_ops->name, level, index); break; } @@ -1312,7 +1312,7 @@ xfs_btree_owner( case XFS_BTREE_TYPE_INODE: return cur->bc_ino.ip->i_ino; case XFS_BTREE_TYPE_AG: - return cur->bc_ag.pag->pag_agno; + return pag_agno(cur->bc_ag.pag); default: ASSERT(0); return 0; @@ -4744,7 +4744,7 @@ xfs_btree_agblock_v5hdr_verify( return __this_address; if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) return __this_address; - if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno) + if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag_agno(pag)) return __this_address; return 
NULL; } diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c new file mode 100644 index 00000000000000..edf5907845f003 --- /dev/null +++ b/fs/xfs/libxfs/xfs_group.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 Red Hat, Inc. + */ + +#include "xfs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_trace.h" +#include "xfs_group.h" + +/* + * Groups can have passive and active references. + * + * For passive references the code freeing a group is responsible for cleaning + * up objects that hold the passive references (e.g. cached buffers). + * Routines manipulating passive references are xfs_group_get, xfs_group_hold + * and xfs_group_put. + * + * Active references are for short term access to the group for walking trees or + * accessing state. If a group is being shrunk or offlined, the lookup will fail + * to find that group and return NULL instead. + * Routines manipulating active references are xfs_group_grab and + * xfs_group_rele. + */ + +struct xfs_group * +xfs_group_get( + struct xfs_mount *mp, + uint32_t index, + enum xfs_group_type type) +{ + struct xfs_group *xg; + + rcu_read_lock(); + xg = xa_load(&mp->m_groups[type].xa, index); + if (xg) { + trace_xfs_group_get(xg, _RET_IP_); + ASSERT(atomic_read(&xg->xg_ref) >= 0); + atomic_inc(&xg->xg_ref); + } + rcu_read_unlock(); + return xg; +} + +struct xfs_group * +xfs_group_hold( + struct xfs_group *xg) +{ + ASSERT(atomic_read(&xg->xg_ref) > 0 || + atomic_read(&xg->xg_active_ref) > 0); + + trace_xfs_group_hold(xg, _RET_IP_); + atomic_inc(&xg->xg_ref); + return xg; +} + +void +xfs_group_put( + struct xfs_group *xg) +{ + trace_xfs_group_put(xg, _RET_IP_); + + ASSERT(atomic_read(&xg->xg_ref) > 0); + atomic_dec(&xg->xg_ref); +} + +struct xfs_group * +xfs_group_grab( + struct xfs_mount *mp, + uint32_t index, + enum xfs_group_type type) +{ + struct xfs_group *xg; + + rcu_read_lock(); + xg = xa_load(&mp->m_groups[type].xa, index); + if (xg) { + trace_xfs_group_grab(xg, _RET_IP_); + if (!atomic_inc_not_zero(&xg->xg_active_ref)) + xg = NULL; + } + rcu_read_unlock(); + return xg; +} + +/* + * Find the next group after @xg, or the first group if @xg is NULL. 
+ */ +struct xfs_group * +xfs_group_grab_next_mark( + struct xfs_mount *mp, + struct xfs_group *xg, + xa_mark_t mark, + enum xfs_group_type type) +{ + unsigned long index = 0; + + if (xg) { + index = xg->xg_gno + 1; + xfs_group_rele(xg); + } + + rcu_read_lock(); + xg = xa_find(&mp->m_groups[type].xa, &index, ULONG_MAX, mark); + if (xg) { + trace_xfs_group_grab_next_tag(xg, _RET_IP_); + if (!atomic_inc_not_zero(&xg->xg_active_ref)) + xg = NULL; + } + rcu_read_unlock(); + return xg; +} + +void +xfs_group_rele( + struct xfs_group *xg) +{ + trace_xfs_group_rele(xg, _RET_IP_); + atomic_dec(&xg->xg_active_ref); +} + +void +xfs_group_free( + struct xfs_mount *mp, + uint32_t index, + enum xfs_group_type type, + void (*uninit)(struct xfs_group *xg)) +{ + struct xfs_group *xg = xa_erase(&mp->m_groups[type].xa, index); + + XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0); + + if (uninit) + uninit(xg); + + /* drop the mount's active reference */ + xfs_group_rele(xg); + XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0); + kfree_rcu_mightsleep(xg); +} + +int +xfs_group_insert( + struct xfs_mount *mp, + struct xfs_group *xg, + uint32_t index, + enum xfs_group_type type) +{ + int error; + + xg->xg_mount = mp; + xg->xg_gno = index; + xg->xg_type = type; + + /* Active ref owned by mount indicates group is online. */ + atomic_set(&xg->xg_active_ref, 1); + + error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL); + if (error) { + WARN_ON_ONCE(error == -EBUSY); + return error; + } + + return 0; +} diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h new file mode 100644 index 00000000000000..e3b6be7ff9e802 --- /dev/null +++ b/fs/xfs/libxfs/xfs_group.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Red Hat, Inc. 
+ */ +#ifndef __LIBXFS_GROUP_H +#define __LIBXFS_GROUP_H 1 + +struct xfs_group { + struct xfs_mount *xg_mount; + uint32_t xg_gno; + enum xfs_group_type xg_type; + atomic_t xg_ref; /* passive reference count */ + atomic_t xg_active_ref; /* active reference count */ +}; + +struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index, + enum xfs_group_type type); +struct xfs_group *xfs_group_hold(struct xfs_group *xg); +void xfs_group_put(struct xfs_group *xg); + +struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index, + enum xfs_group_type type); +struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp, + struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type); +void xfs_group_rele(struct xfs_group *xg); + +void xfs_group_free(struct xfs_mount *mp, uint32_t index, + enum xfs_group_type type, void (*uninit)(struct xfs_group *xg)); +int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg, + uint32_t index, enum xfs_group_type); + +#define xfs_group_set_mark(_xg, _mark) \ + xa_set_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \ + (_xg)->xg_gno, (_mark)) +#define xfs_group_clear_mark(_xg, _mark) \ + xa_clear_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \ + (_xg)->xg_gno, (_mark)) +#define xfs_group_marked(_mp, _type, _mark) \ + xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark)) + +#endif /* __LIBXFS_GROUP_H */ diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index f5167847f05119..78e1920c1ff964 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -142,7 +142,7 @@ xfs_inobt_complain_bad_rec( xfs_warn(mp, "%sbt record corruption in AG %d detected at %pS!", - cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa); + cur->bc_ops->name, pag_agno(cur->bc_ag.pag), fa); xfs_warn(mp, "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", irec->ir_startino, irec->ir_count, irec->ir_freecount, @@ -551,7 +551,7 @@ xfs_inobt_insert_sprec( struct xfs_buf *agbp, struct xfs_inobt_rec_incore *nrec) /* in/out: new/merged rec. */ { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_btree_cur *cur; int error; int i; @@ -645,7 +645,7 @@ xfs_finobt_insert_sprec( struct xfs_buf *agbp, struct xfs_inobt_rec_incore *nrec) /* in/out: new rec. */ { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_btree_cur *cur; int error; int i; @@ -880,7 +880,7 @@ xfs_ialloc_ag_alloc( * rather than a linear progression to prevent the next generation * number from being easily guessable. */ - error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, + error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag_agno(pag), args.agbno, args.len, get_random_u32()); if (error) @@ -1071,7 +1071,7 @@ xfs_dialloc_check_ino( if (error) return -EAGAIN; - error = xfs_imap_to_bp(pag->pag_mount, tp, &imap, &bp); + error = xfs_imap_to_bp(pag_mount(pag), tp, &imap, &bp); if (error) return -EAGAIN; @@ -1122,7 +1122,7 @@ xfs_dialloc_ag_inobt( /* * If in the same AG as the parent, try to get near the parent. */ - if (pagno == pag->pag_agno) { + if (pagno == pag_agno(pag)) { int doneleft; /* done, to the left */ int doneright; /* done, to the right */ @@ -1599,7 +1599,7 @@ xfs_dialloc_ag( * parent. If so, find the closest available inode to the parent. If * not, consider the agi hint or find the first free inode in the AG. 
*/ - if (pag->pag_agno == pagno) + if (pag_agno(pag) == pagno) error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); else error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); @@ -2053,7 +2053,7 @@ xfs_difree_inobt( struct xfs_icluster *xic, struct xfs_inobt_rec_incore *orec) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_agi *agi = agbp->b_addr; struct xfs_btree_cur *cur; struct xfs_inobt_rec_incore rec; @@ -2187,7 +2187,7 @@ xfs_difree_finobt( xfs_agino_t agino, struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_btree_cur *cur; struct xfs_inobt_rec_incore rec; int offset = agino - ibtrec->ir_startino; @@ -2310,9 +2310,9 @@ xfs_difree( /* * Break up inode number into its components. */ - if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { - xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", - __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); + if (pag_agno(pag) != XFS_INO_TO_AGNO(mp, inode)) { + xfs_warn(mp, "%s: agno != pag_agno(pag) (%d != %d).", + __func__, XFS_INO_TO_AGNO(mp, inode), pag_agno(pag)); ASSERT(0); return -EINVAL; } @@ -2373,7 +2373,7 @@ xfs_imap_lookup( xfs_agblock_t *offset_agbno, int flags) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_inobt_rec_incore rec; struct xfs_btree_cur *cur; struct xfs_buf *agbp; @@ -2384,7 +2384,7 @@ xfs_imap_lookup( if (error) { xfs_alert(mp, "%s: xfs_ialloc_read_agi() returned error %d, agno %d", - __func__, error, pag->pag_agno); + __func__, error, pag_agno(pag)); return error; } @@ -2434,7 +2434,7 @@ xfs_imap( struct xfs_imap *imap, /* location map structure */ uint flags) /* flags for inode btree lookup */ { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); xfs_agblock_t agbno; /* block number of inode in the alloc group */ xfs_agino_t agino; /* inode number within alloc group */ xfs_agblock_t chunk_agbno; /* first block in inode chunk */ @@ -2726,13 +2726,13 @@ xfs_read_agi( xfs_buf_flags_t flags, struct xfs_buf **agibpp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); int error; trace_xfs_read_agi(pag); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), + XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGI_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops); if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI); @@ -2780,7 +2780,7 @@ xfs_ialloc_read_agi( * we are in the middle of a forced shutdown. */ ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || - xfs_is_shutdown(pag->pag_mount)); + xfs_is_shutdown(pag_mount(pag))); if (agibpp) *agibpp = agibp; else @@ -3119,13 +3119,13 @@ xfs_ialloc_check_shrink( int has; int error; - if (!xfs_has_sparseinodes(pag->pag_mount)) + if (!xfs_has_sparseinodes(pag_mount(pag))) return 0; cur = xfs_inobt_init_cursor(pag, tp, agibp); /* Look up the inobt record that would correspond to the new EOFS. 
*/ - agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); + agino = XFS_AGB_TO_AGINO(pag_mount(pag), new_length); error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); if (error || !has) goto out; diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 3291541ae9665a..91d44be2ce48bc 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -248,7 +248,7 @@ xfs_inobt_init_ptr_from_cur( { struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; - ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); + ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agi->agi_seqno)); ptr->s = agi->agi_root; } @@ -260,7 +260,7 @@ xfs_finobt_init_ptr_from_cur( { struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; - ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); + ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agi->agi_seqno)); ptr->s = agi->agi_free_root; } @@ -478,7 +478,7 @@ xfs_inobt_init_cursor( struct xfs_trans *tp, struct xfs_buf *agbp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_btree_cur *cur; cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops, @@ -504,7 +504,7 @@ xfs_finobt_init_cursor( struct xfs_trans *tp, struct xfs_buf *agbp) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_btree_cur *cur; cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops, @@ -715,7 +715,7 @@ static xfs_extlen_t xfs_inobt_max_size( struct xfs_perag *pag) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); xfs_agblock_t agblocks = pag->block_count; /* Bail out if we're uninitialized, which can happen in mkfs. */ @@ -727,7 +727,7 @@ xfs_inobt_max_size( * never be available for the kinds of things that would require btree * expansion. We therefore can pretend the space isn't there. 
*/ - if (xfs_ag_contains_log(mp, pag->pag_agno)) + if (xfs_ag_contains_log(mp, pag_agno(pag))) agblocks -= mp->m_sb.sb_logblocks; return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, @@ -791,10 +791,10 @@ xfs_finobt_calc_reserves( xfs_extlen_t tree_len = 0; int error; - if (!xfs_has_finobt(pag->pag_mount)) + if (!xfs_has_finobt(pag_mount(pag))) return 0; - if (xfs_has_inobtcounts(pag->pag_mount)) + if (xfs_has_inobtcounts(pag_mount(pag))) error = xfs_finobt_read_blocks(pag, tp, &tree_len); else error = xfs_finobt_count_blocks(pag, tp, &tree_len); diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index 5e166553a7a6e9..b8789c42c230b4 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -154,7 +154,7 @@ xfs_refcount_complain_bad_rec( xfs_warn(mp, "Refcount BTree record corruption in AG %d detected at %pS!", - cur->bc_ag.pag->pag_agno, fa); + pag_agno(cur->bc_ag.pag), fa); xfs_warn(mp, "Start block 0x%x, block count 0x%x, references 0x%x", irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount); @@ -1321,7 +1321,7 @@ xfs_refcount_continue_op( ri->ri_startblock = xfs_agbno_to_fsb(pag, new_agbno); ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount)); - ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, ri->ri_startblock)); + ASSERT(pag_agno(pag) == XFS_FSB_TO_AGNO(mp, ri->ri_startblock)); return 0; } diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index c4b10fbf8892a1..db389fdbd929a4 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -81,7 +81,7 @@ xfs_refcountbt_alloc_block( *stat = 0; return 0; } - ASSERT(args.agno == cur->bc_ag.pag->pag_agno); + ASSERT(args.agno == pag_agno(cur->bc_ag.pag)); ASSERT(args.len == 1); new->s = cpu_to_be32(args.agbno); @@ -169,7 +169,7 @@ xfs_refcountbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno)); + ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); ptr->s = agf->agf_refcount_root; } @@ -361,7 +361,7 @@ xfs_refcountbt_init_cursor( { struct xfs_btree_cur *cur; - ASSERT(pag->pag_agno < mp->m_sb.sb_agcount); + ASSERT(pag_agno(pag) < mp->m_sb.sb_agcount); cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops, mp->m_refc_maxlevels, xfs_refcountbt_cur_cache); @@ -514,7 +514,7 @@ xfs_refcountbt_calc_reserves( * never be available for the kinds of things that would require btree * expansion. We therefore can pretend the space isn't there. 
*/ - if (xfs_ag_contains_log(mp, pag->pag_agno)) + if (xfs_ag_contains_log(mp, pag_agno(pag))) agblocks -= mp->m_sb.sb_logblocks; *ask += xfs_refcountbt_max_size(mp, agblocks); diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index 6ef4687b3aba8f..b6764d6b3ab891 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -213,7 +213,7 @@ xfs_rmap_check_irec( struct xfs_perag *pag, const struct xfs_rmap_irec *irec) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); bool is_inode; bool is_unwritten; bool is_bmbt; @@ -288,7 +288,7 @@ xfs_rmap_complain_bad_rec( else xfs_warn(mp, "Reverse Mapping BTree record corruption in AG %d detected at %pS!", - cur->bc_ag.pag->pag_agno, fa); + pag_agno(cur->bc_ag.pag), fa); xfs_warn(mp, "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x", irec->rm_owner, irec->rm_flags, irec->rm_startblock, diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index b49006c1ca7eee..6fd460fc7c9c1d 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -227,7 +227,7 @@ xfs_rmapbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno)); + ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); ptr->s = agf->agf_rmap_root; } @@ -647,9 +647,8 @@ xfs_rmapbt_mem_cursor( struct xfbtree *xfbt) { struct xfs_btree_cur *cur; - struct xfs_mount *mp = pag->pag_mount; - cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_mem_ops, + cur = xfs_btree_alloc_cursor(pag_mount(pag), tp, &xfs_rmapbt_mem_ops, xfs_rmapbt_maxlevels_ondisk(), xfs_rmapbt_cur_cache); cur->bc_mem.xfbtree = xfbt; cur->bc_nlevels = xfbt->nlevels; @@ -863,7 +862,7 @@ xfs_rmapbt_calc_reserves( * never be available for the kinds of things that would require btree * expansion. We therefore can pretend the space isn't there. */ - if (xfs_ag_contains_log(mp, pag->pag_agno)) + if (xfs_ag_contains_log(mp, pag_agno(pag))) agblocks -= mp->m_sb.sb_logblocks; /* Reserve 1% of the AG or enough for 1 block per record. 
*/ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index d95409f3cba667..d2012fbf07aa65 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -1120,7 +1120,7 @@ xfs_update_secondary_sbs( struct xfs_buf *bp; error = xfs_buf_get(mp->m_ddev_targp, - XFS_AG_DADDR(mp, pag->pag_agno, XFS_SB_DADDR), + XFS_AG_DADDR(mp, pag_agno(pag), XFS_SB_DADDR), XFS_FSS_TO_BB(mp, 1), &bp); /* * If we get an error reading or writing alternate superblocks, @@ -1132,7 +1132,7 @@ xfs_update_secondary_sbs( if (error) { xfs_warn(mp, "error allocating secondary superblock for ag %d", - pag->pag_agno); + pag_agno(pag)); if (!saved_error) saved_error = error; continue; @@ -1153,7 +1153,7 @@ xfs_update_secondary_sbs( if (error) { xfs_warn(mp, "write error %d updating a secondary superblock near ag %d", - error, pag->pag_agno); + error, pag_agno(pag)); if (!saved_error) saved_error = error; continue; diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h index a8cd44d03ef648..d3cb6ff3b91301 100644 --- a/fs/xfs/libxfs/xfs_types.h +++ b/fs/xfs/libxfs/xfs_types.h @@ -212,6 +212,14 @@ enum xbtree_recpacking { XBTREE_RECPACKING_FULL, }; +enum xfs_group_type { + XG_TYPE_AG, + XG_TYPE_MAX, +} __packed; + +#define XG_TYPE_STRINGS \ + { XG_TYPE_AG, "ag" } + /* * Type verifier functions */ diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index 82a850eba6c88c..0ea04d6e21cd83 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -208,7 +208,7 @@ xrep_agf_init_header( memset(agf, 0, BBTOB(agf_bp->b_length)); agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION); - agf->agf_seqno = cpu_to_be32(pag->pag_agno); + agf->agf_seqno = cpu_to_be32(pag_agno(pag)); agf->agf_length = cpu_to_be32(pag->block_count); agf->agf_flfirst = old_agf->agf_flfirst; agf->agf_fllast = old_agf->agf_fllast; @@ -384,7 +384,7 @@ xrep_agf( * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED. */ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AG_DADDR(mp, pag_agno(sc->sa.pag), XFS_AGF_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL); if (error) @@ -687,7 +687,7 @@ xrep_agfl_init_header( agfl = XFS_BUF_TO_AGFL(agfl_bp); memset(agfl, 0xFF, BBTOB(agfl_bp->b_length)); agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC); - agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno); + agfl->agfl_seqno = cpu_to_be32(pag_agno(sc->sa.pag)); uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid); /* @@ -741,7 +741,7 @@ xrep_agfl( * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED. 
*/ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AG_DADDR(mp, pag_agno(sc->sa.pag), XFS_AGFL_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL); if (error) @@ -897,7 +897,7 @@ xrep_agi_init_header( memset(agi, 0, BBTOB(agi_bp->b_length)); agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION); - agi->agi_seqno = cpu_to_be32(pag->pag_agno); + agi->agi_seqno = cpu_to_be32(pag_agno(pag)); agi->agi_length = cpu_to_be32(pag->block_count); agi->agi_newino = cpu_to_be32(NULLAGINO); agi->agi_dirino = cpu_to_be32(NULLAGINO); @@ -1112,9 +1112,9 @@ xrep_iunlink_igrab( struct xfs_perag *pag, struct xfs_inode *ip) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); - if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) + if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag)) return false; if (!xfs_inode_on_unlinked_list(ip)) @@ -1138,7 +1138,7 @@ xrep_iunlink_visit( unsigned int bucket; int error; - ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == ragi->sc->sa.pag->pag_agno); + ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == pag_agno(ragi->sc->sa.pag)); ASSERT(xfs_inode_on_unlinked_list(ip)); agino = XFS_INO_TO_AGINO(mp, ip->i_ino); @@ -1169,7 +1169,7 @@ xrep_iunlink_mark_incore( struct xrep_agi *ragi) { struct xfs_perag *pag = ragi->sc->sa.pag; - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); uint32_t first_index = 0; bool done = false; unsigned int nr_found = 0; @@ -1209,7 +1209,7 @@ xrep_iunlink_mark_incore( * us to see this inode, so another lookup from the * same index will not find it again. */ - if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) + if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag)) continue; first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) @@ -1761,7 +1761,7 @@ xrep_agi( * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED. */ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AG_DADDR(mp, pag_agno(sc->sa.pag), XFS_AGI_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &ragi->agi_bp, NULL); if (error) diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c index 1e0c2db6dcf7df..ab0084c4249657 100644 --- a/fs/xfs/scrub/alloc_repair.c +++ b/fs/xfs/scrub/alloc_repair.c @@ -543,7 +543,7 @@ xrep_abt_dispose_one( /* Add a deferred rmap for each extent we used. */ if (resv->used > 0) - xfs_rmap_alloc_extent(sc->tp, pag->pag_agno, resv->agbno, + xfs_rmap_alloc_extent(sc->tp, pag_agno(pag), resv->agbno, resv->used, XFS_RMAP_OWN_AG); /* diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h index 47148cc4a833e5..f3db628b14e1b9 100644 --- a/fs/xfs/scrub/common.h +++ b/fs/xfs/scrub/common.h @@ -216,7 +216,8 @@ int xchk_metadata_inode_forks(struct xfs_scrub *sc); #define xchk_xfile_ag_descr(sc, fmt, ...) \ kasprintf(XCHK_GFP_FLAGS, "XFS (%s): AG 0x%x " fmt, \ (sc)->mp->m_super->s_id, \ - (sc)->sa.pag ? (sc)->sa.pag->pag_agno : (sc)->sm->sm_agno, \ + (sc)->sa.pag ? \ + pag_agno((sc)->sa.pag) : (sc)->sm->sm_agno, \ ##__VA_ARGS__) #define xchk_xfile_ino_descr(sc, fmt, ...) 
\ kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \ diff --git a/fs/xfs/scrub/ialloc_repair.c b/fs/xfs/scrub/ialloc_repair.c index ffa0d67508aa00..14e48d3f1912bf 100644 --- a/fs/xfs/scrub/ialloc_repair.c +++ b/fs/xfs/scrub/ialloc_repair.c @@ -814,7 +814,7 @@ xrep_iallocbt( sc->sick_mask = XFS_SICK_AG_INOBT | XFS_SICK_AG_FINOBT; /* Set up enough storage to handle an AG with nothing but inodes. */ - xfs_agino_range(mp, sc->sa.pag->pag_agno, &first_agino, &last_agino); + xfs_agino_range(mp, pag_agno(sc->sa.pag), &first_agino, &last_agino); last_agino /= XFS_INODES_PER_CHUNK; descr = xchk_xfile_ag_descr(sc, "inode index records"); error = xfarray_create(descr, last_agino, diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c index cf9d983667cec6..84f117667ca2bf 100644 --- a/fs/xfs/scrub/iscan.c +++ b/fs/xfs/scrub/iscan.c @@ -67,7 +67,7 @@ xchk_iscan_mask_skipino( xfs_agnumber_t skip_agno = XFS_INO_TO_AGNO(mp, iscan->skip_ino); xfs_agnumber_t skip_agino = XFS_INO_TO_AGINO(mp, iscan->skip_ino); - if (pag->pag_agno != skip_agno) + if (pag_agno(pag) != skip_agno) return; if (skip_agino < rec->ir_startino) return; @@ -95,7 +95,7 @@ xchk_iscan_find_next( struct xfs_btree_cur *cur; struct xfs_mount *mp = sc->mp; struct xfs_trans *tp = sc->tp; - xfs_agnumber_t agno = pag->pag_agno; + xfs_agnumber_t agno = pag_agno(pag); xfs_agino_t lastino = NULLAGINO; xfs_agino_t first, last; xfs_agino_t agino = *cursor; diff --git a/fs/xfs/scrub/newbt.c b/fs/xfs/scrub/newbt.c index 81cad6c4fe6d9d..70af27d987342f 100644 --- a/fs/xfs/scrub/newbt.c +++ b/fs/xfs/scrub/newbt.c @@ -58,7 +58,7 @@ xrep_newbt_estimate_slack( if (sc->ops->type == ST_PERAG) { free = sc->sa.pag->pagf_freeblks; - sz = xfs_ag_block_count(sc->mp, sc->sa.pag->pag_agno); + sz = xfs_ag_block_count(sc->mp, pag_agno(sc->sa.pag)); } else { free = percpu_counter_sum(&sc->mp->m_fdblocks); sz = sc->mp->m_sb.sb_dblocks; @@ -205,7 +205,7 @@ xrep_newbt_validate_ag_alloc_hint( struct xfs_scrub *sc = xnr->sc; xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, xnr->alloc_hint); - if (agno == sc->sa.pag->pag_agno && + if (agno == pag_agno(sc->sa.pag) && xfs_verify_fsbno(sc->mp, xnr->alloc_hint)) return; @@ -250,8 +250,8 @@ xrep_newbt_alloc_ag_blocks( return -ENOSPC; agno = XFS_FSB_TO_AGNO(mp, args.fsbno); - if (agno != sc->sa.pag->pag_agno) { - ASSERT(agno == sc->sa.pag->pag_agno); + if (agno != pag_agno(sc->sa.pag)) { + ASSERT(agno == pag_agno(sc->sa.pag)); return -EFSCORRUPTED; } diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 707ca52650e130..646ac8ade88d0b 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -413,7 +413,7 @@ xrep_fix_freelist( args.mp = sc->mp; args.tp = sc->tp; - args.agno = sc->sa.pag->pag_agno; + args.agno = pag_agno(sc->sa.pag); args.alignment = 1; args.pag = sc->sa.pag; @@ -972,7 +972,7 @@ xrep_reset_perag_resv( if (error == -ENOSPC) { xfs_err(sc->mp, "Insufficient free space to reset per-AG reservation for AG %u after repair.", - sc->sa.pag->pag_agno); + pag_agno(sc->sa.pag)); error = 0; } diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c index ba5bbc3fb754db..26b5c90b3f6aee 100644 --- a/fs/xfs/scrub/rmap.c +++ b/fs/xfs/scrub/rmap.c @@ -410,7 +410,7 @@ xchk_rmapbt_walk_ag_metadata( goto out; /* OWN_LOG: Internal log */ - if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) { + if (xfs_ag_contains_log(mp, pag_agno(sc->sa.pag))) { error = xagb_bitmap_set(&cr->log_owned, XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart), mp->m_sb.sb_logblocks); diff --git a/fs/xfs/scrub/rmap_repair.c 
b/fs/xfs/scrub/rmap_repair.c index 57d445f7cb2a5d..f88f58db909867 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -344,7 +344,7 @@ xrep_rmap_visit_bmbt( int error; if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) != - rf->rr->sc->sa.pag->pag_agno) + pag_agno(rf->rr->sc->sa.pag)) return 0; agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock); @@ -391,7 +391,7 @@ xrep_rmap_visit_iroot_btree_block( return 0; fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); - if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != rf->rr->sc->sa.pag->pag_agno) + if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != pag_agno(rf->rr->sc->sa.pag)) return 0; agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno); @@ -801,7 +801,7 @@ xrep_rmap_find_log_rmaps( { struct xfs_scrub *sc = rr->sc; - if (!xfs_ag_contains_log(sc->mp, sc->sa.pag->pag_agno)) + if (!xfs_ag_contains_log(sc->mp, pag_agno(sc->sa.pag))) return 0; return xrep_rmap_stash(rr, @@ -976,7 +976,7 @@ xrep_rmap_try_reserve( { struct xrep_rmap_agfl ra = { .bitmap = freesp_blocks, - .agno = rr->sc->sa.pag->pag_agno, + .agno = pag_agno(rr->sc->sa.pag), }; struct xfs_scrub *sc = rr->sc; struct xrep_newbt_resv *resv, *n; @@ -1596,7 +1596,7 @@ xrep_rmap_setup_scan( /* Set up in-memory rmap btree */ error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp, - sc->sa.pag->pag_agno); + pag_agno(sc->sa.pag)); if (error) goto out_mutex; diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index a1ec6445ae5fbf..58cc61f2ed5372 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -792,8 +792,8 @@ TRACE_EVENT(xchk_iallocbt_check_cluster, __field(uint16_t, holemask) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->startino = startino; __entry->map_daddr = map_daddr; __entry->map_len = map_len; @@ -936,8 +936,8 @@ TRACE_EVENT(xchk_refcount_incorrect, __field(xfs_nlink_t, seen) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; @@ -1929,8 +1929,8 @@ DECLARE_EVENT_CLASS(xrep_extent_class, __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; ), @@ -1963,8 +1963,8 @@ DECLARE_EVENT_CLASS(xrep_reap_find_class, __field(bool, crosslinked) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; __entry->crosslinked = crosslinked; @@ -1997,8 +1997,8 @@ TRACE_EVENT(xrep_ibt_walk_rmap, __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = rec->rm_startblock; __entry->len = rec->rm_blockcount; __entry->owner = rec->rm_owner; @@ -2026,8 +2026,8 @@ TRACE_EVENT(xrep_abt_found, __field(xfs_extlen_t, blockcount) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = 
pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->startblock = rec->ar_startblock; __entry->blockcount = rec->ar_blockcount; ), @@ -2052,8 +2052,8 @@ TRACE_EVENT(xrep_ibt_found, __field(uint64_t, freemask) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->startino = rec->ir_startino; __entry->holemask = rec->ir_holemask; __entry->count = rec->ir_count; @@ -2083,8 +2083,8 @@ TRACE_EVENT(xrep_refc_found, __field(xfs_nlink_t, refcount) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->domain = rec->rc_domain; __entry->startblock = rec->rc_startblock; __entry->blockcount = rec->rc_blockcount; @@ -2144,8 +2144,8 @@ TRACE_EVENT(xrep_rmap_found, __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = rec->rm_startblock; __entry->len = rec->rm_blockcount; __entry->owner = rec->rm_owner; @@ -2174,8 +2174,8 @@ TRACE_EVENT(xrep_findroot_block, __field(uint16_t, level) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->magic = magic; __entry->level = level; @@ -2201,8 +2201,8 @@ TRACE_EVENT(xrep_calc_ag_resblks, __field(xfs_agblock_t, usedlen) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->icount = icount; __entry->aglen = aglen; __entry->freelen = freelen; @@ -2230,8 +2230,8 @@ TRACE_EVENT(xrep_calc_ag_resblks_btsize, __field(xfs_agblock_t, refcbt_sz) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bnobt_sz = bnobt_sz; __entry->inobt_sz = inobt_sz; __entry->rmapbt_sz = rmapbt_sz; @@ -2282,8 +2282,8 @@ DECLARE_EVENT_CLASS(xrep_newbt_extent_class, __field(int64_t, owner) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; __entry->owner = owner; @@ -2597,8 +2597,8 @@ TRACE_EVENT(xrep_cow_free_staging, __field(xfs_extlen_t, blockcount) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->blockcount = blockcount; ), @@ -2657,8 +2657,8 @@ TRACE_EVENT(xrep_rmap_live_update, __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->op = op; __entry->agbno = p->startblock; __entry->len = p->blockcount; @@ -3317,9 +3317,9 @@ TRACE_EVENT(xrep_iunlink_visit, __field(xfs_agino_t, next_agino) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; - 
__entry->agino = XFS_INO_TO_AGINO(pag->pag_mount, ip->i_ino); + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); + __entry->agino = XFS_INO_TO_AGINO(pag_mount(pag), ip->i_ino); __entry->bucket = bucket; __entry->bucket_agino = bucket_agino; __entry->prev_agino = ip->i_prev_unlinked; @@ -3405,8 +3405,8 @@ TRACE_EVENT(xrep_iunlink_walk_ondisk_bucket, __field(xfs_agino_t, next_agino) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->prev_agino = prev_agino; __entry->next_agino = next_agino; @@ -3431,8 +3431,8 @@ DECLARE_EVENT_CLASS(xrep_iunlink_resolve_class, __field(xfs_agino_t, next_agino) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->prev_agino = prev_agino; __entry->next_agino = next_agino; @@ -3518,8 +3518,8 @@ TRACE_EVENT(xrep_iunlink_add_to_bucket, __field(xfs_agino_t, next_agino) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->agino = agino; __entry->next_agino = curr_head; @@ -3544,8 +3544,8 @@ TRACE_EVENT(xrep_iunlink_commit_bucket, __field(xfs_agino_t, agino) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->old_agino = old_agino; __entry->agino = agino; diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 79648f8c6b270b..dfd6edcebb6ea4 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -159,7 +159,7 @@ xfs_trim_gather_extents( struct xfs_trim_cur *tcur, struct xfs_busy_extents *extents) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_trans *tp; struct xfs_btree_cur *cur; struct xfs_buf *agbp; @@ -365,7 +365,7 @@ xfs_trim_perag_extents( * list after this function call, as it may have been freed by * the time control returns to us. 
*/ - error = xfs_discard_extents(pag->pag_mount, extents); + error = xfs_discard_extents(pag_mount(pag), extents); if (error) break; diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c index 81099400a171dc..79b0f833c511e3 100644 --- a/fs/xfs/xfs_extent_busy.c +++ b/fs/xfs/xfs_extent_busy.c @@ -283,7 +283,7 @@ xfs_extent_busy_update_extent( out_force_log: spin_unlock(&pag->pagb_lock); - xfs_log_force(pag->pag_mount, XFS_LOG_SYNC); + xfs_log_force(pag_mount(pag), XFS_LOG_SYNC); trace_xfs_extent_busy_force(pag, fbno, flen); spin_lock(&pag->pagb_lock); return false; @@ -659,7 +659,7 @@ xfs_extent_busy_ag_cmp( container_of(l2, struct xfs_extent_busy, list); s32 diff; - diff = b1->pag->pag_agno - b2->pag->pag_agno; + diff = pag_agno(b1->pag) - pag_agno(b2->pag); if (!diff) diff = b1->bno - b2->bno; return diff; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 7f1be08dbc1123..c198962edea163 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -362,7 +362,7 @@ xfs_extent_free_diff_items( struct xfs_extent_free_item *ra = xefi_entry(a); struct xfs_extent_free_item *rb = xefi_entry(b); - return ra->xefi_pag->pag_agno - rb->xefi_pag->pag_agno; + return pag_agno(ra->xefi_pag) - pag_agno(rb->xefi_pag); } /* Log a free extent to the intent item. */ diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index f2caebb78dd25d..a961aa420c488b 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -96,7 +96,7 @@ xfs_filestream_pick_ag( maxfree = pag->pagf_freeblks; if (max_pag) xfs_perag_rele(max_pag); - atomic_inc(&pag->pag_active_ref); + atomic_inc(&pag_group(pag)->xg_active_ref); max_pag = pag; } @@ -222,7 +222,7 @@ xfs_filestream_lookup_association( * down immediately after we mark the lookup as done. */ pag = container_of(mru, struct xfs_fstrm_item, mru)->pag; - atomic_inc(&pag->pag_active_ref); + atomic_inc(&pag_group(pag)->xg_active_ref); xfs_mru_cache_done(mp->m_filestream); trace_xfs_filestream_lookup(pag, ap->ip->i_ino); @@ -275,7 +275,7 @@ xfs_filestream_create_association( struct xfs_fstrm_item *item = container_of(mru, struct xfs_fstrm_item, mru); - agno = (item->pag->pag_agno + 1) % mp->m_sb.sb_agcount; + agno = (pag_agno(item->pag) + 1) % mp->m_sb.sb_agcount; xfs_fstrm_free_func(mp, mru); } else if (xfs_is_inode32(mp)) { xfs_agnumber_t rotorstep = xfs_rotorstep; @@ -314,7 +314,7 @@ xfs_filestream_create_association( if (!item) goto out_put_fstrms; - atomic_inc(&args->pag->pag_active_ref); + atomic_inc(&pag_group(args->pag)->xg_active_ref); item->pag = args->pag; error = xfs_mru_cache_insert(mp->m_filestream, pino, &item->mru); if (error) diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index eff198ae1ce33a..918e1c38a15592 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -353,7 +353,7 @@ xfs_getfsmap_helper( return -ECANCELED; trace_xfs_fsmap_mapping(mp, info->dev, - info->pag ? info->pag->pag_agno : NULLAGNUMBER, rec); + info->pag ? pag_agno(info->pag) : NULLAGNUMBER, rec); fmr.fmr_device = info->dev; fmr.fmr_physical = rec_daddr; @@ -519,7 +519,7 @@ __xfs_getfsmap_datadev( * is the last AG that we're querying. 
*/ info->pag = pag; - if (pag->pag_agno == end_ag) { + if (pag_agno(pag) == end_ag) { info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsb); info->high.rm_offset = XFS_BB_TO_FSBT(mp, @@ -541,9 +541,9 @@ __xfs_getfsmap_datadev( if (error) break; - trace_xfs_fsmap_low_key(mp, info->dev, pag->pag_agno, + trace_xfs_fsmap_low_key(mp, info->dev, pag_agno(pag), &info->low); - trace_xfs_fsmap_high_key(mp, info->dev, pag->pag_agno, + trace_xfs_fsmap_high_key(mp, info->dev, pag_agno(pag), &info->high); error = query_fn(tp, info, &bt_cur, priv); @@ -554,7 +554,7 @@ __xfs_getfsmap_datadev( * Set the AG low key to the start of the AG prior to * moving on to the next AG. */ - if (pag->pag_agno == start_ag) + if (pag_agno(pag) == start_ag) memset(&info->low, 0, sizeof(info->low)); /* @@ -562,7 +562,7 @@ __xfs_getfsmap_datadev( * before we drop the reference to the perag when the loop * terminates. */ - if (pag->pag_agno == end_ag) { + if (pag_agno(pag) == end_ag) { info->last = true; error = query_fn(tp, info, &bt_cur, priv); if (error) diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 6b119a7a324fa4..0a930fc116f575 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -204,7 +204,7 @@ xfs_reclaim_work_queue( { rcu_read_lock(); - if (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) { + if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); } @@ -219,15 +219,14 @@ static inline void xfs_blockgc_queue( struct xfs_perag *pag) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); if (!xfs_is_blockgc_enabled(mp)) return; rcu_read_lock(); if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) - queue_delayed_work(pag->pag_mount->m_blockgc_wq, - &pag->pag_blockgc_work, + queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, msecs_to_jiffies(xfs_blockgc_secs * 1000)); rcu_read_unlock(); } @@ -239,7 +238,6 @@ xfs_perag_set_inode_tag( xfs_agino_t agino, unsigned int tag) { - struct xfs_mount *mp = pag->pag_mount; bool was_tagged; lockdep_assert_held(&pag->pag_ici_lock); @@ -253,13 +251,13 @@ xfs_perag_set_inode_tag( if (was_tagged) return; - /* propagate the tag up into the perag radix tree */ - xa_set_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag)); + /* propagate the tag up into the pag xarray tree */ + xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag)); /* start background work */ switch (tag) { case XFS_ICI_RECLAIM_TAG: - xfs_reclaim_work_queue(mp); + xfs_reclaim_work_queue(pag_mount(pag)); break; case XFS_ICI_BLOCKGC_TAG: xfs_blockgc_queue(pag); @@ -276,8 +274,6 @@ xfs_perag_clear_inode_tag( xfs_agino_t agino, unsigned int tag) { - struct xfs_mount *mp = pag->pag_mount; - lockdep_assert_held(&pag->pag_ici_lock); /* @@ -295,9 +291,8 @@ xfs_perag_clear_inode_tag( if (radix_tree_tagged(&pag->pag_ici_root, tag)) return; - /* clear the tag from the perag radix tree */ - xa_clear_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag)); - + /* clear the tag from the pag xarray */ + xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag)); trace_xfs_perag_clear_inode_tag(pag, _RET_IP_); } @@ -310,22 +305,9 @@ xfs_perag_grab_next_tag( struct xfs_perag *pag, int tag) { - unsigned long index = 0; - - if (pag) { - index = pag->pag_agno + 1; - xfs_perag_rele(pag); - } - - rcu_read_lock(); - pag = xa_find(&mp->m_perags, &index, ULONG_MAX, ici_tag_to_mark(tag)); - if (pag) { - trace_xfs_perag_grab_next_tag(pag, 
_RET_IP_); - if (!atomic_inc_not_zero(&pag->pag_active_ref)) - pag = NULL; - } - rcu_read_unlock(); - return pag; + return to_perag(xfs_group_grab_next_mark(mp, + pag ? pag_group(pag) : NULL, + ici_tag_to_mark(tag), XG_TYPE_AG)); } /* @@ -1014,7 +996,7 @@ xfs_reclaim_inodes( if (xfs_want_reclaim_sick(mp)) icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK; - while (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) { + while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { xfs_ail_push_all_sync(mp->m_ail); xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); } @@ -1056,7 +1038,7 @@ long xfs_reclaim_inodes_count( struct xfs_mount *mp) { - XA_STATE (xas, &mp->m_perags, 0); + XA_STATE (xas, &mp->m_groups[XG_TYPE_AG].xa, 0); long reclaimable = 0; struct xfs_perag *pag; @@ -1499,7 +1481,7 @@ xfs_blockgc_worker( { struct xfs_perag *pag = container_of(to_delayed_work(work), struct xfs_perag, pag_blockgc_work); - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); int error; trace_xfs_blockgc_worker(mp, __return_address); @@ -1507,7 +1489,7 @@ xfs_blockgc_worker( error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL); if (error) xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", - pag->pag_agno, error); + pag_agno(pag), error); xfs_blockgc_queue(pag); } @@ -1548,8 +1530,7 @@ xfs_blockgc_flush_all( * queued, it will not be requeued. Then flush whatever is left. */ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) - mod_delayed_work(pag->pag_mount->m_blockgc_wq, - &pag->pag_blockgc_work, 0); + mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0); while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) flush_delayed_work(&pag->pag_blockgc_work); @@ -1688,7 +1669,7 @@ xfs_icwalk_ag( enum xfs_icwalk_goal goal, struct xfs_icwalk *icw) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); uint32_t first_index; int last_error = 0; int skipped; @@ -1741,7 +1722,7 @@ xfs_icwalk_ag( * us to see this inode, so another lookup from the * same index will not find it again. */ - if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) + if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag)) continue; first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 5529ff39b64001..693770f9bb09f0 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1514,7 +1514,7 @@ xfs_iunlink_reload_next( xfs_agino_t next_agino) { struct xfs_perag *pag = agibp->b_pag; - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_inode *next_ip = NULL; int error; @@ -1529,7 +1529,7 @@ xfs_iunlink_reload_next( xfs_info_ratelimited(mp, "Found unrecovered unlinked inode 0x%x in AG 0x%x. 
Initiating recovery.", - next_agino, pag->pag_agno); + next_agino, pag_agno(pag)); /* * Use an untrusted lookup just to be cautious in case the AGI has been @@ -1572,7 +1572,7 @@ xfs_ifree_mark_inode_stale( struct xfs_inode *free_ip, xfs_ino_t inum) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_inode_log_item *iip; struct xfs_inode *ip; diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index a89ae2aef7c445..ec2d56f1840fc6 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -188,7 +188,7 @@ xfs_iwalk_ag_recs( return 0; if (iwag->inobt_walk_fn) { - error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec, + error = iwag->inobt_walk_fn(mp, tp, pag_agno(pag), irec, iwag->data); if (error) return error; @@ -405,7 +405,7 @@ xfs_iwalk_ag( int error = 0; /* Set up our cursor at the right place in the inode btree. */ - ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino)); + ASSERT(pag_agno(pag) == XFS_INO_TO_AGNO(mp, iwag->startino)); agino = XFS_INO_TO_AGINO(mp, iwag->startino); error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more); @@ -677,7 +677,7 @@ xfs_iwalk_threaded( iwag->sz_recs = xfs_iwalk_prefetch(inode_records); iwag->lastino = NULLFSINO; xfs_pwork_queue(&pctl, &iwag->pwork); - startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0); + startino = XFS_AGINO_TO_INO(mp, pag_agno(pag) + 1, 0); if (flags & XFS_IWALK_SAME_AG) break; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 56772bbd38cdea..a285d2d1f68c15 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2677,7 +2677,7 @@ xlog_recover_clear_agi_bucket( struct xfs_perag *pag, int bucket) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_trans *tp; struct xfs_agi *agi; struct xfs_buf *agibp; @@ -2708,7 +2708,7 @@ xlog_recover_clear_agi_bucket( xfs_trans_cancel(tp); out_error: xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, - pag->pag_agno); + pag_agno(pag)); return; } @@ -2718,7 +2718,7 @@ xlog_recover_iunlink_bucket( struct xfs_agi *agi, int bucket) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = pag_mount(pag); struct xfs_inode *prev_ip = NULL; struct xfs_inode *ip; xfs_agino_t prev_agino, agino; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 96496f39f551ae..530d7f025506ce 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -71,6 +71,10 @@ struct xfs_inodegc { unsigned int cpu; }; +struct xfs_groups { + struct xarray xa; +}; + /* * The struct xfsmount layout is optimised to separate read-mostly variables * from variables that are frequently modified. We put the read-mostly variables @@ -208,7 +212,7 @@ typedef struct xfs_mount { */ atomic64_t m_allocbt_blks; - struct xarray m_perags; /* per-ag accounting info */ + struct xfs_groups m_groups[XG_TYPE_MAX]; uint64_t m_resblks; /* total reserved blocks */ uint64_t m_resblks_avail;/* available reserved blocks */ uint64_t m_resblks_save; /* reserved blks @ remount,ro */ diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 27398512b179b2..29f101005f3eda 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -244,7 +244,7 @@ xfs_refcount_update_diff_items( struct xfs_refcount_intent *ra = ci_entry(a); struct xfs_refcount_intent *rb = ci_entry(b); - return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno; + return pag_agno(ra->ri_pag) - pag_agno(rb->ri_pag); } /* Log refcount updates in the intent item. 
*/ diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 5bf6682e701b5a..2e82b5b6ed52d2 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -144,7 +144,7 @@ xfs_reflink_find_shared( if (error) return error; - cur = xfs_refcountbt_init_cursor(pag->pag_mount, tp, agbp, pag); + cur = xfs_refcountbt_init_cursor(pag_mount(pag), tp, agbp, pag); error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen, find_end_of_shared); diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 88b5580e1e19f5..1b83d09351f028 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -243,7 +243,7 @@ xfs_rmap_update_diff_items( struct xfs_rmap_intent *ra = ri_entry(a); struct xfs_rmap_intent *rb = ri_entry(b); - return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno; + return pag_agno(ra->ri_pag) - pag_agno(rb->ri_pag); } /* Log rmap updates in the intent item. */ diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index fbb3a1594c0dcc..457c2d70968d9a 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -238,7 +238,7 @@ xfs_set_inode_alloc_perag( xfs_ino_t ino, xfs_agnumber_t max_metadata) { - if (!xfs_is_inode32(pag->pag_mount)) { + if (!xfs_is_inode32(pag_mount(pag))) { set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); return false; @@ -251,7 +251,7 @@ xfs_set_inode_alloc_perag( } set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); - if (pag->pag_agno < max_metadata) + if (pag_agno(pag) < max_metadata) set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); else clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); @@ -2011,17 +2011,20 @@ static const struct fs_context_operations xfs_context_ops = { * mount option parsing having already been performed as this can be called from * fsopen() before any parameters have been set. 
*/ -static int xfs_init_fs_context( +static int +xfs_init_fs_context( struct fs_context *fc) { struct xfs_mount *mp; + int i; mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL); if (!mp) return -ENOMEM; spin_lock_init(&mp->m_sb_lock); - xa_init(&mp->m_perags); + for (i = 0; i < XG_TYPE_MAX; i++) + xa_init(&mp->m_groups[i].xa); mutex_init(&mp->m_growlock); INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c index 7ef50107224647..17164b2d0472d4 100644 --- a/fs/xfs/xfs_trace.c +++ b/fs/xfs/xfs_trace.c @@ -11,6 +11,7 @@ #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" +#include "xfs_group.h" #include "xfs_defer.h" #include "xfs_da_format.h" #include "xfs_inode.h" diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index dcd0452fc7e438..14e7f6a26a2300 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -72,6 +72,7 @@ struct xfs_btree_cur; struct xfs_defer_op_type; struct xfs_refcount_irec; struct xfs_fsmap; +struct xfs_group; struct xfs_rmap_irec; struct xfs_icreate_log; struct xfs_iunlink_item; @@ -192,10 +193,11 @@ DECLARE_EVENT_CLASS(xfs_perag_class, __field(unsigned long, caller_ip) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; - __entry->refcount = atomic_read(&pag->pag_ref); - __entry->active_refcount = atomic_read(&pag->pag_active_ref); + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); + __entry->refcount = atomic_read(&pag->pag_group.xg_ref); + __entry->active_refcount = + atomic_read(&pag->pag_group.xg_active_ref); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS", @@ -210,16 +212,51 @@ DECLARE_EVENT_CLASS(xfs_perag_class, DEFINE_EVENT(xfs_perag_class, name, \ TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), \ TP_ARGS(pag, caller_ip)) -DEFINE_PERAG_REF_EVENT(xfs_perag_get); -DEFINE_PERAG_REF_EVENT(xfs_perag_hold); -DEFINE_PERAG_REF_EVENT(xfs_perag_put); -DEFINE_PERAG_REF_EVENT(xfs_perag_grab); -DEFINE_PERAG_REF_EVENT(xfs_perag_grab_next_tag); -DEFINE_PERAG_REF_EVENT(xfs_perag_rele); DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count); +TRACE_DEFINE_ENUM(XG_TYPE_AG); + +DECLARE_EVENT_CLASS(xfs_group_class, + TP_PROTO(struct xfs_group *xg, unsigned long caller_ip), + TP_ARGS(xg, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(enum xfs_group_type, type) + __field(xfs_agnumber_t, agno) + __field(int, refcount) + __field(int, active_refcount) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; + __entry->refcount = atomic_read(&xg->xg_ref); + __entry->active_refcount = atomic_read(&xg->xg_active_ref); + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d %sno 0x%x passive refs %d active refs %d caller %pS", + MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), + __entry->agno, + __entry->refcount, + __entry->active_refcount, + (char *)__entry->caller_ip) +); + +#define DEFINE_GROUP_REF_EVENT(name) \ +DEFINE_EVENT(xfs_group_class, name, \ + TP_PROTO(struct xfs_group *xg, unsigned long caller_ip), \ + TP_ARGS(xg, caller_ip)) +DEFINE_GROUP_REF_EVENT(xfs_group_get); 
+DEFINE_GROUP_REF_EVENT(xfs_group_hold); +DEFINE_GROUP_REF_EVENT(xfs_group_put); +DEFINE_GROUP_REF_EVENT(xfs_group_grab); +DEFINE_GROUP_REF_EVENT(xfs_group_grab_next_tag); +DEFINE_GROUP_REF_EVENT(xfs_group_rele); + TRACE_EVENT(xfs_inodegc_worker, TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits), TP_ARGS(mp, shrinker_hits), @@ -307,8 +344,8 @@ DECLARE_EVENT_CLASS(xfs_ag_class, __field(xfs_agnumber_t, agno) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); ), TP_printk("dev %d:%d agno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), @@ -672,9 +709,9 @@ DECLARE_EVENT_CLASS(xfs_filestream_class, __field(int, streams) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->ino = ino; - __entry->agno = pag->pag_agno; + __entry->agno = pag_agno(pag); __entry->streams = atomic_read(&pag->pagf_fstrms); ), TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d", @@ -702,9 +739,9 @@ TRACE_EVENT(xfs_filestream_pick, __field(xfs_extlen_t, free) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; + __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->ino = ino; - __entry->agno = pag->pag_agno; + __entry->agno = pag_agno(pag); __entry->streams = atomic_read(&pag->pagf_fstrms); __entry->free = pag->pagf_freeblks; ), @@ -908,8 +945,8 @@ TRACE_EVENT(xfs_irec_merge_pre, __field(uint16_t, nholemask) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agino = rec->ir_startino; __entry->holemask = rec->ir_holemask; __entry->nagino = nrec->ir_startino; @@ -935,8 +972,8 @@ TRACE_EVENT(xfs_irec_merge_post, __field(uint16_t, holemask) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agino = nrec->ir_startino; __entry->holemask = nrec->ir_holemask; ), @@ -1646,8 +1683,8 @@ DECLARE_EVENT_CLASS(xfs_extent_busy_class, __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; ), @@ -1680,8 +1717,8 @@ TRACE_EVENT(xfs_extent_busy_trim, __field(xfs_extlen_t, tlen) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; __entry->tbno = tbno; @@ -1773,8 +1810,8 @@ TRACE_EVENT(xfs_free_extent, __field(int, haveright) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; __entry->resv = resv; @@ -2437,8 +2474,8 @@ DECLARE_EVENT_CLASS(xfs_discard_class, __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; ), @@ -2543,7 +2580,7 @@ TRACE_EVENT(xfs_btree_alloc_block, __entry->ino = 
cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2799,7 +2836,7 @@ DECLARE_EVENT_CLASS(xfs_rmap_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = agbno; __entry->len = len; __entry->owner = oinfo->oi_owner; @@ -2844,7 +2881,7 @@ DECLARE_EVENT_CLASS(xfs_btree_error_class, __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2898,7 +2935,7 @@ TRACE_EVENT(xfs_rmap_convert_state, __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2933,7 +2970,7 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = agbno; __entry->len = len; __entry->owner = owner; @@ -3106,8 +3143,8 @@ DECLARE_EVENT_CLASS(xfs_ag_resv_class, TP_fast_assign( struct xfs_ag_resv *r = xfs_perag_resv(pag, resv); - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->resv = resv; __entry->freeblks = pag->pagf_freeblks; __entry->flcount = pag->pagf_flcount; @@ -3151,8 +3188,8 @@ TRACE_EVENT(xfs_ag_resv_init_error, __field(unsigned long, caller_ip) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->error = error; __entry->caller_ip = caller_ip; ), @@ -3177,7 +3214,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = agbno; __entry->len = len; ), @@ -3208,7 +3245,7 @@ TRACE_EVENT(xfs_refcount_lookup, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = agbno; __entry->dir = dir; ), @@ -3234,7 +3271,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; @@ -3270,7 +3307,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; @@ -3312,7 +3349,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = 
i1->rc_blockcount; @@ -3362,7 +3399,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; @@ -3417,7 +3454,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; @@ -4040,8 +4077,8 @@ TRACE_EVENT(xfs_iunlink_update_bucket, __field(xfs_agino_t, new_ptr) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->old_ptr = old_ptr; __entry->new_ptr = new_ptr; @@ -4065,8 +4102,8 @@ TRACE_EVENT(xfs_iunlink_update_dinode, __field(xfs_agino_t, new_ptr) ), TP_fast_assign( - __entry->dev = iup->pag->pag_mount->m_super->s_dev; - __entry->agno = iup->pag->pag_agno; + __entry->dev = pag_mount(iup->pag)->m_super->s_dev; + __entry->agno = pag_agno(iup->pag); __entry->agino = XFS_INO_TO_AGINO(iup->ip->i_mount, iup->ip->i_ino); __entry->old_ptr = old_ptr; @@ -4187,8 +4224,8 @@ DECLARE_EVENT_CLASS(xfs_ag_corrupt_class, __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->flags = flags; ), TP_printk("dev %d:%d agno 0x%x flags 0x%x", @@ -4241,8 +4278,8 @@ TRACE_EVENT(xfs_iwalk_ag_rec, __field(uint64_t, freemask) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->startino = irec->ir_startino; __entry->freemask = irec->ir_free; ), @@ -4304,7 +4341,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot, TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = cur->bc_ag.afake->af_root; __entry->levels = cur->bc_ag.afake->af_levels; __entry->blocks = cur->bc_ag.afake->af_blocks; @@ -4419,7 +4456,7 @@ TRACE_EVENT(xfs_btree_bload_block, __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb); __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb); } else { - __entry->agno = cur->bc_ag.pag->pag_agno; + __entry->agno = pag_agno(cur->bc_ag.pag); __entry->agbno = be32_to_cpu(ptr->s); } __entry->nr_records = nr_records; @@ -4654,8 +4691,8 @@ DECLARE_EVENT_CLASS(xfs_perag_intents_class, __field(void *, caller_ip) ), TP_fast_assign( - __entry->dev = pag->pag_mount->m_super->s_dev; - __entry->agno = pag->pag_agno; + __entry->dev = pag_mount(pag)->m_super->s_dev; + __entry->agno = pag_agno(pag); __entry->nr_intents = atomic_read(&pag->pag_intents_drain.dr_count); __entry->caller_ip = caller_ip; ), From patchwork Tue Nov 5 22:13:15 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863588 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A7A051F667B for ; Tue, 5 Nov 2024 22:13:15 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844795; cv=none; b=HtkkKeiol0Z0QPIf9AiVmgZQv5LffjeuZY7kxIl19PfJnBxJu7+pobH8Y9hZruYe4XaxJs+wC325UsZRRwV1jDKCtvbUEsNYf72a96FuhDDYOKZuolWJe0SFgvSkemhQqztV6bQ1KiBpuC1xdCSHHzG4bXYnf7/cVHFAR7MLaZ4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844795; c=relaxed/simple; bh=c6SpfCov8L841eto8mrDZDR6uimoB6fGKncbmclUGo8=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=JxJVurMTvm1veA80B6N1o5hnOckuMVqb+soGFqnzgfazRtiBxc1jVGW1jfA9UCSqRWzTi335QxIo3dWxamHTM7Y9Qqb4j3RpdvnfN6DFjC3R47RoNHsaXqinfr66OyR/43gv/fJtG8Ic24yonnxKkT9jp2Ly83Yb4oCBEo8pNy4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=pO6nozOy; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="pO6nozOy" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 81E30C4CECF; Tue, 5 Nov 2024 22:13:15 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844795; bh=c6SpfCov8L841eto8mrDZDR6uimoB6fGKncbmclUGo8=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=pO6nozOy+S1Zs9MoX/FvQyCofn5TCRjZEU6YnQ/qig49C5HSc/38boYOg/EbKK1UC 5luhHOwFOUo4VY/tbb/vt48wDj+HpxOu6AKJBAV2q1cLAr80md004ZDPkmJiypNKT6 jxzpklHdp3psWxhF/EV1w2mOE+Vq13o+iAWS/+wzx1fnEECSiWd3egTKkI9pyn5HFA 40Mblzxl1FqbGFQ2Eo3mak9yZU3Dc6eljlSW+gg8k3kmJVruRpYHZLvBpiufNi4L1g Y0Pc7VvIEo/UxbrhHUdjMKjRKYAZK/gmWUmENS2IRlWJKp7cTSbZNjjdz5x8foQV/0 Wgwarjc+J+O+w== Date: Tue, 05 Nov 2024 14:13:15 -0800 Subject: [PATCH 03/16] xfs: add a xfs_group_next_range helper From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395320.1869491.1072338675356159578.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Add a helper to iterate over iterate over all groups, which can be used as a simple while loop: struct xfs_group *xg = NULL; while ((xg = xfs_group_next_range(mp, xg, 0, MAX_GROUP))) { ... } This will be wrapped by the realtime group code first, and eventually replace the for_each_rtgroup_from and for_each_rtgroup_range helpers. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/libxfs/xfs_group.c | 26 ++++++++++++++++++++++++++ fs/xfs/libxfs/xfs_group.h | 3 +++ 2 files changed, 29 insertions(+) diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index edf5907845f003..59e08cfaf9bffd 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -87,6 +87,32 @@ xfs_group_grab( return xg; } +/* + * Iterate to the next group. 
To start the iteration at @start_index, a %NULL + * @xg is passed, else the previous group returned from this function. The + * caller should break out of the loop when this returns %NULL. If the caller + * wants to break out of a loop that did not finish it needs to release the + * active reference to @xg using xfs_group_rele() itself. + */ +struct xfs_group * +xfs_group_next_range( + struct xfs_mount *mp, + struct xfs_group *xg, + uint32_t start_index, + uint32_t end_index, + enum xfs_group_type type) +{ + uint32_t index = start_index; + + if (xg) { + index = xg->xg_gno + 1; + xfs_group_rele(xg); + } + if (index > end_index) + return NULL; + return xfs_group_grab(mp, index, type); +} + /* * Find the next group after @xg, or the first group if @xg is NULL. */ diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index e3b6be7ff9e802..dd7da90443054b 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -20,6 +20,9 @@ void xfs_group_put(struct xfs_group *xg); struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index, enum xfs_group_type type); +struct xfs_group *xfs_group_next_range(struct xfs_mount *mp, + struct xfs_group *xg, uint32_t start_index, uint32_t end_index, + enum xfs_group_type type); struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp, struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type); void xfs_group_rele(struct xfs_group *xg); From patchwork Tue Nov 5 22:13:30 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863589 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 512B41F667B for ; Tue, 5 Nov 2024 22:13:31 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844811; cv=none; b=LsiTCO0OVj5SAsTfeQ9L3arT6Jx0WMM3/ykhFVQtsJfo4K49mrmDq4AyD4igb28pYn4k6U5cAOQInaSxAi283fKd4yhaYhboN62wQ08Ca9+uZaxjarlVxJ8SM2XNI2O0WbgwsjfyUWt6LhBvjBeSy5n3OCODQPXXIZS2WJ+kYpQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844811; c=relaxed/simple; bh=mmRwGe50xGwgJAmcVmaMoxcpqnoLS3LcEYPYYvoXzQQ=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=Zy495HV3/HKTfog/4z+xaoWPXVifPI7G1TU/9h81m104tHN1T+TD5Zhdlql1RrPHE4ZZjRQGb2YPplFLogooqfoWPOyIbM3Kdo2ZHWkipEMEXnJZJUIoblbQm3esD31JeLlWAYkaK/tUXWeEqTm6F9p3oeJdJam9rJRmJcr9kwc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=Wp44JjG6; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="Wp44JjG6" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1F597C4CECF; Tue, 5 Nov 2024 22:13:31 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844811; bh=mmRwGe50xGwgJAmcVmaMoxcpqnoLS3LcEYPYYvoXzQQ=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=Wp44JjG6/hIHpbMXmgePHLH2LjpPAkGa4HS9qg6o1Ys7AiJ0VFTahtobwtTqeKTyk dKdyQFQ2ZgCsnJRfToLL49cKumykHGB9M+qGnevOX5oVheK4EEF1EUkiiLsL9cU44I W8D+W0UqKE16CcXPJ2k11HubY/HKM8pJdQCjuO2J75XuXN45i4/bWaEOmMNc6z6AHh
ZY46og/eh3Gv0vMoVNvaUEcCdTfBjh1y+FVbuB1VYsgHkpOb5ntYnQZpG5Wsv302Q6 yWBxpDTMCLcJLEuppps6asf+9zdznI3vks/jw4dSHeAxIjuAceZqGF100vvVDGppMp 0lDzd0x7899Kw== Date: Tue, 05 Nov 2024 14:13:30 -0800 Subject: [PATCH 04/16] xfs: switch perag iteration from the for_each macros to a while based iterator From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395337.1869491.13164219999576618112.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig The current for_each_perag* macros are a bit annoying in that they require the caller to provide both an object and an index iterator, and they also somewhat obfuscate the underlying control flow mechanism. Switch to open coded while loops using new xfs_perag_next{,_from,_range} helpers that return the next pag structure to iterate on based on the previous one or NULL for the loop start. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/libxfs/xfs_ag.h | 62 +++++++++++++++++++------------------------ fs/xfs/libxfs/xfs_sb.c | 15 +++------- fs/xfs/libxfs/xfs_types.c | 5 +-- fs/xfs/scrub/bmap.c | 5 +-- fs/xfs/scrub/bmap_repair.c | 5 +-- fs/xfs/scrub/fscounters.c | 10 +++---- fs/xfs/scrub/health.c | 11 +++----- fs/xfs/scrub/inode_repair.c | 5 +-- fs/xfs/xfs_discard.c | 6 ++-- fs/xfs/xfs_extent_busy.c | 5 +-- fs/xfs/xfs_fsmap.c | 7 ++--- fs/xfs/xfs_fsops.c | 10 +++---- fs/xfs/xfs_health.c | 5 +-- fs/xfs/xfs_icache.c | 5 +-- fs/xfs/xfs_iwalk.c | 18 +++++++----- fs/xfs/xfs_log_recover.c | 5 +-- fs/xfs/xfs_reflink.c | 5 +-- 17 files changed, 79 insertions(+), 105 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 69b934ad2c4aad..80969682dc4746 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -208,6 +208,34 @@ xfs_perag_rele( xfs_group_rele(pag_group(pag)); } +static inline struct xfs_perag * +xfs_perag_next_range( + struct xfs_mount *mp, + struct xfs_perag *pag, + xfs_agnumber_t start_agno, + xfs_agnumber_t end_agno) +{ + return to_perag(xfs_group_next_range(mp, pag ?
pag_group(pag) : NULL, + start_agno, end_agno, XG_TYPE_AG)); +} + +static inline struct xfs_perag * +xfs_perag_next_from( + struct xfs_mount *mp, + struct xfs_perag *pag, + xfs_agnumber_t start_agno) +{ + return xfs_perag_next_range(mp, pag, start_agno, mp->m_sb.sb_agcount - 1); +} + +static inline struct xfs_perag * +xfs_perag_next( + struct xfs_mount *mp, + struct xfs_perag *pag) +{ + return xfs_perag_next_from(mp, pag, 0); +} + /* * Per-ag geometry infomation and validation */ @@ -273,40 +301,6 @@ xfs_ag_contains_log(struct xfs_mount *mp, xfs_agnumber_t agno) agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart); } -/* - * Perag iteration APIs - */ -static inline struct xfs_perag * -xfs_perag_next( - struct xfs_perag *pag, - xfs_agnumber_t *agno, - xfs_agnumber_t end_agno) -{ - struct xfs_mount *mp = pag_mount(pag); - - *agno = pag_agno(pag) + 1; - xfs_perag_rele(pag); - while (*agno <= end_agno) { - pag = xfs_perag_grab(mp, *agno); - if (pag) - return pag; - (*agno)++; - } - return NULL; -} - -#define for_each_perag_range(mp, agno, end_agno, pag) \ - for ((pag) = xfs_perag_grab((mp), (agno)); \ - (pag) != NULL; \ - (pag) = xfs_perag_next((pag), &(agno), (end_agno))) - -#define for_each_perag_from(mp, agno, pag) \ - for_each_perag_range((mp), (agno), (mp)->m_sb.sb_agcount - 1, (pag)) - -#define for_each_perag(mp, agno, pag) \ - (agno) = 0; \ - for_each_perag_from((mp), (agno), (pag)) - static inline struct xfs_perag * xfs_perag_next_wrap( struct xfs_perag *pag, diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index d2012fbf07aa65..061c8c961d5bc9 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -1109,14 +1109,13 @@ int xfs_update_secondary_sbs( struct xfs_mount *mp) { - struct xfs_perag *pag; - xfs_agnumber_t agno = 1; + struct xfs_perag *pag = NULL; int saved_error = 0; int error = 0; LIST_HEAD (buffer_list); /* update secondary superblocks. */ - for_each_perag_from(mp, agno, pag) { + while ((pag = xfs_perag_next_from(mp, pag, 1))) { struct xfs_buf *bp; error = xfs_buf_get(mp->m_ddev_targp, @@ -1146,7 +1145,7 @@ xfs_update_secondary_sbs( xfs_buf_relse(bp); /* don't hold too many buffers at once */ - if (agno % 16) + if (pag_agno(pag) % 16) continue; error = xfs_buf_delwri_submit(&buffer_list); @@ -1160,12 +1159,8 @@ xfs_update_secondary_sbs( } } error = xfs_buf_delwri_submit(&buffer_list); - if (error) { - xfs_warn(mp, - "write error %d updating a secondary superblock near ag %d", - error, agno); - } - + if (error) + xfs_warn(mp, "error %d writing secondary superblocks", error); return saved_error ? 
saved_error : error; } diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c index c299b16c9365fa..c91db4f5140743 100644 --- a/fs/xfs/libxfs/xfs_types.c +++ b/fs/xfs/libxfs/xfs_types.c @@ -170,13 +170,12 @@ xfs_icount_range( unsigned long long *max) { unsigned long long nr_inos = 0; - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; /* root, rtbitmap, rtsum all live in the first chunk */ *min = XFS_INODES_PER_CHUNK; - for_each_perag(mp, agno, pag) + while ((pag = xfs_perag_next(mp, pag))) nr_inos += pag->agino_max - pag->agino_min + 1; *max = nr_inos; } diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index a43912227dd478..fb022b403716b1 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -760,11 +760,10 @@ xchk_bmap_check_rmaps( struct xfs_scrub *sc, int whichfork) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; int error; - for_each_perag(sc->mp, agno, pag) { + while ((pag = xfs_perag_next(sc->mp, pag))) { error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag); if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) { diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c index dc8fdd2da174ed..be408e50484b54 100644 --- a/fs/xfs/scrub/bmap_repair.c +++ b/fs/xfs/scrub/bmap_repair.c @@ -407,12 +407,11 @@ xrep_bmap_find_mappings( struct xrep_bmap *rb) { struct xfs_scrub *sc = rb->sc; - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; int error = 0; /* Iterate the rmaps for extents. */ - for_each_perag(sc->mp, agno, pag) { + while ((pag = xfs_perag_next(sc->mp, pag))) { error = xrep_bmap_scan_ag(rb, pag); if (error) { xfs_perag_rele(pag); diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c index 1d3e98346933e1..28db0c83819c20 100644 --- a/fs/xfs/scrub/fscounters.c +++ b/fs/xfs/scrub/fscounters.c @@ -74,10 +74,9 @@ xchk_fscount_warmup( struct xfs_buf *agi_bp = NULL; struct xfs_buf *agf_bp = NULL; struct xfs_perag *pag = NULL; - xfs_agnumber_t agno; int error = 0; - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { if (xchk_should_terminate(sc, &error)) break; if (xfs_perag_initialised_agi(pag) && @@ -295,9 +294,8 @@ xchk_fscount_aggregate_agcounts( struct xchk_fscounters *fsc) { struct xfs_mount *mp = sc->mp; - struct xfs_perag *pag; + struct xfs_perag *pag = NULL; uint64_t delayed; - xfs_agnumber_t agno; int tries = 8; int error = 0; @@ -306,7 +304,7 @@ xchk_fscount_aggregate_agcounts( fsc->ifree = 0; fsc->fdblocks = 0; - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { if (xchk_should_terminate(sc, &error)) break; @@ -327,7 +325,7 @@ xchk_fscount_aggregate_agcounts( if (xfs_has_lazysbcount(sc->mp)) { fsc->fdblocks += pag->pagf_btreeblks; } else { - error = xchk_fscount_btreeblks(sc, fsc, agno); + error = xchk_fscount_btreeblks(sc, fsc, pag_agno(pag)); if (error) break; } diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c index b712a8bd34f543..112dd05e5551d3 100644 --- a/fs/xfs/scrub/health.c +++ b/fs/xfs/scrub/health.c @@ -160,12 +160,11 @@ STATIC void xchk_mark_all_healthy( struct xfs_mount *mp) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; xfs_fs_mark_healthy(mp, XFS_SICK_FS_INDIRECT); xfs_rt_mark_healthy(mp, XFS_SICK_RT_INDIRECT); - for_each_perag(mp, agno, pag) + while ((pag = xfs_perag_next(mp, pag))) xfs_ag_mark_healthy(pag, XFS_SICK_AG_INDIRECT); } @@ -294,9 +293,7 @@ xchk_health_record( struct xfs_scrub *sc) { struct xfs_mount *mp = sc->mp; 
- struct xfs_perag *pag; - xfs_agnumber_t agno; - + struct xfs_perag *pag = NULL; unsigned int sick; unsigned int checked; @@ -308,7 +305,7 @@ xchk_health_record( if (sick & XFS_SICK_RT_PRIMARY) xchk_set_corrupt(sc); - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { xfs_ag_measure_sickness(pag, &sick, &checked); if (sick & XFS_SICK_AG_PRIMARY) xchk_set_corrupt(sc); diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c index 3e45b9b72312ab..5da9e1a387a8bb 100644 --- a/fs/xfs/scrub/inode_repair.c +++ b/fs/xfs/scrub/inode_repair.c @@ -761,14 +761,13 @@ STATIC int xrep_dinode_count_rmaps( struct xrep_inode *ri) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; int error; if (!xfs_has_rmapbt(ri->sc->mp) || xfs_has_realtime(ri->sc->mp)) return -EOPNOTSUPP; - for_each_perag(ri->sc->mp, agno, pag) { + while ((pag = xfs_perag_next(ri->sc->mp, pag))) { error = xrep_dinode_count_ag_rmaps(ri, pag); if (error) { xfs_perag_rele(pag); diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index dfd6edcebb6ea4..739ec69c44281c 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -387,8 +387,8 @@ xfs_trim_datadev_extents( { xfs_agnumber_t start_agno, end_agno; xfs_agblock_t start_agbno, end_agbno; + struct xfs_perag *pag = NULL; xfs_daddr_t ddev_end; - struct xfs_perag *pag; int last_error = 0, error; ddev_end = min_t(xfs_daddr_t, end, @@ -399,10 +399,10 @@ xfs_trim_datadev_extents( end_agno = xfs_daddr_to_agno(mp, ddev_end); end_agbno = xfs_daddr_to_agbno(mp, ddev_end); - for_each_perag_range(mp, start_agno, end_agno, pag) { + while ((pag = xfs_perag_next_range(mp, pag, start_agno, end_agno))) { xfs_agblock_t agend = pag->block_count; - if (start_agno == end_agno) + if (pag_agno(pag) == end_agno) agend = end_agbno; error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen); if (error) diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c index 79b0f833c511e3..3d5a57d7ac5e14 100644 --- a/fs/xfs/xfs_extent_busy.c +++ b/fs/xfs/xfs_extent_busy.c @@ -629,11 +629,10 @@ void xfs_extent_busy_wait_all( struct xfs_mount *mp) { - struct xfs_perag *pag; + struct xfs_perag *pag = NULL; DEFINE_WAIT (wait); - xfs_agnumber_t agno; - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { do { prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE); if (RB_EMPTY_ROOT(&pag->pagb_tree)) diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 918e1c38a15592..a26fb054346b68 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -460,11 +460,11 @@ __xfs_getfsmap_datadev( void *priv) { struct xfs_mount *mp = tp->t_mountp; - struct xfs_perag *pag; + struct xfs_perag *pag = NULL; struct xfs_btree_cur *bt_cur = NULL; xfs_fsblock_t start_fsb; xfs_fsblock_t end_fsb; - xfs_agnumber_t start_ag, end_ag, ag; + xfs_agnumber_t start_ag, end_ag; uint64_t eofs; int error = 0; @@ -512,8 +512,7 @@ __xfs_getfsmap_datadev( start_ag = XFS_FSB_TO_AGNO(mp, start_fsb); end_ag = XFS_FSB_TO_AGNO(mp, end_fsb); - ag = start_ag; - for_each_perag_range(mp, ag, end_ag, pag) { + while ((pag = xfs_perag_next_range(mp, pag, start_ag, end_ag))) { /* * Set the AG high key from the fsmap high key if this * is the last AG that we're querying. 
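
For readers skimming the conversion, here is a minimal sketch, shown outside the diff and not part of the patch itself, of the call-site pattern the series switches to; do_something() is a hypothetical per-AG callback standing in for the real work done at each converted site:

	/*
	 * Illustrative only: walk every AG with the new while-based
	 * iterator.  Start with a NULL pag; xfs_perag_next() returns the
	 * next referenced perag or NULL at the end.  If we bail out of the
	 * loop early we must drop the active reference ourselves, as the
	 * converted call sites above do.
	 */
	static int example_walk_all_ags(struct xfs_mount *mp)
	{
		struct xfs_perag	*pag = NULL;
		int			error = 0;

		while ((pag = xfs_perag_next(mp, pag))) {
			error = do_something(pag);	/* hypothetical callback */
			if (error) {
				xfs_perag_rele(pag);
				break;
			}
		}
		return error;
	}

The same shape applies to xfs_perag_next_from() and xfs_perag_next_range() when the walk starts at, or is bounded to, a particular range of AGs.
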
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index b247d895c276d2..82812a458cf10f 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -528,13 +528,12 @@ int xfs_fs_reserve_ag_blocks( struct xfs_mount *mp) { - xfs_agnumber_t agno; - struct xfs_perag *pag; + struct xfs_perag *pag = NULL; int error = 0; int err2; mp->m_finobt_nores = false; - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { err2 = xfs_ag_resv_init(pag, NULL); if (err2 && !error) error = err2; @@ -556,9 +555,8 @@ void xfs_fs_unreserve_ag_blocks( struct xfs_mount *mp) { - xfs_agnumber_t agno; - struct xfs_perag *pag; + struct xfs_perag *pag = NULL; - for_each_perag(mp, agno, pag) + while ((pag = xfs_perag_next(mp, pag))) xfs_ag_resv_free(pag); } diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c index d6492128582a3e..ff5aca875ab0d0 100644 --- a/fs/xfs/xfs_health.c +++ b/fs/xfs/xfs_health.c @@ -28,8 +28,7 @@ void xfs_health_unmount( struct xfs_mount *mp) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; unsigned int sick = 0; unsigned int checked = 0; bool warn = false; @@ -38,7 +37,7 @@ xfs_health_unmount( return; /* Measure AG corruption levels. */ - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { xfs_ag_measure_sickness(pag, &sick, &checked); if (sick) { trace_xfs_ag_unfixed_corruption(pag, sick); diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 0a930fc116f575..383c245482027b 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1383,13 +1383,12 @@ void xfs_blockgc_stop( struct xfs_mount *mp) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; if (!xfs_clear_blockgc_enabled(mp)) return; - for_each_perag(mp, agno, pag) + while ((pag = xfs_perag_next(mp, pag))) cancel_delayed_work_sync(&pag->pag_blockgc_work); trace_xfs_blockgc_stop(mp, __return_address); } diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index ec2d56f1840fc6..7db3ece370b100 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -540,23 +540,25 @@ xfs_iwalk_args( unsigned int flags) { struct xfs_mount *mp = iwag->mp; - xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, iwag->startino); + xfs_agnumber_t start_agno; int error; - ASSERT(agno < mp->m_sb.sb_agcount); + start_agno = XFS_INO_TO_AGNO(iwag->mp, iwag->startino); + ASSERT(start_agno < iwag->mp->m_sb.sb_agcount); ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL)); error = xfs_iwalk_alloc(iwag); if (error) return error; - for_each_perag_from(mp, agno, iwag->pag) { + while ((iwag->pag = xfs_perag_next_from(mp, iwag->pag, start_agno))) { error = xfs_iwalk_ag(iwag); if (error || (flags & XFS_IWALK_SAME_AG)) { xfs_perag_rele(iwag->pag); break; } - iwag->startino = XFS_AGINO_TO_INO(mp, agno + 1, 0); + iwag->startino = + XFS_AGINO_TO_INO(mp, pag_agno(iwag->pag) + 1, 0); } xfs_iwalk_free(iwag); @@ -644,19 +646,19 @@ xfs_iwalk_threaded( bool polled, void *data) { + xfs_agnumber_t start_agno = XFS_INO_TO_AGNO(mp, startino); struct xfs_pwork_ctl pctl; - struct xfs_perag *pag; - xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino); + struct xfs_perag *pag = NULL; int error; - ASSERT(agno < mp->m_sb.sb_agcount); + ASSERT(start_agno < mp->m_sb.sb_agcount); ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL)); error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk"); if (error) return error; - for_each_perag_from(mp, agno, pag) { + while ((pag = xfs_perag_next_from(mp, pag, start_agno))) { struct xfs_iwalk_ag *iwag; if (xfs_pwork_ctl_want_abort(&pctl)) diff --git 
a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a285d2d1f68c15..55e412a821483e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2845,10 +2845,9 @@ static void xlog_recover_process_iunlinks( struct xlog *log) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; - for_each_perag(log->l_mp, agno, pag) + while ((pag = xfs_perag_next(log->l_mp, pag))) xlog_recover_iunlink_ag(pag); } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 2e82b5b6ed52d2..b11769c009effc 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -894,14 +894,13 @@ int xfs_reflink_recover_cow( struct xfs_mount *mp) { - struct xfs_perag *pag; - xfs_agnumber_t agno; + struct xfs_perag *pag = NULL; int error = 0; if (!xfs_has_reflink(mp)) return 0; - for_each_perag(mp, agno, pag) { + while ((pag = xfs_perag_next(mp, pag))) { error = xfs_refcount_recover_cow_leftovers(mp, pag); if (error) { xfs_perag_rele(pag); From patchwork Tue Nov 5 22:13:46 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863590 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 3A3C01F667B for ; Tue, 5 Nov 2024 22:13:46 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844827; cv=none; b=tTaG5hsYew1MoG2CC46zuyEouRD5GdzmKRPzXVQ1xcvk+Q9TZx4DgHA/DpGt5loy/8gE8UMwCf1U+mAaCLBudqtN2F/zkLDCYEeiYCs+DgFQZ1QVgv0VvwOh6Zmgz/D00IjDAHhQqbWFY3ViApT00mzwBISxD0++kNG6JXAz088= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844827; c=relaxed/simple; bh=v8ChzMYg1JFCEMzpJx+9c20M70FomBXzKsNleFygSmM=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=r77bCJL5xe7ySpCuoJ96MV3DULKZ4hyIsWjKDZ7i6+nw1byazXgBbyLKp+lh+AhdzvW12+QSasHz+5++1lZ12+uRpDjNCYM0S5V55gUnbQp/5XtBgocYtfW1rfaWekQgrKb3TUM4reK3r66NRVAaycFfpia/+IdwuQxGL5xLRY0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=nMlFFyDm; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="nMlFFyDm" Received: by smtp.kernel.org (Postfix) with ESMTPSA id B5894C4CECF; Tue, 5 Nov 2024 22:13:46 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844826; bh=v8ChzMYg1JFCEMzpJx+9c20M70FomBXzKsNleFygSmM=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=nMlFFyDmFxUziZEXl6/2l96TIqmRLqTd4JyYDxIcebOldVjXhBSdcEFjiS9PHecKZ f44Lw5EmU3xaAjeB+vOpJiFa9UZyq60CXTY1OTw8r33cBGLHBrV2s2e3xShvBAE6yR CDJS69jP3neX6MitzfAr9BNk3271nvjpUtX+XsWt3LSr0HsCSzWiuSJAb1m3g/BzIs Ttyq4a2TDBt/DqS8cFfMfw4KlPKlp8zzyihh7cmdsEnm4b53JpZvQ68mSuYIDy+WiR jq3pfch3GJP+grAroKuB0VX9jBuSYUbZ0o3irp1VS+pfU0siYbfOH1vTouTxKAa8p1 jFMgjBT/fLCcA== Date: Tue, 05 Nov 2024 14:13:46 -0800 Subject: [PATCH 05/16] xfs: move metadata health tracking to the generic group structure From: "Darrick J. 
Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395355.1869491.14328715471299546203.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Prepare for also tracking the health status of the upcoming realtime groups by moving the health tracking code to the generic xfs_group structure. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/libxfs/xfs_ag.c | 1 - fs/xfs/libxfs/xfs_ag.h | 9 ------ fs/xfs/libxfs/xfs_group.c | 4 ++ fs/xfs/libxfs/xfs_group.h | 12 +++++++ fs/xfs/libxfs/xfs_health.h | 45 +++++++++++----------------- fs/xfs/scrub/health.c | 8 ++--- fs/xfs/xfs_health.c | 71 ++++++++++++++++++++++++-------------------- fs/xfs/xfs_trace.h | 35 ++++++++++++---------- 8 files changed, 94 insertions(+), 91 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index 9ea20e9cf0d4e5..84bd3831297e07 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -232,7 +232,6 @@ xfs_perag_alloc( /* Place kernel structure only init below this point. */ spin_lock_init(&pag->pag_ici_lock); spin_lock_init(&pag->pagb_lock); - spin_lock_init(&pag->pag_state_lock); INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); xfs_defer_drain_init(&pag->pag_intents_drain); diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 80969682dc4746..8271cb72c88387 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -69,13 +69,6 @@ struct xfs_perag { #ifdef __KERNEL__ /* -- kernel only structures below this line -- */ - /* - * Bitsets of per-ag metadata that have been checked and/or are sick. - * Callers should hold pag_state_lock before accessing this field. - */ - uint16_t pag_checked; - uint16_t pag_sick; - #ifdef CONFIG_XFS_ONLINE_REPAIR /* * Alternate btree heights so that online repair won't trip the write @@ -87,8 +80,6 @@ struct xfs_perag { uint8_t pagf_repair_rmap_level; #endif - spinlock_t pag_state_lock; - spinlock_t pagb_lock; /* lock for pagb_tree */ struct rb_root pagb_tree; /* ordered tree of busy extents */ unsigned int pagb_gen; /* generation count for pagb_tree */ diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 59e08cfaf9bffd..927e72c0882b88 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -182,6 +182,10 @@ xfs_group_insert( xg->xg_gno = index; xg->xg_type = type; +#ifdef __KERNEL__ + spin_lock_init(&xg->xg_state_lock); +#endif + /* Active ref owned by mount indicates group is online. */ atomic_set(&xg->xg_active_ref, 1); diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index dd7da90443054b..d2c61dd1f43e44 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -11,6 +11,18 @@ struct xfs_group { enum xfs_group_type xg_type; atomic_t xg_ref; /* passive reference count */ atomic_t xg_active_ref; /* active reference count */ + +#ifdef __KERNEL__ + /* -- kernel only structures below this line -- */ + + /* + * Bitsets of per-ag metadata that have been checked and/or are sick. + * Callers should hold xg_state_lock before accessing this field. 
+ */ + uint16_t xg_checked; + uint16_t xg_sick; + spinlock_t xg_state_lock; +#endif /* __KERNEL__ */ }; struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index, diff --git a/fs/xfs/libxfs/xfs_health.h b/fs/xfs/libxfs/xfs_health.h index b0edb4288e5929..13301420a2f670 100644 --- a/fs/xfs/libxfs/xfs_health.h +++ b/fs/xfs/libxfs/xfs_health.h @@ -6,6 +6,8 @@ #ifndef __XFS_HEALTH_H__ #define __XFS_HEALTH_H__ +struct xfs_group; + /* * In-Core Filesystem Health Assessments * ===================================== @@ -197,10 +199,12 @@ void xfs_rt_measure_sickness(struct xfs_mount *mp, unsigned int *sick, void xfs_agno_mark_sick(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int mask); -void xfs_ag_mark_sick(struct xfs_perag *pag, unsigned int mask); -void xfs_ag_mark_corrupt(struct xfs_perag *pag, unsigned int mask); -void xfs_ag_mark_healthy(struct xfs_perag *pag, unsigned int mask); -void xfs_ag_measure_sickness(struct xfs_perag *pag, unsigned int *sick, +void xfs_group_mark_sick(struct xfs_group *xg, unsigned int mask); +#define xfs_ag_mark_sick(pag, mask) \ + xfs_group_mark_sick(pag_group(pag), (mask)) +void xfs_group_mark_corrupt(struct xfs_group *xg, unsigned int mask); +void xfs_group_mark_healthy(struct xfs_group *xg, unsigned int mask); +void xfs_group_measure_sickness(struct xfs_group *xg, unsigned int *sick, unsigned int *checked); void xfs_inode_mark_sick(struct xfs_inode *ip, unsigned int mask); @@ -227,22 +231,19 @@ xfs_fs_has_sickness(struct xfs_mount *mp, unsigned int mask) } static inline bool -xfs_rt_has_sickness(struct xfs_mount *mp, unsigned int mask) +xfs_group_has_sickness( + struct xfs_group *xg, + unsigned int mask) { - unsigned int sick, checked; + unsigned int sick, checked; - xfs_rt_measure_sickness(mp, &sick, &checked); - return sick & mask; -} - -static inline bool -xfs_ag_has_sickness(struct xfs_perag *pag, unsigned int mask) -{ - unsigned int sick, checked; - - xfs_ag_measure_sickness(pag, &sick, &checked); + xfs_group_measure_sickness(xg, &sick, &checked); return sick & mask; } +#define xfs_ag_has_sickness(pag, mask) \ + xfs_group_has_sickness(pag_group(pag), (mask)) +#define xfs_ag_is_healthy(pag) \ + (!xfs_ag_has_sickness((pag), UINT_MAX)) static inline bool xfs_inode_has_sickness(struct xfs_inode *ip, unsigned int mask) @@ -259,18 +260,6 @@ xfs_fs_is_healthy(struct xfs_mount *mp) return !xfs_fs_has_sickness(mp, -1U); } -static inline bool -xfs_rt_is_healthy(struct xfs_mount *mp) -{ - return !xfs_rt_has_sickness(mp, -1U); -} - -static inline bool -xfs_ag_is_healthy(struct xfs_perag *pag) -{ - return !xfs_ag_has_sickness(pag, -1U); -} - static inline bool xfs_inode_is_healthy(struct xfs_inode *ip) { diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c index 112dd05e5551d3..fce04444c37c2a 100644 --- a/fs/xfs/scrub/health.c +++ b/fs/xfs/scrub/health.c @@ -165,7 +165,7 @@ xchk_mark_all_healthy( xfs_fs_mark_healthy(mp, XFS_SICK_FS_INDIRECT); xfs_rt_mark_healthy(mp, XFS_SICK_RT_INDIRECT); while ((pag = xfs_perag_next(mp, pag))) - xfs_ag_mark_healthy(pag, XFS_SICK_AG_INDIRECT); + xfs_group_mark_healthy(pag_group(pag), XFS_SICK_AG_INDIRECT); } /* @@ -206,9 +206,9 @@ xchk_update_health( case XHG_AG: pag = xfs_perag_get(sc->mp, sc->sm->sm_agno); if (bad) - xfs_ag_mark_corrupt(pag, sc->sick_mask); + xfs_group_mark_corrupt(pag_group(pag), sc->sick_mask); else - xfs_ag_mark_healthy(pag, sc->sick_mask); + xfs_group_mark_healthy(pag_group(pag), sc->sick_mask); xfs_perag_put(pag); break; case XHG_INO: @@ -306,7 +306,7 @@ xchk_health_record( 
xchk_set_corrupt(sc); while ((pag = xfs_perag_next(mp, pag))) { - xfs_ag_measure_sickness(pag, &sick, &checked); + xfs_group_measure_sickness(pag_group(pag), &sick, &checked); if (sick & XFS_SICK_AG_PRIMARY) xchk_set_corrupt(sc); } diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c index ff5aca875ab0d0..732246f46680d5 100644 --- a/fs/xfs/xfs_health.c +++ b/fs/xfs/xfs_health.c @@ -38,9 +38,10 @@ xfs_health_unmount( /* Measure AG corruption levels. */ while ((pag = xfs_perag_next(mp, pag))) { - xfs_ag_measure_sickness(pag, &sick, &checked); + xfs_group_measure_sickness(pag_group(pag), &sick, &checked); if (sick) { - trace_xfs_ag_unfixed_corruption(pag, sick); + trace_xfs_group_unfixed_corruption(pag_group(pag), + sick); warn = true; } } @@ -227,61 +228,65 @@ xfs_agno_mark_sick( /* Mark unhealthy per-ag metadata. */ void -xfs_ag_mark_sick( - struct xfs_perag *pag, +xfs_group_mark_sick( + struct xfs_group *xg, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_AG_ALL)); - trace_xfs_ag_mark_sick(pag, mask); + trace_xfs_group_mark_sick(xg, mask); - spin_lock(&pag->pag_state_lock); - pag->pag_sick |= mask; - spin_unlock(&pag->pag_state_lock); + spin_lock(&xg->xg_state_lock); + xg->xg_sick |= mask; + spin_unlock(&xg->xg_state_lock); } -/* Mark per-ag metadata as having been checked and found unhealthy by fsck. */ +/* + * Mark per-group metadata as having been checked and found unhealthy by fsck. + */ void -xfs_ag_mark_corrupt( - struct xfs_perag *pag, +xfs_group_mark_corrupt( + struct xfs_group *xg, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_AG_ALL)); - trace_xfs_ag_mark_corrupt(pag, mask); + trace_xfs_group_mark_corrupt(xg, mask); - spin_lock(&pag->pag_state_lock); - pag->pag_sick |= mask; - pag->pag_checked |= mask; - spin_unlock(&pag->pag_state_lock); + spin_lock(&xg->xg_state_lock); + xg->xg_sick |= mask; + xg->xg_checked |= mask; + spin_unlock(&xg->xg_state_lock); } -/* Mark per-ag metadata ok. */ +/* + * Mark per-group metadata ok. + */ void -xfs_ag_mark_healthy( - struct xfs_perag *pag, +xfs_group_mark_healthy( + struct xfs_group *xg, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_AG_ALL)); - trace_xfs_ag_mark_healthy(pag, mask); + trace_xfs_group_mark_healthy(xg, mask); - spin_lock(&pag->pag_state_lock); - pag->pag_sick &= ~mask; - if (!(pag->pag_sick & XFS_SICK_AG_PRIMARY)) - pag->pag_sick &= ~XFS_SICK_AG_SECONDARY; - pag->pag_checked |= mask; - spin_unlock(&pag->pag_state_lock); + spin_lock(&xg->xg_state_lock); + xg->xg_sick &= ~mask; + if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY)) + xg->xg_sick &= ~XFS_SICK_AG_SECONDARY; + xg->xg_checked |= mask; + spin_unlock(&xg->xg_state_lock); } /* Sample which per-ag metadata are unhealthy. */ void -xfs_ag_measure_sickness( - struct xfs_perag *pag, +xfs_group_measure_sickness( + struct xfs_group *xg, unsigned int *sick, unsigned int *checked) { - spin_lock(&pag->pag_state_lock); - *sick = pag->pag_sick; - *checked = pag->pag_checked; - spin_unlock(&pag->pag_state_lock); + spin_lock(&xg->xg_state_lock); + *sick = xg->xg_sick; + *checked = xg->xg_checked; + spin_unlock(&xg->xg_state_lock); } /* Mark the unhealthy parts of an inode. 
*/ @@ -447,7 +452,7 @@ xfs_ag_geom_health( ageo->ag_sick = 0; ageo->ag_checked = 0; - xfs_ag_measure_sickness(pag, &sick, &checked); + xfs_group_measure_sickness(pag_group(pag), &sick, &checked); for (m = ag_map; m->sick_mask; m++) { if (checked & m->sick_mask) ageo->ag_checked |= m->ioctl_mask; diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 14e7f6a26a2300..fd597a410b0298 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -4215,31 +4215,34 @@ DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_corrupt); DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_healthy); DEFINE_FS_CORRUPT_EVENT(xfs_rt_unfixed_corruption); -DECLARE_EVENT_CLASS(xfs_ag_corrupt_class, - TP_PROTO(const struct xfs_perag *pag, unsigned int flags), - TP_ARGS(pag, flags), +DECLARE_EVENT_CLASS(xfs_group_corrupt_class, + TP_PROTO(const struct xfs_group *xg, unsigned int flags), + TP_ARGS(xg, flags), TP_STRUCT__entry( __field(dev_t, dev) - __field(xfs_agnumber_t, agno) + __field(enum xfs_group_type, type) + __field(uint32_t, index) __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->index = xg->xg_gno; __entry->flags = flags; ), - TP_printk("dev %d:%d agno 0x%x flags 0x%x", + TP_printk("dev %d:%d %sno 0x%x flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, __entry->flags) + __print_symbolic(__entry->type, XG_TYPE_STRINGS), + __entry->index, __entry->flags) ); -#define DEFINE_AG_CORRUPT_EVENT(name) \ -DEFINE_EVENT(xfs_ag_corrupt_class, name, \ - TP_PROTO(const struct xfs_perag *pag, unsigned int flags), \ - TP_ARGS(pag, flags)) -DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_sick); -DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_corrupt); -DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_healthy); -DEFINE_AG_CORRUPT_EVENT(xfs_ag_unfixed_corruption); +#define DEFINE_GROUP_CORRUPT_EVENT(name) \ +DEFINE_EVENT(xfs_group_corrupt_class, name, \ + TP_PROTO(const struct xfs_group *xg, unsigned int flags), \ + TP_ARGS(xg, flags)) +DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_sick); +DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_corrupt); +DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_healthy); +DEFINE_GROUP_CORRUPT_EVENT(xfs_group_unfixed_corruption); DECLARE_EVENT_CLASS(xfs_inode_corrupt_class, TP_PROTO(struct xfs_inode *ip, unsigned int flags), From patchwork Tue Nov 5 22:14:01 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863591 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 891A51F667B for ; Tue, 5 Nov 2024 22:14:02 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844842; cv=none; b=vAZEhVlH8kvH/GV1R2kBOzE2+OV9fH9PoAeq691Mn48ue6fQXXcWIl2tq5TZfJXOdi6zsXFa+JVGtz/VPLPqmVCgC3Rl1KsO24PQ7Xnf3P2aoO39XspTcwHlpsn982lFs4YDieM2VwjpXG6PyKXQ7qk4YrfyjOJ6cT/gw9GvbCA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844842; c=relaxed/simple; bh=odVXwCply6rMC7ianLYecw+et/woILpCNx64HsRIUKE=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=jg8Vvlo4J0N0oXuvPkxnoxi9FKEOTYeTq62ZJ1olqfmN1lOY/JnJxhfWTqjNkRHyP/a7EebYXi1ZDWWqd0gNov16ORGBMLdvxXQ1bZIMkwcHThxQ5N2fKd1VaRKxEHoLu4CkCbxInwoO80RWyIjKh9IB8cmiFDkwiotaSYv7p0U= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=iKOIMK6r; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="iKOIMK6r" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 52091C4CECF; Tue, 5 Nov 2024 22:14:02 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844842; bh=odVXwCply6rMC7ianLYecw+et/woILpCNx64HsRIUKE=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=iKOIMK6rDVVc9/M4xwXMuSM1KabGeoCyf6Jxq7WyhupI0jAhXG6WHToQJWNmD6iaF RXQH40EGZMkBlEVkENIjzzlWPHNtBxOPbtTr3lrEJbCS55k/YFf0kgBnA8OS9IBxso LeammHsXnSTSN1dWUgQyWFAIXpCYEhfd2m00/SpyezC+51lTtnwX883rPaPkMDyVhn cIOCk9jk5gnb7hV/sGU54kSgHovtFi41jD1NAKVRYGNn0bcoSdn3SHzvmR8900/Vj2 4xghj9RIVXFaiqVR9PMtqPmH9PJkbjfGt2gmpMsCx/3lmxijvs/57x4FeDKfbJDig1 +TG1B+WihflzA== Date: Tue, 05 Nov 2024 14:14:01 -0800 Subject: [PATCH 06/16] xfs: mark xfs_perag_intent_{hold,rele} static From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395373.1869491.17120938731640912247.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig These two functions are only used inside of xfs_drain.c, so mark them static. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_drain.c | 42 +++++++++++++++++++++--------------------- fs/xfs/xfs_drain.h | 6 ------ 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c index 7bdb9688c0f5e3..3f280971b498b8 100644 --- a/fs/xfs/xfs_drain.c +++ b/fs/xfs/xfs_drain.c @@ -93,6 +93,27 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr) return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr)); } +/* + * Declare an intent to update AG metadata. Other threads that need exclusive + * access can decide to back off if they see declared intentions. 
+ */ +static void +xfs_perag_intent_hold( + struct xfs_perag *pag) +{ + trace_xfs_perag_intent_hold(pag, __return_address); + xfs_defer_drain_grab(&pag->pag_intents_drain); +} + +/* Release our intent to update this AG's metadata. */ +static void +xfs_perag_intent_rele( + struct xfs_perag *pag) +{ + trace_xfs_perag_intent_rele(pag, __return_address); + xfs_defer_drain_rele(&pag->pag_intents_drain); +} + /* * Get a passive reference to the AG that contains a fsbno and declare an intent * to update its metadata. @@ -124,27 +145,6 @@ xfs_perag_intent_put( xfs_perag_put(pag); } -/* - * Declare an intent to update AG metadata. Other threads that need exclusive - * access can decide to back off if they see declared intentions. - */ -void -xfs_perag_intent_hold( - struct xfs_perag *pag) -{ - trace_xfs_perag_intent_hold(pag, __return_address); - xfs_defer_drain_grab(&pag->pag_intents_drain); -} - -/* Release our intent to update this AG's metadata. */ -void -xfs_perag_intent_rele( - struct xfs_perag *pag) -{ - trace_xfs_perag_intent_rele(pag, __return_address); - xfs_defer_drain_rele(&pag->pag_intents_drain); -} - /* * Wait for the intent update count for this AG to hit zero. * Callers must not hold any AG header buffers. diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h index 775164f54ea6de..f39c90946ab71f 100644 --- a/fs/xfs/xfs_drain.h +++ b/fs/xfs/xfs_drain.h @@ -65,9 +65,6 @@ struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp, xfs_fsblock_t fsbno); void xfs_perag_intent_put(struct xfs_perag *pag); -void xfs_perag_intent_hold(struct xfs_perag *pag); -void xfs_perag_intent_rele(struct xfs_perag *pag); - int xfs_perag_intent_drain(struct xfs_perag *pag); bool xfs_perag_intent_busy(struct xfs_perag *pag); #else @@ -80,9 +77,6 @@ struct xfs_defer_drain { /* empty */ }; xfs_perag_get((mp), XFS_FSB_TO_AGNO(mp, fsbno)) #define xfs_perag_intent_put(pag) xfs_perag_put(pag) -static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { } -static inline void xfs_perag_intent_rele(struct xfs_perag *pag) { } - #endif /* CONFIG_XFS_DRAIN_INTENTS */ #endif /* XFS_DRAIN_H_ */ From patchwork Tue Nov 5 22:14:17 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863592 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 770AE1F667B for ; Tue, 5 Nov 2024 22:14:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844859; cv=none; b=Pf2MzH8rLqNjPfBrI0iCQwZYFpg1sY+ESLIKDn27uK6GhAOEZcQlacuX1Csahk9/xKLTu0kCNTpl3QmTF1MIuaNJ2rzIyguXy4cuI9vn0QXwMnktTm8nkY0FLBCN5ZxG8p1tYaPH0txy4tnKep+thzzBEsomyirrK+gOEvuHo5c= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844859; c=relaxed/simple; bh=OPBVxrIwZX7ebPNYfCwLOHWFbGsCSJpCEhcuCPAbQWk=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=hZLokxE6jRPUTw0oC+XEFaUnDEUW9RA4+lrazyrvgz3CtuQRrtL5eWH2d9F4k9ibQSpT11eb6LQksX801wxX9KywizNTtGQ+WC0MoxvCDAU5Ef5dXF32hJvBleiw85jHOsO/ambWLdGB6/D1L0Fbwp3fOgiMSnOLeSH/b2TZIls= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=jqLrrfSD; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="jqLrrfSD" Received: by smtp.kernel.org (Postfix) with ESMTPSA id E30CEC4CECF; Tue, 5 Nov 2024 22:14:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844858; bh=OPBVxrIwZX7ebPNYfCwLOHWFbGsCSJpCEhcuCPAbQWk=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=jqLrrfSDCmboF1NMJ5OjfmlzvVriUZTwIowiqrFz9kQ38mEBYgo6EbKDHvAqjOm03 kP/u16UfFmGwsIhPvZrlUxrqV5KWm98JFy8pmzAnaVECh+8D9MUIz55slDtfizLVju rP1p3sgykWvaqFOvhzEH4W33GAyx2XrZH7MjrTIdy/WwOXSFAy2+y6X0mLXEr7h5RR wp7Cg7wYsv8v+Hf2qUxAl7ZhEI5sF/QFllUetwJM0VVsCUxLmwvlBBfDDNRt+mLZs/ jjyXq/sZtGPum3qpjRNhlbZ/g6K0ugpqXeKdDU6vp+wVRiTcaIIHK/G7TrF2RjYngg /fndWqufr2HeA== Date: Tue, 05 Nov 2024 14:14:17 -0800 Subject: [PATCH 07/16] xfs: move draining of deferred operations to the generic group structure From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395390.1869491.15523122724227287094.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Prepare supporting the upcoming realtime groups feature by moving the deferred operation draining to the generic xfs_group structure. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/libxfs/xfs_ag.c | 7 ++----- fs/xfs/libxfs/xfs_ag.h | 9 --------- fs/xfs/libxfs/xfs_group.c | 4 ++++ fs/xfs/libxfs/xfs_group.h | 9 +++++++++ fs/xfs/scrub/common.c | 4 ++-- fs/xfs/xfs_drain.c | 46 ++++++++++++++++++++++++--------------------- fs/xfs/xfs_drain.h | 6 ++++-- fs/xfs/xfs_trace.h | 36 ++++++++++++++++++++--------------- 8 files changed, 66 insertions(+), 55 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index 84bd3831297e07..c2f1f830d299d3 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -112,7 +112,6 @@ xfs_perag_uninit( #ifdef __KERNEL__ struct xfs_perag *pag = to_perag(xg); - xfs_defer_drain_free(&pag->pag_intents_drain); cancel_delayed_work_sync(&pag->pag_blockgc_work); xfs_buf_cache_destroy(&pag->pag_bcache); #endif @@ -234,7 +233,6 @@ xfs_perag_alloc( spin_lock_init(&pag->pagb_lock); INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); - xfs_defer_drain_init(&pag->pag_intents_drain); init_waitqueue_head(&pag->pagb_wait); pag->pagb_tree = RB_ROOT; xfs_hooks_init(&pag->pag_rmap_update_hooks); @@ -242,7 +240,7 @@ xfs_perag_alloc( error = xfs_buf_cache_init(&pag->pag_bcache); if (error) - goto out_defer_drain_free; + goto out_free_perag; /* * Pre-calculated geometry @@ -260,8 +258,7 @@ xfs_perag_alloc( out_buf_cache_destroy: xfs_buf_cache_destroy(&pag->pag_bcache); -out_defer_drain_free: - xfs_defer_drain_free(&pag->pag_intents_drain); +out_free_perag: kfree(pag); return error; } diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 8271cb72c88387..45f8de06cdbc8a 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -97,15 +97,6 @@ struct xfs_perag { /* background prealloc block trimming */ struct delayed_work pag_blockgc_work; - /* - * We use xfs_drain to track the number of deferred log intent items - * that have been queued (but not yet processed) so that waiters (e.g. - * scrub) will not lock resources when other threads are in the middle - * of processing a chain of intent items only to find momentary - * inconsistencies. - */ - struct xfs_defer_drain pag_intents_drain; - /* Hook to feed rmapbt updates to an active online repair. */ struct xfs_hooks pag_rmap_update_hooks; #endif /* __KERNEL__ */ diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 927e72c0882b88..6737f009dd38ca 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -160,6 +160,8 @@ xfs_group_free( XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0); + xfs_defer_drain_free(&xg->xg_intents_drain); + if (uninit) uninit(xg); @@ -185,6 +187,7 @@ xfs_group_insert( #ifdef __KERNEL__ spin_lock_init(&xg->xg_state_lock); #endif + xfs_defer_drain_init(&xg->xg_intents_drain); /* Active ref owned by mount indicates group is online. */ atomic_set(&xg->xg_active_ref, 1); @@ -192,6 +195,7 @@ xfs_group_insert( error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL); if (error) { WARN_ON_ONCE(error == -EBUSY); + xfs_defer_drain_free(&xg->xg_intents_drain); return error; } diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index d2c61dd1f43e44..ebefbba7d98cc2 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -22,6 +22,15 @@ struct xfs_group { uint16_t xg_checked; uint16_t xg_sick; spinlock_t xg_state_lock; + + /* + * We use xfs_drain to track the number of deferred log intent items + * that have been queued (but not yet processed) so that waiters (e.g. 
+ * scrub) will not lock resources when other threads are in the middle + * of processing a chain of intent items only to find momentary + * inconsistencies. + */ + struct xfs_defer_drain xg_intents_drain; #endif /* __KERNEL__ */ }; diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c index 28095ed490fbf6..e8b5e73bab60d3 100644 --- a/fs/xfs/scrub/common.c +++ b/fs/xfs/scrub/common.c @@ -513,7 +513,7 @@ xchk_perag_drain_and_lock( * Obviously, this should be slanted against scrub and in favor * of runtime threads. */ - if (!xfs_perag_intent_busy(sa->pag)) + if (!xfs_group_intent_busy(pag_group(sa->pag))) return 0; if (sa->agf_bp) { @@ -528,7 +528,7 @@ xchk_perag_drain_and_lock( if (!(sc->flags & XCHK_FSGATES_DRAIN)) return -ECHRNG; - error = xfs_perag_intent_drain(sa->pag); + error = xfs_group_intent_drain(pag_group(sa->pag)); if (error == -ERESTARTSYS) error = -EINTR; } while (!error); diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c index 3f280971b498b8..a72d08947d6d10 100644 --- a/fs/xfs/xfs_drain.c +++ b/fs/xfs/xfs_drain.c @@ -94,24 +94,26 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr) } /* - * Declare an intent to update AG metadata. Other threads that need exclusive - * access can decide to back off if they see declared intentions. + * Declare an intent to update group metadata. Other threads that need + * exclusive access can decide to back off if they see declared intentions. */ static void -xfs_perag_intent_hold( - struct xfs_perag *pag) +xfs_group_intent_hold( + struct xfs_group *xg) { - trace_xfs_perag_intent_hold(pag, __return_address); - xfs_defer_drain_grab(&pag->pag_intents_drain); + trace_xfs_group_intent_hold(xg, __return_address); + xfs_defer_drain_grab(&xg->xg_intents_drain); } -/* Release our intent to update this AG's metadata. */ +/* + * Release our intent to update this groups metadata. + */ static void -xfs_perag_intent_rele( - struct xfs_perag *pag) +xfs_group_intent_rele( + struct xfs_group *xg) { - trace_xfs_perag_intent_rele(pag, __return_address); - xfs_defer_drain_rele(&pag->pag_intents_drain); + trace_xfs_group_intent_rele(xg, __return_address); + xfs_defer_drain_rele(&xg->xg_intents_drain); } /* @@ -129,7 +131,7 @@ xfs_perag_intent_get( if (!pag) return NULL; - xfs_perag_intent_hold(pag); + xfs_group_intent_hold(pag_group(pag)); return pag; } @@ -141,7 +143,7 @@ void xfs_perag_intent_put( struct xfs_perag *pag) { - xfs_perag_intent_rele(pag); + xfs_group_intent_rele(pag_group(pag)); xfs_perag_put(pag); } @@ -150,17 +152,19 @@ xfs_perag_intent_put( * Callers must not hold any AG header buffers. */ int -xfs_perag_intent_drain( - struct xfs_perag *pag) +xfs_group_intent_drain( + struct xfs_group *xg) { - trace_xfs_perag_wait_intents(pag, __return_address); - return xfs_defer_drain_wait(&pag->pag_intents_drain); + trace_xfs_group_wait_intents(xg, __return_address); + return xfs_defer_drain_wait(&xg->xg_intents_drain); } -/* Has anyone declared an intent to update this AG? */ +/* + * Has anyone declared an intent to update this group? 
+ */ bool -xfs_perag_intent_busy( - struct xfs_perag *pag) +xfs_group_intent_busy( + struct xfs_group *xg) { - return xfs_defer_drain_busy(&pag->pag_intents_drain); + return xfs_defer_drain_busy(&xg->xg_intents_drain); } diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h index f39c90946ab71f..3e6143572e52d2 100644 --- a/fs/xfs/xfs_drain.h +++ b/fs/xfs/xfs_drain.h @@ -6,6 +6,7 @@ #ifndef XFS_DRAIN_H_ #define XFS_DRAIN_H_ +struct xfs_group; struct xfs_perag; #ifdef CONFIG_XFS_DRAIN_INTENTS @@ -65,8 +66,9 @@ struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp, xfs_fsblock_t fsbno); void xfs_perag_intent_put(struct xfs_perag *pag); -int xfs_perag_intent_drain(struct xfs_perag *pag); -bool xfs_perag_intent_busy(struct xfs_perag *pag); +int xfs_group_intent_drain(struct xfs_group *xg); +bool xfs_group_intent_busy(struct xfs_group *xg); + #else struct xfs_defer_drain { /* empty */ }; diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index fd597a410b0298..29e8be9b6829d9 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -4684,35 +4684,39 @@ TRACE_EVENT(xfs_force_shutdown, ); #ifdef CONFIG_XFS_DRAIN_INTENTS -DECLARE_EVENT_CLASS(xfs_perag_intents_class, - TP_PROTO(const struct xfs_perag *pag, void *caller_ip), - TP_ARGS(pag, caller_ip), +DECLARE_EVENT_CLASS(xfs_group_intents_class, + TP_PROTO(const struct xfs_group *xg, void *caller_ip), + TP_ARGS(xg, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) - __field(xfs_agnumber_t, agno) + __field(enum xfs_group_type, type) + __field(uint32_t, index) __field(long, nr_intents) __field(void *, caller_ip) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); - __entry->nr_intents = atomic_read(&pag->pag_intents_drain.dr_count); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->index = xg->xg_gno; + __entry->nr_intents = + atomic_read(&xg->xg_intents_drain.dr_count); __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d agno 0x%x intents %ld caller %pS", + TP_printk("dev %d:%d %sno 0x%x intents %ld caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), + __entry->index, __entry->nr_intents, __entry->caller_ip) ); -#define DEFINE_PERAG_INTENTS_EVENT(name) \ -DEFINE_EVENT(xfs_perag_intents_class, name, \ - TP_PROTO(const struct xfs_perag *pag, void *caller_ip), \ - TP_ARGS(pag, caller_ip)) -DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_hold); -DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_rele); -DEFINE_PERAG_INTENTS_EVENT(xfs_perag_wait_intents); +#define DEFINE_GROUP_INTENTS_EVENT(name) \ +DEFINE_EVENT(xfs_group_intents_class, name, \ + TP_PROTO(const struct xfs_group *xg, void *caller_ip), \ + TP_ARGS(xg, caller_ip)) +DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_hold); +DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_rele); +DEFINE_GROUP_INTENTS_EVENT(xfs_group_wait_intents); #endif /* CONFIG_XFS_DRAIN_INTENTS */ From patchwork Tue Nov 5 22:14:33 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863593 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B33661F667B for ; Tue, 5 Nov 2024 22:14:33 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844873; cv=none; b=qMkoL0nehdlcnVekgA1zugJiQ+LQ3dnjBGP02WPMFTUuROg/m/ehD3zQ3GgkgSrAMy05a80TUsW4tWf0SoS2tYDxaRBIGdjQuaQyMH+WQzP2FB86h71ryTfHT45U23Qqz3ab0MXRTO2yIA4ir4oGbooiQOu+2y1k2DGPwH4gAlA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844873; c=relaxed/simple; bh=my1wQOYy9qJmjip6zjHKpjyKCj1UZ30TFtEdehsIE4w=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=atyFjY3e2cRq/pnPo5fHj6BkqNS4rj3Zkyfho5Yy8SKinT/FEManx2bIBRarSaZi4uwCyJ+GIPQ7pa2BaSXVv2bStXf9JOuoQhCYaQRElPU0L8JPmCw3B1xT9MvKO6l/66OTpzl/xesoY+f4iPzYmWEtxz6SU6+/z/apvhx0a5M= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=MyOLyg1+; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="MyOLyg1+" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 89515C4CECF; Tue, 5 Nov 2024 22:14:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844873; bh=my1wQOYy9qJmjip6zjHKpjyKCj1UZ30TFtEdehsIE4w=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=MyOLyg1++SuXbNH8eSg2ar4ZkVmJgQ9fDQ3IxVCkDtr3HeJe+eMc3n+Aap8rpxjIr 1OttA6JSew3P7UJ/be8S36x086+vwFvXQixXOa6nbYvsDQ91UZVrUAtG4nG/RS02ra QrSkpDZUeBSavSp3WLWo1IpAq3P4RLFmCkzW02Jik3t88G8Dx2oW4IRSN7wlRRrRl8 PLscRffHb9PTow1K9gobXh7n+MHslYIpxDdbH+H9MAuFwuSzdjAoPernZHA7xrPB2x 7O79LlF9PHAwOOSas5eVMe4wDI7YpJKBZJsRSt5Kfc3Wz7KIkoe5wNx08i97qJl2iP LBXBajXoS1k1Q== Date: Tue, 05 Nov 2024 14:14:33 -0800 Subject: [PATCH 08/16] xfs: move the online repair rmap hooks to the generic group structure From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395407.1869491.17920277207031054678.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Prepare for the upcoming realtime groups feature by moving the online repair rmap hooks to based to the generic xfs_group structure. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/libxfs/xfs_ag.c | 1 - fs/xfs/libxfs/xfs_ag.h | 3 --- fs/xfs/libxfs/xfs_group.c | 1 + fs/xfs/libxfs/xfs_group.h | 5 +++++ fs/xfs/libxfs/xfs_rmap.c | 24 +++++++++++++----------- fs/xfs/libxfs/xfs_rmap.h | 4 ++-- fs/xfs/scrub/rmap_repair.c | 4 ++-- 7 files changed, 23 insertions(+), 19 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index c2f1f830d299d3..e60469fee87514 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -235,7 +235,6 @@ xfs_perag_alloc( INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); init_waitqueue_head(&pag->pagb_wait); pag->pagb_tree = RB_ROOT; - xfs_hooks_init(&pag->pag_rmap_update_hooks); #endif /* __KERNEL__ */ error = xfs_buf_cache_init(&pag->pag_bcache); diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 45f8de06cdbc8a..042ee0913fb9b9 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -96,9 +96,6 @@ struct xfs_perag { /* background prealloc block trimming */ struct delayed_work pag_blockgc_work; - - /* Hook to feed rmapbt updates to an active online repair. */ - struct xfs_hooks pag_rmap_update_hooks; #endif /* __KERNEL__ */ }; diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 6737f009dd38ca..8532dc2f8628c5 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -186,6 +186,7 @@ xfs_group_insert( #ifdef __KERNEL__ spin_lock_init(&xg->xg_state_lock); + xfs_hooks_init(&xg->xg_rmap_update_hooks); #endif xfs_defer_drain_init(&xg->xg_intents_drain); diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index ebefbba7d98cc2..a87b9b80ef7516 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -31,6 +31,11 @@ struct xfs_group { * inconsistencies. */ struct xfs_defer_drain xg_intents_drain; + + /* + * Hook to feed rmapbt updates to an active online repair. + */ + struct xfs_hooks xg_rmap_update_hooks; #endif /* __KERNEL__ */ }; diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index b6764d6b3ab891..984120b128fb9c 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -835,7 +835,7 @@ xfs_rmap_hook_enable(void) static inline void xfs_rmap_update_hook( struct xfs_trans *tp, - struct xfs_perag *pag, + struct xfs_group *xg, enum xfs_rmap_intent_type op, xfs_agblock_t startblock, xfs_extlen_t blockcount, @@ -850,27 +850,27 @@ xfs_rmap_update_hook( .oinfo = *oinfo, /* struct copy */ }; - if (pag) - xfs_hooks_call(&pag->pag_rmap_update_hooks, op, &p); + if (xg) + xfs_hooks_call(&xg->xg_rmap_update_hooks, op, &p); } } /* Call the specified function during a reverse mapping update. */ int xfs_rmap_hook_add( - struct xfs_perag *pag, + struct xfs_group *xg, struct xfs_rmap_hook *hook) { - return xfs_hooks_add(&pag->pag_rmap_update_hooks, &hook->rmap_hook); + return xfs_hooks_add(&xg->xg_rmap_update_hooks, &hook->rmap_hook); } /* Stop calling the specified function during a reverse mapping update. */ void xfs_rmap_hook_del( - struct xfs_perag *pag, + struct xfs_group *xg, struct xfs_rmap_hook *hook) { - xfs_hooks_del(&pag->pag_rmap_update_hooks, &hook->rmap_hook); + xfs_hooks_del(&xg->xg_rmap_update_hooks, &hook->rmap_hook); } /* Configure rmap update hook functions. 
*/ @@ -905,7 +905,8 @@ xfs_rmap_free( return 0; cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); - xfs_rmap_update_hook(tp, pag, XFS_RMAP_UNMAP, bno, len, false, oinfo); + xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_UNMAP, bno, len, + false, oinfo); error = xfs_rmap_unmap(cur, bno, len, false, oinfo); xfs_btree_del_cursor(cur, error); @@ -1149,7 +1150,8 @@ xfs_rmap_alloc( return 0; cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); - xfs_rmap_update_hook(tp, pag, XFS_RMAP_MAP, bno, len, false, oinfo); + xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_MAP, bno, len, false, + oinfo); error = xfs_rmap_map(cur, bno, len, false, oinfo); xfs_btree_del_cursor(cur, error); @@ -2620,8 +2622,8 @@ xfs_rmap_finish_one( if (error) return error; - xfs_rmap_update_hook(tp, ri->ri_pag, ri->ri_type, bno, - ri->ri_bmap.br_blockcount, unwritten, &oinfo); + xfs_rmap_update_hook(tp, pag_group(ri->ri_pag), ri->ri_type, bno, + ri->ri_bmap.br_blockcount, unwritten, &oinfo); return 0; } diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h index b783dd4dd95d1a..d409b463bc6662 100644 --- a/fs/xfs/libxfs/xfs_rmap.h +++ b/fs/xfs/libxfs/xfs_rmap.h @@ -264,8 +264,8 @@ struct xfs_rmap_hook { void xfs_rmap_hook_disable(void); void xfs_rmap_hook_enable(void); -int xfs_rmap_hook_add(struct xfs_perag *pag, struct xfs_rmap_hook *hook); -void xfs_rmap_hook_del(struct xfs_perag *pag, struct xfs_rmap_hook *hook); +int xfs_rmap_hook_add(struct xfs_group *xg, struct xfs_rmap_hook *hook); +void xfs_rmap_hook_del(struct xfs_group *xg, struct xfs_rmap_hook *hook); void xfs_rmap_hook_setup(struct xfs_rmap_hook *hook, notifier_fn_t mod_fn); #endif diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c index f88f58db909867..6c420ec7dacd1b 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -1611,7 +1611,7 @@ xrep_rmap_setup_scan( */ ASSERT(sc->flags & XCHK_FSGATES_RMAP); xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update); - error = xfs_rmap_hook_add(sc->sa.pag, &rr->rhook); + error = xfs_rmap_hook_add(pag_group(sc->sa.pag), &rr->rhook); if (error) goto out_iscan; return 0; @@ -1632,7 +1632,7 @@ xrep_rmap_teardown( struct xfs_scrub *sc = rr->sc; xchk_iscan_abort(&rr->iscan); - xfs_rmap_hook_del(sc->sa.pag, &rr->rhook); + xfs_rmap_hook_del(pag_group(sc->sa.pag), &rr->rhook); xchk_iscan_teardown(&rr->iscan); xfbtree_destroy(&rr->rmap_btree); mutex_destroy(&rr->lock); From patchwork Tue Nov 5 22:14:48 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863594 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 556B0216209 for ; Tue, 5 Nov 2024 22:14:49 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844889; cv=none; b=m8nHEYsIHKFZ5QvNP/e0p3A5YAmXBPwGH6djRx9+WvxIcXdqRwLyg/luDQFaqJ0WNKg5AyftStyfrXVzGCOwl1hwFrf2NspYHnG9YBkw/6IfFlugXQW34HDXpkEaSG6MfyYhA9KPmjr36usfl5UY2ijN/a4ti2PxY/JhqRYkzlw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844889; c=relaxed/simple; bh=2Nq70ziXD1UW7b3CyWHft1Typiy8Iy6IfUmyc5cz00I=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=LOyDha0r/OyEHSjt1I0+FTkTuOXX3OnrNaw4XbFfnsGp/Pmn6hxQ/PkepVvZe2HoCwF3FuwalY0G1ezyr8RetedayuLFiac5m5woTUWpa6Aof0hFgh3ulbsuydFuID2wJ7UW/jM2EILsDXtb37fAXcahqjzZxyzoRyk/TzITntI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=rm/Wq4U8; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="rm/Wq4U8" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2CDD1C4CECF; Tue, 5 Nov 2024 22:14:49 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844889; bh=2Nq70ziXD1UW7b3CyWHft1Typiy8Iy6IfUmyc5cz00I=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=rm/Wq4U80I5nf/FxcE2tgObXykd29D7seCnEHz7Fga9Nppb3CvtfQ1Lz+HNrPiiDV Jiy3OLmqCzyZ4ZtyGeJAtwX/9lxnV/r9FuI+G3taO05oi5qMI5dyE7CGLQzfmyrVJj k2AvnZ0zm+3AolEUurwxJoODtTfcNu8hZu6mPzEe5c/n2A81cTRGaOKBDDZdy7/+P1 TOuqFqxF4K7m2vofTcHtonBgIfYf+HbE9TAF4HA4gAti+sWdmmOgE7hPBERK7x9RdQ h3ZEwJpX3saghDy4YFGyfqaMf2u+PMpn1DcljzkQKoS5bjIENeUfmtRi0zFg9EMDae 88Vt96peaKa+A== Date: Tue, 05 Nov 2024 14:14:48 -0800 Subject: [PATCH 09/16] xfs: return the busy generation from xfs_extent_busy_list_empty From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395424.1869491.2480576942197321919.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig This avoid having to poke into the internals of the busy tracking in xrep_setup_ag_allocbt. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/scrub/alloc_repair.c | 9 +++------ fs/xfs/xfs_extent_busy.c | 4 +++- fs/xfs/xfs_extent_busy.h | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c index ab0084c4249657..f07cd93012c675 100644 --- a/fs/xfs/scrub/alloc_repair.c +++ b/fs/xfs/scrub/alloc_repair.c @@ -132,16 +132,12 @@ int xrep_setup_ag_allocbt( struct xfs_scrub *sc) { - unsigned int busy_gen; - /* * Make sure the busy extent list is clear because we can't put extents * on there twice. 
*/ - busy_gen = READ_ONCE(sc->sa.pag->pagb_gen); - if (xfs_extent_busy_list_empty(sc->sa.pag)) + if (xfs_extent_busy_list_empty(sc->sa.pag, &busy_gen)) return 0; - return xfs_extent_busy_flush(sc->tp, sc->sa.pag, busy_gen, 0); } @@ -849,6 +845,7 @@ xrep_allocbt( { struct xrep_abt *ra; struct xfs_mount *mp = sc->mp; + unsigned int busy_gen; char *descr; int error; @@ -869,7 +866,7 @@ xrep_allocbt( * on there twice. In theory we cleared this before we started, but * let's not risk the filesystem. */ - if (!xfs_extent_busy_list_empty(sc->sa.pag)) { + if (!xfs_extent_busy_list_empty(sc->sa.pag, &busy_gen)) { error = -EDEADLOCK; goto out_ra; } diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c index 3d5a57d7ac5e14..2806fc6ab4800d 100644 --- a/fs/xfs/xfs_extent_busy.c +++ b/fs/xfs/xfs_extent_busy.c @@ -667,12 +667,14 @@ xfs_extent_busy_ag_cmp( /* Are there any busy extents in this AG? */ bool xfs_extent_busy_list_empty( - struct xfs_perag *pag) + struct xfs_perag *pag, + unsigned *busy_gen) { bool res; spin_lock(&pag->pagb_lock); res = RB_EMPTY_ROOT(&pag->pagb_tree); + *busy_gen = READ_ONCE(pag->pagb_gen); spin_unlock(&pag->pagb_lock); return res; } diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h index 7241035ce4ef9d..c803dcd124a628 100644 --- a/fs/xfs/xfs_extent_busy.h +++ b/fs/xfs/xfs_extent_busy.h @@ -83,6 +83,6 @@ static inline void xfs_extent_busy_sort(struct list_head *list) list_sort(NULL, list, xfs_extent_busy_ag_cmp); } -bool xfs_extent_busy_list_empty(struct xfs_perag *pag); +bool xfs_extent_busy_list_empty(struct xfs_perag *pag, unsigned int *busy_gen); #endif /* __XFS_EXTENT_BUSY_H__ */ From patchwork Tue Nov 5 22:15:04 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863595 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E52A2216447 for ; Tue, 5 Nov 2024 22:15:04 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844905; cv=none; b=YwQLx5qe8rU+p6V61gK1MN/1INkOe5qI2ADpR0BevSJ/Q5byNawUG2nQGUhKrmjnTwzamfy3yxQjzwULwqdyWBC77jw6745gDq/9bk5BMl/n8KoHzEf4LXWUePrDXp1DlQfO8RiPWJoAaOoPLNydfsC674AvMRhseZGeheCcvCo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844905; c=relaxed/simple; bh=rJARdPFgmx2P7gg8c5thrkDbTcRO6TWYtXLhsPxdz2U=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=UZWGN4kAMhbvO9AffVmuz1CXaxJqORgw3xGawUOEUy63vGfihq+xx/QJf0MfsnjN7dQWdeoD1qSxWHKbJBwvyuYauMx0B3FnYgHb/c3t6/SekVxzfFUSisZLAbidPENz6XXA88Z65xVTXYyfFYIwv0cjLjq7fQnWvpFq6ovFV3k= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=TGU0yJ41; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="TGU0yJ41" Received: by smtp.kernel.org (Postfix) with ESMTPSA id BEB73C4CECF; Tue, 5 Nov 2024 22:15:04 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844904; bh=rJARdPFgmx2P7gg8c5thrkDbTcRO6TWYtXLhsPxdz2U=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=TGU0yJ41oEDa6WBHLqdAxMt9hp5519qlg3YwJGs5INnTD5jMLRSheT/CP98jwyE+0 8oZENY4jI5U3D6f44KotQi9nGSqO5WAh429dMRlOx1K4Ab857CpFlnJb1WJrYEE9m0 qynkYgyouWdZzsGW9UTEc5z0ZJHfdAULjQC92RWTLC5wBW3ZyNm2Qe/EpmFIHI0oKo FU5QmzyQSzA//zq37+AsVd1JyDoNOVeAUaEizgxN5qyXF9ati4JFlRhMbRAFroePDx T0RxB7Ege0wLdAQKmAfLH5KkfteYyg63eHnpEHQxL7WvOPO4FBJWrTABqVOtR/KqWu Qe/BmsnFW6bAg== Date: Tue, 05 Nov 2024 14:15:04 -0800 Subject: [PATCH 10/16] xfs: convert extent busy tracepoints to the generic group structure From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395440.1869491.7266466304695861700.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Prepare for tracking busy RT extents by passing the generic group structure to the xfs_extent_busy_class tracepoints. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/xfs_extent_busy.c | 12 +++++++----- fs/xfs/xfs_trace.h | 34 +++++++++++++++++++++------------- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c index 2806fc6ab4800d..9c5c6279ae216e 100644 --- a/fs/xfs/xfs_extent_busy.c +++ b/fs/xfs/xfs_extent_busy.c @@ -41,7 +41,7 @@ xfs_extent_busy_insert_list( new->flags = flags; /* trace before insert to be able to see failed inserts */ - trace_xfs_extent_busy(pag, bno, len); + trace_xfs_extent_busy(pag_group(pag), bno, len); spin_lock(&pag->pagb_lock); rbp = &pag->pagb_tree.rb_node; @@ -278,13 +278,13 @@ xfs_extent_busy_update_extent( ASSERT(0); } - trace_xfs_extent_busy_reuse(pag, fbno, flen); + trace_xfs_extent_busy_reuse(pag_group(pag), fbno, flen); return true; out_force_log: spin_unlock(&pag->pagb_lock); xfs_log_force(pag_mount(pag), XFS_LOG_SYNC); - trace_xfs_extent_busy_force(pag, fbno, flen); + trace_xfs_extent_busy_force(pag_group(pag), fbno, flen); spin_lock(&pag->pagb_lock); return false; } @@ -496,7 +496,8 @@ xfs_extent_busy_trim( out: if (fbno != *bno || flen != *len) { - trace_xfs_extent_busy_trim(args->pag, *bno, *len, fbno, flen); + trace_xfs_extent_busy_trim(pag_group(args->pag), *bno, *len, + fbno, flen); *bno = fbno; *len = flen; *busy_gen = args->pag->pagb_gen; @@ -525,7 +526,8 @@ xfs_extent_busy_clear_one( busyp->flags = XFS_EXTENT_BUSY_DISCARDED; return false; } - trace_xfs_extent_busy_clear(pag, busyp->bno, busyp->length); + trace_xfs_extent_busy_clear(pag_group(pag), busyp->bno, + busyp->length); rb_erase(&busyp->rb_node, &pag->pagb_tree); } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 29e8be9b6829d9..562e0ad1c6cf0d 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -1673,43 +1673,48 @@ TRACE_EVENT(xfs_bunmap, ); DECLARE_EVENT_CLASS(xfs_extent_busy_class, - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(pag, agbno, len), + TP_ARGS(xg, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x", + TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len) ); #define DEFINE_BUSY_EVENT(name) \ DEFINE_EVENT(xfs_extent_busy_class, name, \ - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \ - xfs_extlen_t len), \ - TP_ARGS(pag, agbno, len)) + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \ + xfs_extlen_t len), \ + TP_ARGS(xg, agbno, len)) DEFINE_BUSY_EVENT(xfs_extent_busy); DEFINE_BUSY_EVENT(xfs_extent_busy_force); DEFINE_BUSY_EVENT(xfs_extent_busy_reuse); DEFINE_BUSY_EVENT(xfs_extent_busy_clear); TRACE_EVENT(xfs_extent_busy_trim, - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len, xfs_agblock_t tbno, xfs_extlen_t tlen), - TP_ARGS(pag, agbno, len, tbno, tlen), + TP_ARGS(xg, agbno, len, tbno, tlen), TP_STRUCT__entry( 
__field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) @@ -1717,16 +1722,19 @@ TRACE_EVENT(xfs_extent_busy_trim, __field(xfs_extlen_t, tlen) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; __entry->tbno = tbno; __entry->tlen = tlen; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x", + TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len, __entry->tbno, From patchwork Tue Nov 5 22:15:19 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863596 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 9A07C1DD0D2 for ; Tue, 5 Nov 2024 22:15:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844920; cv=none; b=aX4mPXpEhldj9nPEE4/r380g1CdiaYGAS/mSNxjvl0yG5Z0hlD1Y9xVYwxoRteS4auVrLOO1k/RHtmV5j1zh8YRYFXVVVrUXaP6job0towPThlh3WexNYRY5cXwxpqC5Ihiip34UfvfSwbXAcxTFqEq/BrpHvo59gKlQ1/CAANg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844920; c=relaxed/simple; bh=bblKtioxk+Xvt+V+GFaTh26lSF5/00EUkaM9ZHGLhTc=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=OH5SaETdvsGEWpGIBeoX+ZStcBgx/W28/hi8XP2Pl0M/Vl9s17e6LT4CAWZ1wUlmRiEeWCx+J6D47DgMtrhyf5p1NtXjFnyH29SfEdki/G3eWQtkr4/M8p630wIE74ejT2gxY2BD820oL/AGHa8+410VPTLVOfOAOjSaYYndAiU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=NYh6GVtH; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="NYh6GVtH" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 66066C4CECF; Tue, 5 Nov 2024 22:15:20 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844920; bh=bblKtioxk+Xvt+V+GFaTh26lSF5/00EUkaM9ZHGLhTc=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=NYh6GVtHeouJymhwNNQlHYC7hcoTQX/VrHvGfc11mvjZM+ArM8JI+cA7I3ZLGF1LL Azrz1LBfNt0C0LVy6FeDZGBUdhS9eJaA5OzKlUimggmM/UJPx2JKO0kdoaZE961rOd hfBfzq/KVnU1+Df5p6li09s7qTsZF7IlB9RTY6VHjShpkf5H5tTLnwiUjTGNT85nNL Mw1AytJpykgrNL0CX+2I+QJUGbvuJ18Pgkh3DUMUjRWyoWrfjlJTu7xs0Omhqj7kjk e5LfKYD1vNTXwSIQteRBcmDyZqvY8pJtIXiSVnydzwa3d2OsrRzuBss180trngA+GL J91JnSG9pj+WA== Date: Tue, 05 Nov 2024 14:15:19 -0800 Subject: [PATCH 11/16] xfs: convert busy extent tracking to the generic group structure From: "Darrick J. 
Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395457.1869491.13607324034670456984.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Split busy extent tracking from struct xfs_perag into its own private structure, which can be pointed to by the generic group structure. Note that this structure is now dynamically allocated instead of embedded as the upcoming zone XFS code doesn't need it and will also have an unusually high number of groups due to hardware constraints. Dynamically allocating the structure this is a big memory saver for this case. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/libxfs/xfs_ag.c | 3 - fs/xfs/libxfs/xfs_ag.h | 5 - fs/xfs/libxfs/xfs_alloc.c | 29 +++--- fs/xfs/libxfs/xfs_alloc_btree.c | 4 - fs/xfs/libxfs/xfs_group.c | 16 +++ fs/xfs/libxfs/xfs_group.h | 5 + fs/xfs/libxfs/xfs_rmap_btree.c | 4 - fs/xfs/scrub/alloc_repair.c | 9 +- fs/xfs/scrub/reap.c | 2 fs/xfs/xfs_discard.c | 10 +- fs/xfs/xfs_extent_busy.c | 198 +++++++++++++++++++++++---------------- fs/xfs/xfs_extent_busy.h | 59 ++++-------- 12 files changed, 193 insertions(+), 151 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index e60469fee87514..47e90dbb852bba 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -230,11 +230,8 @@ xfs_perag_alloc( #ifdef __KERNEL__ /* Place kernel structure only init below this point. */ spin_lock_init(&pag->pag_ici_lock); - spin_lock_init(&pag->pagb_lock); INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); - init_waitqueue_head(&pag->pagb_wait); - pag->pagb_tree = RB_ROOT; #endif /* __KERNEL__ */ error = xfs_buf_cache_init(&pag->pag_bcache); diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 042ee0913fb9b9..7290148fa6e6aa 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -80,11 +80,6 @@ struct xfs_perag { uint8_t pagf_repair_rmap_level; #endif - spinlock_t pagb_lock; /* lock for pagb_tree */ - struct rb_root pagb_tree; /* ordered tree of busy extents */ - unsigned int pagb_gen; /* generation count for pagb_tree */ - wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */ - atomic_t pagf_fstrms; /* # of filestreams active in this AG */ spinlock_t pag_ici_lock; /* incore inode cache lock */ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index bfe7b4321c47ae..4ddd05c97a2928 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -331,7 +331,8 @@ xfs_alloc_compute_aligned( bool busy; /* Trim busy sections out of found extent */ - busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen); + busy = xfs_extent_busy_trim(pag_group(args->pag), args->minlen, + args->maxlen, &bno, &len, busy_gen); /* * If we have a largish extent that happens to start before min_agbno, @@ -1251,7 +1252,7 @@ xfs_alloc_ag_vextent_small( if (fbno == NULLAGBLOCK) goto out; - xfs_extent_busy_reuse(args->pag, fbno, 1, + xfs_extent_busy_reuse(pag_group(args->pag), fbno, 1, (args->datatype & XFS_ALLOC_NOBUSY)); if (args->datatype & XFS_ALLOC_USERDATA) { @@ -1364,7 +1365,8 @@ xfs_alloc_ag_vextent_exact( */ tbno = fbno; tlen = flen; - xfs_extent_busy_trim(args, &tbno, 
&tlen, &busy_gen); + xfs_extent_busy_trim(pag_group(args->pag), args->minlen, args->maxlen, + &tbno, &tlen, &busy_gen); /* * Give up if the start of the extent is busy, or the freespace isn't @@ -1757,8 +1759,9 @@ xfs_alloc_ag_vextent_near( * the allocation can be retried. */ trace_xfs_alloc_near_busy(args); - error = xfs_extent_busy_flush(args->tp, args->pag, - acur.busy_gen, alloc_flags); + error = xfs_extent_busy_flush(args->tp, + pag_group(args->pag), acur.busy_gen, + alloc_flags); if (error) goto out; @@ -1873,8 +1876,9 @@ xfs_alloc_ag_vextent_size( * the allocation can be retried. */ trace_xfs_alloc_size_busy(args); - error = xfs_extent_busy_flush(args->tp, args->pag, - busy_gen, alloc_flags); + error = xfs_extent_busy_flush(args->tp, + pag_group(args->pag), busy_gen, + alloc_flags); if (error) goto error0; @@ -1972,8 +1976,9 @@ xfs_alloc_ag_vextent_size( * the allocation can be retried. */ trace_xfs_alloc_size_busy(args); - error = xfs_extent_busy_flush(args->tp, args->pag, - busy_gen, alloc_flags); + error = xfs_extent_busy_flush(args->tp, + pag_group(args->pag), busy_gen, + alloc_flags); if (error) goto error0; @@ -3615,8 +3620,8 @@ xfs_alloc_vextent_finish( if (error) goto out_drop_perag; - ASSERT(!xfs_extent_busy_search(args->pag, args->agbno, - args->len)); + ASSERT(!xfs_extent_busy_search(pag_group(args->pag), + args->agbno, args->len)); } xfs_ag_resv_alloc_extent(args->pag, args->resv, args); @@ -4014,7 +4019,7 @@ __xfs_free_extent( if (skip_discard) busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD; - xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags); + xfs_extent_busy_insert(tp, pag_group(pag), agbno, len, busy_flags); return 0; err_release: diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index 88e1545ed4c9dc..e69a1bb13f7f86 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -86,7 +86,7 @@ xfs_allocbt_alloc_block( } atomic64_inc(&cur->bc_mp->m_allocbt_blks); - xfs_extent_busy_reuse(cur->bc_ag.pag, bno, 1, false); + xfs_extent_busy_reuse(pag_group(cur->bc_ag.pag), bno, 1, false); new->s = cpu_to_be32(bno); @@ -110,7 +110,7 @@ xfs_allocbt_free_block( return error; atomic64_dec(&cur->bc_mp->m_allocbt_blks); - xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1, + xfs_extent_busy_insert(cur->bc_tp, pag_group(agbp->b_pag), bno, 1, XFS_EXTENT_BUSY_SKIP_DISCARD); return 0; } diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 8532dc2f8628c5..5c6fa5d76a91b1 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -10,6 +10,7 @@ #include "xfs_mount.h" #include "xfs_error.h" #include "xfs_trace.h" +#include "xfs_extent_busy.h" #include "xfs_group.h" /* @@ -161,6 +162,9 @@ xfs_group_free( XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0); xfs_defer_drain_free(&xg->xg_intents_drain); +#ifdef __KERNEL__ + kfree(xg->xg_busy_extents); +#endif if (uninit) uninit(xg); @@ -185,6 +189,9 @@ xfs_group_insert( xg->xg_type = type; #ifdef __KERNEL__ + xg->xg_busy_extents = xfs_extent_busy_alloc(); + if (!xg->xg_busy_extents) + return -ENOMEM; spin_lock_init(&xg->xg_state_lock); xfs_hooks_init(&xg->xg_rmap_update_hooks); #endif @@ -196,9 +203,14 @@ xfs_group_insert( error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL); if (error) { WARN_ON_ONCE(error == -EBUSY); - xfs_defer_drain_free(&xg->xg_intents_drain); - return error; + goto out_drain; } return 0; +out_drain: + xfs_defer_drain_free(&xg->xg_intents_drain); +#ifdef __KERNEL__ + kfree(xg->xg_busy_extents); +#endif + return error; 
} diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index a87b9b80ef7516..0ff6e1d5635cb1 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -15,6 +15,11 @@ struct xfs_group { #ifdef __KERNEL__ /* -- kernel only structures below this line -- */ + /* + * Track freed but not yet committed extents. + */ + struct xfs_extent_busy_tree *xg_busy_extents; + /* * Bitsets of per-ag metadata that have been checked and/or are sick. * Callers should hold xg_state_lock before accessing this field. diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index 6fd460fc7c9c1d..b37eaf37c7fd2d 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -102,7 +102,7 @@ xfs_rmapbt_alloc_block( return 0; } - xfs_extent_busy_reuse(pag, bno, 1, false); + xfs_extent_busy_reuse(pag_group(pag), bno, 1, false); new->s = cpu_to_be32(bno); be32_add_cpu(&agf->agf_rmap_blocks, 1); @@ -136,7 +136,7 @@ xfs_rmapbt_free_block( if (error) return error; - xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1, + xfs_extent_busy_insert(cur->bc_tp, pag_group(pag), bno, 1, XFS_EXTENT_BUSY_SKIP_DISCARD); xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1); diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c index f07cd93012c675..0433363a90b616 100644 --- a/fs/xfs/scrub/alloc_repair.c +++ b/fs/xfs/scrub/alloc_repair.c @@ -132,13 +132,16 @@ int xrep_setup_ag_allocbt( struct xfs_scrub *sc) { + struct xfs_group *xg = pag_group(sc->sa.pag); + unsigned int busy_gen; + /* * Make sure the busy extent list is clear because we can't put extents * on there twice. */ - if (xfs_extent_busy_list_empty(sc->sa.pag, &busy_gen)) + if (xfs_extent_busy_list_empty(xg, &busy_gen)) return 0; - return xfs_extent_busy_flush(sc->tp, sc->sa.pag, busy_gen, 0); + return xfs_extent_busy_flush(sc->tp, xg, busy_gen, 0); } /* Check for any obvious conflicts in the free extent. */ @@ -866,7 +869,7 @@ xrep_allocbt( * on there twice. In theory we cleared this before we started, but * let's not risk the filesystem. */ - if (!xfs_extent_busy_list_empty(sc->sa.pag, &busy_gen)) { + if (!xfs_extent_busy_list_empty(pag_group(sc->sa.pag), &busy_gen)) { error = -EDEADLOCK; goto out_ra; } diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index d65ad6aa856f4d..08230952053b7d 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -137,7 +137,7 @@ xreap_put_freelist( agfl_bp, agbno, 0); if (error) return error; - xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, + xfs_extent_busy_insert(sc->tp, pag_group(sc->sa.pag), agbno, 1, XFS_EXTENT_BUSY_SKIP_DISCARD); return 0; diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 739ec69c44281c..019371c865d22a 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -117,10 +117,12 @@ xfs_discard_extents( blk_start_plug(&plug); list_for_each_entry(busyp, &extents->extent_list, list) { - trace_xfs_discard_extent(busyp->pag, busyp->bno, busyp->length); + struct xfs_perag *pag = to_perag(busyp->group); + + trace_xfs_discard_extent(pag, busyp->bno, busyp->length); error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, - xfs_agbno_to_daddr(busyp->pag, busyp->bno), + xfs_agbno_to_daddr(pag, busyp->bno), XFS_FSB_TO_BB(mp, busyp->length), GFP_KERNEL, &bio); if (error && error != -EOPNOTSUPP) { @@ -271,12 +273,12 @@ xfs_trim_gather_extents( * If any blocks in the range are still busy, skip the * discard and try again the next time. 
*/ - if (xfs_extent_busy_search(pag, fbno, flen)) { + if (xfs_extent_busy_search(pag_group(pag), fbno, flen)) { trace_xfs_discard_busy(pag, fbno, flen); goto next_extent; } - xfs_extent_busy_insert_discard(pag, fbno, flen, + xfs_extent_busy_insert_discard(pag_group(pag), fbno, flen, &extents->extent_list); next_extent: if (tcur->by_bno) diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c index 9c5c6279ae216e..457a27ab837599 100644 --- a/fs/xfs/xfs_extent_busy.c +++ b/fs/xfs/xfs_extent_busy.c @@ -19,14 +19,22 @@ #include "xfs_log.h" #include "xfs_ag.h" +struct xfs_extent_busy_tree { + spinlock_t eb_lock; + struct rb_root eb_tree; + unsigned int eb_gen; + wait_queue_head_t eb_wait; +}; + static void xfs_extent_busy_insert_list( - struct xfs_perag *pag, + struct xfs_group *xg, xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags, struct list_head *busy_list) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; struct xfs_extent_busy *new; struct xfs_extent_busy *busyp; struct rb_node **rbp; @@ -34,17 +42,17 @@ xfs_extent_busy_insert_list( new = kzalloc(sizeof(struct xfs_extent_busy), GFP_KERNEL | __GFP_NOFAIL); - new->pag = xfs_perag_hold(pag); + new->group = xfs_group_hold(xg); new->bno = bno; new->length = len; INIT_LIST_HEAD(&new->list); new->flags = flags; /* trace before insert to be able to see failed inserts */ - trace_xfs_extent_busy(pag_group(pag), bno, len); + trace_xfs_extent_busy(xg, bno, len); - spin_lock(&pag->pagb_lock); - rbp = &pag->pagb_tree.rb_node; + spin_lock(&eb->eb_lock); + rbp = &eb->eb_tree.rb_node; while (*rbp) { parent = *rbp; busyp = rb_entry(parent, struct xfs_extent_busy, rb_node); @@ -61,32 +69,32 @@ xfs_extent_busy_insert_list( } rb_link_node(&new->rb_node, parent, rbp); - rb_insert_color(&new->rb_node, &pag->pagb_tree); + rb_insert_color(&new->rb_node, &eb->eb_tree); /* always process discard lists in fifo order */ list_add_tail(&new->list, busy_list); - spin_unlock(&pag->pagb_lock); + spin_unlock(&eb->eb_lock); } void xfs_extent_busy_insert( struct xfs_trans *tp, - struct xfs_perag *pag, + struct xfs_group *xg, xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags) { - xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy); + xfs_extent_busy_insert_list(xg, bno, len, flags, &tp->t_busy); } void xfs_extent_busy_insert_discard( - struct xfs_perag *pag, + struct xfs_group *xg, xfs_agblock_t bno, xfs_extlen_t len, struct list_head *busy_list) { - xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED, + xfs_extent_busy_insert_list(xg, bno, len, XFS_EXTENT_BUSY_DISCARDED, busy_list); } @@ -101,17 +109,18 @@ xfs_extent_busy_insert_discard( */ int xfs_extent_busy_search( - struct xfs_perag *pag, + struct xfs_group *xg, xfs_agblock_t bno, xfs_extlen_t len) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; struct rb_node *rbp; struct xfs_extent_busy *busyp; int match = 0; /* find closest start bno overlap */ - spin_lock(&pag->pagb_lock); - rbp = pag->pagb_tree.rb_node; + spin_lock(&eb->eb_lock); + rbp = eb->eb_tree.rb_node; while (rbp) { busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node); if (bno < busyp->bno) { @@ -130,7 +139,7 @@ xfs_extent_busy_search( break; } } - spin_unlock(&pag->pagb_lock); + spin_unlock(&eb->eb_lock); return match; } @@ -147,13 +156,15 @@ xfs_extent_busy_search( */ STATIC bool xfs_extent_busy_update_extent( - struct xfs_perag *pag, + struct xfs_group *xg, struct xfs_extent_busy *busyp, xfs_agblock_t fbno, xfs_extlen_t flen, - bool userdata) __releases(&pag->pagb_lock) - 
__acquires(&pag->pagb_lock) + bool userdata) + __releases(&eb->eb_lock) + __acquires(&eb->eb_lock) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; xfs_agblock_t fend = fbno + flen; xfs_agblock_t bbno = busyp->bno; xfs_agblock_t bend = bbno + busyp->length; @@ -164,9 +175,9 @@ xfs_extent_busy_update_extent( * and retry. */ if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) { - spin_unlock(&pag->pagb_lock); + spin_unlock(&eb->eb_lock); delay(1); - spin_lock(&pag->pagb_lock); + spin_lock(&eb->eb_lock); return false; } @@ -239,7 +250,7 @@ xfs_extent_busy_update_extent( * tree root, because erasing the node can rearrange the * tree topology. */ - rb_erase(&busyp->rb_node, &pag->pagb_tree); + rb_erase(&busyp->rb_node, &eb->eb_tree); busyp->length = 0; return false; } else if (fend < bend) { @@ -278,14 +289,14 @@ xfs_extent_busy_update_extent( ASSERT(0); } - trace_xfs_extent_busy_reuse(pag_group(pag), fbno, flen); + trace_xfs_extent_busy_reuse(xg, fbno, flen); return true; out_force_log: - spin_unlock(&pag->pagb_lock); - xfs_log_force(pag_mount(pag), XFS_LOG_SYNC); - trace_xfs_extent_busy_force(pag_group(pag), fbno, flen); - spin_lock(&pag->pagb_lock); + spin_unlock(&eb->eb_lock); + xfs_log_force(xg->xg_mount, XFS_LOG_SYNC); + trace_xfs_extent_busy_force(xg, fbno, flen); + spin_lock(&eb->eb_lock); return false; } @@ -294,17 +305,18 @@ xfs_extent_busy_update_extent( */ void xfs_extent_busy_reuse( - struct xfs_perag *pag, + struct xfs_group *xg, xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; struct rb_node *rbp; ASSERT(flen > 0); - spin_lock(&pag->pagb_lock); + spin_lock(&eb->eb_lock); restart: - rbp = pag->pagb_tree.rb_node; + rbp = eb->eb_tree.rb_node; while (rbp) { struct xfs_extent_busy *busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node); @@ -319,11 +331,11 @@ xfs_extent_busy_reuse( continue; } - if (!xfs_extent_busy_update_extent(pag, busyp, fbno, flen, + if (!xfs_extent_busy_update_extent(xg, busyp, fbno, flen, userdata)) goto restart; } - spin_unlock(&pag->pagb_lock); + spin_unlock(&eb->eb_lock); } /* @@ -332,7 +344,7 @@ xfs_extent_busy_reuse( * args->minlen no suitable extent could be found, and the higher level * code needs to force out the log and retry the allocation. * - * Return the current busy generation for the AG if the extent is busy. This + * Return the current busy generation for the group if the extent is busy. This * value can be used to wait for at least one of the currently busy extents * to be cleared. Note that the busy list is not guaranteed to be empty after * the gen is woken. The state of a specific extent must always be confirmed @@ -340,11 +352,14 @@ xfs_extent_busy_reuse( */ bool xfs_extent_busy_trim( - struct xfs_alloc_arg *args, + struct xfs_group *xg, + xfs_extlen_t minlen, + xfs_extlen_t maxlen, xfs_agblock_t *bno, xfs_extlen_t *len, unsigned *busy_gen) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; xfs_agblock_t fbno; xfs_extlen_t flen; struct rb_node *rbp; @@ -352,11 +367,11 @@ xfs_extent_busy_trim( ASSERT(*len > 0); - spin_lock(&args->pag->pagb_lock); + spin_lock(&eb->eb_lock); fbno = *bno; flen = *len; - rbp = args->pag->pagb_tree.rb_node; - while (rbp && flen >= args->minlen) { + rbp = eb->eb_tree.rb_node; + while (rbp && flen >= minlen) { struct xfs_extent_busy *busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node); xfs_agblock_t fend = fbno + flen; @@ -477,13 +492,13 @@ xfs_extent_busy_trim( * good chance subsequent allocations will be * contiguous. 
*/ - if (bbno - fbno >= args->maxlen) { + if (bbno - fbno >= maxlen) { /* left candidate fits perfect */ fend = bbno; - } else if (fend - bend >= args->maxlen * 4) { + } else if (fend - bend >= maxlen * 4) { /* right candidate has enough free space */ fbno = bend; - } else if (bbno - fbno >= args->minlen) { + } else if (bbno - fbno >= minlen) { /* left candidate fits minimum requirement */ fend = bbno; } else { @@ -496,14 +511,13 @@ xfs_extent_busy_trim( out: if (fbno != *bno || flen != *len) { - trace_xfs_extent_busy_trim(pag_group(args->pag), *bno, *len, - fbno, flen); + trace_xfs_extent_busy_trim(xg, *bno, *len, fbno, flen); *bno = fbno; *len = flen; - *busy_gen = args->pag->pagb_gen; + *busy_gen = eb->eb_gen; ret = true; } - spin_unlock(&args->pag->pagb_lock); + spin_unlock(&eb->eb_lock); return ret; fail: /* @@ -516,23 +530,24 @@ xfs_extent_busy_trim( static bool xfs_extent_busy_clear_one( - struct xfs_perag *pag, struct xfs_extent_busy *busyp, bool do_discard) { + struct xfs_extent_busy_tree *eb = busyp->group->xg_busy_extents; + if (busyp->length) { if (do_discard && !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) { busyp->flags = XFS_EXTENT_BUSY_DISCARDED; return false; } - trace_xfs_extent_busy_clear(pag_group(pag), busyp->bno, + trace_xfs_extent_busy_clear(busyp->group, busyp->bno, busyp->length); - rb_erase(&busyp->rb_node, &pag->pagb_tree); + rb_erase(&busyp->rb_node, &eb->eb_tree); } list_del_init(&busyp->list); - xfs_perag_put(busyp->pag); + xfs_group_put(busyp->group); kfree(busyp); return true; } @@ -554,29 +569,30 @@ xfs_extent_busy_clear( return; do { - struct xfs_perag *pag = xfs_perag_hold(busyp->pag); + struct xfs_group *xg = xfs_group_hold(busyp->group); + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; bool wakeup = false; - spin_lock(&pag->pagb_lock); + spin_lock(&eb->eb_lock); do { next = list_next_entry(busyp, list); - if (xfs_extent_busy_clear_one(pag, busyp, do_discard)) + if (xfs_extent_busy_clear_one(busyp, do_discard)) wakeup = true; busyp = next; } while (!list_entry_is_head(busyp, list, list) && - busyp->pag == pag); + busyp->group == xg); if (wakeup) { - pag->pagb_gen++; - wake_up_all(&pag->pagb_wait); + eb->eb_gen++; + wake_up_all(&eb->eb_wait); } - spin_unlock(&pag->pagb_lock); - xfs_perag_put(pag); + spin_unlock(&eb->eb_lock); + xfs_group_put(xg); } while (!list_entry_is_head(busyp, list, list)); } /* - * Flush out all busy extents for this AG. + * Flush out all busy extents for this group. * * If the current transaction is holding busy extents, the caller may not want * to wait for committed busy extents to resolve. If we are being told just to @@ -592,10 +608,11 @@ xfs_extent_busy_clear( int xfs_extent_busy_flush( struct xfs_trans *tp, - struct xfs_perag *pag, + struct xfs_group *xg, unsigned busy_gen, uint32_t alloc_flags) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; DEFINE_WAIT (wait); int error; @@ -608,7 +625,7 @@ xfs_extent_busy_flush( if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH) return 0; - if (busy_gen != READ_ONCE(pag->pagb_gen)) + if (busy_gen != READ_ONCE(eb->eb_gen)) return 0; if (alloc_flags & XFS_ALLOC_FLAG_FREEING) @@ -617,36 +634,44 @@ xfs_extent_busy_flush( /* Wait for committed busy extents to resolve. 
*/ do { - prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE); - if (busy_gen != READ_ONCE(pag->pagb_gen)) + prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE); + if (busy_gen != READ_ONCE(eb->eb_gen)) break; schedule(); } while (1); - finish_wait(&pag->pagb_wait, &wait); + finish_wait(&eb->eb_wait, &wait); return 0; } +static void +xfs_extent_busy_wait_group( + struct xfs_group *xg) +{ + DEFINE_WAIT (wait); + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; + + do { + prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE); + if (RB_EMPTY_ROOT(&eb->eb_tree)) + break; + schedule(); + } while (1); + finish_wait(&eb->eb_wait, &wait); +} + void xfs_extent_busy_wait_all( struct xfs_mount *mp) { struct xfs_perag *pag = NULL; - DEFINE_WAIT (wait); - while ((pag = xfs_perag_next(mp, pag))) { - do { - prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE); - if (RB_EMPTY_ROOT(&pag->pagb_tree)) - break; - schedule(); - } while (1); - finish_wait(&pag->pagb_wait, &wait); - } + while ((pag = xfs_perag_next(mp, pag))) + xfs_extent_busy_wait_group(pag_group(pag)); } /* - * Callback for list_sort to sort busy extents by the AG they reside in. + * Callback for list_sort to sort busy extents by the group they reside in. */ int xfs_extent_busy_ag_cmp( @@ -660,23 +685,38 @@ xfs_extent_busy_ag_cmp( container_of(l2, struct xfs_extent_busy, list); s32 diff; - diff = pag_agno(b1->pag) - pag_agno(b2->pag); + diff = b1->group->xg_gno - b2->group->xg_gno; if (!diff) diff = b1->bno - b2->bno; return diff; } -/* Are there any busy extents in this AG? */ +/* Are there any busy extents in this group? */ bool xfs_extent_busy_list_empty( - struct xfs_perag *pag, + struct xfs_group *xg, unsigned *busy_gen) { + struct xfs_extent_busy_tree *eb = xg->xg_busy_extents; bool res; - spin_lock(&pag->pagb_lock); - res = RB_EMPTY_ROOT(&pag->pagb_tree); - *busy_gen = READ_ONCE(pag->pagb_gen); - spin_unlock(&pag->pagb_lock); + spin_lock(&eb->eb_lock); + res = RB_EMPTY_ROOT(&eb->eb_tree); + *busy_gen = READ_ONCE(eb->eb_gen); + spin_unlock(&eb->eb_lock); return res; } + +struct xfs_extent_busy_tree * +xfs_extent_busy_alloc(void) +{ + struct xfs_extent_busy_tree *eb; + + eb = kzalloc(sizeof(*eb), GFP_KERNEL); + if (!eb) + return NULL; + spin_lock_init(&eb->eb_lock); + init_waitqueue_head(&eb->eb_wait); + eb->eb_tree = RB_ROOT; + return eb; +} diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h index c803dcd124a628..f069b04e8ea184 100644 --- a/fs/xfs/xfs_extent_busy.h +++ b/fs/xfs/xfs_extent_busy.h @@ -8,19 +8,18 @@ #ifndef __XFS_EXTENT_BUSY_H__ #define __XFS_EXTENT_BUSY_H__ +struct xfs_group; struct xfs_mount; -struct xfs_perag; struct xfs_trans; -struct xfs_alloc_arg; /* - * Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that - * have been freed but whose transactions aren't committed to disk yet. + * Busy block/extent entry. Indexed by a rbtree in the group to mark blocks + * that have been freed but whose transactions aren't committed to disk yet. 
*/ struct xfs_extent_busy { - struct rb_node rb_node; /* ag by-bno indexed search tree */ + struct rb_node rb_node; /* group by-bno indexed search tree */ struct list_head list; /* transaction busy extent list */ - struct xfs_perag *pag; + struct xfs_group *group; xfs_agblock_t bno; xfs_extlen_t length; unsigned int flags; @@ -44,45 +43,29 @@ struct xfs_busy_extents { void *owner; }; -void -xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag, - xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags); - -void -xfs_extent_busy_insert_discard(struct xfs_perag *pag, xfs_agblock_t bno, - xfs_extlen_t len, struct list_head *busy_list); - -void -xfs_extent_busy_clear(struct list_head *list, bool do_discard); - -int -xfs_extent_busy_search(struct xfs_perag *pag, xfs_agblock_t bno, +void xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_group *xg, + xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags); +void xfs_extent_busy_insert_discard(struct xfs_group *xg, xfs_agblock_t bno, + xfs_extlen_t len, struct list_head *busy_list); +void xfs_extent_busy_clear(struct list_head *list, bool do_discard); +int xfs_extent_busy_search(struct xfs_group *xg, xfs_agblock_t bno, xfs_extlen_t len); - -void -xfs_extent_busy_reuse(struct xfs_perag *pag, xfs_agblock_t fbno, +void xfs_extent_busy_reuse(struct xfs_group *xg, xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata); - -bool -xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t *bno, - xfs_extlen_t *len, unsigned *busy_gen); - -int -xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_perag *pag, +bool xfs_extent_busy_trim(struct xfs_group *xg, xfs_extlen_t minlen, + xfs_extlen_t maxlen, xfs_agblock_t *bno, xfs_extlen_t *len, + unsigned *busy_gen); +int xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_group *xg, unsigned busy_gen, uint32_t alloc_flags); +void xfs_extent_busy_wait_all(struct xfs_mount *mp); +bool xfs_extent_busy_list_empty(struct xfs_group *xg, unsigned int *busy_gen); +struct xfs_extent_busy_tree *xfs_extent_busy_alloc(void); -void -xfs_extent_busy_wait_all(struct xfs_mount *mp); - -int -xfs_extent_busy_ag_cmp(void *priv, const struct list_head *a, - const struct list_head *b); - +int xfs_extent_busy_ag_cmp(void *priv, const struct list_head *a, + const struct list_head *b); static inline void xfs_extent_busy_sort(struct list_head *list) { list_sort(NULL, list, xfs_extent_busy_ag_cmp); } -bool xfs_extent_busy_list_empty(struct xfs_perag *pag, unsigned int *busy_gen); - #endif /* __XFS_EXTENT_BUSY_H__ */ From patchwork Tue Nov 5 22:15:35 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863597 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 86D9D1DD0D2 for ; Tue, 5 Nov 2024 22:15:36 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844936; cv=none; b=J9pgniWQnAZz707NgUcNaq9NkEzqQtaxcgOn//7BZydsluTkUJ0WFVr1gP2lenxMDcjGsIg4HjaOCC3ix+1Kfc5xqBUPKXy13OgCdKPUnn5QqIg57McK6QuX2BuNIV2ry47m8PK36kIaYXU6hjaBLh692jF7nqEuE8aTeFZKPp8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844936; c=relaxed/simple; bh=5nkPLvr1kS7LlhwDbQTAhJVY8FGM4gBZbB6x/MaFPyA=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=Jt1JBFENhGqzKyFi3yFRiGPIyp98HRvjjw1gARLAi4FHkRH7No/gk2GPP/WLWTLa6ZwfyqYZMu6+/ox2y9pzVvK1EukndiGu5CQE/E1ya/+bqjArbWT2uyZYPT+gC0zTl+w75lChZSVfbA/OXzHOAEmkBBFR5Es1S6ZrqIdnuuY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=s0FT4Vo+; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="s0FT4Vo+" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 10FA2C4CECF; Tue, 5 Nov 2024 22:15:36 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844936; bh=5nkPLvr1kS7LlhwDbQTAhJVY8FGM4gBZbB6x/MaFPyA=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=s0FT4Vo+CDJp9ov5cjgzd5KhwlG2Ke38ccG06VPv9SZIZRjObdPDPw8BRGX9H7lpE Ot3BJiYsKYTS5jmVS1c+rUnhMJKTW9IaP0o0naHxzPDM3lOXcKX+AQM6e/HMeYY/wq CdGUVYZNpGFJimBm/AgYqKpVhQmZoMS3D8//8d+WT/Y+nwHvzRSJmfTcbKO12MbvNa woc0cI3hfVayw2cJeGzxMhh3ZFs4r+eNUotN8/X+jxfeRT/spwoxMi9DSVlZUnm6b+ bhPdrfnfnjz9AAyBYRFMUqvo27MA4k82XASQgejLTUlwkahx6SNfitiOLuA2ZEWicd PUaVq9G+sDoJg== Date: Tue, 05 Nov 2024 14:15:35 -0800 Subject: [PATCH 12/16] xfs: add a generic group pointer to the btree cursor From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395475.1869491.2440263348314025875.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Replace the pag pointers in the type specific union with a generic xfs_group pointer. This prepares for adding realtime group support. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/libxfs/xfs_alloc.c | 8 ++++---- fs/xfs/libxfs/xfs_alloc_btree.c | 28 ++++++++++++++-------------- fs/xfs/libxfs/xfs_btree.c | 35 ++++++++++++----------------------- fs/xfs/libxfs/xfs_btree.h | 3 +-- fs/xfs/libxfs/xfs_btree_mem.c | 6 ++---- fs/xfs/libxfs/xfs_ialloc.c | 12 +++++++----- fs/xfs/libxfs/xfs_ialloc_btree.c | 15 ++++++++------- fs/xfs/libxfs/xfs_refcount.c | 17 +++++++++-------- fs/xfs/libxfs/xfs_refcount_btree.c | 10 +++++----- fs/xfs/libxfs/xfs_rmap.c | 8 +++----- fs/xfs/libxfs/xfs_rmap_btree.c | 19 ++++++++++--------- fs/xfs/scrub/alloc.c | 2 +- fs/xfs/scrub/bmap.c | 3 ++- fs/xfs/scrub/bmap_repair.c | 4 ++-- fs/xfs/scrub/cow_repair.c | 9 ++++++--- fs/xfs/scrub/health.c | 2 +- fs/xfs/scrub/ialloc.c | 14 +++++++------- fs/xfs/scrub/refcount.c | 3 ++- fs/xfs/scrub/rmap.c | 2 +- fs/xfs/scrub/rmap_repair.c | 2 +- fs/xfs/xfs_fsmap.c | 6 ++++-- fs/xfs/xfs_health.c | 23 ++++++----------------- fs/xfs/xfs_trace.h | 28 ++++++++++++++-------------- 23 files changed, 122 insertions(+), 137 deletions(-) diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 4ddd05c97a2928..d33c2fdaf4f2c9 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -275,7 +275,7 @@ xfs_alloc_complain_bad_rec( xfs_warn(mp, "%sbt record corruption in AG %d detected at %pS!", - cur->bc_ops->name, pag_agno(cur->bc_ag.pag), fa); + cur->bc_ops->name, cur->bc_group->xg_gno, fa); xfs_warn(mp, "start block 0x%x block count 0x%x", irec->ar_startblock, irec->ar_blockcount); @@ -303,7 +303,7 @@ xfs_alloc_get_rec( return error; xfs_alloc_btrec_to_irec(rec, &irec); - fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec); + fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_alloc_complain_bad_rec(cur, fa, &irec); @@ -540,7 +540,7 @@ static int xfs_alloc_fixup_longest( struct xfs_btree_cur *cnt_cur) { - struct xfs_perag *pag = cnt_cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(cnt_cur->bc_group); struct xfs_buf *bp = cnt_cur->bc_ag.agbp; struct xfs_agf *agf = bp->b_addr; xfs_extlen_t longest = 0; @@ -4044,7 +4044,7 @@ xfs_alloc_query_range_helper( xfs_failaddr_t fa; xfs_alloc_btrec_to_irec(rec, &irec); - fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec); + fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_alloc_complain_bad_rec(cur, fa, &irec); diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index e69a1bb13f7f86..a4ac37ba5d510e 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -28,7 +28,7 @@ xfs_bnobt_dup_cursor( struct xfs_btree_cur *cur) { return xfs_bnobt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp, - cur->bc_ag.pag); + to_perag(cur->bc_group)); } STATIC struct xfs_btree_cur * @@ -36,29 +36,29 @@ xfs_cntbt_dup_cursor( struct xfs_btree_cur *cur) { return xfs_cntbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp, - cur->bc_ag.pag); + to_perag(cur->bc_group)); } - STATIC void xfs_allocbt_set_root( struct xfs_btree_cur *cur, const union xfs_btree_ptr *ptr, int inc) { - struct xfs_buf *agbp = cur->bc_ag.agbp; - struct xfs_agf *agf = agbp->b_addr; + struct xfs_perag *pag = to_perag(cur->bc_group); + struct xfs_buf *agbp = cur->bc_ag.agbp; + struct xfs_agf *agf = agbp->b_addr; ASSERT(ptr->s != 0); if (xfs_btree_is_bno(cur->bc_ops)) { agf->agf_bno_root = ptr->s; be32_add_cpu(&agf->agf_bno_level, inc); - cur->bc_ag.pag->pagf_bno_level += inc; + pag->pagf_bno_level += inc; } else { agf->agf_cnt_root = ptr->s; be32_add_cpu(&agf->agf_cnt_level, inc); - 
cur->bc_ag.pag->pagf_cnt_level += inc; + pag->pagf_cnt_level += inc; } xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); @@ -75,7 +75,7 @@ xfs_allocbt_alloc_block( xfs_agblock_t bno; /* Allocate the new block from the freelist. If we can't, give up. */ - error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp, + error = xfs_alloc_get_freelist(to_perag(cur->bc_group), cur->bc_tp, cur->bc_ag.agbp, &bno, 1); if (error) return error; @@ -86,7 +86,7 @@ xfs_allocbt_alloc_block( } atomic64_inc(&cur->bc_mp->m_allocbt_blks); - xfs_extent_busy_reuse(pag_group(cur->bc_ag.pag), bno, 1, false); + xfs_extent_busy_reuse(cur->bc_group, bno, 1, false); new->s = cpu_to_be32(bno); @@ -104,8 +104,8 @@ xfs_allocbt_free_block( int error; bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp)); - error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL, - bno, 1); + error = xfs_alloc_put_freelist(to_perag(cur->bc_group), cur->bc_tp, + agbp, NULL, bno, 1); if (error) return error; @@ -178,7 +178,7 @@ xfs_allocbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); + ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno)); if (xfs_btree_is_bno(cur->bc_ops)) ptr->s = agf->agf_bno_root; @@ -492,7 +492,7 @@ xfs_bnobt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops, mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_ag.agbp = agbp; if (agbp) { struct xfs_agf *agf = agbp->b_addr; @@ -518,7 +518,7 @@ xfs_cntbt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops, mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_ag.agbp = agbp; if (agbp) { struct xfs_agf *agf = agbp->b_addr; diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 9a13dbf5f54a33..2b5fc5fd16435d 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -225,7 +225,7 @@ __xfs_btree_check_agblock( struct xfs_buf *bp) { struct xfs_mount *mp = cur->bc_mp; - struct xfs_perag *pag = cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(cur->bc_group); xfs_failaddr_t fa; xfs_agblock_t agbno; @@ -331,7 +331,7 @@ __xfs_btree_check_ptr( return -EFSCORRUPTED; break; case XFS_BTREE_TYPE_AG: - if (!xfs_verify_agbno(cur->bc_ag.pag, + if (!xfs_verify_agbno(to_perag(cur->bc_group), be32_to_cpu((&ptr->s)[index]))) return -EFSCORRUPTED; break; @@ -372,7 +372,7 @@ xfs_btree_check_ptr( case XFS_BTREE_TYPE_AG: xfs_err(cur->bc_mp, "AG %u: Corrupt %sbt pointer at level %d index %d.", - pag_agno(cur->bc_ag.pag), cur->bc_ops->name, + cur->bc_group->xg_gno, cur->bc_ops->name, level, index); break; } @@ -523,20 +523,8 @@ xfs_btree_del_cursor( ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || xfs_is_shutdown(cur->bc_mp) || error != 0); - switch (cur->bc_ops->type) { - case XFS_BTREE_TYPE_AG: - if (cur->bc_ag.pag) - xfs_perag_put(cur->bc_ag.pag); - break; - case XFS_BTREE_TYPE_INODE: - /* nothing to do */ - break; - case XFS_BTREE_TYPE_MEM: - if (cur->bc_mem.pag) - xfs_perag_put(cur->bc_mem.pag); - break; - } - + if (cur->bc_group) + xfs_group_put(cur->bc_group); kmem_cache_free(cur->bc_cache, cur); } @@ -1017,21 +1005,22 @@ xfs_btree_readahead_agblock( struct xfs_btree_block *block) { struct xfs_mount *mp = cur->bc_mp; + struct xfs_perag *pag = to_perag(cur->bc_group); xfs_agblock_t left = 
be32_to_cpu(block->bb_u.s.bb_leftsib); xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib); int rval = 0; if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) { xfs_buf_readahead(mp->m_ddev_targp, - xfs_agbno_to_daddr(cur->bc_ag.pag, left), - mp->m_bsize, cur->bc_ops->buf_ops); + xfs_agbno_to_daddr(pag, left), mp->m_bsize, + cur->bc_ops->buf_ops); rval++; } if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) { xfs_buf_readahead(mp->m_ddev_targp, - xfs_agbno_to_daddr(cur->bc_ag.pag, right), - mp->m_bsize, cur->bc_ops->buf_ops); + xfs_agbno_to_daddr(pag, right), mp->m_bsize, + cur->bc_ops->buf_ops); rval++; } @@ -1090,7 +1079,7 @@ xfs_btree_ptr_to_daddr( switch (cur->bc_ops->type) { case XFS_BTREE_TYPE_AG: - *daddr = xfs_agbno_to_daddr(cur->bc_ag.pag, + *daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group), be32_to_cpu(ptr->s)); break; case XFS_BTREE_TYPE_INODE: @@ -1312,7 +1301,7 @@ xfs_btree_owner( case XFS_BTREE_TYPE_INODE: return cur->bc_ino.ip->i_ino; case XFS_BTREE_TYPE_AG: - return pag_agno(cur->bc_ag.pag); + return cur->bc_group->xg_gno; default: ASSERT(0); return 0; diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index 10b7ddc3b2b34e..3b739459ebb0f4 100644 --- a/fs/xfs/libxfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h @@ -254,6 +254,7 @@ struct xfs_btree_cur union xfs_btree_irec bc_rec; /* current insert/search record value */ uint8_t bc_nlevels; /* number of levels in the tree */ uint8_t bc_maxlevels; /* maximum levels for this btree type */ + struct xfs_group *bc_group; /* per-type information */ union { @@ -264,13 +265,11 @@ struct xfs_btree_cur struct xbtree_ifakeroot *ifake; /* for staging cursor */ } bc_ino; struct { - struct xfs_perag *pag; struct xfs_buf *agbp; struct xbtree_afakeroot *afake; /* for staging cursor */ } bc_ag; struct { struct xfbtree *xfbtree; - struct xfs_perag *pag; } bc_mem; }; diff --git a/fs/xfs/libxfs/xfs_btree_mem.c b/fs/xfs/libxfs/xfs_btree_mem.c index 036061fe32cc90..df3d613675a15a 100644 --- a/fs/xfs/libxfs/xfs_btree_mem.c +++ b/fs/xfs/libxfs/xfs_btree_mem.c @@ -57,10 +57,8 @@ xfbtree_dup_cursor( ncur->bc_flags = cur->bc_flags; ncur->bc_nlevels = cur->bc_nlevels; ncur->bc_mem.xfbtree = cur->bc_mem.xfbtree; - - if (cur->bc_mem.pag) - ncur->bc_mem.pag = xfs_perag_hold(cur->bc_mem.pag); - + if (cur->bc_group) + ncur->bc_group = xfs_group_hold(cur->bc_group); return ncur; } diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 78e1920c1ff964..f0261c4d91061c 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -142,7 +142,7 @@ xfs_inobt_complain_bad_rec( xfs_warn(mp, "%sbt record corruption in AG %d detected at %pS!", - cur->bc_ops->name, pag_agno(cur->bc_ag.pag), fa); + cur->bc_ops->name, cur->bc_group->xg_gno, fa); xfs_warn(mp, "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", irec->ir_startino, irec->ir_count, irec->ir_freecount, @@ -170,7 +170,7 @@ xfs_inobt_get_rec( return error; xfs_inobt_btrec_to_irec(mp, rec, irec); - fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec); + fa = xfs_inobt_check_irec(to_perag(cur->bc_group), irec); if (fa) return xfs_inobt_complain_bad_rec(cur, fa, irec); @@ -275,8 +275,10 @@ xfs_check_agi_freecount( } } while (i == 1); - if (!xfs_is_shutdown(cur->bc_mp)) - ASSERT(freecount == cur->bc_ag.pag->pagi_freecount); + if (!xfs_is_shutdown(cur->bc_mp)) { + ASSERT(freecount == + to_perag(cur->bc_group)->pagi_freecount); + } } return 0; } @@ -2880,7 +2882,7 @@ xfs_ialloc_count_inodes_rec( xfs_failaddr_t fa; 
xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); - fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec); + fa = xfs_inobt_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_inobt_complain_bad_rec(cur, fa, &irec); diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 91d44be2ce48bc..4c28deb3e9881d 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -37,7 +37,7 @@ STATIC struct xfs_btree_cur * xfs_inobt_dup_cursor( struct xfs_btree_cur *cur) { - return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp, + return xfs_inobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp, cur->bc_ag.agbp); } @@ -45,7 +45,7 @@ STATIC struct xfs_btree_cur * xfs_finobt_dup_cursor( struct xfs_btree_cur *cur) { - return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp, + return xfs_finobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp, cur->bc_ag.agbp); } @@ -112,7 +112,7 @@ __xfs_inobt_alloc_block( memset(&args, 0, sizeof(args)); args.tp = cur->bc_tp; args.mp = cur->bc_mp; - args.pag = cur->bc_ag.pag; + args.pag = to_perag(cur->bc_group); args.oinfo = XFS_RMAP_OINFO_INOBT; args.minlen = 1; args.maxlen = 1; @@ -248,7 +248,7 @@ xfs_inobt_init_ptr_from_cur( { struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; - ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agi->agi_seqno)); + ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno)); ptr->s = agi->agi_root; } @@ -260,7 +260,8 @@ xfs_finobt_init_ptr_from_cur( { struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; - ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agi->agi_seqno)); + ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno)); + ptr->s = agi->agi_free_root; } @@ -483,7 +484,7 @@ xfs_inobt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops, M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_ag.agbp = agbp; if (agbp) { struct xfs_agi *agi = agbp->b_addr; @@ -509,7 +510,7 @@ xfs_finobt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops, M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_ag.agbp = agbp; if (agbp) { struct xfs_agi *agi = agbp->b_addr; diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index b8789c42c230b4..ed943f6e616d96 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -154,7 +154,7 @@ xfs_refcount_complain_bad_rec( xfs_warn(mp, "Refcount BTree record corruption in AG %d detected at %pS!", - pag_agno(cur->bc_ag.pag), fa); + cur->bc_group->xg_gno, fa); xfs_warn(mp, "Start block 0x%x, block count 0x%x, references 0x%x", irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount); @@ -180,7 +180,7 @@ xfs_refcount_get_rec( return error; xfs_refcount_btrec_to_irec(rec, irec); - fa = xfs_refcount_check_irec(cur->bc_ag.pag, irec); + fa = xfs_refcount_check_irec(to_perag(cur->bc_group), irec); if (fa) return xfs_refcount_complain_bad_rec(cur, fa, irec); @@ -1154,7 +1154,7 @@ xfs_refcount_adjust_extents( goto out_error; } } else { - fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag, + fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), tmp.rc_startblock); error = xfs_free_extent_later(cur->bc_tp, fsbno, tmp.rc_blockcount, NULL, @@ -1216,7 +1216,7 @@ xfs_refcount_adjust_extents( } goto advloop; } else { - fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag, + fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), ext.rc_startblock); 
error = xfs_free_extent_later(cur->bc_tp, fsbno, ext.rc_blockcount, NULL, @@ -1310,7 +1310,7 @@ xfs_refcount_continue_op( xfs_agblock_t new_agbno) { struct xfs_mount *mp = cur->bc_mp; - struct xfs_perag *pag = cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(cur->bc_group); if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, ri->ri_blockcount))) { @@ -1358,7 +1358,7 @@ xfs_refcount_finish_one( * If we haven't gotten a cursor or the cursor AG doesn't match * the startblock, get one now. */ - if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) { + if (rcur != NULL && to_perag(rcur->bc_group) != ri->ri_pag) { nr_ops = rcur->bc_refc.nr_ops; shape_changes = rcur->bc_refc.shape_changes; xfs_btree_del_cursor(rcur, 0); @@ -1878,7 +1878,8 @@ xfs_refcount_recover_extent( INIT_LIST_HEAD(&rr->rr_list); xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec); - if (xfs_refcount_check_irec(cur->bc_ag.pag, &rr->rr_rrec) != NULL || + if (xfs_refcount_check_irec(to_perag(cur->bc_group), &rr->rr_rrec) != + NULL || XFS_IS_CORRUPT(cur->bc_mp, rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) { xfs_btree_mark_sick(cur); @@ -2026,7 +2027,7 @@ xfs_refcount_query_range_helper( xfs_failaddr_t fa; xfs_refcount_btrec_to_irec(rec, &irec); - fa = xfs_refcount_check_irec(cur->bc_ag.pag, &irec); + fa = xfs_refcount_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_refcount_complain_bad_rec(cur, fa, &irec); diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index db389fdbd929a4..54505fee185289 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -30,7 +30,7 @@ xfs_refcountbt_dup_cursor( struct xfs_btree_cur *cur) { return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp, - cur->bc_ag.agbp, cur->bc_ag.pag); + cur->bc_ag.agbp, to_perag(cur->bc_group)); } STATIC void @@ -68,7 +68,7 @@ xfs_refcountbt_alloc_block( memset(&args, 0, sizeof(args)); args.tp = cur->bc_tp; args.mp = cur->bc_mp; - args.pag = cur->bc_ag.pag; + args.pag = to_perag(cur->bc_group); args.oinfo = XFS_RMAP_OINFO_REFC; args.minlen = args.maxlen = args.prod = 1; args.resv = XFS_AG_RESV_METADATA; @@ -81,7 +81,7 @@ xfs_refcountbt_alloc_block( *stat = 0; return 0; } - ASSERT(args.agno == pag_agno(cur->bc_ag.pag)); + ASSERT(args.agno == cur->bc_group->xg_gno); ASSERT(args.len == 1); new->s = cpu_to_be32(args.agbno); @@ -169,7 +169,7 @@ xfs_refcountbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); + ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno)); ptr->s = agf->agf_refcount_root; } @@ -365,7 +365,7 @@ xfs_refcountbt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops, mp->m_refc_maxlevels, xfs_refcountbt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_refc.nr_ops = 0; cur->bc_refc.shape_changes = 0; cur->bc_ag.agbp = agbp; diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index 984120b128fb9c..0c404625986163 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -269,9 +269,7 @@ xfs_rmap_check_btrec( struct xfs_btree_cur *cur, const struct xfs_rmap_irec *irec) { - if (xfs_btree_is_mem_rmap(cur->bc_ops)) - return xfs_rmap_check_irec(cur->bc_mem.pag, irec); - return xfs_rmap_check_irec(cur->bc_ag.pag, irec); + return xfs_rmap_check_irec(to_perag(cur->bc_group), irec); } static inline int @@ -288,7 +286,7 @@ xfs_rmap_complain_bad_rec( else xfs_warn(mp, "Reverse Mapping BTree 
record corruption in AG %d detected at %pS!", - pag_agno(cur->bc_ag.pag), fa); + cur->bc_group->xg_gno, fa); xfs_warn(mp, "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x", irec->rm_owner, irec->rm_flags, irec->rm_startblock, @@ -2588,7 +2586,7 @@ xfs_rmap_finish_one( * If we haven't gotten a cursor or the cursor AG doesn't match * the startblock, get one now. */ - if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) { + if (rcur != NULL && to_perag(rcur->bc_group) != ri->ri_pag) { xfs_btree_del_cursor(rcur, 0); rcur = NULL; *pcur = NULL; diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index b37eaf37c7fd2d..2cab694ac58a7a 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -57,7 +57,7 @@ xfs_rmapbt_dup_cursor( struct xfs_btree_cur *cur) { return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp, - cur->bc_ag.agbp, cur->bc_ag.pag); + cur->bc_ag.agbp, to_perag(cur->bc_group)); } STATIC void @@ -66,14 +66,15 @@ xfs_rmapbt_set_root( const union xfs_btree_ptr *ptr, int inc) { - struct xfs_buf *agbp = cur->bc_ag.agbp; - struct xfs_agf *agf = agbp->b_addr; + struct xfs_buf *agbp = cur->bc_ag.agbp; + struct xfs_agf *agf = agbp->b_addr; + struct xfs_perag *pag = to_perag(cur->bc_group); ASSERT(ptr->s != 0); agf->agf_rmap_root = ptr->s; be32_add_cpu(&agf->agf_rmap_level, inc); - cur->bc_ag.pag->pagf_rmap_level += inc; + pag->pagf_rmap_level += inc; xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); } @@ -87,7 +88,7 @@ xfs_rmapbt_alloc_block( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; - struct xfs_perag *pag = cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(cur->bc_group); struct xfs_alloc_arg args = { .len = 1 }; int error; xfs_agblock_t bno; @@ -125,7 +126,7 @@ xfs_rmapbt_free_block( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; - struct xfs_perag *pag = cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(cur->bc_group); xfs_agblock_t bno; int error; @@ -227,7 +228,7 @@ xfs_rmapbt_init_ptr_from_cur( { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - ASSERT(pag_agno(cur->bc_ag.pag) == be32_to_cpu(agf->agf_seqno)); + ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno)); ptr->s = agf->agf_rmap_root; } @@ -538,7 +539,7 @@ xfs_rmapbt_init_cursor( cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_ops, mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache); - cur->bc_ag.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); cur->bc_ag.agbp = agbp; if (agbp) { struct xfs_agf *agf = agbp->b_addr; @@ -653,7 +654,7 @@ xfs_rmapbt_mem_cursor( cur->bc_mem.xfbtree = xfbt; cur->bc_nlevels = xfbt->nlevels; - cur->bc_mem.pag = xfs_perag_hold(pag); + cur->bc_group = xfs_group_hold(pag_group(pag)); return cur; } diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c index d1b8a4997dd2ce..8b282138097fb8 100644 --- a/fs/xfs/scrub/alloc.c +++ b/fs/xfs/scrub/alloc.c @@ -139,7 +139,7 @@ xchk_allocbt_rec( struct xchk_alloc *ca = bs->private; xfs_alloc_btrec_to_irec(rec, &irec); - if (xfs_alloc_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) { + if (xfs_alloc_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) { xchk_btree_set_corrupt(bs->sc, bs->cur, 0); return 0; } diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index fb022b403716b1..64168f2e42220a 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -601,7 +601,8 @@ xchk_bmap_check_rmap( xchk_fblock_set_corrupt(sc, sbcri->whichfork, check_rec.rm_offset); if (irec.br_startblock != - 
xfs_agbno_to_fsb(cur->bc_ag.pag, check_rec.rm_startblock)) + xfs_agbno_to_fsb(to_perag(cur->bc_group), + check_rec.rm_startblock)) xchk_fblock_set_corrupt(sc, sbcri->whichfork, check_rec.rm_offset); if (irec.br_blockcount > check_rec.rm_blockcount) diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c index be408e50484b54..7c4955482641f7 100644 --- a/fs/xfs/scrub/bmap_repair.c +++ b/fs/xfs/scrub/bmap_repair.c @@ -196,7 +196,7 @@ xrep_bmap_check_fork_rmap( return -EFSCORRUPTED; /* Check that this is within the AG. */ - if (!xfs_verify_agbext(cur->bc_ag.pag, rec->rm_startblock, + if (!xfs_verify_agbext(to_perag(cur->bc_group), rec->rm_startblock, rec->rm_blockcount)) return -EFSCORRUPTED; @@ -268,7 +268,7 @@ xrep_bmap_walk_rmap( if ((rec->rm_flags & XFS_RMAP_UNWRITTEN) && !rb->allow_unwritten) return -EFSCORRUPTED; - fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag, rec->rm_startblock); + fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), rec->rm_startblock); if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) { rb->old_bmbt_block_count += rec->rm_blockcount; diff --git a/fs/xfs/scrub/cow_repair.c b/fs/xfs/scrub/cow_repair.c index 19bded43c4fe1e..5b6194cef3e5e3 100644 --- a/fs/xfs/scrub/cow_repair.c +++ b/fs/xfs/scrub/cow_repair.c @@ -145,7 +145,8 @@ xrep_cow_mark_shared_staging( xrep_cow_trim_refcount(xc, &rrec, rec); return xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(cur->bc_ag.pag, rrec.rc_startblock), + xfs_agbno_to_fsb(to_perag(cur->bc_group), + rrec.rc_startblock), rrec.rc_blockcount); } @@ -176,8 +177,9 @@ xrep_cow_mark_missing_staging( if (xc->next_bno >= rrec.rc_startblock) goto next; + error = xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(cur->bc_ag.pag, xc->next_bno), + xfs_agbno_to_fsb(to_perag(cur->bc_group), xc->next_bno), rrec.rc_startblock - xc->next_bno); if (error) return error; @@ -220,7 +222,8 @@ xrep_cow_mark_missing_staging_rmap( } return xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(cur->bc_ag.pag, rec_bno), rec_len); + xfs_agbno_to_fsb(to_perag(cur->bc_group), rec_bno), + rec_len); } /* diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c index fce04444c37c2a..6ceef3749e3b99 100644 --- a/fs/xfs/scrub/health.c +++ b/fs/xfs/scrub/health.c @@ -276,7 +276,7 @@ xchk_ag_btree_del_cursor_if_sick( type_to_health_flag[sc->sm->sm_type].group == XHG_AG) mask &= ~sc->sick_mask; - if (xfs_ag_has_sickness((*curp)->bc_ag.pag, mask)) { + if (xfs_group_has_sickness((*curp)->bc_group, mask)) { sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL; xfs_btree_del_cursor(*curp, XFS_BTREE_NOERROR); *curp = NULL; diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c index ee71cf2050b72e..abad54c3621d44 100644 --- a/fs/xfs/scrub/ialloc.c +++ b/fs/xfs/scrub/ialloc.c @@ -258,7 +258,7 @@ xchk_iallocbt_chunk( { struct xfs_scrub *sc = bs->sc; struct xfs_mount *mp = bs->cur->bc_mp; - struct xfs_perag *pag = bs->cur->bc_ag.pag; + struct xfs_perag *pag = to_perag(bs->cur->bc_group); xfs_agblock_t agbno; xfs_extlen_t len; @@ -318,7 +318,7 @@ xchk_iallocbt_check_cluster_ifree( * the record, compute which fs inode we're talking about. */ agino = irec->ir_startino + irec_ino; - fsino = xfs_agino_to_ino(bs->cur->bc_ag.pag, agino); + fsino = xfs_agino_to_ino(to_perag(bs->cur->bc_group), agino); irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino)); if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || @@ -394,7 +394,7 @@ xchk_iallocbt_check_cluster( * ir_startino can be large enough to make im_boffset nonzero. 
*/ ir_holemask = (irec->ir_holemask & cluster_mask); - imap.im_blkno = xfs_agbno_to_daddr(bs->cur->bc_ag.pag, agbno); + imap.im_blkno = xfs_agbno_to_daddr(to_perag(bs->cur->bc_group), agbno); imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) << mp->m_sb.sb_inodelog; @@ -405,9 +405,9 @@ xchk_iallocbt_check_cluster( return 0; } - trace_xchk_iallocbt_check_cluster(bs->cur->bc_ag.pag, irec->ir_startino, - imap.im_blkno, imap.im_len, cluster_base, nr_inodes, - cluster_mask, ir_holemask, + trace_xchk_iallocbt_check_cluster(to_perag(bs->cur->bc_group), + irec->ir_startino, imap.im_blkno, imap.im_len, + cluster_base, nr_inodes, cluster_mask, ir_holemask, XFS_INO_TO_OFFSET(mp, irec->ir_startino + cluster_base)); @@ -583,7 +583,7 @@ xchk_iallocbt_rec( uint16_t holemask; xfs_inobt_btrec_to_irec(mp, rec, &irec); - if (xfs_inobt_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) { + if (xfs_inobt_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) { xchk_btree_set_corrupt(bs->sc, bs->cur, 0); return 0; } diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c index d0c7d4a29c0feb..2b6be75e942415 100644 --- a/fs/xfs/scrub/refcount.c +++ b/fs/xfs/scrub/refcount.c @@ -453,7 +453,8 @@ xchk_refcountbt_rec( struct xchk_refcbt_records *rrc = bs->private; xfs_refcount_btrec_to_irec(rec, &irec); - if (xfs_refcount_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) { + if (xfs_refcount_check_irec(to_perag(bs->cur->bc_group), &irec) != + NULL) { xchk_btree_set_corrupt(bs->sc, bs->cur, 0); return 0; } diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c index 26b5c90b3f6aee..39e9ad7cd8aea5 100644 --- a/fs/xfs/scrub/rmap.c +++ b/fs/xfs/scrub/rmap.c @@ -358,7 +358,7 @@ xchk_rmapbt_rec( struct xfs_rmap_irec irec; if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL || - xfs_rmap_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) { + xfs_rmap_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) { xchk_btree_set_corrupt(bs->sc, bs->cur, 0); return 0; } diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c index 6c420ec7dacd1b..a0a227d183d28d 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -622,7 +622,7 @@ xrep_rmap_walk_inobt( return error; xfs_inobt_btrec_to_irec(mp, rec, &irec); - if (xfs_inobt_check_irec(cur->bc_ag.pag, &irec) != NULL) + if (xfs_inobt_check_irec(to_perag(cur->bc_group), &irec) != NULL) return -EFSCORRUPTED; agino = irec.ir_startino; diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index a26fb054346b68..5d5e54a16f23c8 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -394,7 +394,8 @@ xfs_getfsmap_datadev_helper( struct xfs_getfsmap_info *info = priv; return xfs_getfsmap_helper(cur->bc_tp, info, rec, - xfs_agbno_to_daddr(cur->bc_ag.pag, rec->rm_startblock), + xfs_agbno_to_daddr(to_perag(cur->bc_group), + rec->rm_startblock), 0); } @@ -415,7 +416,8 @@ xfs_getfsmap_datadev_bnobt_helper( irec.rm_flags = 0; return xfs_getfsmap_helper(cur->bc_tp, info, &irec, - xfs_agbno_to_daddr(cur->bc_ag.pag, rec->ar_startblock), + xfs_agbno_to_daddr(to_perag(cur->bc_group), + rec->ar_startblock), 0); } diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c index 732246f46680d5..f45f125a669de7 100644 --- a/fs/xfs/xfs_health.c +++ b/fs/xfs/xfs_health.c @@ -531,24 +531,13 @@ void xfs_btree_mark_sick( struct xfs_btree_cur *cur) { - switch (cur->bc_ops->type) { - case XFS_BTREE_TYPE_MEM: - /* no health state tracking for ephemeral btrees */ - return; - case XFS_BTREE_TYPE_AG: + if 
(xfs_btree_is_bmap(cur->bc_ops)) { + xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork); + /* no health state tracking for ephemeral btrees */ + } else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) { + ASSERT(cur->bc_group); ASSERT(cur->bc_ops->sick_mask); - xfs_ag_mark_sick(cur->bc_ag.pag, cur->bc_ops->sick_mask); - return; - case XFS_BTREE_TYPE_INODE: - if (xfs_btree_is_bmap(cur->bc_ops)) { - xfs_bmap_mark_sick(cur->bc_ino.ip, - cur->bc_ino.whichfork); - return; - } - fallthrough; - default: - ASSERT(0); - return; + xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask); } } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 562e0ad1c6cf0d..8aa6af5c9c0174 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -2588,7 +2588,7 @@ TRACE_EVENT(xfs_btree_alloc_block, __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2844,7 +2844,7 @@ DECLARE_EVENT_CLASS(xfs_rmap_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = agbno; __entry->len = len; __entry->owner = oinfo->oi_owner; @@ -2889,7 +2889,7 @@ DECLARE_EVENT_CLASS(xfs_btree_error_class, __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2943,7 +2943,7 @@ TRACE_EVENT(xfs_rmap_convert_state, __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: @@ -2978,7 +2978,7 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = agbno; __entry->len = len; __entry->owner = owner; @@ -3222,7 +3222,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = agbno; __entry->len = len; ), @@ -3253,7 +3253,7 @@ TRACE_EVENT(xfs_refcount_lookup, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = agbno; __entry->dir = dir; ), @@ -3279,7 +3279,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; @@ -3315,7 +3315,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; @@ -3357,7 +3357,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock 
= i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; @@ -3407,7 +3407,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; @@ -3462,7 +3462,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; @@ -4352,7 +4352,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot, TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = cur->bc_ag.afake->af_root; __entry->levels = cur->bc_ag.afake->af_levels; __entry->blocks = cur->bc_ag.afake->af_blocks; @@ -4467,7 +4467,7 @@ TRACE_EVENT(xfs_btree_bload_block, __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb); __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb); } else { - __entry->agno = pag_agno(cur->bc_ag.pag); + __entry->agno = cur->bc_group->xg_gno; __entry->agbno = be32_to_cpu(ptr->s); } __entry->nr_records = nr_records; From patchwork Tue Nov 5 22:15:51 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863598 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 348141DD0D2 for ; Tue, 5 Nov 2024 22:15:51 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844952; cv=none; b=m/P846nv3SLP2SJcqlk4WPLrew9+N7JQz+Hj/aBAWK4As4cmCPYMAonDtLt4marRzuwQr+exEFhP8nx/XeY/rQR/QY/+VXYK4TuV6SC+8CO66bDF2CyUaeJEp2rmjP7+juG/9QSH3uFbTVCAGodeopBPLzBNZKyNw4SjRpFPL2Q= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844952; c=relaxed/simple; bh=U0LQDbiSEjftj2FCRJzHtp6pnD88HBSP6PywAi5Whrg=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=EvaB5QSOgLRyQBjOwOYgDKSkBZBcxyZIwKCP4Yer8NHbYmMzup88/9IfuTmm+UgWNxL+wzjhXCGN9Hvu8GGBlt7T2EPrp3GKkL7rJuwCL9R/9+5lvbwF/bKFZ4r/A9z3gcCpcI9GiLd6EBLNGX6T4Kfrw9/1OwVVr4dVMsyPDB8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=R74X79lT; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="R74X79lT" Received: by smtp.kernel.org (Postfix) with ESMTPSA id AC6D4C4CECF; Tue, 5 Nov 2024 22:15:51 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844951; bh=U0LQDbiSEjftj2FCRJzHtp6pnD88HBSP6PywAi5Whrg=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=R74X79lTtlL+EsM0PPk7nvAgCddlZoV3gSh7eGkVDGGgitwzcIlA7vMwUuz7+WVVR novE0jXCt2kqJ/653Nk/sR5LtQCXzwzBDnuNr5SkoYryeM+0Y9hEBPThSIOxC6giDV 
JENwSkcaMRKYrQFa5NcL+l0r2UIoT7u2WMHeUiDtvON2f14qfqwnkbWREgJI4eKT/J is+z2vOU+ztvH1uGZN4+Uze0wEWutFvo5sYKPHS37zpKYhYu6ayFtXrLWunEuFx0YP qHGbkisy2gM8AuJla3xEQVKuj1Pu0KA6Llm80vhEqQvpfKHcpwhCHjyZa+d28FrzDP xtZlTMz+1gT+Q== Date: Tue, 05 Nov 2024 14:15:51 -0800 Subject: [PATCH 13/16] xfs: store a generic xfs_group pointer in xfs_getfsmap_info From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395493.1869491.7056223583293426352.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Replace the pag and rtg pointers with a generic group pointer. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_fsmap.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 5d5e54a16f23c8..a91677ac54e7e3 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -158,7 +158,7 @@ struct xfs_getfsmap_info { struct xfs_fsmap_head *head; struct fsmap *fsmap_recs; /* mapping records */ struct xfs_buf *agf_bp; /* AGF, for refcount queries */ - struct xfs_perag *pag; /* AG info, if applicable */ + struct xfs_group *group; /* group info, if applicable */ xfs_daddr_t next_daddr; /* next daddr we expect */ /* daddr of low fsmap key when we're using the rtbitmap */ xfs_daddr_t low_daddr; @@ -216,12 +216,13 @@ xfs_getfsmap_is_shared( if (!xfs_has_reflink(mp)) return 0; /* rt files will have no perag structure */ - if (!info->pag) + if (!info->group) return 0; /* Are there any shared blocks here? */ flen = 0; - cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag); + cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, + to_perag(info->group)); error = xfs_refcount_find_shared(cur, rec->rm_startblock, rec->rm_blockcount, &fbno, &flen, false); @@ -353,7 +354,8 @@ xfs_getfsmap_helper( return -ECANCELED; trace_xfs_fsmap_mapping(mp, info->dev, - info->pag ? pag_agno(info->pag) : NULLAGNUMBER, rec); + info->group ? info->group->xg_gno : NULLAGNUMBER, + rec); fmr.fmr_device = info->dev; fmr.fmr_physical = rec_daddr; @@ -519,7 +521,7 @@ __xfs_getfsmap_datadev( * Set the AG high key from the fsmap high key if this * is the last AG that we're querying. */ - info->pag = pag; + info->group = pag_group(pag); if (pag_agno(pag) == end_ag) { info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsb); @@ -569,7 +571,7 @@ __xfs_getfsmap_datadev( if (error) break; } - info->pag = NULL; + info->group = NULL; } if (bt_cur) @@ -579,9 +581,9 @@ __xfs_getfsmap_datadev( xfs_trans_brelse(tp, info->agf_bp); info->agf_bp = NULL; } - if (info->pag) { - xfs_perag_rele(info->pag); - info->pag = NULL; + if (info->group) { + xfs_perag_rele(pag); + info->group = NULL; } else if (pag) { /* loop termination case */ xfs_perag_rele(pag); @@ -604,7 +606,7 @@ xfs_getfsmap_datadev_rmapbt_query( /* Allocate cursor for this AG and query_range it. */ *curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp, - info->pag); + to_perag(info->group)); return xfs_rmap_query_range(*curpp, &info->low, &info->high, xfs_getfsmap_datadev_helper, info); } @@ -637,7 +639,7 @@ xfs_getfsmap_datadev_bnobt_query( /* Allocate cursor for this AG and query_range it. 
*/ *curpp = xfs_bnobt_init_cursor(tp->t_mountp, tp, info->agf_bp, - info->pag); + to_perag(info->group)); key->ar_startblock = info->low.rm_startblock; key[1].ar_startblock = info->high.rm_startblock; return xfs_alloc_query_range(*curpp, key, &key[1], @@ -997,7 +999,7 @@ xfs_getfsmap( info.dev = handlers[i].dev; info.last = false; - info.pag = NULL; + info.group = NULL; info.low_daddr = XFS_BUF_DADDR_NULL; info.low.rm_blockcount = 0; error = handlers[i].fn(tp, dkeys, &info); From patchwork Tue Nov 5 22:16:06 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863599 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B56D61DD0D2 for ; Tue, 5 Nov 2024 22:16:07 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844967; cv=none; b=HRIu2QVu3uuWc82aCdB3sXZDcH2yMIYd4Rt/YzeB/GRqJJ1MCjKz2/aq/gnGl4gPig48A1vY1fEPl/olE7CtO3EWPfxjdYeCu9Eszd9NmOaks3SOyAmLaJx8eTZpw3LdK3A3IXKW4FUoAHKJI54ipA4BHvWPQOyq27sOmWjeGHc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844967; c=relaxed/simple; bh=xdSlhFWTnk7OiNjLPJy3W+tiXBvS2g7IZmo4a8qClxc=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=gNIwm/fDg+kybilf+DLWMWtylTV7Z3DYgEDfXBaH8vajx2qMn7qvot2V6fXPxlsHG666brJfjpBWpMV5jSrYY2+CZYt/TWnY7GSERbpa+nlY7Zg0wONY2KBsbfpVnVTeFfdVBu9oduy2DFQF96wROAc0UzhGEWMIIyTJ/J1Zhxk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=kPOBBwfO; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="kPOBBwfO" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 4F441C4CECF; Tue, 5 Nov 2024 22:16:07 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844967; bh=xdSlhFWTnk7OiNjLPJy3W+tiXBvS2g7IZmo4a8qClxc=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=kPOBBwfOBN/hrZKMsPGXznb/poiAOJCMxtPVCpB1SDu/RJzpRotikWHu0hmluVMkN nJztnCBk2ZYgjZAkhamaTRncu56LQmCFE21tbFWlQHU4K/otkGFAwDZxrKHfABJnv5 Mz6aO21fOpMw1SZJEjpWl31TRfJ1fkUI8UdI3zLYODWJpJB1GrSPuj7w4v/H6allQz b3Mpw2wf9jitpXxzURmFDa/aude1aAejn6r7A2PaQntXNCpHeYSxN/b6wP5kFggwRE o+dYparZSF4lvy0i+xKzjK+HqALpntYACHPqanOWAfgKybJxjPhQa2zjWnmlVoNMls wafRCV3Py4VRg== Date: Tue, 05 Nov 2024 14:16:06 -0800 Subject: [PATCH 14/16] xfs: add group based bno conversion helpers From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395510.1869491.5968887681069440623.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Add/move the blocks, blklog and blkmask fields to the generic groups structure so that code can work with AGs and RTGs by just using the right index into the array. 
Then, add convenience helpers to convert block numbers based on the generic group. This will allow writing code that doesn't care if it is used on AGs or the upcoming realtime groups. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/libxfs/xfs_group.c | 9 +++++++ fs/xfs/libxfs/xfs_group.h | 56 +++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/libxfs/xfs_sb.c | 7 ++++++ fs/xfs/xfs_mount.h | 30 ++++++++++++++++++++++++ 4 files changed, 102 insertions(+) diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index 5c6fa5d76a91b1..e9d76bcdc820dd 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -214,3 +214,12 @@ xfs_group_insert( #endif return error; } + +struct xfs_group * +xfs_group_get_by_fsb( + struct xfs_mount *mp, + xfs_fsblock_t fsbno, + enum xfs_group_type type) +{ + return xfs_group_get(mp, xfs_fsb_to_gno(mp, fsbno, type), type); +} diff --git a/fs/xfs/libxfs/xfs_group.h b/fs/xfs/libxfs/xfs_group.h index 0ff6e1d5635cb1..5b7362277c3f7a 100644 --- a/fs/xfs/libxfs/xfs_group.h +++ b/fs/xfs/libxfs/xfs_group.h @@ -46,6 +46,8 @@ struct xfs_group { struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index, enum xfs_group_type type); +struct xfs_group *xfs_group_get_by_fsb(struct xfs_mount *mp, + xfs_fsblock_t fsbno, enum xfs_group_type type); struct xfs_group *xfs_group_hold(struct xfs_group *xg); void xfs_group_put(struct xfs_group *xg); @@ -72,4 +74,58 @@ int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg, #define xfs_group_marked(_mp, _type, _mark) \ xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark)) +static inline xfs_agblock_t +xfs_group_max_blocks( + struct xfs_group *xg) +{ + return xg->xg_mount->m_groups[xg->xg_type].blocks; +} + +static inline xfs_fsblock_t +xfs_group_start_fsb( + struct xfs_group *xg) +{ + return ((xfs_fsblock_t)xg->xg_gno) << + xg->xg_mount->m_groups[xg->xg_type].blklog; +} + +static inline xfs_fsblock_t +xfs_gbno_to_fsb( + struct xfs_group *xg, + xfs_agblock_t gbno) +{ + return xfs_group_start_fsb(xg) | gbno; +} + +static inline xfs_daddr_t +xfs_gbno_to_daddr( + struct xfs_group *xg, + xfs_agblock_t gbno) +{ + struct xfs_mount *mp = xg->xg_mount; + uint32_t blocks = mp->m_groups[xg->xg_type].blocks; + + return XFS_FSB_TO_BB(mp, (xfs_fsblock_t)xg->xg_gno * blocks + gbno); +} + +static inline uint32_t +xfs_fsb_to_gno( + struct xfs_mount *mp, + xfs_fsblock_t fsbno, + enum xfs_group_type type) +{ + if (!mp->m_groups[type].blklog) + return 0; + return fsbno >> mp->m_groups[type].blklog; +} + +static inline xfs_agblock_t +xfs_fsb_to_gbno( + struct xfs_mount *mp, + xfs_fsblock_t fsbno, + enum xfs_group_type type) +{ + return fsbno & mp->m_groups[type].blkmask; +} + #endif /* __LIBXFS_GROUP_H */ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 061c8c961d5bc9..f7a07e61341ded 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -988,6 +988,8 @@ xfs_sb_mount_common( struct xfs_mount *mp, struct xfs_sb *sbp) { + struct xfs_groups *ags = &mp->m_groups[XG_TYPE_AG]; + mp->m_agfrotor = 0; atomic_set(&mp->m_agirotor, 0); mp->m_maxagi = mp->m_sb.sb_agcount; @@ -998,6 +1000,11 @@ xfs_sb_mount_common( mp->m_blockmask = sbp->sb_blocksize - 1; mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; mp->m_blockwmask = mp->m_blockwsize - 1; + + ags->blocks = mp->m_sb.sb_agblocks; + ags->blklog = mp->m_sb.sb_agblklog; + ags->blkmask = xfs_mask32lo(mp->m_sb.sb_agblklog); + xfs_mount_sb_set_rextsize(mp, sbp); mp->m_alloc_mxr[0] = 
xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, true); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 530d7f025506ce..1b698878f40cb1 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -71,8 +71,38 @@ struct xfs_inodegc { unsigned int cpu; }; +/* + * Container for each type of groups, used to look up individual groups and + * describes the geometry. + */ struct xfs_groups { struct xarray xa; + + /* + * Maximum capacity of the group in FSBs. + * + * Each group is laid out densely in the daddr space. For the + * degenerate case of a pre-rtgroups filesystem, the incore rtgroup + * pretends to have a zero-block and zero-blklog rtgroup. + */ + uint32_t blocks; + + /* + * Log(2) of the logical size of each group. + * + * Compared to the blocks field above this is rounded up to the next + * power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t + * space sparsely with a hole from blocks to (1 << blklog) at the end + * of each group. + */ + uint8_t blklog; + + /* + * Mask to extract the group-relative block number from a FSB. + * For a pre-rtgroups filesystem we pretend to have one very large + * rtgroup, so this mask must be 64-bit. + */ + uint64_t blkmask; }; /* From patchwork Tue Nov 5 22:16:22 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. Wong" X-Patchwork-Id: 13863600 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 625F21E5022 for ; Tue, 5 Nov 2024 22:16:23 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844983; cv=none; b=OJu8dK01JLJhcVq0JRyCjF7CplCNwb7h/lFLbil+62n3gwdT6VN5p3I5vauRKEqdyTNJiS4lkcWzF20zLb2TWgXN6dd5eliieC2HUUw+O4Mccp2SXpKil8b8xAZOFrVCocmNynIGEwiXcVDhH0Fqxu/YRz8/4pdEM1ossnDEbXs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844983; c=relaxed/simple; bh=EDO4OYcQrMAaIyf2Y3ZlWO1EckiVk+A0KqU7Wd3e5Us=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=W5dAI+lbsneAp+nOllDt45N1iYU8X2+0xYZP7mvssDDnOSQECZlJrvPT189TShkW2mPYnzTcwQxVkqcwLR6WC2xh3PjNQuc6TrQVwo8Hti12JGK7q7oATV0rQzJ5dnwSyiHBxxCCjKDJd08Y3NYxP5x5X+sBMqlwndVVxjm5VXY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KYrW0yak; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KYrW0yak" Received: by smtp.kernel.org (Postfix) with ESMTPSA id D7F97C4CECF; Tue, 5 Nov 2024 22:16:22 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844982; bh=EDO4OYcQrMAaIyf2Y3ZlWO1EckiVk+A0KqU7Wd3e5Us=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=KYrW0yakeQLs5zFNtPwJxWZUL5OWVCsAbZZTtf8u1m8kNOhH3NuF54fzj8gwdepCX NpVz/9PXHppWT2KlEpaME1WmOwG+fksdSiNdPxXWs5QJaw/rtZW1+5pMgrL7AGCjXI Uo1T5cuYJewnoMaANEGNu+rVYWsDaV4JTgNmIwhWIuiih6e//WUWfzW5sqQr3beeLB iYTfCKeKO/iU9D163bSXx5Dgw11oIse9pd+6mqaMBoHbu0zzcGu/7rA4vudV26Y3Uy T3YhB/6Yp8e4NnZnH6nkXnwUcvSIjxc4STn55I6a6GJMmDDD9hLV7cTmGwaugCvDle kloofHNyMakZw== Date: Tue, 05 Nov 2024 14:16:22 -0800 Subject: [PATCH 15/16] xfs: remove xfs_group_intent_hold and 
xfs_group_intent_rele From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395527.1869491.6553959744827461856.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Each of them just has a single caller, so fold them. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_drain.c | 36 +++++++++--------------------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c index a72d08947d6d10..7a728a04f7a6b1 100644 --- a/fs/xfs/xfs_drain.c +++ b/fs/xfs/xfs_drain.c @@ -94,31 +94,11 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr) } /* - * Declare an intent to update group metadata. Other threads that need - * exclusive access can decide to back off if they see declared intentions. - */ -static void -xfs_group_intent_hold( - struct xfs_group *xg) -{ - trace_xfs_group_intent_hold(xg, __return_address); - xfs_defer_drain_grab(&xg->xg_intents_drain); -} - -/* - * Release our intent to update this groups metadata. - */ -static void -xfs_group_intent_rele( - struct xfs_group *xg) -{ - trace_xfs_group_intent_rele(xg, __return_address); - xfs_defer_drain_rele(&xg->xg_intents_drain); -} - -/* - * Get a passive reference to the AG that contains a fsbno and declare an intent - * to update its metadata. + * Get a passive reference to the AG that contains a fsbno and declare an + * intent to update its metadata. + * + * Other threads that need exclusive access can decide to back off if they see + * declared intentions. */ struct xfs_perag * xfs_perag_intent_get( @@ -131,7 +111,8 @@ xfs_perag_intent_get( if (!pag) return NULL; - xfs_group_intent_hold(pag_group(pag)); + trace_xfs_group_intent_hold(pag_group(pag), __return_address); + xfs_defer_drain_grab(pag_group(pag).xg_intents_drain); return pag; } @@ -143,7 +124,8 @@ void xfs_perag_intent_put( struct xfs_perag *pag) { - xfs_group_intent_rele(pag_group(pag)); + trace_xfs_group_intent_rele(pag_group(pag), __return_address); + xfs_defer_drain_rele(pag_group(pag).xg_intents_drain); xfs_perag_put(pag); } From patchwork Tue Nov 5 22:16:38 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Darrick J. 
Wong" X-Patchwork-Id: 13863601 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 03CDE1DD0D2 for ; Tue, 5 Nov 2024 22:16:38 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844999; cv=none; b=VC2xI6xT5OgHfWoLE03uehzWAkTingRjkNRCHOaMriO8xpOP/7RJ2zpfnvs2lDvw3Xo4YTybwUOpVbH4uF9kCuVVIscjAk9MiJYB1fSRBIQCJszBIGgluhhD4an1Q/NWjYyE9Wc8G/roIzn9O52p8ez2wTKzliUSdzRm7j6FQis= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730844999; c=relaxed/simple; bh=YGoRT+ODaJAExPN9GqV3BY+sdiklUwoAy1ZtaSAXABY=; h=Date:Subject:From:To:Cc:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=bDfDqxf6xlrIy1e6fgbVv1Svs6RoMbIyIMp38YbIcidXCsEWmcLPqUiWj4sY0SMvHDeS8lt4vJH0JGfitmzfRP3w0RL+6nk9Vk73/1goHyT373/rI3Q4ZPmjZJexv+oiBCKXqk06uILPjpNVLjFAaw0FPHDRPdYRTcRHtw6hTt8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=GGySxxxD; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="GGySxxxD" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 884D2C4CECF; Tue, 5 Nov 2024 22:16:38 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1730844998; bh=YGoRT+ODaJAExPN9GqV3BY+sdiklUwoAy1ZtaSAXABY=; h=Date:Subject:From:To:Cc:In-Reply-To:References:From; b=GGySxxxDk4xyScfwXML7M/OJ/3Ubv/AYQjHLRAAMt3yNxQmi2fQBJGtZNl2rV+7cE st4L9Vix6Kq4om8qlcwoik+glcM8/ZPSr3Zi7160wLayGk9tx485SSj2L9gqO0Niah ZjFr5meqkINXs2QSfrB3HCLMtd7SHjg2IZdCFcfVX1u3bQDuYaHH+9qsZD2mtZEvpG /BC3CIdHF+mOMNeA9bSG66yJOFzEBf0uhZSlIZVK90otVlqVzKlSjxLmsjTkd0mxY6 SFAYEgnKOyDiwy9tvMHzbvJQHwcshjjpC5N/cecx0mOofi4U1m2FXXnQy8PCHX47H9 X5docKsqrsjYA== Date: Tue, 05 Nov 2024 14:16:38 -0800 Subject: [PATCH 16/16] xfs: store a generic group structure in the intents From: "Darrick J. Wong" To: cem@kernel.org, djwong@kernel.org Cc: linux-xfs@vger.kernel.org Message-ID: <173084395543.1869491.1607375863736059392.stgit@frogsfrogsfrogs> In-Reply-To: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> References: <173084395220.1869491.11426383276644234025.stgit@frogsfrogsfrogs> Precedence: bulk X-Mailing-List: linux-xfs@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 From: Christoph Hellwig Replace the pag pointers in the extent free, bmap, rmap and refcount intent structures with a pointer to the generic group to prepare for adding intents for realtime groups. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/libxfs/xfs_alloc.h | 2 +- fs/xfs/libxfs/xfs_bmap.h | 2 +- fs/xfs/libxfs/xfs_refcount.c | 9 +++++---- fs/xfs/libxfs/xfs_refcount.h | 2 +- fs/xfs/libxfs/xfs_rmap.c | 16 +++++++++------- fs/xfs/libxfs/xfs_rmap.h | 2 +- fs/xfs/xfs_bmap_item.c | 5 +++-- fs/xfs/xfs_drain.c | 36 ++++++++++++++++++------------------ fs/xfs/xfs_drain.h | 12 ++++++------ fs/xfs/xfs_extfree_item.c | 14 ++++++++------ fs/xfs/xfs_refcount_item.c | 9 +++++---- fs/xfs/xfs_rmap_item.c | 9 +++++---- 12 files changed, 63 insertions(+), 55 deletions(-) diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index 88fbce5001185f..efbde04fbbb15f 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -248,7 +248,7 @@ struct xfs_extent_free_item { uint64_t xefi_owner; xfs_fsblock_t xefi_startblock;/* starting fs block number */ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ - struct xfs_perag *xefi_pag; + struct xfs_group *xefi_group; unsigned int xefi_flags; enum xfs_ag_resv_type xefi_agresv; }; diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 7592d46e97c661..4b721d9359943b 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -248,7 +248,7 @@ struct xfs_bmap_intent { enum xfs_bmap_intent_type bi_type; int bi_whichfork; struct xfs_inode *bi_owner; - struct xfs_perag *bi_pag; + struct xfs_group *bi_group; struct xfs_bmbt_irec bi_bmap; }; diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index ed943f6e616d96..2dbab68b4fe69f 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -1358,7 +1358,7 @@ xfs_refcount_finish_one( * If we haven't gotten a cursor or the cursor AG doesn't match * the startblock, get one now. */ - if (rcur != NULL && to_perag(rcur->bc_group) != ri->ri_pag) { + if (rcur != NULL && rcur->bc_group != ri->ri_group) { nr_ops = rcur->bc_refc.nr_ops; shape_changes = rcur->bc_refc.shape_changes; xfs_btree_del_cursor(rcur, 0); @@ -1366,13 +1366,14 @@ xfs_refcount_finish_one( *pcur = NULL; } if (rcur == NULL) { - error = xfs_alloc_read_agf(ri->ri_pag, tp, + struct xfs_perag *pag = to_perag(ri->ri_group); + + error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_FREEING, &agbp); if (error) return error; - *pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, - ri->ri_pag); + *pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag); rcur->bc_refc.nr_ops = nr_ops; rcur->bc_refc.shape_changes = shape_changes; } diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h index 68acb0b1b4a878..62d78afcf1f3ff 100644 --- a/fs/xfs/libxfs/xfs_refcount.h +++ b/fs/xfs/libxfs/xfs_refcount.h @@ -56,7 +56,7 @@ enum xfs_refcount_intent_type { struct xfs_refcount_intent { struct list_head ri_list; - struct xfs_perag *ri_pag; + struct xfs_group *ri_group; enum xfs_refcount_intent_type ri_type; xfs_extlen_t ri_blockcount; xfs_fsblock_t ri_startblock; diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index 0c404625986163..d0df68dc313185 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -2586,28 +2586,30 @@ xfs_rmap_finish_one( * If we haven't gotten a cursor or the cursor AG doesn't match * the startblock, get one now. 
*/ - if (rcur != NULL && to_perag(rcur->bc_group) != ri->ri_pag) { + if (rcur != NULL && rcur->bc_group != ri->ri_group) { xfs_btree_del_cursor(rcur, 0); rcur = NULL; *pcur = NULL; } if (rcur == NULL) { + struct xfs_perag *pag = to_perag(ri->ri_group); + /* * Refresh the freelist before we start changing the * rmapbt, because a shape change could cause us to * allocate blocks. */ - error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp); + error = xfs_free_extent_fix_freelist(tp, pag, &agbp); if (error) { - xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL); + xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); return error; } if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) { - xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL); + xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); return -EFSCORRUPTED; } - *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag); + *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); } xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork, @@ -2620,8 +2622,8 @@ xfs_rmap_finish_one( if (error) return error; - xfs_rmap_update_hook(tp, pag_group(ri->ri_pag), ri->ri_type, bno, - ri->ri_bmap.br_blockcount, unwritten, &oinfo); + xfs_rmap_update_hook(tp, ri->ri_group, ri->ri_type, bno, + ri->ri_bmap.br_blockcount, unwritten, &oinfo); return 0; } diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h index d409b463bc6662..96b4321d831007 100644 --- a/fs/xfs/libxfs/xfs_rmap.h +++ b/fs/xfs/libxfs/xfs_rmap.h @@ -173,7 +173,7 @@ struct xfs_rmap_intent { int ri_whichfork; uint64_t ri_owner; struct xfs_bmbt_irec ri_bmap; - struct xfs_perag *ri_pag; + struct xfs_group *ri_group; }; /* functions for updating the rmapbt based on bmbt map/unmap operations */ diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index 35a8c1b8b3cb34..37dab184c2dfc2 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -334,7 +334,8 @@ xfs_bmap_update_get_group( * intent drops the intent count, ensuring that the intent count * remains nonzero across the transaction roll. */ - bi->bi_pag = xfs_perag_intent_get(mp, bi->bi_bmap.br_startblock); + bi->bi_group = xfs_group_intent_get(mp, bi->bi_bmap.br_startblock, + XG_TYPE_AG); } /* Add this deferred BUI to the transaction. */ @@ -368,7 +369,7 @@ xfs_bmap_update_put_group( if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork)) return; - xfs_perag_intent_put(bi->bi_pag); + xfs_group_intent_put(bi->bi_group); } /* Cancel a deferred bmap update. */ diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c index 7a728a04f7a6b1..5ede81fadbd8ca 100644 --- a/fs/xfs/xfs_drain.c +++ b/fs/xfs/xfs_drain.c @@ -94,39 +94,39 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr) } /* - * Get a passive reference to the AG that contains a fsbno and declare an + * Get a passive reference to the group that contains a fsbno and declare an * intent to update its metadata. * * Other threads that need exclusive access can decide to back off if they see * declared intentions. 
*/ -struct xfs_perag * -xfs_perag_intent_get( +struct xfs_group * +xfs_group_intent_get( struct xfs_mount *mp, - xfs_fsblock_t fsbno) + xfs_fsblock_t fsbno, + enum xfs_group_type type) { - struct xfs_perag *pag; + struct xfs_group *xg; - pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsbno)); - if (!pag) + xg = xfs_group_get_by_fsb(mp, fsbno, type); + if (!xg) return NULL; - - trace_xfs_group_intent_hold(pag_group(pag), __return_address); - xfs_defer_drain_grab(pag_group(pag).xg_intents_drain); - return pag; + trace_xfs_group_intent_hold(xg, __return_address); + xfs_defer_drain_grab(&xg->xg_intents_drain); + return xg; } /* - * Release our intent to update this AG's metadata, and then release our - * passive ref to the AG. + * Release our intent to update this groups metadata, and then release our + * passive ref to it. */ void -xfs_perag_intent_put( - struct xfs_perag *pag) +xfs_group_intent_put( + struct xfs_group *xg) { - trace_xfs_group_intent_rele(pag_group(pag), __return_address); - xfs_defer_drain_rele(pag_group(pag).xg_intents_drain); - xfs_perag_put(pag); + trace_xfs_group_intent_rele(xg, __return_address); + xfs_defer_drain_rele(&xg->xg_intents_drain); + xfs_group_put(xg); } /* diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h index 3e6143572e52d2..efcf88df9a5e70 100644 --- a/fs/xfs/xfs_drain.h +++ b/fs/xfs/xfs_drain.h @@ -62,9 +62,9 @@ void xfs_drain_wait_enable(void); * soon as the item is added to the transaction and cannot drop the counter * until the item is finished or cancelled. */ -struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp, - xfs_fsblock_t fsbno); -void xfs_perag_intent_put(struct xfs_perag *pag); +struct xfs_group *xfs_group_intent_get(struct xfs_mount *mp, + xfs_fsblock_t fsbno, enum xfs_group_type type); +void xfs_group_intent_put(struct xfs_group *rtg); int xfs_group_intent_drain(struct xfs_group *xg); bool xfs_group_intent_busy(struct xfs_group *xg); @@ -75,9 +75,9 @@ struct xfs_defer_drain { /* empty */ }; #define xfs_defer_drain_free(dr) ((void)0) #define xfs_defer_drain_init(dr) ((void)0) -#define xfs_perag_intent_get(mp, fsbno) \ - xfs_perag_get((mp), XFS_FSB_TO_AGNO(mp, fsbno)) -#define xfs_perag_intent_put(pag) xfs_perag_put(pag) +#define xfs_group_intent_get(_mp, _fsbno, _type) \ + xfs_group_get_by_fsb((_mp), (_fsbno), (_type)) +#define xfs_group_intent_put(xg) xfs_group_put(xg) #endif /* CONFIG_XFS_DRAIN_INTENTS */ diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index c198962edea163..e469510986e8d0 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -362,7 +362,7 @@ xfs_extent_free_diff_items( struct xfs_extent_free_item *ra = xefi_entry(a); struct xfs_extent_free_item *rb = xefi_entry(b); - return pag_agno(ra->xefi_pag) - pag_agno(rb->xefi_pag); + return ra->xefi_group->xg_gno - rb->xefi_group->xg_gno; } /* Log a free extent to the intent item. 
*/ @@ -447,7 +447,8 @@ xfs_extent_free_defer_add( trace_xfs_extent_free_defer(mp, xefi); - xefi->xefi_pag = xfs_perag_intent_get(mp, xefi->xefi_startblock); + xefi->xefi_group = xfs_group_intent_get(mp, xefi->xefi_startblock, + XG_TYPE_AG); if (xefi->xefi_agresv == XFS_AG_RESV_AGFL) *dfpp = xfs_defer_add(tp, &xefi->xefi_list, &xfs_agfl_free_defer_type); @@ -463,7 +464,7 @@ xfs_extent_free_cancel_item( { struct xfs_extent_free_item *xefi = xefi_entry(item); - xfs_perag_intent_put(xefi->xefi_pag); + xfs_group_intent_put(xefi->xefi_group); kmem_cache_free(xfs_extfree_item_cache, xefi); } @@ -499,7 +500,7 @@ xfs_extent_free_finish_item( * in this EFI to the EFD so this works correctly. */ if (!(xefi->xefi_flags & XFS_EFI_CANCELLED)) - error = __xfs_free_extent(tp, xefi->xefi_pag, agbno, + error = __xfs_free_extent(tp, to_perag(xefi->xefi_group), agbno, xefi->xefi_blockcount, &oinfo, xefi->xefi_agresv, xefi->xefi_flags & XFS_EFI_SKIP_DISCARD); if (error == -EAGAIN) { @@ -545,7 +546,7 @@ xfs_agfl_free_finish_item( trace_xfs_agfl_free_deferred(mp, xefi); - error = xfs_alloc_read_agf(xefi->xefi_pag, tp, 0, &agbp); + error = xfs_alloc_read_agf(to_perag(xefi->xefi_group), tp, 0, &agbp); if (!error) error = xfs_free_ag_extent(tp, agbp, agbno, 1, &oinfo, XFS_AG_RESV_AGFL); @@ -578,7 +579,8 @@ xfs_efi_recover_work( xefi->xefi_blockcount = extp->ext_len; xefi->xefi_agresv = XFS_AG_RESV_NONE; xefi->xefi_owner = XFS_RMAP_OWN_UNKNOWN; - xefi->xefi_pag = xfs_perag_intent_get(mp, extp->ext_start); + xefi->xefi_group = xfs_group_intent_get(mp, extp->ext_start, + XG_TYPE_AG); xfs_defer_add_item(dfp, &xefi->xefi_list); } diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 29f101005f3eda..bede1c96c33011 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -244,7 +244,7 @@ xfs_refcount_update_diff_items( struct xfs_refcount_intent *ra = ci_entry(a); struct xfs_refcount_intent *rb = ci_entry(b); - return pag_agno(ra->ri_pag) - pag_agno(rb->ri_pag); + return ra->ri_group->xg_gno - rb->ri_group->xg_gno; } /* Log refcount updates in the intent item. */ @@ -330,7 +330,7 @@ xfs_refcount_defer_add( trace_xfs_refcount_defer(mp, ri); - ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_startblock); + ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock, XG_TYPE_AG); xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type); } @@ -341,7 +341,7 @@ xfs_refcount_update_cancel_item( { struct xfs_refcount_intent *ri = ci_entry(item); - xfs_perag_intent_put(ri->ri_pag); + xfs_group_intent_put(ri->ri_group); kmem_cache_free(xfs_refcount_intent_cache, ri); } @@ -431,7 +431,8 @@ xfs_cui_recover_work( ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK; ri->ri_startblock = pmap->pe_startblock; ri->ri_blockcount = pmap->pe_len; - ri->ri_pag = xfs_perag_intent_get(mp, pmap->pe_startblock); + ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock, + XG_TYPE_AG); xfs_defer_add_item(dfp, &ri->ri_list); } diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 1b83d09351f028..76b3c0ed3b4f63 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -243,7 +243,7 @@ xfs_rmap_update_diff_items( struct xfs_rmap_intent *ra = ri_entry(a); struct xfs_rmap_intent *rb = ri_entry(b); - return pag_agno(ra->ri_pag) - pag_agno(rb->ri_pag); + return ra->ri_group->xg_gno - rb->ri_group->xg_gno; } /* Log rmap updates in the intent item. 
*/ @@ -353,7 +353,8 @@ xfs_rmap_defer_add( trace_xfs_rmap_defer(mp, ri); - ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock); + ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock, + XG_TYPE_AG); xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type); } @@ -364,7 +365,7 @@ xfs_rmap_update_cancel_item( { struct xfs_rmap_intent *ri = ri_entry(item); - xfs_perag_intent_put(ri->ri_pag); + xfs_group_intent_put(ri->ri_group); kmem_cache_free(xfs_rmap_intent_cache, ri); } @@ -494,7 +495,7 @@ xfs_rui_recover_work( ri->ri_bmap.br_blockcount = map->me_len; ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ? XFS_EXT_UNWRITTEN : XFS_EXT_NORM; - ri->ri_pag = xfs_perag_intent_get(mp, map->me_startblock); + ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, XG_TYPE_AG); xfs_defer_add_item(dfp, &ri->ri_list); }
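
The conversions throughout this part of the series lean on two small helpers, pag_group() and to_perag(), to move between the generic group object and the AG-specific wrapper. The sketch below is a minimal userspace model of that pattern, assuming (as the helper names suggest) that the group object is embedded inside the per-AG structure; the struct layouts and the demo_* names are invented for illustration and do not match the kernel definitions.

/*
 * Minimal model of the generic-group pattern: a demo_group embedded in
 * a demo_perag, with container_of-style conversion in both directions.
 * All names here are illustrative, not the kernel's.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_group {
        uint32_t        xg_gno;         /* group number (AG number here) */
        int             xg_type;        /* which kind of group this is */
};

struct demo_perag {
        struct demo_group pag_group;    /* generic part, embedded */
        uint32_t        pagf_rmap_level;/* AG-only state */
};

/* Generic code stores and passes around the embedded group... */
static inline struct demo_group *pag_group(struct demo_perag *pag)
{
        return &pag->pag_group;
}

/* ...and AG-specific paths convert back when they need AG-only fields. */
static inline struct demo_perag *to_perag(struct demo_group *xg)
{
        return (struct demo_perag *)((char *)xg -
                        offsetof(struct demo_perag, pag_group));
}

int main(void)
{
        struct demo_perag pag = {
                .pag_group.xg_gno = 3,
                .pagf_rmap_level = 2,
        };
        struct demo_group *xg = pag_group(&pag);

        /* A cursor that only remembers the generic group... */
        printf("group %u\n", (unsigned)xg->xg_gno);

        /* ...can still reach the AG-specific state when required. */
        assert(to_perag(xg) == &pag);
        printf("rmap level %u\n", (unsigned)to_perag(xg)->pagf_rmap_level);
        return 0;
}

This is the shape behind replacing bc_ag.pag with bc_group in the btree cursor and ri_pag/xefi_pag with ri_group/xefi_group in the intents: code that only needs the group number reads xg_gno directly, while the remaining AG-only paths (AGF reads, freelist fixups, per-AG counters) call to_perag() at the point of use.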
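
Patch 14's conversion helpers encode a simple layout: each group occupies a power-of-two slot of 1 << blklog entries in the fsblock address space, with only the first `blocks` entries of the slot backed by real blocks and the remainder a hole, while the daddr space packs groups densely. The following is a standalone sketch of that arithmetic using made-up geometry values rather than anything read from a superblock; demo_geom and the lowercase helper names are hypothetical stand-ins for xfs_gbno_to_fsb()/xfs_fsb_to_gno()/xfs_fsb_to_gbno().

/*
 * Demo of the group <-> fsblock arithmetic.  Geometry values are
 * invented; the real fields come from the superblock at mount time.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_geom {
        uint32_t blocks;        /* usable blocks per group */
        uint8_t  blklog;        /* log2 of the group slot size */
        uint64_t blkmask;       /* (1 << blklog) - 1 */
};

static uint64_t gbno_to_fsb(const struct demo_geom *g, uint32_t gno,
                            uint32_t gbno)
{
        /* group slots are laid out sparsely: slot start is gno << blklog */
        return ((uint64_t)gno << g->blklog) | gbno;
}

static uint32_t fsb_to_gno(const struct demo_geom *g, uint64_t fsbno)
{
        return (uint32_t)(fsbno >> g->blklog);
}

static uint32_t fsb_to_gbno(const struct demo_geom *g, uint64_t fsbno)
{
        return (uint32_t)(fsbno & g->blkmask);
}

int main(void)
{
        /* e.g. 100000 usable blocks per group, rounded up to a 2^17 slot */
        struct demo_geom g = { .blocks = 100000, .blklog = 17,
                               .blkmask = (1ULL << 17) - 1 };
        uint64_t fsb = gbno_to_fsb(&g, 5, 12345);

        assert(fsb_to_gno(&g, fsb) == 5);
        assert(fsb_to_gbno(&g, fsb) == 12345);
        printf("fsb 0x%llx -> group %u block %u\n",
               (unsigned long long)fsb, (unsigned)fsb_to_gno(&g, fsb),
               (unsigned)fsb_to_gbno(&g, fsb));
        return 0;
}

The daddr conversion, by contrast, multiplies by the dense `blocks` value, since groups sit back to back on the device; only the fsblock/rtblock space has the hole from blocks up to 1 << blklog at the end of each group. The kernel helper also special-cases blklog == 0 for the degenerate pre-rtgroups realtime case, which this sketch omits.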