--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -457,6 +457,7 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
.key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
@@ -484,6 +485,7 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
@@ -512,22 +514,17 @@ xfs_allocbt_init_common(
struct xfs_perag *pag,
xfs_btnum_t btnum)
{
+ const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
- if (btnum == XFS_BTNUM_CNT) {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops,
- mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
- } else {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
- mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
- }
+ if (btnum == XFS_BTNUM_CNT)
+ ops = &xfs_cntbt_ops;
+ cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
+ xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
-
return cur;
}
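
The rewrite of xfs_allocbt_init_common() above collapses two near-identical allocation calls into one that takes a const ops pointer chosen up front. A stripped-down, compilable sketch of that pattern outside XFS follows; every name in it is illustrative, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the const ops tables and the cursor. */
struct demo_ops {
	const char	*name;
	unsigned int	statoff;
};

static const struct demo_ops demo_bno_ops = { .name = "bno", .statoff = 2 };
static const struct demo_ops demo_cnt_ops = { .name = "cnt", .statoff = 17 };

struct demo_cur {
	const struct demo_ops	*ops;	/* one pointer instead of copied fields */
};

/* Pick the ops table first, then share a single allocation path. */
static struct demo_cur *demo_init(int is_cnt)
{
	const struct demo_ops	*ops = &demo_bno_ops;
	struct demo_cur		*cur;

	if (is_cnt)
		ops = &demo_cnt_ops;

	cur = malloc(sizeof(*cur));
	if (cur)
		cur->ops = ops;
	return cur;
}

int main(void)
{
	struct demo_cur *cur = demo_init(1);

	if (cur) {
		printf("%s stats start at slot %u\n",
		       cur->ops->name, cur->ops->statoff);
		free(cur);
	}
	return 0;
}
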
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -530,6 +530,7 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.key_len = sizeof(xfs_bmbt_key_t),
.lru_refs = XFS_BMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2),
.dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor,
@@ -563,7 +564,6 @@ xfs_bmbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -87,9 +87,11 @@ uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
* Generic stats interface
*/
#define XFS_BTREE_STATS_INC(cur, stat) \
- XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
+ XFS_STATS_INC_OFF((cur)->bc_mp, \
+ (cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
- XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
+ XFS_STATS_ADD_OFF((cur)->bc_mp, \
+ (cur)->bc_ops->statoff + __XBTS_ ## stat, val)
enum xbtree_key_contig {
XBTREE_KEY_GAP = 0,
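
With bc_statoff removed from the cursor, the stats macros above dereference cur->bc_ops->statoff instead, trading a per-cursor field for one extra pointer chase on a cache-hot structure. The following self-contained mock shows how such an increment lands in a flat counter array; the real XFS_STATS_INC_OFF additionally routes through per-CPU counters, which this sketch deliberately omits.

#include <stdint.h>
#include <stdio.h>

/* Mock per-operation offsets, stand-ins for the __XBTS_* values. */
enum {
	MOCK_XBTS_lookup	= 0,
	MOCK_XBTS_insrec	= 3,
};

struct mock_ops {
	unsigned int	statoff;
};

struct mock_cur {
	const struct mock_ops	*bc_ops;
};

/* Flat counter array, like struct __xfsstats viewed as uint32_t slots. */
static uint32_t mock_stats[64];

/* Mock of XFS_STATS_INC_OFF: bump one slot (the real one is per-CPU). */
#define MOCK_STATS_INC_OFF(off)		(mock_stats[(off)]++)

/* Same shape as the reworked XFS_BTREE_STATS_INC. */
#define MOCK_BTREE_STATS_INC(cur, stat) \
	MOCK_STATS_INC_OFF((cur)->bc_ops->statoff + MOCK_XBTS_ ## stat)

int main(void)
{
	static const struct mock_ops bno_ops = { .statoff = 2 };
	struct mock_cur cur = { .bc_ops = &bno_ops };

	MOCK_BTREE_STATS_INC(&cur, insrec);	/* bumps slot 2 + 3 = 5 */
	printf("slot 5 = %u\n", (unsigned int)mock_stats[5]);
	return 0;
}
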
@@ -123,6 +125,9 @@ struct xfs_btree_ops {
/* LRU refcount to set on each btree buffer created */
unsigned int lru_refs;
+ /* offset of btree stats array */
+ unsigned int statoff;
+
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
void (*update_cursor)(struct xfs_btree_cur *src,
@@ -280,7 +285,6 @@ struct xfs_btree_cur
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
- int bc_statoff; /* offset of btree stats array */
/*
* Short btree pointers need an agno to be able to turn the pointers
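
Deleting bc_statoff also shrinks struct xfs_btree_cur itself, since the constant now lives once in shared read-only ops tables rather than in every cursor. A tiny illustration of that trade-off with mock types (alignment padding means the saving can exceed sizeof(int)):

#include <stdio.h>

/* Before: every cursor instance carries its own copy of the constant. */
struct cur_before {
	const void	*bc_ops;
	int		bc_statoff;
};

/* After: the constant lives once in the shared const ops table. */
struct cur_after {
	const void	*bc_ops;
};

int main(void)
{
	/* On LP64 this prints 16 vs 8: padding doubles the saving. */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct cur_before), sizeof(struct cur_after));
	return 0;
}
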
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -402,6 +402,7 @@ const struct xfs_btree_ops xfs_inobt_ops = {
.key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
@@ -426,6 +427,7 @@ const struct xfs_btree_ops xfs_finobt_ops = {
.key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
@@ -455,20 +457,16 @@ xfs_inobt_init_common(
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_mount *mp = pag->pag_mount;
+ const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur;
- if (btnum == XFS_BTNUM_INO) {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_inobt_ops,
- M_IGEO(mp)->inobt_maxlevels,
- xfs_inobt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
- } else {
- cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_finobt_ops,
- M_IGEO(mp)->inobt_maxlevels,
- xfs_inobt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
- }
+ ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
+ if (btnum == XFS_BTNUM_FINO)
+ ops = &xfs_finobt_ops;
+
+ cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
+ M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -321,6 +321,7 @@ const struct xfs_btree_ops xfs_refcountbt_ops = {
.key_len = sizeof(struct xfs_refcount_key),
.lru_refs = XFS_REFC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root,
@@ -356,8 +357,6 @@ xfs_refcountbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
&xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
-
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -477,6 +477,7 @@ const struct xfs_btree_ops xfs_rmapbt_ops = {
.key_len = 2 * sizeof(struct xfs_rmap_key),
.lru_refs = XFS_RMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
@@ -507,8 +508,6 @@ xfs_rmapbt_init_common(
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
-
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
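
One consequence of carrying the offset in designated-initializer ops tables, worth keeping in mind when wiring up a new btree type: a forgotten .statoff member silently defaults to zero instead of failing to build, so the new tree's counters would alias whatever sits at the start of the stats array. A small illustration with mock types:

#include <stdio.h>

/* Mock ops table with the same shape as the new .statoff member. */
struct demo_ops {
	const char	*name;
	unsigned int	statoff;
};

/* .statoff accidentally omitted: C zero-initializes the member. */
static const struct demo_ops incomplete_ops = {
	.name		= "newbt",
};

int main(void)
{
	/* Prints 0: the counters would alias slot 0 of the stats array. */
	printf("%s statoff defaults to %u\n",
	       incomplete_ops.name, incomplete_ops.statoff);
	return 0;
}
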