--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -478,6 +478,7 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
static const struct xfs_btree_ops xfs_cntbt_ops = {
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
+ .geom_flags = XFS_BTREE_LASTREC_UPDATE,
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
@@ -514,19 +515,14 @@ xfs_allocbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
- cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else {
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
- cur->bc_ag.abt.active = false;
cur->bc_ag.pag = xfs_perag_hold(pag);
-
- if (xfs_has_crc(mp))
- cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
+ cur->bc_ag.abt.active = false;
return cur;
}
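Moving the flag into xfs_cntbt_ops matters here because xfs_allocbt_init_common picks one of two ops tables at runtime, and under the old scheme the XFS_BTREE_LASTREC_UPDATE assignment had to be remembered in exactly one branch. The flag marks the by-size (cnt) btree, whose rightmost record is the largest free extent, so the generic code knows to call the cursor's update_lastrec hook when that record changes. Once the flag travels with the table, selecting the ops pointer is sufficient. A minimal sketch of that selection pattern, with invented names (bt_ops, bno_ops, cnt_ops and the BT_ flag are illustrative, not the kernel API):

#include <stdio.h>

#define BT_LASTREC_UPDATE	(1u << 0)

struct bt_ops {
	const char	*name;
	unsigned int	geom_flags;
};

/* two flavors of the same allocation data, like bnobt vs. cntbt */
static const struct bt_ops bno_ops = { .name = "bno" };
static const struct bt_ops cnt_ops = {
	.name		= "cnt",
	.geom_flags	= BT_LASTREC_UPDATE,	/* travels with the table */
};

int main(void)
{
	for (int by_size = 0; by_size <= 1; by_size++) {
		/* choosing the table now also chooses the geometry */
		const struct bt_ops *ops = by_size ? &cnt_ops : &bno_ops;

		printf("%s: geom_flags %#x\n", ops->name, ops->geom_flags);
	}
	return 0;
}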
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -516,6 +516,7 @@ xfs_bmbt_keys_contiguous(
static const struct xfs_btree_ops xfs_bmbt_ops = {
.rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t),
+ .geom_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE,
.dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor,
@@ -551,10 +552,6 @@ xfs_bmbt_init_common(
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
- cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
- if (xfs_has_crc(mp))
- cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0;
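For the bmbt, the two bits describe a genuinely different geometry: XFS_BTREE_LONG_PTRS selects 64-bit filesystem-wide block pointers instead of 32-bit AG-relative ones, and XFS_BTREE_ROOT_IN_INODE roots the tree in the inode fork rather than in a disk block. Generic btree code branches on bits like these rather than on the btree type. A toy sketch of that kind of flag-driven dispatch (bt_ptr_len and the BT_ flag are invented stand-ins, not kernel helpers):

#include <stdint.h>
#include <stdio.h>

#define BT_LONG_PTRS	(1u << 0)	/* invented stand-in flag */

/* pointer width is a property of the tree's geometry, not of the caller */
static size_t
bt_ptr_len(unsigned int geom_flags)
{
	return (geom_flags & BT_LONG_PTRS) ? sizeof(uint64_t)
					   : sizeof(uint32_t);
}

int main(void)
{
	printf("bmbt-like pointer: %zu bytes\n", bt_ptr_len(BT_LONG_PTRS));
	printf("agbt-like pointer: %zu bytes\n", bt_ptr_len(0));
	return 0;
}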
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -116,6 +116,9 @@ struct xfs_btree_ops {
size_t key_len;
size_t rec_len;
+ /* XFS_BTREE_* flags that determine the geometry of the btree */
+ unsigned int geom_flags;
+
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
void (*update_cursor)(struct xfs_btree_cur *src,
@@ -750,6 +753,9 @@ xfs_btree_alloc_cursor(
cur->bc_btnum = btnum;
cur->bc_maxlevels = maxlevels;
cur->bc_cache = cache;
+ cur->bc_flags = ops->geom_flags;
+ if (xfs_has_crc(mp))
+ cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
return cur;
}
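The two hunks above, both in the shared btree header, are the core of the change: geometry flags become const data in struct xfs_btree_ops, and xfs_btree_alloc_cursor becomes the single place that derives bc_flags, folding in XFS_BTREE_CRC_BLOCKS on CRC-enabled (V5) filesystems. As a minimal standalone sketch of the pattern, with invented names (bt_ops, bt_cursor, bt_alloc_cursor and the BT_ flags are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BT_LONG_PTRS	(1u << 0)	/* 64-bit block pointers */
#define BT_OVERLAPPING	(1u << 1)	/* keys may overlap */
#define BT_CRC_BLOCKS	(1u << 2)	/* blocks carry checksums */

/* per-btree-type constants, in the spirit of struct xfs_btree_ops */
struct bt_ops {
	size_t		rec_len;
	size_t		key_len;
	unsigned int	geom_flags;	/* fixed geometry of this tree type */
};

struct bt_cursor {
	const struct bt_ops	*ops;
	unsigned int		flags;
};

/*
 * One choke point for cursor setup: static geometry is copied from the
 * ops table, and the feature-dependent CRC bit is OR'd in exactly once.
 */
static struct bt_cursor *
bt_alloc_cursor(const struct bt_ops *ops, bool fs_has_crc)
{
	struct bt_cursor	*cur = calloc(1, sizeof(*cur));

	cur->ops = ops;
	cur->flags = ops->geom_flags;
	if (fs_has_crc)
		cur->flags |= BT_CRC_BLOCKS;
	return cur;
}

static const struct bt_ops demo_ops = {
	.rec_len	= 8,
	.key_len	= 16,
	.geom_flags	= BT_LONG_PTRS | BT_OVERLAPPING,
};

int main(void)
{
	struct bt_cursor	*cur = bt_alloc_cursor(&demo_ops, true);

	printf("cursor flags: %#x\n", cur->flags);	/* prints 0x7 */
	free(cur);
	return 0;
}

This also explains the refcount btree hunk below: dropping its unconditional |= XFS_BTREE_CRC_BLOCKS is safe because refcount btrees exist only on V5 (CRC) filesystems, so the centralized conditional OR is equivalent.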
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -465,9 +465,6 @@ xfs_inobt_init_common(
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
}
- if (xfs_has_crc(mp))
- cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -356,8 +356,6 @@ xfs_refcountbt_init_common(
xfs_refcountbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
- cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -487,6 +487,7 @@ xfs_rmapbt_keys_contiguous(
static const struct xfs_btree_ops xfs_rmapbt_ops = {
.rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key),
+ .geom_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING,
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
@@ -517,7 +518,6 @@ xfs_rmapbt_init_common(
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
- cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ag.pag = xfs_perag_hold(pag);
@@ -611,6 +611,8 @@ static const struct xfs_buf_ops xfs_rmapbt_mem_buf_ops = {
static const struct xfs_btree_ops xfs_rmapbt_mem_ops = {
.rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key),
+ .geom_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
+ XFS_BTREE_IN_XFILE,
.dup_cursor = xfbtree_dup_cursor,
.set_root = xfbtree_set_root,
@@ -645,8 +647,6 @@ xfs_rmapbt_mem_cursor(
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
&xfs_rmapbt_mem_ops, mp->m_rmap_maxlevels,
xfs_rmapbt_cur_cache);
- cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
- XFS_BTREE_IN_XFILE;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_mem.xfbtree = xfbtree;
cur->bc_mem.head_bp = head_bp;
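Finally, the in-memory rmap btree shows the payoff for variants: a tree that differs from its on-disk twin only in geometry (here the extra XFS_BTREE_IN_XFILE bit) is now fully described by a second const table instead of another runtime assignment to audit. A small sketch of the idea, with invented names and illustrative sizes (not the kernel's actual record layouts):

#include <stdio.h>

#define BT_CRC_BLOCKS	(1u << 0)
#define BT_OVERLAPPING	(1u << 1)
#define BT_IN_MEMORY	(1u << 2)	/* stands in for XFS_BTREE_IN_XFILE */

struct bt_ops {
	size_t		rec_len;
	size_t		key_len;
	unsigned int	geom_flags;
};

#define RMAP_GEOM	(BT_CRC_BLOCKS | BT_OVERLAPPING)

/* on-disk flavor */
static const struct bt_ops rmap_ops = {
	.rec_len	= 24,
	.key_len	= 40,
	.geom_flags	= RMAP_GEOM,
};

/* in-memory flavor: identical records, one extra geometry bit */
static const struct bt_ops rmap_mem_ops = {
	.rec_len	= 24,
	.key_len	= 40,
	.geom_flags	= RMAP_GEOM | BT_IN_MEMORY,
};

int main(void)
{
	printf("disk:   %#x\n", rmap_ops.geom_flags);
	printf("memory: %#x\n", rmap_mem_ops.geom_flags);
	return 0;
}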