@@ -132,7 +132,7 @@ verify_inode_chunk(xfs_mount_t *mp,
if (check_aginode_block(mp, agno, agino) == 0)
return 0;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
@@ -167,8 +167,8 @@ verify_inode_chunk(xfs_mount_t *mp,
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, agbno, state);
set_bmap(agno, agbno, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
- return(0);
+ unlock_ag(agno);
+ return 0;
default:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
@@ -177,7 +177,7 @@ verify_inode_chunk(xfs_mount_t *mp,
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
start_agino = XFS_AGB_TO_AGINO(mp, agbno);
*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
@@ -424,7 +424,7 @@ verify_inode_chunk(xfs_mount_t *mp,
* user data -- we're probably here as a result of a directory
* entry or an iunlinked pointer
*/
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
@@ -438,7 +438,7 @@ verify_inode_chunk(xfs_mount_t *mp,
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
return 0;
case XR_E_METADATA:
case XR_E_INO:
@@ -450,7 +450,7 @@ verify_inode_chunk(xfs_mount_t *mp,
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
/*
* ok, chunk is good. put the record into the tree if required,
@@ -473,8 +473,7 @@ verify_inode_chunk(xfs_mount_t *mp,
set_inode_used(irec_p, agino - start_agino);
- pthread_mutex_lock(&ag_locks[agno].lock);
-
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
@@ -516,7 +515,7 @@ verify_inode_chunk(xfs_mount_t *mp,
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
-	return(ino_cnt);
+	return ino_cnt;
}
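The chunk-scanning loops above advance by the extent length that get_bmap_ext() reports rather than block by block, so a chunk covered by a single same-state extent costs one btree lookup instead of one per block. A minimal sketch of the pattern, using only calls visible in this patch (agno and the chunk bounds as in verify_inode_chunk()):

	xfs_agblock_t	cur_agbno;
	xfs_extlen_t	blen;
	int		state;

	lock_ag(agno);
	for (cur_agbno = chunk_start_agbno;
	     cur_agbno < chunk_stop_agbno;
	     cur_agbno += blen) {
		/* returns the state and how many blocks share it */
		state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
		/* ... dispatch on state, as the switches above do ... */
	}
	unlock_ag(agno);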
@@ -575,7 +574,7 @@ process_inode_agbno_state(
{
int state;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
case XR_E_INO: /* already marked */
@@ -605,7 +604,7 @@ process_inode_agbno_state(
XFS_AGB_TO_FSB(mp, agno, agbno), state);
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
/*
@@ -540,9 +540,9 @@ _("Fatal error: inode %" PRIu64 " - blkmap_set_ext(): %s\n"
ebno = agbno + irec.br_blockcount;
if (agno != locked_agno) {
if (locked_agno != -1)
- pthread_mutex_unlock(&ag_locks[locked_agno].lock);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ unlock_ag(locked_agno);
locked_agno = agno;
+ lock_ag(locked_agno);
}
for (b = irec.br_startblock;
@@ -663,7 +663,7 @@ _("illegal state %d in block map %" PRIu64 "\n"),
error = 0;
done:
if (locked_agno != -1)
- pthread_mutex_unlock(&ag_locks[locked_agno].lock);
+ unlock_ag(locked_agno);
if (i != *numrecs) {
ASSERT(i < *numrecs);
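The locked_agno logic above caches the currently held AG lock across loop iterations, cycling it only when the extent walk crosses into a different AG instead of paying a lock/unlock pair per record. A self-contained sketch of the same pattern (the names here are illustrative, not from xfs_repair):

	#include <pthread.h>

	#define NR_GROUPS	4

	static pthread_mutex_t group_lock[NR_GROUPS] = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	};

	static void
	walk(const int *group_of, int nr_items)
	{
		int	locked = -1;	/* no group lock held yet */
		int	i;

		for (i = 0; i < nr_items; i++) {
			if (group_of[i] != locked) {
				/* crossing into a new group: swap locks */
				if (locked != -1)
					pthread_mutex_unlock(&group_lock[locked]);
				locked = group_of[i];
				pthread_mutex_lock(&group_lock[locked]);
			}
			/* ... work on item i under group_lock[locked] ... */
		}
		if (locked != -1)
			pthread_mutex_unlock(&group_lock[locked]);
	}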
@@ -111,8 +111,6 @@ xfs_extlen_t sb_inoalignmt;
uint32_t sb_unit;
uint32_t sb_width;
-struct aglock *ag_locks;
-
time_t report_interval;
uint64_t *prog_rpt_done;
@@ -152,10 +152,6 @@ extern xfs_extlen_t sb_inoalignmt;
extern uint32_t sb_unit;
extern uint32_t sb_width;
-struct aglock {
- pthread_mutex_t lock __attribute__((__aligned__(64)));
-};
-extern struct aglock *ag_locks;
extern pthread_mutex_t rt_lock;
extern time_t report_interval;
@@ -24,7 +24,25 @@
static int states[16] =
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-static struct btree_root **ag_bmap;
+struct bmap {
+ pthread_mutex_t lock __attribute__((__aligned__(64)));
+ struct btree_root *root;
+};
+static struct bmap *ag_bmaps;
+
+void
+lock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_lock(&ag_bmaps[agno].lock);
+}
+
+void
+unlock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_unlock(&ag_bmaps[agno].lock);
+}
static void
update_bmap(
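Folding the lock and the btree root into one struct replaces the two parallel arrays (ag_locks and ag_bmap) with a single allocation and keeps each AG's lock on the same cache line as the root it protects, while the 64-byte alignment pads each element to a full cache line so threads repairing different AGs do not false-share. Assuming a 64-byte cache line, the layout guarantee can be spelled out like this (illustrative only):

	#include <assert.h>
	#include <stdalign.h>

	/* the aligned mutex forces the whole struct to cache-line alignment */
	static_assert(alignof(struct bmap) >= 64,
			"each bmap starts on its own cache line");
	/* sizeof is always a multiple of alignof, so adjacent array
	   elements never share a cache line */
	static_assert(sizeof(struct bmap) % 64 == 0,
			"no false sharing between adjacent bmaps");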
@@ -129,7 +147,7 @@ set_bmap_ext(
xfs_extlen_t blen,
int state)
{
- update_bmap(ag_bmap[agno], agbno, blen, &states[state]);
+ update_bmap(ag_bmaps[agno].root, agbno, blen, &states[state]);
}
int
@@ -139,23 +157,24 @@ get_bmap_ext(
xfs_agblock_t maxbno,
xfs_extlen_t *blen)
{
+ struct btree_root *bmap = ag_bmaps[agno].root;
int *statep;
unsigned long key;
- statep = btree_find(ag_bmap[agno], agbno, &key);
+ statep = btree_find(bmap, agbno, &key);
if (!statep)
return -1;
if (key == agbno) {
if (blen) {
- if (!btree_peek_next(ag_bmap[agno], &key))
+ if (!btree_peek_next(bmap, &key))
return -1;
*blen = min(maxbno, key) - agbno;
}
return *statep;
}
- statep = btree_peek_prev(ag_bmap[agno], NULL);
+ statep = btree_peek_prev(bmap, NULL);
if (!statep)
return -1;
if (blen)
@@ -243,13 +262,15 @@ reset_bmaps(xfs_mount_t *mp)
ag_size = mp->m_sb.sb_agblocks;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ struct btree_root *bmap = ag_bmaps[agno].root;
+
if (agno == mp->m_sb.sb_agcount - 1)
ag_size = (xfs_extlen_t)(mp->m_sb.sb_dblocks -
(xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno);
#ifdef BTREE_STATS
- if (btree_find(ag_bmap[agno], 0, NULL)) {
+ if (btree_find(bmap, 0, NULL)) {
printf("ag_bmap[%d] btree stats:\n", i);
- btree_print_stats(ag_bmap[agno], stdout);
+ btree_print_stats(bmap, stdout);
}
#endif
/*
@@ -260,11 +281,10 @@ reset_bmaps(xfs_mount_t *mp)
* ag_hdr_block..ag_size: XR_E_UNKNOWN
* ag_size... XR_E_BAD_STATE
*/
- btree_clear(ag_bmap[agno]);
- btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
- btree_insert(ag_bmap[agno],
- ag_hdr_block, &states[XR_E_UNKNOWN]);
- btree_insert(ag_bmap[agno], ag_size, &states[XR_E_BAD_STATE]);
+ btree_clear(bmap);
+ btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+ btree_insert(bmap, ag_hdr_block, &states[XR_E_UNKNOWN]);
+ btree_insert(bmap, ag_size, &states[XR_E_BAD_STATE]);
}
if (mp->m_sb.sb_logstart != 0) {
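Every key in the block-map btree is the first block of an extent and its value is that extent's state, so a lookup resolves to the value at the greatest key at or below the queried block, which is exactly what get_bmap_ext() above implements with btree_find() plus btree_peek_prev(). Given the three seed records, a freshly reset AG therefore behaves as follows (a sketch, assuming ag_hdr_block < ag_size):

	lock_ag(agno);
	assert(get_bmap(agno, 0) == XR_E_INUSE_FS);		/* AG headers */
	assert(get_bmap(agno, ag_hdr_block) == XR_E_UNKNOWN);
	assert(get_bmap(agno, ag_size - 1) == XR_E_UNKNOWN);
	assert(get_bmap(agno, ag_size) == XR_E_BAD_STATE);	/* past EOAG */
	unlock_ag(agno);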
@@ -276,44 +296,58 @@ reset_bmaps(xfs_mount_t *mp)
reset_rt_bmap();
}
+static struct bmap *
+alloc_bmaps(
+ unsigned int nr_groups)
+{
+ struct bmap *bmap;
+ unsigned int i;
+
+ bmap = calloc(nr_groups, sizeof(*bmap));
+ if (!bmap)
+ return NULL;
+
+ for (i = 0; i < nr_groups; i++) {
+ btree_init(&bmap[i].root);
+ pthread_mutex_init(&bmap[i].lock, NULL);
+ }
+
+ return bmap;
+}
+
+static void
+destroy_bmaps(
+ struct bmap *bmap,
+ unsigned int nr_groups)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_groups; i++) {
+ btree_destroy(bmap[i].root);
+ pthread_mutex_destroy(&bmap[i].lock);
+ }
+
+ free(bmap);
+}
+
void
-init_bmaps(xfs_mount_t *mp)
+init_bmaps(
+ struct xfs_mount *mp)
{
- xfs_agnumber_t i;
-
- ag_bmap = calloc(mp->m_sb.sb_agcount, sizeof(struct btree_root *));
- if (!ag_bmap)
+ ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ if (!ag_bmaps)
do_error(_("couldn't allocate block map btree roots\n"));
- ag_locks = calloc(mp->m_sb.sb_agcount, sizeof(struct aglock));
- if (!ag_locks)
- do_error(_("couldn't allocate block map locks\n"));
-
- for (i = 0; i < mp->m_sb.sb_agcount; i++) {
- btree_init(&ag_bmap[i]);
- pthread_mutex_init(&ag_locks[i].lock, NULL);
- }
-
init_rt_bmap(mp);
reset_bmaps(mp);
}
void
-free_bmaps(xfs_mount_t *mp)
+free_bmaps(
+ struct xfs_mount *mp)
{
- xfs_agnumber_t i;
-
- for (i = 0; i < mp->m_sb.sb_agcount; i++)
- pthread_mutex_destroy(&ag_locks[i].lock);
-
- free(ag_locks);
- ag_locks = NULL;
-
- for (i = 0; i < mp->m_sb.sb_agcount; i++)
- btree_destroy(ag_bmap[i]);
-
- free(ag_bmap);
- ag_bmap = NULL;
+ destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ ag_bmaps = NULL;
free_rt_bmap(mp);
}
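Note that init_bmaps() now sizes the array for sb_agcount + sb_rgcount groups, which suggests realtime groups are meant to share the same bmap-plus-lock scheme, indexed after the AGs. A hypothetical accessor under that assumption (not part of this patch):

	/* hypothetical: rt group entries would follow the AG entries */
	static inline struct bmap *
	rg_bmap(struct xfs_mount *mp, xfs_rgnumber_t rgno)
	{
		return &ag_bmaps[mp->m_sb.sb_agcount + rgno];
	}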
@@ -23,6 +23,9 @@ void init_bmaps(xfs_mount_t *mp);
void reset_bmaps(xfs_mount_t *mp);
void free_bmaps(xfs_mount_t *mp);
+void lock_ag(xfs_agnumber_t agno);
+void unlock_ag(xfs_agnumber_t agno);
+
void set_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_extlen_t blen, int state);
int get_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
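With lock_ag()/unlock_ag() exported here, callers no longer reach into a lock array directly; the typical shape is a short critical section around a bmap query and update, as in the hunks above. A minimal sketch:

	lock_ag(agno);
	state = get_bmap(agno, agbno);
	if (state == XR_E_UNKNOWN)
		set_bmap(agno, agbno, XR_E_INO);
	unlock_ag(agno);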
@@ -761,12 +761,12 @@ mark_reflink_inodes(
agno = XFS_INO_TO_AGNO(mp, rciter.ino);
agino = XFS_INO_TO_AGINO(mp, rciter.ino);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
irec = find_inode_rec(mp, agno, agino);
off = get_inode_offset(mp, rciter.ino, irec);
/* lock here because we might go outside this ag */
set_inode_is_rl(irec, off);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
rcbag_ino_iter_stop(rcstack, &rciter);
}
@@ -340,7 +340,7 @@ _("bad back (left) sibling pointer (saw %llu should be NULL (0))\n"
agno = XFS_FSB_TO_AGNO(mp, bno);
agbno = XFS_FSB_TO_AGBNO(mp, bno);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
case XR_E_INUSE1:
@@ -407,7 +407,7 @@ _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
state, ino, bno);
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
} else {
if (search_dup_extent(XFS_FSB_TO_AGNO(mp, bno),
XFS_FSB_TO_AGBNO(mp, bno),
@@ -420,9 +420,9 @@ _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
/* Record BMBT blocks in the reverse-mapping data. */
if (check_dups && collect_rmaps && !zap_metadata) {
agno = XFS_FSB_TO_AGNO(mp, bno);
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
rmap_add_bmbt_rec(mp, ino, whichfork, bno);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
if (level == 0) {