@@ -295,6 +295,16 @@ xfs_buf_free_pages(
bp->b_flags &= ~_XBF_PAGES;
}
+/*
+ * RCU callback that performs the final teardown of an xfs_buf: release
+ * the buffer's map array and return the object to the xfs_buf_cache slab.
+ *
+ * Invoked via call_rcu() from xfs_buf_free(), so the buffer memory is
+ * not reused until an RCU grace period has elapsed.  This is what allows
+ * the lockless hash walk in xfs_buf_get_map() (rcu_read_lock() +
+ * rhashtable_lookup() + atomic_inc_not_zero(&bp->b_hold)) to safely
+ * dereference a buffer that is concurrently being freed: a lookup that
+ * races with teardown sees b_hold == 0 and backs off before the memory
+ * can be released here.
+ */
+static void
+xfs_buf_free_callback(
+	struct callback_head *cb)
+{
+	struct xfs_buf	*bp = container_of(cb, struct xfs_buf, b_rcu);
+
+	xfs_buf_free_maps(bp);
+	kmem_cache_free(xfs_buf_cache, bp);
+}
+
static void
xfs_buf_free(
struct xfs_buf *bp)
@@ -308,10 +318,10 @@ xfs_buf_free(
else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
- xfs_buf_free_maps(bp);
- kmem_cache_free(xfs_buf_cache, bp);
+ call_rcu(&bp->b_rcu, xfs_buf_free_callback);
}
+
static int
xfs_buf_alloc_kmem(
struct xfs_buf *bp,
@@ -612,12 +622,11 @@ xfs_buf_get_map(
pag = xfs_perag_get(mp, xfs_daddr_to_agno(mp, cmap.bm_bn));
- spin_lock(&pag->pag_buf_lock);
- bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
- xfs_buf_hash_params);
- if (bp)
- atomic_inc(&bp->b_hold);
- spin_unlock(&pag->pag_buf_lock);
+ rcu_read_lock();
+ bp = rhashtable_lookup(&pag->pag_buf_hash, &cmap, xfs_buf_hash_params);
+ if (bp && !atomic_inc_not_zero(&bp->b_hold))
+ bp = NULL;
+ rcu_read_unlock();
if (unlikely(!bp)) {
if (flags & XBF_NOALLOC) {
@@ -194,6 +194,7 @@ struct xfs_buf {
int b_last_error;
const struct xfs_buf_ops *b_ops;
+ struct rcu_head b_rcu;
};
/* Finding and Reading Buffers */