@@ -32,7 +32,8 @@ int __init btrfs_delayed_inode_init(void)
delayed_node_cache = kmem_cache_create("delayed_node",
sizeof(struct btrfs_delayed_node),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD | SLAB_DESTROY_BY_RCU,
NULL);
if (!delayed_node_cache)
return -ENOMEM;
@@ -90,22 +91,35 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
struct btrfs_root *root = btrfs_inode->root;
- if (btrfs_inode->delayed_node) {
- node = btrfs_inode->delayed_node;
- atomic_inc(&node->refs); /* can be accessed */
- return node;
+again:
+ rcu_read_lock();
+again_rcu:
+ node = btrfs_inode->delayed_node;
+ if (node) {
+ if (atomic_inc_not_zero(&node->refs)) {
+ rcu_read_unlock();
+ return node;
+ }
+		/* refs hit zero: node is being torn down (and, with SLAB_DESTROY_BY_RCU, may be reused) — re-read the pointer and retry */
+ goto again_rcu;
}
+ rcu_read_unlock();
node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
if (!node)
return ERR_PTR(-ENOMEM);
btrfs_init_delayed_node(node, root->objectid, inode->i_ino);
- btrfs_inode->delayed_node = node;
node->delayed_root = btrfs_get_delayed_root(root);
atomic_inc(&node->refs); /* cached in the btrfs inode */
atomic_inc(&node->refs); /* can be accessed */
+	if (cmpxchg(&btrfs_inode->delayed_node, NULL, node)) {
+ kmem_cache_free(delayed_node_cache, node);
+		/* lost the race: another task already installed a node; free ours and retry to use theirs */
+ goto again;
+ }
+
return node;
}
@@ -1167,7 +1181,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
nr = trans->blocks_used;
btrfs_end_transaction_dmeta(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ __btrfs_btree_balance_dirty(root, nr);
free_path:
btrfs_free_path(path);
out:
@@ -2649,6 +2649,28 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
balance_dirty_pages_ratelimited_nr(
root->fs_info->btree_inode->i_mapping, 1);
}
+ btrfs_balance_delayed_items(root);
+ return;
+}
+
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+	/*
+	 * Lightweight variant used from the async delayed-node path:
+	 * older kernels could get stuck in balance_dirty_pages()
+	 * forever when throttled here, so keep this path minimal.
+	 */
+ u64 num_dirty;
+ unsigned long thresh = 32 * 1024 * 1024;
+
+ if (current->flags & PF_MEMALLOC)
+ return;
+
+ num_dirty = root->fs_info->dirty_metadata_bytes;
+
+ if (num_dirty > thresh) {
+ balance_dirty_pages_ratelimited_nr(
+ root->fs_info->btree_inode->i_mapping, 1);
+ }
return;
}
@@ -71,6 +71,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root,
u64 block_start,
u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
@@ -4449,6 +4449,8 @@ void btrfs_dirty_inode(struct inode *inode)
}
}
btrfs_end_transaction(trans, root);
+ if (BTRFS_I(inode)->delayed_node)
+ btrfs_balance_delayed_items(root);
}
/*
@@ -483,8 +483,6 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
ret = __btrfs_end_transaction(trans, root, 0, 1);
if (ret)
return ret;
-
- btrfs_balance_delayed_items(root);
return 0;
}
@@ -496,8 +494,6 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
ret = __btrfs_end_transaction(trans, root, 1, 1);
if (ret)
return ret;
-
- btrfs_balance_delayed_items(root);
return 0;
}
@@ -509,8 +505,6 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
ret = __btrfs_end_transaction(trans, root, 0, 0);
if (ret)
return ret;
-
- btrfs_balance_delayed_items(root);
return 0;
}