@@ -32,26 +32,6 @@
#undef SCRAMBLE_DELAYED_REFS
-/*
- * Declare a helper function to detect underflow of various space info members
- */
-#define DECLARE_SPACE_INFO_UPDATE(name) \
-static inline void update_##name(struct btrfs_fs_info *fs_info, \
- struct btrfs_space_info *sinfo, \
- s64 bytes) \
-{ \
- lockdep_assert_held(&sinfo->lock); \
- trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
- if (bytes < 0 && sinfo->name < -bytes) { \
- WARN_ON(1); \
- sinfo->name = 0; \
- return; \
- } \
- sinfo->name += bytes; \
-}
-
-DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
-DECLARE_SPACE_INFO_UPDATE(bytes_pinned);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node, u64 parent,
@@ -4054,7 +4034,7 @@ int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
- update_bytes_may_use(fs_info, data_sinfo, bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
@@ -4107,7 +4087,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
data_sinfo = fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
- update_bytes_may_use(fs_info, data_sinfo, -len);
+ btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, -len);
trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, len, 0);
spin_unlock(&data_sinfo->lock);
@@ -4949,13 +4929,15 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* If not things get more complicated.
*/
if (used + orig_bytes <= space_info->total_bytes) {
- update_bytes_may_use(fs_info, space_info, orig_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, space_info,
+ orig_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, orig_bytes, 1);
ret = 0;
} else if (btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush,
system_chunk)) {
- update_bytes_may_use(fs_info, space_info, orig_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, space_info,
+ orig_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, orig_bytes, 1);
ret = 0;
@@ -5286,7 +5268,7 @@ void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
flush = BTRFS_RESERVE_FLUSH_ALL;
goto again;
}
- update_bytes_may_use(fs_info, space_info, -num_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, num_bytes, 0);
spin_unlock(&space_info->lock);
@@ -5314,8 +5296,9 @@ void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
ticket->bytes, 1);
list_del_init(&ticket->list);
num_bytes -= ticket->bytes;
- update_bytes_may_use(fs_info, space_info,
- ticket->bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info,
+ space_info,
+ ticket->bytes);
ticket->bytes = 0;
space_info->tickets_id++;
wake_up(&ticket->wait);
@@ -5323,7 +5306,9 @@ void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags,
num_bytes, 1);
- update_bytes_may_use(fs_info, space_info, num_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info,
+ space_info,
+ num_bytes);
ticket->bytes -= num_bytes;
num_bytes = 0;
}
@@ -5616,14 +5601,16 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
num_bytes = min(num_bytes,
block_rsv->size - block_rsv->reserved);
block_rsv->reserved += num_bytes;
- update_bytes_may_use(fs_info, sinfo, num_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
+ num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes,
1);
}
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
- update_bytes_may_use(fs_info, sinfo, -num_bytes);
+ btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
+ -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes, 0);
block_rsv->reserved = block_rsv->size;
@@ -6086,7 +6073,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
old_val -= num_bytes;
btrfs_set_block_group_used(&cache->item, old_val);
cache->pinned += num_bytes;
- update_bytes_pinned(info, cache->space_info, num_bytes);
+ btrfs_space_info_update_bytes_pinned(info,
+ cache->space_info,
+ num_bytes);
cache->space_info->bytes_used -= num_bytes;
cache->space_info->disk_used -= num_bytes * factor;
spin_unlock(&cache->lock);
@@ -6161,7 +6150,8 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
cache->pinned += num_bytes;
- update_bytes_pinned(fs_info, cache->space_info, num_bytes);
+ btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
+ num_bytes);
if (reserved) {
cache->reserved -= num_bytes;
cache->space_info->bytes_reserved -= num_bytes;
@@ -6370,7 +6360,8 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
} else {
cache->reserved += num_bytes;
space_info->bytes_reserved += num_bytes;
- update_bytes_may_use(cache->fs_info, space_info, -ram_bytes);
+ btrfs_space_info_update_bytes_may_use(cache->fs_info,
+ space_info, -ram_bytes);
if (delalloc)
cache->delalloc_bytes += num_bytes;
}
@@ -6526,7 +6517,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
cache->pinned -= len;
- update_bytes_pinned(fs_info, space_info, -len);
+ btrfs_space_info_update_bytes_pinned(fs_info, space_info,
+ -len);
trace_btrfs_space_reservation(fs_info, "pinned",
space_info->flags, len, 0);
@@ -6547,8 +6539,9 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
to_add = min(len, global_rsv->size -
global_rsv->reserved);
global_rsv->reserved += to_add;
- update_bytes_may_use(fs_info, space_info,
- to_add);
+ btrfs_space_info_update_bytes_may_use(fs_info,
+ space_info,
+ to_add);
if (global_rsv->reserved >= global_rsv->size)
global_rsv->full = 1;
trace_btrfs_space_reservation(fs_info,
@@ -10808,7 +10801,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- update_bytes_pinned(fs_info, space_info, -block_group->pinned);
+ btrfs_space_info_update_bytes_pinned(fs_info, space_info,
+ -block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
percpu_counter_add_batch(&space_info->total_bytes_pinned,
-block_group->pinned,
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -77,6 +77,28 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}
+
+/*
+ * Declare a helper function to detect underflow of various space info members
+ */
+#define DECLARE_SPACE_INFO_UPDATE(name) \
+static inline void \
+btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
+ struct btrfs_space_info *sinfo, \
+ s64 bytes) \
+{ \
+ lockdep_assert_held(&sinfo->lock); \
+ trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
+ if (bytes < 0 && sinfo->name < -bytes) { \
+ WARN_ON(1); \
+ sinfo->name = 0; \
+ return; \
+ } \
+ sinfo->name += bytes; \
+}
+
+DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
+DECLARE_SPACE_INFO_UPDATE(bytes_pinned);
void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
Also rename it to btrfs_space_info_update_* so it's clear what we're updating.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/extent-tree.c | 68 +++++++++++++++++++++++---------------------------
 fs/btrfs/space-info.h  | 22 ++++++++++++++++
 2 files changed, 53 insertions(+), 37 deletions(-)
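Note for reviewers: the underflow guard that DECLARE_SPACE_INFO_UPDATE() generates is easiest to see in isolation. Below is a minimal, stand-alone C sketch of that behavior only. The struct demo_space_info and demo_update_bytes_may_use() names are made-up stand-ins for illustration, not btrfs code; the real generated helper additionally asserts sinfo->lock is held and emits the trace_update_* tracepoint, as in the hunk above.

/*
 * Stand-alone model of the underflow check generated by
 * DECLARE_SPACE_INFO_UPDATE().  Simplified stand-in for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_space_info {
	int64_t bytes_may_use;		/* stands in for sinfo->bytes_may_use */
};

static void demo_update_bytes_may_use(struct demo_space_info *sinfo,
				      int64_t bytes)
{
	/*
	 * A negative delta larger than the current counter would wrap it
	 * below zero, so the generated helper warns and clamps to zero
	 * instead of underflowing.
	 */
	if (bytes < 0 && sinfo->bytes_may_use < -bytes) {
		fprintf(stderr, "bytes_may_use underflow, clamping to 0\n");
		sinfo->bytes_may_use = 0;
		return;
	}
	sinfo->bytes_may_use += bytes;
}

int main(void)
{
	struct demo_space_info sinfo = { .bytes_may_use = 4096 };

	demo_update_bytes_may_use(&sinfo, -8192);	/* triggers the clamp */
	printf("bytes_may_use = %lld\n", (long long)sinfo.bytes_may_use);

	demo_update_bytes_may_use(&sinfo, 4096);	/* normal update */
	printf("bytes_may_use = %lld\n", (long long)sinfo.bytes_may_use);
	return 0;
}

In the kernel the call sites look like the hunks above: take sinfo->lock, call btrfs_space_info_update_bytes_may_use() or btrfs_space_info_update_bytes_pinned() with a signed delta, then drop the lock.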