[12/25] btrfs: temporarily export inc_block_group_ro

Message ID 20190620193807.29311-13-josef@toxicpanda.com
State New
Series: btrfs: migrate the block group code

Commit Message

Josef Bacik June 20, 2019, 7:37 p.m. UTC
This is used in a few logical parts of the block group code; temporarily
export it so we can move things in pieces.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/block-group.h |  2 ++
 fs/btrfs/extent-tree.c | 15 ++++++++-------
 2 files changed, 10 insertions(+), 7 deletions(-)
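
The mechanics here follow the usual temporary-export pattern for moving code
in pieces: drop the static qualifier, give the helper a double-underscore
prefix so it does not collide with the existing public function
(btrfs_inc_block_group_ro()), and declare it in the shared header until the
last caller has been migrated. A minimal sketch of that pattern, using
hypothetical names rather than the btrfs symbols, would look something like:

/* foo.h -- temporary declaration, removed once the move is complete */
struct foo_cache { int ro; };
int __foo_set_ro(struct foo_cache *cache, int force);

/* foo.c -- was "static int set_ro(struct foo_cache *cache, int force)" */
int __foo_set_ro(struct foo_cache *cache, int force)
{
	if (!force && cache->ro)
		return -1;
	cache->ro = 1;
	return 0;
}

/* The public entry point keeps its name and keeps calling the helper. */
int foo_set_ro(struct foo_cache *cache)
{
	return __foo_set_ro(cache, 0);
}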

Patch

diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 058b55cd6d29..6a1644908556 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -188,4 +188,6 @@  btrfs_block_group_cache_done(struct btrfs_block_group_cache *cache)
 		cache->cached == BTRFS_CACHE_ERROR;
 }
 
+int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache,
+			       int force);
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c5ffa3db9533..79ef07cdff6b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6685,7 +6685,8 @@  static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
 	return flags;
 }
 
-static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache,
+			       int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
@@ -6790,14 +6791,14 @@  int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
 			goto out;
 	}
 
-	ret = inc_block_group_ro(cache, 0);
+	ret = __btrfs_inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = inc_block_group_ro(cache, 0);
+	ret = __btrfs_inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
@@ -7524,7 +7525,7 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			ASSERT(list_empty(&cache->bg_list));
 			btrfs_mark_bg_unused(cache);
@@ -7546,11 +7547,11 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_RAID0],
 				list)
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_SINGLE],
 				list)
-			inc_block_group_ro(cache, 1);
+			__btrfs_inc_block_group_ro(cache, 1);
 	}
 
 	btrfs_add_raid_kobjects(info);
@@ -8058,7 +8059,7 @@  void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = inc_block_group_ro(block_group, 0);
+		ret = __btrfs_inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;