[RFC,14/17] btrfs: priority alloc: introduce btrfs_set_bg_priority_updating and call btrfs_update_block_group_priority

Message ID 20181128031148.357-15-suy.fnst@cn.fujitsu.com (mailing list archive)
State New, archived
Series: btrfs: implementation of priority aware allocator

Commit Message

Su Yue Nov. 28, 2018, 3:11 a.m. UTC
The block group fields that feed the priority computation are reserved,
bytes_super and btrfs_block_group_used(&cache->item).

This patch calls btrfs_set_bg_priority_updating() at every place where one of
these three fields changes, to mark the block group as needing an update, and
then calls btrfs_update_block_group_priority() to move the block group within
its priority tree if needed.

Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
---
 fs/btrfs/ctree.h            |  2 ++
 fs/btrfs/extent-tree.c      | 40 +++++++++++++++++++++++++++++++++++++
 fs/btrfs/free-space-cache.c |  3 +++
 3 files changed, 45 insertions(+)
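
The pattern the patch repeats is: adjust the reserved/used counters while
holding cache->lock, call btrfs_set_bg_priority_updating() right there so the
block group records whether its computed priority level changed, and only call
btrfs_update_block_group_priority() once every spinlock has been dropped,
because that helper takes the priority tree's semaphores. The following is a
minimal, hypothetical userspace model of that pattern; the pthread locks,
helper names and the priority/level formulas are illustrative stand-ins, not
the kernel primitives or the series' actual computation.

/*
 * Userspace sketch of "mark under the block group lock, resolve the
 * priority-tree move after dropping it".  Build with: gcc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

#define PRIORITY_BG_UPDATING (-1L)

struct bg_model {
	pthread_mutex_t lock;		/* stands in for cache->lock */
	pthread_rwlock_t tree_lock;	/* stands in for the priority tree rwsems */
	long reserved;			/* bytes reserved in this block group */
	long priority;
	int tree_level;			/* level of the tree the bg currently sits in */
};

/* Toy stand-ins for compute_block_group_priority()/compute_priority_level(). */
static long toy_priority(const struct bg_model *bg) { return bg->reserved / 16; }
static int toy_level(long priority) { return priority > 64 ? 1 : 0; }

/* Called with bg->lock held: cheap, only records whether a move is needed. */
static void set_bg_priority_updating(struct bg_model *bg)
{
	long priority = toy_priority(bg);

	if (toy_level(priority) != bg->tree_level)
		bg->priority = PRIORITY_BG_UPDATING;
	else
		bg->priority = priority;
}

/* Called with no locks held: take the tree lock and move the bg if marked. */
static void update_bg_priority(struct bg_model *bg)
{
	long marked;

	pthread_mutex_lock(&bg->lock);
	marked = bg->priority;
	pthread_mutex_unlock(&bg->lock);

	if (marked != PRIORITY_BG_UPDATING)
		return;	/* level unchanged, nothing to move */

	pthread_rwlock_wrlock(&bg->tree_lock);
	pthread_mutex_lock(&bg->lock);
	bg->priority = toy_priority(bg);
	bg->tree_level = toy_level(bg->priority);	/* "re-insert" at the new level */
	pthread_mutex_unlock(&bg->lock);
	pthread_rwlock_unlock(&bg->tree_lock);
}

int main(void)
{
	struct bg_model bg = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tree_lock = PTHREAD_RWLOCK_INITIALIZER,
	};

	/* Caller pattern mirrored from the patch: mark under the lock ... */
	pthread_mutex_lock(&bg.lock);
	bg.reserved += 4096;
	set_bg_priority_updating(&bg);
	pthread_mutex_unlock(&bg.lock);

	/* ... and resolve the move only after every lock is dropped. */
	update_bg_priority(&bg);
	printf("level=%d priority=%ld\n", bg.tree_level, bg.priority);
	return 0;
}

The split matters because cache->lock is a spinlock: btrfs_update_block_group_priority()
takes the priority tree's semaphores, so calling it while still holding the
spinlock (for instance from the reservation path inside find_free_extent())
could sleep under a spinlock or deadlock, which is what the comment added to
btrfs_add_reserved_bytes() warns about.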

Patch

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 091b878e326c..f1ab0310da08 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2753,6 +2753,8 @@  u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info);
 u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 void btrfs_remove_block_group_priority(struct btrfs_block_group_cache *cache);
+void btrfs_set_bg_priority_updating(struct btrfs_block_group_cache *cache);
+void btrfs_update_block_group_priority(struct btrfs_block_group_cache *cache);
 
 enum btrfs_reserve_flush_enum {
 	/* If we are in the transaction, we can't flush anything.*/
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4c76677a54a9..f530a4344368 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6183,6 +6183,7 @@  static int update_block_group(struct btrfs_trans_handle *trans,
 			cache->space_info->bytes_reserved -= num_bytes;
 			cache->space_info->bytes_used += num_bytes;
 			cache->space_info->disk_used += num_bytes * factor;
+			btrfs_set_bg_priority_updating(cache);
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
 		} else {
@@ -6192,6 +6193,7 @@  static int update_block_group(struct btrfs_trans_handle *trans,
 			update_bytes_pinned(cache->space_info, num_bytes);
 			cache->space_info->bytes_used -= num_bytes;
 			cache->space_info->disk_used -= num_bytes * factor;
+			btrfs_set_bg_priority_updating(cache);
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
 
@@ -6205,6 +6207,7 @@  static int update_block_group(struct btrfs_trans_handle *trans,
 					 bytenr, bytenr + num_bytes - 1,
 					 GFP_NOFS | __GFP_NOFAIL);
 		}
+		btrfs_update_block_group_priority(cache);
 
 		spin_lock(&trans->transaction->dirty_bgs_lock);
 		if (list_empty(&cache->dirty_list)) {
@@ -6264,6 +6267,7 @@  static int pin_down_extent(struct btrfs_fs_info *fs_info,
 	if (reserved) {
 		cache->reserved -= num_bytes;
 		cache->space_info->bytes_reserved -= num_bytes;
+		btrfs_set_bg_priority_updating(cache);
 	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
@@ -6274,6 +6278,8 @@  static int pin_down_extent(struct btrfs_fs_info *fs_info,
 		    num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
 	set_extent_dirty(fs_info->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
+
+	btrfs_update_block_group_priority(cache);
 	return 0;
 }
 
@@ -6472,6 +6478,12 @@  static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
 		update_bytes_may_use(space_info, -ram_bytes);
 		if (delalloc)
 			cache->delalloc_bytes += num_bytes;
+		/*
+		 * This is called from find_free_extent(); call
+		 * btrfs_update_block_group_priority() in the caller to
+		 * avoid a deadlock.
+		 */
+		btrfs_set_bg_priority_updating(cache);
 	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&space_info->lock);
@@ -6502,11 +6514,14 @@  static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
 	cache->reserved -= num_bytes;
 	space_info->bytes_reserved -= num_bytes;
 	space_info->max_extent_size = 0;
+	btrfs_set_bg_priority_updating(cache);
 
 	if (delalloc)
 		cache->delalloc_bytes -= num_bytes;
 	spin_unlock(&cache->lock);
 	spin_unlock(&space_info->lock);
+
+	btrfs_update_block_group_priority(cache);
 }
 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
 {
@@ -8025,6 +8040,7 @@  static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 		else
 			up_read(&space_info->groups_sem);
 		btrfs_release_block_group(block_group, delalloc);
+		btrfs_update_block_group_priority(block_group);
 	}
 	ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
 					   full_search, use_cluster);
@@ -8434,9 +8450,12 @@  int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	spin_lock(&block_group->lock);
 	space_info->bytes_reserved += ins->offset;
 	block_group->reserved += ins->offset;
+	btrfs_set_bg_priority_updating(block_group);
 	spin_unlock(&block_group->lock);
 	spin_unlock(&space_info->lock);
 
+	btrfs_update_block_group_priority(block_group);
+
 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
 					 offset, ins, 1);
 	btrfs_put_block_group(block_group);
@@ -11706,3 +11725,24 @@  void btrfs_update_block_group_priority(struct btrfs_block_group_cache *cache)
 	up_write(front_sem);
 	up_write(back_sem);
 }
+
+/* Caller must hold cache->lock */
+void
+btrfs_set_bg_priority_updating(struct btrfs_block_group_cache *cache)
+{
+	long priority;
+	int new_level;
+
+	if (!is_priority_alloc_enabled(cache->fs_info))
+		return;
+	if (cache->priority == PRIORITY_BG_DELETED)
+		return;
+
+	priority = compute_block_group_priority(cache);
+	new_level = compute_priority_level(cache->fs_info, priority);
+
+	if (cache->priority_tree->level != new_level)
+		priority = PRIORITY_BG_UPDATING;
+
+	cache->priority = priority;
+}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 74aa552f4793..ff28a26c9104 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -3149,6 +3149,7 @@  static int do_trimming(struct btrfs_block_group_cache *block_group,
 		block_group->reserved += reserved_bytes;
 		space_info->bytes_reserved += reserved_bytes;
 		update = 1;
+		btrfs_set_bg_priority_updating(block_group);
 	}
 	spin_unlock(&block_group->lock);
 	spin_unlock(&space_info->lock);
@@ -3169,10 +3170,12 @@  static int do_trimming(struct btrfs_block_group_cache *block_group,
 			space_info->bytes_readonly += reserved_bytes;
 		block_group->reserved -= reserved_bytes;
 		space_info->bytes_reserved -= reserved_bytes;
+		btrfs_set_bg_priority_updating(block_group);
 		spin_unlock(&space_info->lock);
 		spin_unlock(&block_group->lock);
 	}
 
+	btrfs_update_block_group_priority(block_group);
 	return ret;
 }