
[RFC,03/17] btrfs: priority alloc: introduce compute_block_group_priority/usage

Message ID 20181128031148.357-4-suy.fnst@cn.fujitsu.com (mailing list archive)
State New, archived
Series btrfs: implementation of priority aware allocator

Commit Message

Su Yue Nov. 28, 2018, 3:11 a.m. UTC
Introduce compute_block_group_usage() and compute_block_group_priority(),
and call the latter in btrfs_make_block_group() and
btrfs_read_block_groups().

compute_priority_level() uses ilog2(free) to compute the priority level.
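With ilog2(100) == 6, the resulting levels range from 0 for a completely
free block group (free == 100) up to 7 for a full one (free == 0).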

Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
---
 fs/btrfs/extent-tree.c | 60 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

Comments

Nikolay Borisov Nov. 28, 2018, 8:56 a.m. UTC | #1
On 28.11.18 at 5:11, Su Yue wrote:
> Introduce compute_block_group_usage() and compute_block_group_priority(),
> and call the latter in btrfs_make_block_group() and
> btrfs_read_block_groups().
> 
> compute_priority_level() uses ilog2(free) to compute the priority level.
> 
> Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
> ---
>  fs/btrfs/extent-tree.c | 60 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 60 insertions(+)
> 
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index d242a1174e50..0f4c5b1e0bcc 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -10091,6 +10091,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
>  	return ret;
>  }
>  
> +static long compute_block_group_priority(struct btrfs_block_group_cache *bg);

That is ugly. Just define the function above the first place where it is
used and don't introduce forward declarations for static functions.

>  int btrfs_read_block_groups(struct btrfs_fs_info *info)
>  {
>  	struct btrfs_path *path;
> @@ -10224,6 +10225,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
>  
>  		link_block_group(cache);
>  
> +		cache->priority = compute_block_group_priority(cache);
> +
>  		set_avail_alloc_bits(info, cache->flags);
>  		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
>  			inc_block_group_ro(cache, 1);
> @@ -10373,6 +10376,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
>  
>  	link_block_group(cache);
>  
> +	cache->priority = compute_block_group_priority(cache);
> +
>  	list_add_tail(&cache->bg_list, &trans->new_bgs);
>  
>  	set_avail_alloc_bits(fs_info, type);
> @@ -11190,3 +11195,58 @@ void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
>  	}
>  	spin_unlock(&fs_info->unused_bgs_lock);
>  }
> +
> +enum btrfs_priority_shift {
> +	PRIORITY_USAGE_SHIFT = 0
> +};
> +
> +static inline u8
> +compute_block_group_usage(struct btrfs_block_group_cache *cache)
> +{
> +	u64 num_bytes;
> +	u8 usage;
> +
> +	num_bytes = cache->reserved + cache->bytes_super +
> +		btrfs_block_group_used(&cache->item);
> +
> +	usage = div_u64(num_bytes, div_factor_fine(cache->key.offset, 1));

Mention somewhere (either in a function comment or in the patch
description) that the usage is computed as a percentage of the block
group size.

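For reference, div_factor_fine(cache->key.offset, 1) is effectively
key.offset / 100, so the division above yields the used space as an
integer percentage of the block group size. A minimal userspace sketch of
the same arithmetic (the helper name below is a stand-in, not the btrfs
one):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for div_factor_fine(num, 1), i.e. num * 1 / 100. */
static uint64_t one_percent_of(uint64_t num)
{
	return num / 100;
}

int main(void)
{
	uint64_t bg_size = 1024ULL * 1024 * 1024;	/* 1 GiB block group */
	uint64_t in_use = 256ULL * 1024 * 1024;		/* reserved + super + used */
	unsigned int usage = in_use / one_percent_of(bg_size);

	printf("usage = %u%%\n", usage);		/* prints: usage = 25% */
	return 0;
}
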
> +
> +	return usage;
> +}
> +
> +static long compute_block_group_priority(struct btrfs_block_group_cache *bg)
> +{
> +	u8 usage;
> +	long priority = 0;
> +
> +	if (btrfs_test_opt(bg->fs_info, PRIORITY_USAGE)) {
> +		usage = compute_block_group_usage(bg);
> +		priority |= usage << PRIORITY_USAGE_SHIFT;
> +	}

Why is priority a signed type and not unsigned? I assume priority can
never be negative. I briefly looked at the other patches and most of the
time the argument passed is indeed an unsigned value.

> +
> +	return priority;
> +}
> +
> +static int compute_priority_level(struct btrfs_fs_info *fs_info,
> +				  long priority)
> +{
> +	int level = 0;
> +
> +	if (btrfs_test_opt(fs_info, PRIORITY_USAGE)) {
> +		u8 free;
> +
> +		WARN_ON(priority < 0);

I think this WARN_ON is redundant provided that the high-level
interfaces are sane and don't allow a negative value to trickle down.

> +		free = 100 - (priority >> PRIORITY_USAGE_SHIFT);
> +
> +		if (free == 0)
> +			level = 0;
> +		else if (free == 100)
> +			level = ilog2(free) + 1;
> +		else
> +			level = ilog2(free);
> +		/* log2(1) == 0, leave level 0 for unused block_groups */
> +		level = ilog2(100) + 1 - level;
> +	}
> +
> +	return level;
> +}
>

Patch

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d242a1174e50..0f4c5b1e0bcc 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10091,6 +10091,7 @@  static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+static long compute_block_group_priority(struct btrfs_block_group_cache *bg);
 int btrfs_read_block_groups(struct btrfs_fs_info *info)
 {
 	struct btrfs_path *path;
@@ -10224,6 +10225,8 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		link_block_group(cache);
 
+		cache->priority = compute_block_group_priority(cache);
+
 		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
 			inc_block_group_ro(cache, 1);
@@ -10373,6 +10376,8 @@  int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 
 	link_block_group(cache);
 
+	cache->priority = compute_block_group_priority(cache);
+
 	list_add_tail(&cache->bg_list, &trans->new_bgs);
 
 	set_avail_alloc_bits(fs_info, type);
@@ -11190,3 +11195,58 @@  void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
 	}
 	spin_unlock(&fs_info->unused_bgs_lock);
 }
+
+enum btrfs_priority_shift {
+	PRIORITY_USAGE_SHIFT = 0
+};
+
+static inline u8
+compute_block_group_usage(struct btrfs_block_group_cache *cache)
+{
+	u64 num_bytes;
+	u8 usage;
+
+	num_bytes = cache->reserved + cache->bytes_super +
+		btrfs_block_group_used(&cache->item);
+
+	usage = div_u64(num_bytes, div_factor_fine(cache->key.offset, 1));
+
+	return usage;
+}
+
+static long compute_block_group_priority(struct btrfs_block_group_cache *bg)
+{
+	u8 usage;
+	long priority = 0;
+
+	if (btrfs_test_opt(bg->fs_info, PRIORITY_USAGE)) {
+		usage = compute_block_group_usage(bg);
+		priority |= usage << PRIORITY_USAGE_SHIFT;
+	}
+
+	return priority;
+}
+
+static int compute_priority_level(struct btrfs_fs_info *fs_info,
+				  long priority)
+{
+	int level = 0;
+
+	if (btrfs_test_opt(fs_info, PRIORITY_USAGE)) {
+		u8 free;
+
+		WARN_ON(priority < 0);
+		free = 100 - (priority >> PRIORITY_USAGE_SHIFT);
+
+		if (free == 0)
+			level = 0;
+		else if (free == 100)
+			level = ilog2(free) + 1;
+		else
+			level = ilog2(free);
+		/* log2(1) == 0, leave level 0 for unused block_groups */
+		level = ilog2(100) + 1 - level;
+	}
+
+	return level;
+}
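
For reference, the free-percentage-to-level mapping above can be
reproduced in userspace; the sketch below is only illustrative and uses a
local ilog2 helper in place of the kernel's:

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(n)), n > 0. */
static int ilog2_u(unsigned int n)
{
	int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

/* Mirrors compute_priority_level() for a PRIORITY_USAGE priority. */
static int level_for_free(unsigned int free_pct)
{
	int level;

	if (free_pct == 0)
		level = 0;
	else if (free_pct == 100)
		level = ilog2_u(free_pct) + 1;
	else
		level = ilog2_u(free_pct);
	/* log2(1) == 0, leave level 0 for unused block groups */
	return ilog2_u(100) + 1 - level;
}

int main(void)
{
	unsigned int free_pct;

	/* free 100% -> level 0, free 0% -> level 7 */
	for (free_pct = 0; free_pct <= 100; free_pct += 25)
		printf("free %3u%% -> level %d\n", free_pct,
		       level_for_free(free_pct));
	return 0;
}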