
[03/10] btrfs-progs: port block group cache tree insertion and lookup functions

Message ID 20191205042921.25316-4-Damenly_Su@gmx.com (mailing list archive)
State New, archived
Series unify organization structure of block group cache

Commit Message

Su Yue Dec. 5, 2019, 4:29 a.m. UTC
From: Su Yue <Damenly_Su@gmx.com>

Simple copy and paste of the kernel code, with the useless lock operations removed in progs.
The newly added lookup functions are temporarily named with the suffix _kernel.

Signed-off-by: Su Yue <Damenly_Su@gmx.com>
---
 extent-tree.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
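
For context, here is a minimal sketch of how the new helpers might be
called. It is illustrative only, not part of this patch; the caller,
error handling, and offsets are assumptions:

	struct btrfs_block_group_cache *cache;

	/* Find the block group that contains a given byte offset. */
	cache = btrfs_lookup_block_group_kernel(fs_info, bytenr);
	if (!cache)
		return -ENOENT;	/* no block group contains bytenr */

	/* Or start iterating from the first block group at or after offset 0. */
	cache = btrfs_lookup_first_block_group_kernel(fs_info, 0);

Both helpers are thin wrappers around block_group_cache_tree_search();
they differ only in the contains flag.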

Comments

Qu Wenruo Dec. 5, 2019, 7:29 a.m. UTC | #1
On 2019/12/5 12:29 PM, damenly.su@gmail.com wrote:
> From: Su Yue <Damenly_Su@gmx.com>
> 
> Simple copy and paste of the kernel code, with the useless lock operations removed in progs.
> The newly added lookup functions are temporarily named with the suffix _kernel.
> 
> Signed-off-by: Su Yue <Damenly_Su@gmx.com>

Reviewed-by: Qu Wenruo <wqu@suse.com>

Just an extra hint: it would be much better if we backported these
functions to block-group.c.

Thanks,
Qu
> ---
>  extent-tree.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 86 insertions(+)
> 
> diff --git a/extent-tree.c b/extent-tree.c
> index 4a3db029e811..ab576f8732a2 100644
> --- a/extent-tree.c
> +++ b/extent-tree.c
> @@ -164,6 +164,92 @@ err:
>  	return 0;
>  }
>  
> +/*
> + * This adds the block group to the fs_info rb tree for the block group cache
> + */
> +static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
> +				struct btrfs_block_group_cache *block_group)
> +{
> +	struct rb_node **p;
> +	struct rb_node *parent = NULL;
> +	struct btrfs_block_group_cache *cache;
> +
> +	p = &info->block_group_cache_tree.rb_node;
> +
> +	while (*p) {
> +		parent = *p;
> +		cache = rb_entry(parent, struct btrfs_block_group_cache,
> +				 cache_node);
> +		if (block_group->key.objectid < cache->key.objectid)
> +			p = &(*p)->rb_left;
> +		else if (block_group->key.objectid > cache->key.objectid)
> +			p = &(*p)->rb_right;
> +		else
> +			return -EEXIST;
> +	}
> +
> +	rb_link_node(&block_group->cache_node, parent, p);
> +	rb_insert_color(&block_group->cache_node,
> +			&info->block_group_cache_tree);
> +
> +	return 0;
> +}
> +
> +/*
> + * This will return the block group at or after bytenr if contains is 0, else
> + * it will return the block group that contains the bytenr
> + */
> +static struct btrfs_block_group_cache *block_group_cache_tree_search(
> +		struct btrfs_fs_info *info, u64 bytenr, int contains)
> +{
> +	struct btrfs_block_group_cache *cache, *ret = NULL;
> +	struct rb_node *n;
> +	u64 end, start;
> +
> +	n = info->block_group_cache_tree.rb_node;
> +
> +	while (n) {
> +		cache = rb_entry(n, struct btrfs_block_group_cache,
> +				 cache_node);
> +		end = cache->key.objectid + cache->key.offset - 1;
> +		start = cache->key.objectid;
> +
> +		if (bytenr < start) {
> +			if (!contains && (!ret || start < ret->key.objectid))
> +				ret = cache;
> +			n = n->rb_left;
> +		} else if (bytenr > start) {
> +			if (contains && bytenr <= end) {
> +				ret = cache;
> +				break;
> +			}
> +			n = n->rb_right;
> +		} else {
> +			ret = cache;
> +			break;
> +		}
> +	}
> +	return ret;
> +}
> +
> +/*
> + * Return the block group that starts at or after bytenr
> + */
> +struct btrfs_block_group_cache *btrfs_lookup_first_block_group_kernel(
> +		struct btrfs_fs_info *info, u64 bytenr)
> +{
> +	return block_group_cache_tree_search(info, bytenr, 0);
> +}
> +
> +/*
> + * Return the block group that contains the given bytenr
> + */
> +struct btrfs_block_group_cache *btrfs_lookup_block_group_kernel(
> +		struct btrfs_fs_info *info, u64 bytenr)
> +{
> +	return block_group_cache_tree_search(info, bytenr, 1);
> +}
> +
>  /*
>   * Return the block group that contains @bytenr, otherwise return the next one
>   * that starts after @bytenr
>
Su Yue Dec. 5, 2019, 8:32 a.m. UTC | #2
On 2019/12/5 3:29 PM, Qu Wenruo wrote:
>
>
> On 2019/12/5 12:29 PM, damenly.su@gmail.com wrote:
>> From: Su Yue <Damenly_Su@gmx.com>
>>
>> Simple copy and paste of the kernel code, with the useless lock operations removed in progs.
>> The newly added lookup functions are temporarily named with the suffix _kernel.
>>
>> Signed-off-by: Su Yue <Damenly_Su@gmx.com>
>
> Reviewed-by: Qu Wenruo <wqu@suse.com>
>
> Just an extra hint: it would be much better if we backported these
> functions to block-group.c.
>
I considered that. If the functions are moved there, they will not need
any suffix to avoid name conflicts, which will make the rework cleaner.
But I wonder whether now is the proper time to create block-group.c in
progs.

Thanks
> Thanks,
> Qu
>> ---
>>   extent-tree.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 86 insertions(+)
>>
>> diff --git a/extent-tree.c b/extent-tree.c
>> index 4a3db029e811..ab576f8732a2 100644
>> --- a/extent-tree.c
>> +++ b/extent-tree.c
>> @@ -164,6 +164,92 @@ err:
>>   	return 0;
>>   }
>>
>> +/*
>> + * This adds the block group to the fs_info rb tree for the block group cache
>> + */
>> +static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
>> +				struct btrfs_block_group_cache *block_group)
>> +{
>> +	struct rb_node **p;
>> +	struct rb_node *parent = NULL;
>> +	struct btrfs_block_group_cache *cache;
>> +
>> +	p = &info->block_group_cache_tree.rb_node;
>> +
>> +	while (*p) {
>> +		parent = *p;
>> +		cache = rb_entry(parent, struct btrfs_block_group_cache,
>> +				 cache_node);
>> +		if (block_group->key.objectid < cache->key.objectid)
>> +			p = &(*p)->rb_left;
>> +		else if (block_group->key.objectid > cache->key.objectid)
>> +			p = &(*p)->rb_right;
>> +		else
>> +			return -EEXIST;
>> +	}
>> +
>> +	rb_link_node(&block_group->cache_node, parent, p);
>> +	rb_insert_color(&block_group->cache_node,
>> +			&info->block_group_cache_tree);
>> +
>> +	return 0;
>> +}
>> +
>> +/*
>> + * This will return the block group at or after bytenr if contains is 0, else
>> + * it will return the block group that contains the bytenr
>> + */
>> +static struct btrfs_block_group_cache *block_group_cache_tree_search(
>> +		struct btrfs_fs_info *info, u64 bytenr, int contains)
>> +{
>> +	struct btrfs_block_group_cache *cache, *ret = NULL;
>> +	struct rb_node *n;
>> +	u64 end, start;
>> +
>> +	n = info->block_group_cache_tree.rb_node;
>> +
>> +	while (n) {
>> +		cache = rb_entry(n, struct btrfs_block_group_cache,
>> +				 cache_node);
>> +		end = cache->key.objectid + cache->key.offset - 1;
>> +		start = cache->key.objectid;
>> +
>> +		if (bytenr < start) {
>> +			if (!contains && (!ret || start < ret->key.objectid))
>> +				ret = cache;
>> +			n = n->rb_left;
>> +		} else if (bytenr > start) {
>> +			if (contains && bytenr <= end) {
>> +				ret = cache;
>> +				break;
>> +			}
>> +			n = n->rb_right;
>> +		} else {
>> +			ret = cache;
>> +			break;
>> +		}
>> +	}
>> +	return ret;
>> +}
>> +
>> +/*
>> + * Return the block group that starts at or after bytenr
>> + */
>> +struct btrfs_block_group_cache *btrfs_lookup_first_block_group_kernel(
>> +		struct btrfs_fs_info *info, u64 bytenr)
>> +{
>> +	return block_group_cache_tree_search(info, bytenr, 0);
>> +}
>> +
>> +/*
>> + * Return the block group that contains the given bytenr
>> + */
>> +struct btrfs_block_group_cache *btrfs_lookup_block_group_kernel(
>> +		struct btrfs_fs_info *info, u64 bytenr)
>> +{
>> +	return block_group_cache_tree_search(info, bytenr, 1);
>> +}
>> +
>>   /*
>>    * Return the block group that contains @bytenr, otherwise return the next one
>>    * that starts after @bytenr
>>
>
David Sterba Dec. 5, 2019, 1:41 p.m. UTC | #3
On Thu, Dec 05, 2019 at 04:32:57PM +0800, Su Yue wrote:
> On 2019/12/5 3:29 PM, Qu Wenruo wrote:
> > On 2019/12/5 12:29 PM, damenly.su@gmail.com wrote:
> >> From: Su Yue <Damenly_Su@gmx.com>
> >>
> >> Simple copy and paste of the kernel code, with the useless lock operations removed in progs.
> >> The newly added lookup functions are temporarily named with the suffix _kernel.
> >>
> >> Signed-off-by: Su Yue <Damenly_Su@gmx.com>
> >
> > Reviewed-by: Qu Wenruo <wqu@suse.com>
> >
> > Just an extra hint: it would be much better if we backported these
> > functions to block-group.c.
> >
> I considered that. If the functions are moved there, they will not need
> any suffix to avoid name conflicts, which will make the rework cleaner.
> But I wonder whether now is the proper time to create block-group.c in
> progs.

The small incremental changes are IMHO better for now; the kernel and
userspace code bases are not yet close enough that we can just copy
code. When the code that implements some logic (and uses the same
structures) is "close enough", we can copy it directly (e.g.
delayed-refs.[ch]), but otherwise it needs to be done in small steps,
as you do in this patchset.

Patch

diff --git a/extent-tree.c b/extent-tree.c
index 4a3db029e811..ab576f8732a2 100644
--- a/extent-tree.c
+++ b/extent-tree.c
@@ -164,6 +164,92 @@  err:
 	return 0;
 }
 
+/*
+ * This adds the block group to the fs_info rb tree for the block group cache
+ */
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+				struct btrfs_block_group_cache *block_group)
+{
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct btrfs_block_group_cache *cache;
+
+	p = &info->block_group_cache_tree.rb_node;
+
+	while (*p) {
+		parent = *p;
+		cache = rb_entry(parent, struct btrfs_block_group_cache,
+				 cache_node);
+		if (block_group->key.objectid < cache->key.objectid)
+			p = &(*p)->rb_left;
+		else if (block_group->key.objectid > cache->key.objectid)
+			p = &(*p)->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&block_group->cache_node, parent, p);
+	rb_insert_color(&block_group->cache_node,
+			&info->block_group_cache_tree);
+
+	return 0;
+}
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *block_group_cache_tree_search(
+		struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+	struct btrfs_block_group_cache *cache, *ret = NULL;
+	struct rb_node *n;
+	u64 end, start;
+
+	n = info->block_group_cache_tree.rb_node;
+
+	while (n) {
+		cache = rb_entry(n, struct btrfs_block_group_cache,
+				 cache_node);
+		end = cache->key.objectid + cache->key.offset - 1;
+		start = cache->key.objectid;
+
+		if (bytenr < start) {
+			if (!contains && (!ret || start < ret->key.objectid))
+				ret = cache;
+			n = n->rb_left;
+		} else if (bytenr > start) {
+			if (contains && bytenr <= end) {
+				ret = cache;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			ret = cache;
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group_kernel(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group_kernel(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 1);
+}
+
 /*
  * Return the block group that contains @bytenr, otherwise return the next one
  * that starts after @bytenr
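
As a worked example of the contains flag (a hypothetical layout, not
taken from this patch): assume two block groups, [0, 1M) and [1M, 2M),
where M is 1024 * 1024:

	btrfs_lookup_block_group_kernel(info, M + 4096);   /* [1M, 2M): it contains the bytenr */
	btrfs_lookup_block_group_kernel(info, 2 * M);      /* NULL: 2M lies past the last group */
	btrfs_lookup_first_block_group_kernel(info, 4096); /* [1M, 2M): first group starting at or after 4096 */
	btrfs_lookup_first_block_group_kernel(info, 0);    /* [0, 1M): it starts exactly at 0 */

Note that with contains == 0 the search matches on a group's start
offset, not on containment, so a bytenr in the middle of one group
yields the next group.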