diff mbox series

[v2,17/39] btrfs: Rename tree_entry to simple_node and export it

Message ID 20200326083316.48847-18-wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs: qgroup: Use backref cache based backref walk for commit roots | expand

Commit Message

Qu Wenruo March 26, 2020, 8:32 a.m. UTC
Structure tree_entry provides a very simple rb_tree which only uses
bytenr as search index.

That tree_entry is used in 3 structures: backref_node, mapping_node and
tree_block.

Since we're going to make backref_node independent from relocation, it's
a good time to extract the tree_entry into simple_node, and export it
into misc.h.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/backref.h    |   6 ++-
 fs/btrfs/misc.h       |  54 +++++++++++++++++++++
 fs/btrfs/relocation.c | 109 +++++++++++++-----------------------------
 3 files changed, 90 insertions(+), 79 deletions(-)

Comments

David Sterba April 1, 2020, 3:48 p.m. UTC | #1
On Thu, Mar 26, 2020 at 04:32:54PM +0800, Qu Wenruo wrote:
> Structure tree_entry provides a very simple rb_tree which only uses
> bytenr as search index.
> 
> That tree_entry is used in 3 structures: backref_node, mapping_node and
> tree_block.
> 
> Since we're going to make backref_node independnt from relocation, it's
> a good time to extract the tree_entry into simple_node, and export it
> into misc.h.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/backref.h    |   6 ++-
>  fs/btrfs/misc.h       |  54 +++++++++++++++++++++
>  fs/btrfs/relocation.c | 109 +++++++++++++-----------------------------
>  3 files changed, 90 insertions(+), 79 deletions(-)
> 
> diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
> index 76858ec099d9..f3eae9e9f84b 100644
> --- a/fs/btrfs/backref.h
> +++ b/fs/btrfs/backref.h
> @@ -162,8 +162,10 @@ btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
>   * present a tree block in the backref cache
>   */
>  struct btrfs_backref_node {
> -	struct rb_node rb_node;
> -	u64 bytenr;
> +	struct {
> +		struct rb_node rb_node;
> +		u64 bytenr;
> +	}; /* Use simple_node for search/insert */

Why is this anonymous struct? This should be the simple_node as I see
below. For some simple rb search API.

>  
>  	u64 new_bytenr;
>  	/* objectid of tree block owner, can be not uptodate */
> diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
> index 72bab64ecf60..d199bfdb210e 100644
> --- a/fs/btrfs/misc.h
> +++ b/fs/btrfs/misc.h
> @@ -6,6 +6,7 @@
>  #include <linux/sched.h>
>  #include <linux/wait.h>
>  #include <asm/div64.h>
> +#include <linux/rbtree.h>
>  
>  #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
>  
> @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
>  	return is_power_of_two_u64(n);
>  }
>  
> +/*
> + * Simple bytenr based rb_tree relate structures
> + *
> + * Any structure wants to use bytenr as single search index should have their
> + * structure start with these members.

This is not very clean coding style, relying on particular placement and
order in another struct.

> + */
> +struct simple_node {
> +	struct rb_node rb_node;
> +	u64 bytenr;
> +};
> +
> +static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)

simple_search is IMHO too vague, it's related to a rb-tree so this could
be reflected in the name somehow.

I think it's ok if you do this as a middle step before making it a
proper struct hook and API but I don't like the end result as it's not
really an improvement.
Qu Wenruo April 1, 2020, 11:40 p.m. UTC | #2
On 2020/4/1 下午11:48, David Sterba wrote:
> On Thu, Mar 26, 2020 at 04:32:54PM +0800, Qu Wenruo wrote:
>> Structure tree_entry provides a very simple rb_tree which only uses
>> bytenr as search index.
>>
>> That tree_entry is used in 3 structures: backref_node, mapping_node and
>> tree_block.
>>
>> Since we're going to make backref_node independnt from relocation, it's
>> a good time to extract the tree_entry into simple_node, and export it
>> into misc.h.
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>> ---
>>  fs/btrfs/backref.h    |   6 ++-
>>  fs/btrfs/misc.h       |  54 +++++++++++++++++++++
>>  fs/btrfs/relocation.c | 109 +++++++++++++-----------------------------
>>  3 files changed, 90 insertions(+), 79 deletions(-)
>>
>> diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
>> index 76858ec099d9..f3eae9e9f84b 100644
>> --- a/fs/btrfs/backref.h
>> +++ b/fs/btrfs/backref.h
>> @@ -162,8 +162,10 @@ btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
>>   * present a tree block in the backref cache
>>   */
>>  struct btrfs_backref_node {
>> -	struct rb_node rb_node;
>> -	u64 bytenr;
>> +	struct {
>> +		struct rb_node rb_node;
>> +		u64 bytenr;
>> +	}; /* Use simple_node for search/insert */
> 
> Why is this anonymous struct? This should be the simple_node as I see
> below. For some simple rb search API.

If using simple_node, we need a ton of extra wrapper to wrap things like
rb_entry(), rb_postorder_()

Thus here we still want bytenr/rb_node directly embedded into the structure.

The ideal method would be anonymous but typed structure.
Unfortunately no such C standard supports this.

> 
>>  
>>  	u64 new_bytenr;
>>  	/* objectid of tree block owner, can be not uptodate */
>> diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
>> index 72bab64ecf60..d199bfdb210e 100644
>> --- a/fs/btrfs/misc.h
>> +++ b/fs/btrfs/misc.h
>> @@ -6,6 +6,7 @@
>>  #include <linux/sched.h>
>>  #include <linux/wait.h>
>>  #include <asm/div64.h>
>> +#include <linux/rbtree.h>
>>  
>>  #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
>>  
>> @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
>>  	return is_power_of_two_u64(n);
>>  }
>>  
>> +/*
>> + * Simple bytenr based rb_tree relate structures
>> + *
>> + * Any structure wants to use bytenr as single search index should have their
>> + * structure start with these members.
> 
> This is not very clean coding style, relying on particular placement and
> order in another struct.

Order is not a problem, since we call container_of(), thus there is no
need for any order or placement.
User can easily put rb_node at the end of the structure, and bytenr at
the beginning of the structure, and everything still goes well.

The anonymous structure is mostly here to inform callers that we're
using simple_node structure.

> 
>> + */
>> +struct simple_node {
>> +	struct rb_node rb_node;
>> +	u64 bytenr;
>> +};
>> +
>> +static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
> 
> simple_search is IMHO too vague, it's related to a rb-tree so this could
> be reflected in the name somehow.
> 
> I think it's ok if you do this as a middle step before making it a
> proper struct hook and API but I don't like the end result as it's not
> really an improvement.
> 
That's what I mean by "simple": it's really just a simple, not even
a full wrapper, for bytenr based rb tree search.

Adding too many wrappers may simply kill the "simple" part.

Although I have to admit, that most of the simple_node part is only to
reuse code across relocation.c and backref.c. Since no other users
utilize such simple facility.

Any idea to improve such situation? Or we really need to go full wrappers?

Thanks,
Qu
Qu Wenruo April 2, 2020, 12:52 a.m. UTC | #3
On 2020/4/2 上午7:40, Qu Wenruo wrote:
> 
> 
> On 2020/4/1 下午11:48, David Sterba wrote:
>> On Thu, Mar 26, 2020 at 04:32:54PM +0800, Qu Wenruo wrote:
>>> Structure tree_entry provides a very simple rb_tree which only uses
>>> bytenr as search index.
>>>
>>> That tree_entry is used in 3 structures: backref_node, mapping_node and
>>> tree_block.
>>>
>>> Since we're going to make backref_node independnt from relocation, it's
>>> a good time to extract the tree_entry into simple_node, and export it
>>> into misc.h.
>>>
>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>> ---
>>>  fs/btrfs/backref.h    |   6 ++-
>>>  fs/btrfs/misc.h       |  54 +++++++++++++++++++++
>>>  fs/btrfs/relocation.c | 109 +++++++++++++-----------------------------
>>>  3 files changed, 90 insertions(+), 79 deletions(-)
>>>
>>> diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
>>> index 76858ec099d9..f3eae9e9f84b 100644
>>> --- a/fs/btrfs/backref.h
>>> +++ b/fs/btrfs/backref.h
>>> @@ -162,8 +162,10 @@ btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
>>>   * present a tree block in the backref cache
>>>   */
>>>  struct btrfs_backref_node {
>>> -	struct rb_node rb_node;
>>> -	u64 bytenr;
>>> +	struct {
>>> +		struct rb_node rb_node;
>>> +		u64 bytenr;
>>> +	}; /* Use simple_node for search/insert */
>>
>> Why is this anonymous struct? This should be the simple_node as I see
>> below. For some simple rb search API.
> 
> If using simple_node, we need a ton of extra wrapper to wrap things like
> rb_entry(), rb_postorder_()
> 
> Thus here we still want byte/rb_node directly embeded into the structure.
> 
> The ideal method would be anonymous but typed structure.
> Unfortunately no such C standard supports this.
> 
>>
>>>  
>>>  	u64 new_bytenr;
>>>  	/* objectid of tree block owner, can be not uptodate */
>>> diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
>>> index 72bab64ecf60..d199bfdb210e 100644
>>> --- a/fs/btrfs/misc.h
>>> +++ b/fs/btrfs/misc.h
>>> @@ -6,6 +6,7 @@
>>>  #include <linux/sched.h>
>>>  #include <linux/wait.h>
>>>  #include <asm/div64.h>
>>> +#include <linux/rbtree.h>
>>>  
>>>  #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
>>>  
>>> @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
>>>  	return is_power_of_two_u64(n);
>>>  }
>>>  
>>> +/*
>>> + * Simple bytenr based rb_tree relate structures
>>> + *
>>> + * Any structure wants to use bytenr as single search index should have their
>>> + * structure start with these members.
>>
>> This is not very clean coding style, relying on particular placement and
>> order in another struct.
> 
> Order is not a problem, since we call container_of(), thus there is no
> need for any order or placement.
> User can easily put rb_node at the end of the structure, and bytenr at
> the beginning of the structure, and everything still goes well.

My bad, the order is still a pretty important thing...

Thus we still need to keep everything in their correct order to make the
code work...

> 
> The anonymous structure is mostly here to inform callers that we're
> using simple_node structure.
> 
>>
>>> + */
>>> +struct simple_node {
>>> +	struct rb_node rb_node;
>>> +	u64 bytenr;
>>> +};
>>> +
>>> +static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
>>
>> simple_search is IMHO too vague, it's related to a rb-tree so this could
>> be reflected in the name somehow.
>>
>> I think it's ok if you do this as a middle step before making it a
>> proper struct hook and API but I don't like the end result as it's not
>> really an improvement.
>>
> That's the what I mean for "simple", it's really just a simple, not even
> a full wrapper, for bytenr based rb tree search.
> 
> Adding too many wrappers may simply kill the "simple" part.
> 
> Although I have to admit, that most of the simple_node part is only to
> reuse code across relocation.c and backref.c. Since no other users
> utilize such simple facility.
> 
> Any idea to improve such situation? Or we really need to go full wrappers?
> 
> Thanks,
> Qu
>
David Sterba April 2, 2020, 1:09 a.m. UTC | #4
On Thu, Apr 02, 2020 at 07:40:29AM +0800, Qu Wenruo wrote:
> >>  struct btrfs_backref_node {
> >> -	struct rb_node rb_node;
> >> -	u64 bytenr;
> >> +	struct {
> >> +		struct rb_node rb_node;
> >> +		u64 bytenr;
> >> +	}; /* Use simple_node for search/insert */
> > 
> > Why is this anonymous struct? This should be the simple_node as I see
> > below. For some simple rb search API.
> 
> If using simple_node, we need a ton of extra wrapper to wrap things like
> rb_entry(), rb_postorder_()
> 
> Thus here we still want byte/rb_node directly embeded into the structure.
> 
> The ideal method would be anonymous but typed structure.
> Unfortunately no such C standard supports this.

My idea was to have something like this (simplified):

	struct tree_node {
		struct rb_node node;
		u64 bytenr;
	};

	struct backref_node {
		...
		struct tree_node cache_node;
		...
	};

	struct backref_node bnode;

when the rb_node is needed, pass &bnode.cache_node.rb_node . All the
rb_* functions should work without adding another interface layer.

> >>  	u64 new_bytenr;
> >>  	/* objectid of tree block owner, can be not uptodate */
> >> diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
> >> index 72bab64ecf60..d199bfdb210e 100644
> >> --- a/fs/btrfs/misc.h
> >> +++ b/fs/btrfs/misc.h
> >> @@ -6,6 +6,7 @@
> >>  #include <linux/sched.h>
> >>  #include <linux/wait.h>
> >>  #include <asm/div64.h>
> >> +#include <linux/rbtree.h>
> >>  
> >>  #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
> >>  
> >> @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
> >>  	return is_power_of_two_u64(n);
> >>  }
> >>  
> >> +/*
> >> + * Simple bytenr based rb_tree relate structures
> >> + *
> >> + * Any structure wants to use bytenr as single search index should have their
> >> + * structure start with these members.
> > 
> > This is not very clean coding style, relying on particular placement and
> > order in another struct.
> 
> Order is not a problem, since we call container_of(), thus there is no
> need for any order or placement.
> User can easily put rb_node at the end of the structure, and bytenr at
> the beginning of the structure, and everything still goes well.
> 
> The anonymous structure is mostly here to inform callers that we're
> using simple_node structure.
> 
> > 
> >> + */
> >> +struct simple_node {
> >> +	struct rb_node rb_node;
> >> +	u64 bytenr;
> >> +};
> >> +
> >> +static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
> > 
> > simple_search is IMHO too vague, it's related to a rb-tree so this could
> > be reflected in the name somehow.
> > 
> > I think it's ok if you do this as a middle step before making it a
> > proper struct hook and API but I don't like the end result as it's not
> > really an improvement.
> > 
> That's the what I mean for "simple", it's really just a simple, not even
> a full wrapper, for bytenr based rb tree search.
> 
> Adding too many wrappers may simply kill the "simple" part.
> 
> Although I have to admit, that most of the simple_node part is only to
> reuse code across relocation.c and backref.c. Since no other users
> utilize such simple facility.
> 
> Any idea to improve such situation? Or we really need to go full wrappers?

If the above works we won't need to add more wrappers. But after some
thinking I'm ok with the way you implement it as it will certainly clean
up some things and once it's merged we'll have another chance to look at
the code and fix up only the structures.
Qu Wenruo April 2, 2020, 1:32 a.m. UTC | #5
On 2020/4/2 上午9:09, David Sterba wrote:
> On Thu, Apr 02, 2020 at 07:40:29AM +0800, Qu Wenruo wrote:
>>>>  struct btrfs_backref_node {
>>>> -	struct rb_node rb_node;
>>>> -	u64 bytenr;
>>>> +	struct {
>>>> +		struct rb_node rb_node;
>>>> +		u64 bytenr;
>>>> +	}; /* Use simple_node for search/insert */
>>>
>>> Why is this anonymous struct? This should be the simple_node as I see
>>> below. For some simple rb search API.
>>
>> If using simple_node, we need a ton of extra wrapper to wrap things like
>> rb_entry(), rb_postorder_()
>>
>> Thus here we still want byte/rb_node directly embeded into the structure.
>>
>> The ideal method would be anonymous but typed structure.
>> Unfortunately no such C standard supports this.
> 
> My idea was to have something like this (simplified):
> 
> 	struct tree_node {
> 		struct rb_node node;
> 		u64 bytenr;
> 	};
> 
> 	struct backref_node {
> 		...
> 		struct tree_node cache_node;
> 		...
> 	};
> 
> 	struct backref_node bnode;
> 
> when the rb_node is needed, pass &bnode.cache_node.rb_node . All the
> rb_* functions should work without adding another interface layer.

The problem is function relocate_tree_blocks().

In which we call rbtree_postorder_for_each_entry_safe().

If we use tree_node directly, we need to call container_of() again to
grab the tree_block structure, which almost kills the meaning of
rbtree_postorder_for_each_entry_safe()

This also applies to rb_first() callers like free_block_list().

> 
>>>>  	u64 new_bytenr;
>>>>  	/* objectid of tree block owner, can be not uptodate */
>>>> diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
>>>> index 72bab64ecf60..d199bfdb210e 100644
>>>> --- a/fs/btrfs/misc.h
>>>> +++ b/fs/btrfs/misc.h
>>>> @@ -6,6 +6,7 @@
>>>>  #include <linux/sched.h>
>>>>  #include <linux/wait.h>
>>>>  #include <asm/div64.h>
>>>> +#include <linux/rbtree.h>
>>>>  
>>>>  #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
>>>>  
>>>> @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
>>>>  	return is_power_of_two_u64(n);
>>>>  }
>>>>  
>>>> +/*
>>>> + * Simple bytenr based rb_tree relate structures
>>>> + *
>>>> + * Any structure wants to use bytenr as single search index should have their
>>>> + * structure start with these members.
>>>
>>> This is not very clean coding style, relying on particular placement and
>>> order in another struct.
>>
>> Order is not a problem, since we call container_of(), thus there is no
>> need for any order or placement.
>> User can easily put rb_node at the end of the structure, and bytenr at
>> the beginning of the structure, and everything still goes well.
>>
>> The anonymous structure is mostly here to inform callers that we're
>> using simple_node structure.
>>
>>>
>>>> + */
>>>> +struct simple_node {
>>>> +	struct rb_node rb_node;
>>>> +	u64 bytenr;
>>>> +};
>>>> +
>>>> +static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
>>>
>>> simple_search is IMHO too vague, it's related to a rb-tree so this could
>>> be reflected in the name somehow.
>>>
>>> I think it's ok if you do this as a middle step before making it a
>>> proper struct hook and API but I don't like the end result as it's not
>>> really an improvement.
>>>
>> That's the what I mean for "simple", it's really just a simple, not even
>> a full wrapper, for bytenr based rb tree search.
>>
>> Adding too many wrappers may simply kill the "simple" part.
>>
>> Although I have to admit, that most of the simple_node part is only to
>> reuse code across relocation.c and backref.c. Since no other users
>> utilize such simple facility.
>>
>> Any idea to improve such situation? Or we really need to go full wrappers?
> 
> If the above works we won't need to add more wrappers. But after some
> thinking I'm ok with the way you implement it as it will certainly clean
> up some things and once it's merged we'll have another chance to look at
> the code and fix up only the structures.

Looking forward to better cleanups.

Thanks,
Qu
diff mbox series

Patch

diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 76858ec099d9..f3eae9e9f84b 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -162,8 +162,10 @@  btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
  * present a tree block in the backref cache
  */
 struct btrfs_backref_node {
-	struct rb_node rb_node;
-	u64 bytenr;
+	struct {
+		struct rb_node rb_node;
+		u64 bytenr;
+	}; /* Use simple_node for search/insert */
 
 	u64 new_bytenr;
 	/* objectid of tree block owner, can be not uptodate */
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 72bab64ecf60..d199bfdb210e 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -6,6 +6,7 @@ 
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <asm/div64.h>
+#include <linux/rbtree.h>
 
 #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
 
@@ -58,4 +59,57 @@  static inline bool has_single_bit_set(u64 n)
 	return is_power_of_two_u64(n);
 }
 
+/*
+ * Simple bytenr based rb_tree related structures
+ *
+ * Any structure that wants to use bytenr as a single search index should
+ * have these members at the start of the structure.
+ */
+struct simple_node {
+	struct rb_node rb_node;
+	u64 bytenr;
+};
+
+static inline struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
+{
+	struct rb_node *n = root->rb_node;
+	struct simple_node *entry;
+
+	while (n) {
+		entry = rb_entry(n, struct simple_node, rb_node);
+
+		if (bytenr < entry->bytenr)
+			n = n->rb_left;
+		else if (bytenr > entry->bytenr)
+			n = n->rb_right;
+		else
+			return n;
+	}
+	return NULL;
+}
+
+static inline struct rb_node *simple_insert(struct rb_root *root, u64 bytenr,
+					    struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct simple_node *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct simple_node, rb_node);
+
+		if (bytenr < entry->bytenr)
+			p = &(*p)->rb_left;
+		else if (bytenr > entry->bytenr)
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
 #endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ec7d28d63347..d62537792ac0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -24,6 +24,7 @@ 
 #include "delalloc-space.h"
 #include "block-group.h"
 #include "backref.h"
+#include "misc.h"
 
 /*
  * Relocation overview
@@ -72,21 +73,15 @@ 
  * The entry point of relocation is relocate_block_group() function.
  */
 
-/*
- * btrfs_backref_node, mapping_node and tree_block start with this
- */
-struct tree_entry {
-	struct rb_node rb_node;
-	u64 bytenr;
-};
-
 #define RELOCATION_RESERVED_NODES	256
 /*
  * map address of tree root to tree
  */
 struct mapping_node {
-	struct rb_node rb_node;
-	u64 bytenr;
+	struct {
+		struct rb_node rb_node;
+		u64 bytenr;
+	}; /* Use simple_node for search/insert */
 	void *data;
 };
 
@@ -99,8 +94,10 @@  struct mapping_tree {
  * present a tree block to process
  */
 struct tree_block {
-	struct rb_node rb_node;
-	u64 bytenr;
+	struct {
+		struct rb_node rb_node;
+		u64 bytenr;
+	}; /* Use simple_node for search/insert */
 	struct btrfs_key key;
 	unsigned int level:8;
 	unsigned int key_ready:1;
@@ -293,48 +290,6 @@  static void free_backref_edge(struct btrfs_backref_cache *cache,
 	}
 }
 
-static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
-				   struct rb_node *node)
-{
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct tree_entry *entry;
-
-	while (*p) {
-		parent = *p;
-		entry = rb_entry(parent, struct tree_entry, rb_node);
-
-		if (bytenr < entry->bytenr)
-			p = &(*p)->rb_left;
-		else if (bytenr > entry->bytenr)
-			p = &(*p)->rb_right;
-		else
-			return parent;
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-	return NULL;
-}
-
-static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
-{
-	struct rb_node *n = root->rb_node;
-	struct tree_entry *entry;
-
-	while (n) {
-		entry = rb_entry(n, struct tree_entry, rb_node);
-
-		if (bytenr < entry->bytenr)
-			n = n->rb_left;
-		else if (bytenr > entry->bytenr)
-			n = n->rb_right;
-		else
-			return n;
-	}
-	return NULL;
-}
-
 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
 {
 
@@ -473,7 +428,7 @@  static void update_backref_node(struct btrfs_backref_cache *cache,
 	struct rb_node *rb_node;
 	rb_erase(&node->rb_node, &cache->rb_root);
 	node->bytenr = bytenr;
-	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
+	rb_node = simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 	if (rb_node)
 		backref_tree_panic(rb_node, -EEXIST, bytenr);
 }
@@ -598,7 +553,7 @@  struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 
 	ASSERT(rc);
 	spin_lock(&rc->reloc_root_tree.lock);
-	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
+	rb_node = simple_search(&rc->reloc_root_tree.rb_root, bytenr);
 	if (rb_node) {
 		node = rb_entry(rb_node, struct mapping_node, rb_node);
 		root = (struct btrfs_root *)node->data;
@@ -666,7 +621,7 @@  static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
 	if (!edge)
 		return -ENOMEM;
 
-	rb_node = tree_search(&cache->rb_root, ref_key->offset);
+	rb_node = simple_search(&cache->rb_root, ref_key->offset);
 	if (!rb_node) {
 		/* Parent node not yet cached */
 		upper = alloc_backref_node(cache, ref_key->offset,
@@ -788,7 +743,7 @@  static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
 		}
 
 		eb = path->nodes[level];
-		rb_node = tree_search(&cache->rb_root, eb->start);
+		rb_node = simple_search(&cache->rb_root, eb->start);
 		if (!rb_node) {
 			upper = alloc_backref_node(cache, eb->start,
 						   lower->level + 1);
@@ -994,8 +949,8 @@  static int finish_upper_links(struct btrfs_backref_cache *cache,
 
 	/* Insert this node to cache if it's not cowonly */
 	if (!start->cowonly) {
-		rb_node = tree_insert(&cache->rb_root, start->bytenr,
-				      &start->rb_node);
+		rb_node = simple_insert(&cache->rb_root, start->bytenr,
+					&start->rb_node);
 		if (rb_node)
 			backref_tree_panic(rb_node, -EEXIST, start->bytenr);
 		list_add_tail(&start->lower, &cache->leaves);
@@ -1062,8 +1017,8 @@  static int finish_upper_links(struct btrfs_backref_cache *cache,
 
 		/* Only cache non-cowonly (subvolume trees) tree blocks */
 		if (!upper->cowonly) {
-			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
-					      &upper->rb_node);
+			rb_node = simple_insert(&cache->rb_root, upper->bytenr,
+						&upper->rb_node);
 			if (rb_node) {
 				backref_tree_panic(rb_node, -EEXIST,
 						   upper->bytenr);
@@ -1310,7 +1265,7 @@  static int clone_backref_node(struct btrfs_trans_handle *trans,
 	if (cache->last_trans > 0)
 		update_backref_cache(trans, cache);
 
-	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
+	rb_node = simple_search(&cache->rb_root, src->commit_root->start);
 	if (rb_node) {
 		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
 		if (node->detached)
@@ -1320,8 +1275,8 @@  static int clone_backref_node(struct btrfs_trans_handle *trans,
 	}
 
 	if (!node) {
-		rb_node = tree_search(&cache->rb_root,
-				      reloc_root->commit_root->start);
+		rb_node = simple_search(&cache->rb_root,
+					reloc_root->commit_root->start);
 		if (rb_node) {
 			node = rb_entry(rb_node, struct btrfs_backref_node,
 					rb_node);
@@ -1354,8 +1309,8 @@  static int clone_backref_node(struct btrfs_trans_handle *trans,
 		list_add_tail(&new_node->lower, &cache->leaves);
 	}
 
-	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
-			      &new_node->rb_node);
+	rb_node = simple_insert(&cache->rb_root, new_node->bytenr,
+				&new_node->rb_node);
 	if (rb_node)
 		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
 
@@ -1395,8 +1350,8 @@  static int __must_check __add_reloc_root(struct btrfs_root *root)
 	node->data = root;
 
 	spin_lock(&rc->reloc_root_tree.lock);
-	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
-			      node->bytenr, &node->rb_node);
+	rb_node = simple_insert(&rc->reloc_root_tree.rb_root,
+				node->bytenr, &node->rb_node);
 	spin_unlock(&rc->reloc_root_tree.lock);
 	if (rb_node) {
 		btrfs_panic(fs_info, -EEXIST,
@@ -1422,8 +1377,8 @@  static void __del_reloc_root(struct btrfs_root *root)
 
 	if (rc && root->node) {
 		spin_lock(&rc->reloc_root_tree.lock);
-		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-				      root->commit_root->start);
+		rb_node = simple_search(&rc->reloc_root_tree.rb_root,
+					root->commit_root->start);
 		if (rb_node) {
 			node = rb_entry(rb_node, struct mapping_node, rb_node);
 			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1466,8 +1421,8 @@  static int __update_reloc_root(struct btrfs_root *root)
 	struct reloc_control *rc = fs_info->reloc_ctl;
 
 	spin_lock(&rc->reloc_root_tree.lock);
-	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-			      root->commit_root->start);
+	rb_node = simple_search(&rc->reloc_root_tree.rb_root,
+				root->commit_root->start);
 	if (rb_node) {
 		node = rb_entry(rb_node, struct mapping_node, rb_node);
 		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1480,8 +1435,8 @@  static int __update_reloc_root(struct btrfs_root *root)
 
 	spin_lock(&rc->reloc_root_tree.lock);
 	node->bytenr = root->node->start;
-	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
-			      node->bytenr, &node->rb_node);
+	rb_node = simple_insert(&rc->reloc_root_tree.rb_root,
+				node->bytenr, &node->rb_node);
 	spin_unlock(&rc->reloc_root_tree.lock);
 	if (rb_node)
 		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
@@ -3624,7 +3579,7 @@  static int add_tree_block(struct reloc_control *rc,
 	block->level = level;
 	block->key_ready = 0;
 
-	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
+	rb_node = simple_insert(blocks, block->bytenr, &block->rb_node);
 	if (rb_node)
 		backref_tree_panic(rb_node, -EEXIST, block->bytenr);
 
@@ -3647,7 +3602,7 @@  static int __add_tree_block(struct reloc_control *rc,
 	if (tree_block_processed(bytenr, rc))
 		return 0;
 
-	if (tree_search(blocks, bytenr))
+	if (simple_search(blocks, bytenr))
 		return 0;
 
 	path = btrfs_alloc_path();