diff mbox series

btrfs: use rb_entry_safe() where possible to simplify code

Message ID 20250327161918.1406913-1-dsterba@suse.com (mailing list archive)
State New
Headers show
Series btrfs: use rb_entry_safe() where possible to simplify code | expand

Commit Message

David Sterba March 27, 2025, 4:19 p.m. UTC
Simplify conditionally reading an rb_entry(): there's the
rb_entry_safe() helper that checks the node pointer for NULL, so we
don't have to write the check explicitly.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/defrag.c         |  5 +----
 fs/btrfs/delayed-inode.c  | 27 ++++++---------------------
 fs/btrfs/delayed-ref.c    |  7 ++-----
 fs/btrfs/extent-io-tree.c | 10 ++--------
 fs/btrfs/extent_map.c     |  8 ++++----
 5 files changed, 15 insertions(+), 42 deletions(-)

Comments

Johannes Thumshirn March 31, 2025, 3:36 p.m. UTC | #1
On 27.03.25 17:19, David Sterba wrote:
> diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
> index 7f46abbd6311b2..d62c36a0b7ba41 100644
> --- a/fs/btrfs/extent_map.c
> +++ b/fs/btrfs/extent_map.c
> @@ -361,8 +361,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
>   
>   	if (em->start != 0) {
>   		rb = rb_prev(&em->rb_node);
> -		if (rb)
> -			merge = rb_entry(rb, struct extent_map, rb_node);
> +		merge = rb_entry_safe(rb, struct extent_map, rb_node);
> +
>   		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
>   			em->start = merge->start;
>   			em->len += merge->len;
> @@ -379,8 +379,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
>   	}
>   
>   	rb = rb_next(&em->rb_node);
> -	if (rb)
> -		merge = rb_entry(rb, struct extent_map, rb_node);
> +	merge = rb_entry_safe(rb, struct extent_map, rb_node);
> +
>   	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
>   		em->len += merge->len;
>   		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)


Nothing to do with your patch, but how does this even work? If 'merge' 
is NULL, we pass it into can_merge_extent_map(), which does not check 
whether it is NULL and goes ahead and dereferences ->flags right at the beginning.
David Sterba March 31, 2025, 9:10 p.m. UTC | #2
On Mon, Mar 31, 2025 at 03:36:15PM +0000, Johannes Thumshirn wrote:
> On 27.03.25 17:19, David Sterba wrote:
> > diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
> > index 7f46abbd6311b2..d62c36a0b7ba41 100644
> > --- a/fs/btrfs/extent_map.c
> > +++ b/fs/btrfs/extent_map.c
> > @@ -361,8 +361,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
> >   
> >   	if (em->start != 0) {
> >   		rb = rb_prev(&em->rb_node);
> > -		if (rb)
> > -			merge = rb_entry(rb, struct extent_map, rb_node);
> > +		merge = rb_entry_safe(rb, struct extent_map, rb_node);
> > +
> >   		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
> >   			em->start = merge->start;
> >   			em->len += merge->len;
> > @@ -379,8 +379,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
> >   	}
> >   
> >   	rb = rb_next(&em->rb_node);
> > -	if (rb)
> > -		merge = rb_entry(rb, struct extent_map, rb_node);
> > +	merge = rb_entry_safe(rb, struct extent_map, rb_node);
> > +
> >   	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
> >   		em->len += merge->len;
> >   		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
> 
> 
> Nothing to do with your patch, but how does this even work? If 'merge' 
> is NULL we pass it into can_merge_extent_map() which does not check if 
> it is NULL and goes ahead and dereferences ->flags right in the beginning.

If merge is NULL, then rb was also NULL, and rb is checked in the 'if'
before merge is passed to can_merge_extent_map().
diff mbox series

Patch

diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 8195e5fe355b00..11d27a9c5bd34d 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -191,10 +191,7 @@  static struct inode_defrag *btrfs_pick_defrag_inode(
 
 	if (parent && compare_inode_defrag(&tmp, entry) > 0) {
 		parent = rb_next(parent);
-		if (parent)
-			entry = rb_entry(parent, struct inode_defrag, rb_node);
-		else
-			entry = NULL;
+		entry = rb_entry_safe(parent, struct inode_defrag, rb_node);
 	}
 out:
 	if (entry)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3f1551d8a5c68b..c49bf8f2889dda 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -454,40 +454,25 @@  static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *item = NULL;
+	struct rb_node *p = rb_first_cached(&delayed_node->ins_root);
 
-	p = rb_first_cached(&delayed_node->ins_root);
-	if (p)
-		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return item;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *item = NULL;
+	struct rb_node *p = rb_first_cached(&delayed_node->del_root);
 
-	p = rb_first_cached(&delayed_node->del_root);
-	if (p)
-		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return item;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 						struct btrfs_delayed_item *item)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *next = NULL;
+	struct rb_node *p = rb_next(&item->rb_node);
 
-	p = rb_next(&item->rb_node);
-	if (p)
-		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return next;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 98c5b61dabe88c..343a452a9f9fa5 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -331,12 +331,9 @@  static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 		struct btrfs_delayed_ref_node *ins)
 {
 	struct rb_node *node = &ins->ref_node;
-	struct rb_node *exist;
+	struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
 
-	exist = rb_find_add_cached(node, root, cmp_refs_node);
-	if (exist)
-		return rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
-	return NULL;
+	return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
 }
 
 static struct btrfs_delayed_ref_head *find_first_ref_head(
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 13de6af279e526..db7503a91b3151 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -222,20 +222,14 @@  static inline struct extent_state *next_state(struct extent_state *state)
 {
 	struct rb_node *next = rb_next(&state->rb_node);
 
-	if (next)
-		return rb_entry(next, struct extent_state, rb_node);
-	else
-		return NULL;
+	return rb_entry_safe(next, struct extent_state, rb_node);
 }
 
 static inline struct extent_state *prev_state(struct extent_state *state)
 {
 	struct rb_node *next = rb_prev(&state->rb_node);
 
-	if (next)
-		return rb_entry(next, struct extent_state, rb_node);
-	else
-		return NULL;
+	return rb_entry_safe(next, struct extent_state, rb_node);
 }
 
 /*
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 7f46abbd6311b2..d62c36a0b7ba41 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -361,8 +361,8 @@  static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
 
 	if (em->start != 0) {
 		rb = rb_prev(&em->rb_node);
-		if (rb)
-			merge = rb_entry(rb, struct extent_map, rb_node);
+		merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
 		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
 			em->start = merge->start;
 			em->len += merge->len;
@@ -379,8 +379,8 @@  static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
 	}
 
 	rb = rb_next(&em->rb_node);
-	if (rb)
-		merge = rb_entry(rb, struct extent_map, rb_node);
+	merge = rb_entry_safe(rb, struct extent_map, rb_node);
+
 	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
 		em->len += merge->len;
 		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)