[1/6] btrfs: add btrfs_delete_ref_head helper

Message ID 20181121185912.24288-2-josef@toxicpanda.com (mailing list archive)
State New, archived
Series Delayed refs rsv

Commit Message

Josef Bacik Nov. 21, 2018, 6:59 p.m. UTC
From: Josef Bacik <jbacik@fb.com>

We do this dance in cleanup_ref_head and check_ref_cleanup; unify it
into a helper and clean up the calling functions.

Signed-off-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
---
 fs/btrfs/delayed-ref.c | 14 ++++++++++++++
 fs/btrfs/delayed-ref.h |  3 ++-
 fs/btrfs/extent-tree.c | 22 +++-------------------
 3 files changed, 19 insertions(+), 20 deletions(-)

Comments

Nikolay Borisov Nov. 22, 2018, 9:12 a.m. UTC | #1
On 21.11.18 at 20:59, Josef Bacik wrote:
> From: Josef Bacik <jbacik@fb.com>
> 
> We do this dance in cleanup_ref_head and check_ref_cleanup; unify it
> into a helper and clean up the calling functions.
> 
> Signed-off-by: Josef Bacik <jbacik@fb.com>
> Reviewed-by: Omar Sandoval <osandov@fb.com>
> ---
> [...]
> 
> diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> index 9301b3ad9217..b3e4c9fcb664 100644
> --- a/fs/btrfs/delayed-ref.c
> +++ b/fs/btrfs/delayed-ref.c
> @@ -400,6 +400,20 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
>  	return head;
>  }
>  
> +void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
> +			   struct btrfs_delayed_ref_head *head)
> +{
> +	lockdep_assert_held(&delayed_refs->lock);
> +	lockdep_assert_held(&head->lock);
> +
> +	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
> +	RB_CLEAR_NODE(&head->href_node);
> +	atomic_dec(&delayed_refs->num_entries);
> +	delayed_refs->num_heads--;
> +	if (head->processing == 0)
> +		delayed_refs->num_heads_ready--;

The num_heads_ready decrement will never execute in cleanup_ref_head,
since processing == 0 holds only when the ref head has been unselected.
Perhaps those 2 lines shouldn't be in this function? I find it a bit
confusing that when processing is 0 we decrement num_heads_ready in
check_ref_cleanup, but in unselect_delayed_ref_head we set processing
to 0 and increment it.
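
For reference, unselect_delayed_ref_head looks roughly like this (a
sketch paraphrased from the code this series is based on, not a
verbatim copy):

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	/* Make the head selectable again. */
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

So processing == 0 plus the num_heads_ready increment is what marks a
head as selectable again, which is why the decrement above can never
fire in the cleanup_ref_head path.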

> +}
> +
> [...]
> 
> @@ -6984,22 +6981,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>  	if (!mutex_trylock(&head->mutex))
>  		goto out;
>  
> -	/*
> -	 * at this point we have a head with no other entries.  Go
> -	 * ahead and process it.
> -	 */
> -	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
> -	RB_CLEAR_NODE(&head->href_node);
> -	atomic_dec(&delayed_refs->num_entries);
> -
> -	/*
> -	 * we don't take a ref on the node because we're removing it from the
> -	 * tree, so we just steal the ref the tree was holding.
> -	 */
> -	delayed_refs->num_heads--;
> -	if (head->processing == 0)
> -		delayed_refs->num_heads_ready--;
> +	btrfs_delete_ref_head(delayed_refs, head);
>  	head->processing = 0;

Something is fishy here: before, the code checked for processing == 0
and then also set it to 0?

> +
>  	spin_unlock(&head->lock);
>  	spin_unlock(&delayed_refs->lock);
>  
>
Nikolay Borisov Nov. 22, 2018, 9:42 a.m. UTC | #2
On 22.11.18 at 11:12, Nikolay Borisov wrote:
> 
> On 21.11.18 at 20:59, Josef Bacik wrote:
>> [...]
>> @@ -6984,22 +6981,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>> [...]
>> +	btrfs_delete_ref_head(delayed_refs, head);
>>  	head->processing = 0;

On closer inspection I think here we can do:

ASSERT(head->processing == 0), because at that point we've taken the
head->lock spinlock, which is held during ordinary delayed refs
processing (in __btrfs_run_delayed_refs) while the head is selected
(and processing is 1). So head->processing == 0 here is, I think, a
hard invariant of the code. The decrement here should pair with the
increment made when the head was initially added to the tree.

In cleanup_ref_head we never need to worry about num_heads_ready,
since it has already been decremented by btrfs_select_ref_head.

As a matter of fact this counter is not used anywhere, so we might as
well just remove it.
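
Sketched out, what I'm suggesting at the check_ref_cleanup call site
would be (hypothetical, not what the posted patch does):

	btrfs_delete_ref_head(delayed_refs, head);
	/*
	 * Nobody can have selected this head while we hold head->lock,
	 * so processing must still be 0 here; assert that instead of
	 * resetting it.
	 */
	ASSERT(head->processing == 0);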

David Sterba Nov. 23, 2018, 1:45 p.m. UTC | #3
On Thu, Nov 22, 2018 at 11:42:28AM +0200, Nikolay Borisov wrote:
> [...]
> In cleanup_ref_head we never need to worry about num_heads_ready,
> since it has already been decremented by btrfs_select_ref_head.
> 
> As a matter of fact this counter is not used anywhere, so we might as
> well just remove it.

The logic does not use it; there's only a WARN_ON in
btrfs_select_ref_head, and that's more of a debugging aid or assertion
that everything is fine. So the question is whether to keep it as a
consistency check (and add comments) or remove it and simplify the code.
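
For context, the check in question sits at the point where a head gets
selected; the tail of btrfs_select_ref_head is roughly (a sketch from
the code this series is based on, not a verbatim quote):

	head->processing = 1;
	/* Selecting a head consumes one "ready" slot. */
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr + head->num_bytes;
	return head;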
Nikolay Borisov Nov. 23, 2018, 1:50 p.m. UTC | #4
On 23.11.18 at 15:45, David Sterba wrote:
> On Thu, Nov 22, 2018 at 11:42:28AM +0200, Nikolay Borisov wrote:
>> [...]
>> As a matter of fact this counter is not used anywhere, so we might as
>> well just remove it.
> 
> The logic does not use it; there's only a WARN_ON in
> btrfs_select_ref_head, and that's more of a debugging aid or assertion
> that everything is fine. So the question is whether to keep it as a
> consistency check (and add comments) or remove it and simplify the code.

IMO it should go. A later patch ("btrfs: only track ref_heads in
delayed_ref_updates") pretty much tracks what this number used to
track.

Even for consistency I don't see much value brought by num_heads_ready.

Patch

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9301b3ad9217..b3e4c9fcb664 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -400,6 +400,20 @@  struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 	return head;
 }
 
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head)
+{
+	lockdep_assert_held(&delayed_refs->lock);
+	lockdep_assert_held(&head->lock);
+
+	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+	RB_CLEAR_NODE(&head->href_node);
+	atomic_dec(&delayed_refs->num_entries);
+	delayed_refs->num_heads--;
+	if (head->processing == 0)
+		delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
  *
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 8e20c5cb5404..d2af974f68a1 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -261,7 +261,8 @@  static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
 	mutex_unlock(&head->mutex);
 }
-
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head);
 
 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 		struct btrfs_delayed_ref_root *delayed_refs);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d242a1174e50..c36b3a42f2bb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2474,12 +2474,9 @@  static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 		spin_unlock(&delayed_refs->lock);
 		return 1;
 	}
-	delayed_refs->num_heads--;
-	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-	RB_CLEAR_NODE(&head->href_node);
+	btrfs_delete_ref_head(delayed_refs, head);
 	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);
-	atomic_dec(&delayed_refs->num_entries);
 
 	trace_run_delayed_ref_head(fs_info, head, 0);
 
@@ -6984,22 +6981,9 @@  static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	if (!mutex_trylock(&head->mutex))
 		goto out;
 
-	/*
-	 * at this point we have a head with no other entries.  Go
-	 * ahead and process it.
-	 */
-	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-	RB_CLEAR_NODE(&head->href_node);
-	atomic_dec(&delayed_refs->num_entries);
-
-	/*
-	 * we don't take a ref on the node because we're removing it from the
-	 * tree, so we just steal the ref the tree was holding.
-	 */
-	delayed_refs->num_heads--;
-	if (head->processing == 0)
-		delayed_refs->num_heads_ready--;
+	btrfs_delete_ref_head(delayed_refs, head);
 	head->processing = 0;
+
 	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);