
[01/11] btrfs: Perform pinned cleanup directly in btrfs_destroy_delayed_refs

Message ID: 20200120140918.15647-2-nborisov@suse.com
State: New, archived
Series: Make pinned extents tracking per-transaction

Commit Message

Nikolay Borisov Jan. 20, 2020, 2:09 p.m. UTC
Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic
for making pinned extents tracking per-transaction, since a
btrfs_trans_handle cannot be passed to btrfs_pin_extent in this context.
Additionally, the delayed ref heads pinned in btrfs_destroy_delayed_refs
are going to be processed again very shortly afterwards, in
btrfs_destroy_pinned_extent.

To enable btrfs_pin_extent to take a btrfs_trans_handle, simply open code
it in btrfs_destroy_delayed_refs and call btrfs_error_unpin_extent_range
on the range. This lets us do less work in btrfs_destroy_pinned_extent
and leaves btrfs_pin_extent being called only in contexts which have a
valid btrfs_trans_handle.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 fs/btrfs/disk-io.c | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
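
For context, the btrfs_pin_extent helper being open coded here looked roughly
like this at the time (paraphrased for illustration; the exact code lives in
fs/btrfs/extent-tree.c and may differ in detail):

int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	/*
	 * Does the space_info/block group accounting that the patch below
	 * open codes, and also marks the range in fs_info->pinned_extents
	 * for later processing by the unpin code.
	 */
	pin_down_extent(cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}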

Comments

Josef Bacik Jan. 21, 2020, 2:22 p.m. UTC | #1
On 1/20/20 9:09 AM, Nikolay Borisov wrote:
> Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic
> for making pinned extents tracking per-transaction since
> btrfs_trans_handle cannot be passed to btrfs_pin_extent in this context.
> Additionally delayed refs heads pinned in btrfs_destroy_delayed_refs
> are going to be handled very closely, in btrfs_destroy_pinned_extent.
> 
> To enable btrfs_pin_extent to take btrfs_trans_handle simply open code
> it in btrfs_destroy_delayed_refs and call btrfs_error_unpin_extent_range
> on the range. This enables us to do less work in
> btrfs_destroy_pinned_extent and leaves btrfs_pin_extent being called in
> contexts which have a valid btrfs_trans_handle.
> 
> Signed-off-by: Nikolay Borisov <nborisov@suse.com>

Reviewed-by: Josef Bacik <josef@toxicpanda.com>

Thanks,

Josef
David Sterba Jan. 30, 2020, 1:51 p.m. UTC | #2
On Mon, Jan 20, 2020 at 04:09:08PM +0200, Nikolay Borisov wrote:
> Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic
> for making pinned extents tracking per-transaction since
> btrfs_trans_handle cannot be passed to btrfs_pin_extent in this context.
> Additionally delayed refs heads pinned in btrfs_destroy_delayed_refs
> are going to be handled very closely, in btrfs_destroy_pinned_extent.
> 
> To enable btrfs_pin_extent to take btrfs_trans_handle simply open code
> it in btrfs_destroy_delayed_refs and call btrfs_error_unpin_extent_range
> on the range. This enables us to do less work in
> btrfs_destroy_pinned_extent and leaves btrfs_pin_extent being called in
> contexts which have a valid btrfs_trans_handle.
> 
> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
> ---
>  fs/btrfs/disk-io.c | 26 +++++++++++++++++++++++---
>  1 file changed, 23 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index 5ce2801f8388..9209c7b0997c 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -42,6 +42,7 @@
>  #include "ref-verify.h"
>  #include "block-group.h"
>  #include "discard.h"
> +#include "space-info.h"
>  
>  #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
>  				 BTRFS_HEADER_FLAG_RELOC |\
> @@ -4261,9 +4262,28 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
>  		spin_unlock(&delayed_refs->lock);
>  		mutex_unlock(&head->mutex);
>  
> -		if (pin_bytes)
> -			btrfs_pin_extent(fs_info, head->bytenr,
> -					 head->num_bytes, 1);
> +		if (pin_bytes) {
> +			struct btrfs_block_group *cache;
> +			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
> +			BUG_ON(!cache);

So this BUG_ON is propagated from btrfs_pin_extent but not turned into
proper error handling in any of the followup patches.

> +
> +			spin_lock(&cache->space_info->lock);
> +			spin_lock(&cache->lock);
> +			cache->pinned += head->num_bytes;
> +			btrfs_space_info_update_bytes_pinned(fs_info,
> +					cache->space_info, head->num_bytes);
> +			cache->reserved -= head->num_bytes;
> +			cache->space_info->bytes_reserved -= head->num_bytes;
> +			spin_unlock(&cache->lock);
> +			spin_unlock(&cache->space_info->lock);
> +			percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
> +				    head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
> +
> +			btrfs_put_block_group(cache);
> +
> +			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
> +						       head->bytenr + head->num_bytes - 1);

This should also handle errors

> +		}
>  		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
>  		btrfs_put_delayed_ref_head(head);
>  		cond_resched();
> -- 
> 2.17.1
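
As an illustration of the proper error handling David asks about above, the
lookup could hypothetically bail out instead of crashing (a sketch only, not
part of this series; 'ret' and its propagation are assumptions):

		if (pin_bytes) {
			struct btrfs_block_group *cache;

			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
			if (!cache) {
				/*
				 * Hypothetical: report and return an error
				 * instead of BUG_ON(); btrfs_destroy_delayed_refs
				 * would then have to propagate 'ret'.
				 */
				btrfs_err(fs_info,
	"no block group found for bytenr %llu during delayed ref cleanup",
					  head->bytenr);
				ret = -ENOENT;
			} else {
				/* ... open coded pinning and unpin as in the patch ... */
				btrfs_put_block_group(cache);
			}
		}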
Nikolay Borisov Feb. 5, 2020, 8:32 a.m. UTC | #3
On 30.01.20 15:51, David Sterba wrote:
> On Mon, Jan 20, 2020 at 04:09:08PM +0200, Nikolay Borisov wrote:
>> Having btrfs_destroy_delayed_refs call btrfs_pin_extent is problematic
>> for making pinned extents tracking per-transaction since
>> btrfs_trans_handle cannot be passed to btrfs_pin_extent in this context.
>> Additionally delayed refs heads pinned in btrfs_destroy_delayed_refs
>> are going to be handled very closely, in btrfs_destroy_pinned_extent.
>>
>> To enable btrfs_pin_extent to take btrfs_trans_handle simply open code
>> it in btrfs_destroy_delayed_refs and call btrfs_error_unpin_extent_range
>> on the range. This enables us to do less work in
>> btrfs_destroy_pinned_extent and leaves btrfs_pin_extent being called in
>> contexts which have a valid btrfs_trans_handle.
>>
>> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
>> ---
>>  fs/btrfs/disk-io.c | 26 +++++++++++++++++++++++---
>>  1 file changed, 23 insertions(+), 3 deletions(-)
>>
>> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
>> index 5ce2801f8388..9209c7b0997c 100644
>> --- a/fs/btrfs/disk-io.c
>> +++ b/fs/btrfs/disk-io.c
>> @@ -42,6 +42,7 @@
>>  #include "ref-verify.h"
>>  #include "block-group.h"
>>  #include "discard.h"
>> +#include "space-info.h"
>>  
>>  #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
>>  				 BTRFS_HEADER_FLAG_RELOC |\
>> @@ -4261,9 +4262,28 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
>>  		spin_unlock(&delayed_refs->lock);
>>  		mutex_unlock(&head->mutex);
>>  
>> -		if (pin_bytes)
>> -			btrfs_pin_extent(fs_info, head->bytenr,
>> -					 head->num_bytes, 1);
>> +		if (pin_bytes) {
>> +			struct btrfs_block_group *cache;
>> +			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
>> +			BUG_ON(!cache);
> 
> So this BUG_ON is propagated from btrfs_pin_extent but not turned into
> proper error handling in any of the followup patches.

This BUG_ON should stay, or it could be turned into an ASSERT. It's
there to catch a racing block group removal, i.e. if it triggers it's a
logic error, a real bug.
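
For reference, the ASSERT() variant mentioned here is a one-line change
(sketch only):

			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
			/* A racing block group removal here would be a logic bug */
			ASSERT(cache);

One trade-off: ASSERT() compiles away without CONFIG_BTRFS_ASSERT, in which
case a NULL cache would simply be dereferenced a few lines later, whereas
BUG_ON() always fails loudly right at the lookup.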

> 

<snip>

>> +			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
>> +						       head->bytenr + head->num_bytes - 1);
> 
> This should also handle errors

Turns out unpin_extent_range can just return void, since it cannot
fail, so there's nothing to handle.
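
In that case the wrapper can end up looking roughly like this (a sketch,
assuming unpin_extent_range() is indeed reworked so it cannot fail):

void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				    u64 start, u64 end)
{
	/* Nothing here can fail, so there is no return value to check */
	unpin_extent_range(fs_info, start, end, false);
}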

> 
>> +		}
>>  		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
>>  		btrfs_put_delayed_ref_head(head);
>>  		cond_resched();
>> -- 
>> 2.17.1

Patch

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5ce2801f8388..9209c7b0997c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -42,6 +42,7 @@ 
 #include "ref-verify.h"
 #include "block-group.h"
 #include "discard.h"
+#include "space-info.h"
 
 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
 				 BTRFS_HEADER_FLAG_RELOC |\
@@ -4261,9 +4262,28 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 		spin_unlock(&delayed_refs->lock);
 		mutex_unlock(&head->mutex);
 
-		if (pin_bytes)
-			btrfs_pin_extent(fs_info, head->bytenr,
-					 head->num_bytes, 1);
+		if (pin_bytes) {
+			struct btrfs_block_group *cache;
+			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+			BUG_ON(!cache);
+
+			spin_lock(&cache->space_info->lock);
+			spin_lock(&cache->lock);
+			cache->pinned += head->num_bytes;
+			btrfs_space_info_update_bytes_pinned(fs_info,
+					cache->space_info, head->num_bytes);
+			cache->reserved -= head->num_bytes;
+			cache->space_info->bytes_reserved -= head->num_bytes;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+			percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+				    head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+			btrfs_put_block_group(cache);
+
+			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+						       head->bytenr + head->num_bytes - 1);
+		}
 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
 		btrfs_put_delayed_ref_head(head);
 		cond_resched();