
[6/6] btrfs: fix truncate throttling

Message ID: 20181121185912.24288-7-josef@toxicpanda.com
State: New, archived
Series: Delayed refs rsv

Commit Message

Josef Bacik Nov. 21, 2018, 6:59 p.m. UTC
We have a bunch of magic to make sure we're throttling delayed refs when
truncating a file.  Now that we have a delayed refs rsv and a mechanism
for refilling that reserve, simply use that instead of all of this magic.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/inode.c | 79 ++++++++++++--------------------------------------------
 1 file changed, 17 insertions(+), 62 deletions(-)
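
For reference, the btrfs_throttle_delayed_refs() helper used below is
introduced earlier in this series and is not part of this patch. A minimal
sketch of what such a refill-based throttle could look like, assuming a
btrfs_delayed_refs_rsv_refill() helper that tops up the delayed refs rsv
(the refill call and its placement are assumptions for illustration, not
the author's exact implementation):

/*
 * Sketch only: the real helper lands earlier in this series; the refill
 * call here is an assumption for illustration.
 */
int btrfs_throttle_delayed_refs(struct btrfs_fs_info *fs_info,
				enum btrfs_reserve_flush_enum flush)
{
	/*
	 * Try to top up the delayed refs rsv to cover the refs queued so
	 * far.  With BTRFS_RESERVE_NO_FLUSH this cannot block on flushing,
	 * so a failure simply tells the caller to back off and retry from a
	 * context that is allowed to flush.
	 */
	return btrfs_delayed_refs_rsv_refill(fs_info, flush);
}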

Comments

Nikolay Borisov Nov. 26, 2018, 9:44 a.m. UTC | #1
On 21.11.18 at 20:59, Josef Bacik wrote:
> We have a bunch of magic to make sure we're throttling delayed refs when
> truncating a file.  Now that we have a delayed refs rsv and a mechanism
> for refilling that reserve, simply use that instead of all of this magic.
> 
> Signed-off-by: Josef Bacik <josef@toxicpanda.com>

Reviewed-by: Nikolay Borisov <nborisov@suse.com>


Patch

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8532a2eb56d1..cae30f6c095f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4437,31 +4437,6 @@  static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	return err;
 }
 
-static int truncate_space_check(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				u64 bytes_deleted)
-{
-	struct btrfs_fs_info *fs_info = root->fs_info;
-	int ret;
-
-	/*
-	 * This is only used to apply pressure to the enospc system, we don't
-	 * intend to use this reservation at all.
-	 */
-	bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
-	bytes_deleted *= fs_info->nodesize;
-	ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
-				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
-	if (!ret) {
-		trace_btrfs_space_reservation(fs_info, "transaction",
-					      trans->transid,
-					      bytes_deleted, 1);
-		trans->bytes_reserved += bytes_deleted;
-	}
-	return ret;
-
-}
-
 /*
  * Return this if we need to call truncate_block for the last bit of the
  * truncate.
@@ -4506,7 +4481,6 @@  int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	u64 bytes_deleted = 0;
 	bool be_nice = false;
 	bool should_throttle = false;
-	bool should_end = false;
 
 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
 
@@ -4719,15 +4693,7 @@  int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 				btrfs_abort_transaction(trans, ret);
 				break;
 			}
-			if (btrfs_should_throttle_delayed_refs(trans))
-				btrfs_async_run_delayed_refs(fs_info,
-					trans->delayed_ref_updates * 2,
-					trans->transid, 0);
 			if (be_nice) {
-				if (truncate_space_check(trans, root,
-							 extent_num_bytes)) {
-					should_end = true;
-				}
 				if (btrfs_should_throttle_delayed_refs(trans))
 					should_throttle = true;
 			}
@@ -4738,7 +4704,7 @@  int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 
 		if (path->slots[0] == 0 ||
 		    path->slots[0] != pending_del_slot ||
-		    should_throttle || should_end) {
+		    should_throttle) {
 			if (pending_del_nr) {
 				ret = btrfs_del_items(trans, root, path,
 						pending_del_slot,
@@ -4750,23 +4716,24 @@  int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 				pending_del_nr = 0;
 			}
 			btrfs_release_path(path);
-			if (should_throttle) {
-				unsigned long updates = trans->delayed_ref_updates;
-				if (updates) {
-					trans->delayed_ref_updates = 0;
-					ret = btrfs_run_delayed_refs(trans,
-								   updates * 2);
-					if (ret)
-						break;
-				}
-			}
+
 			/*
-			 * if we failed to refill our space rsv, bail out
-			 * and let the transaction restart
+			 * We can generate a lot of delayed refs, so we need to
+			 * throttle every once and a while and make sure we're
+			 * adding enough space to keep up with the work we are
+			 * generating.  Since we hold a transaction here we
+			 * can't flush, and we don't want to FLUSH_LIMIT because
+			 * we could have generated too many delayed refs to
+			 * actually allocate, so just bail if we're short and
+			 * let the normal reservation dance happen higher up.
 			 */
-			if (should_end) {
-				ret = -EAGAIN;
-				break;
+			if (should_throttle) {
+				ret = btrfs_throttle_delayed_refs(fs_info,
+							BTRFS_RESERVE_NO_FLUSH);
+				if (ret) {
+					ret = -EAGAIN;
+					break;
+				}
 			}
 			goto search_again;
 		} else {
@@ -4792,18 +4759,6 @@  int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_free_path(path);
-
-	if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
-		unsigned long updates = trans->delayed_ref_updates;
-		int err;
-
-		if (updates) {
-			trans->delayed_ref_updates = 0;
-			err = btrfs_run_delayed_refs(trans, updates * 2);
-			if (err)
-				ret = err;
-		}
-	}
 	return ret;
 }
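
With the -EAGAIN bail-out above, making forward progress becomes the
caller's job. Below is a condensed, hypothetical sketch (not the actual
btrfs_truncate() code, and truncate_with_restart() is an invented name) of
how a caller could react: end the transaction so delayed refs get run and
space is reclaimed, then start a fresh one and continue the truncate.

/* Hypothetical, heavily condensed caller-side loop for illustration only. */
static int truncate_with_restart(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root, struct inode *inode,
				 u64 new_size)
{
	struct btrfs_trans_handle *trans;
	int ret;

	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	while (1) {
		ret = btrfs_truncate_inode_items(trans, root, inode, new_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN)
			break;
		/*
		 * The delayed refs rsv could not be refilled without
		 * flushing.  End the transaction so delayed refs get run and
		 * space is reclaimed, then start a new one and continue
		 * where the truncate left off.
		 */
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
	}

	btrfs_end_transaction(trans);
	return ret;
}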