[15/22] btrfs: limit max discard size for async discard
diff mbox series

Message ID 396e2d043068b620574892ebfc4b9f5c77b41618.1576195673.git.dennis@kernel.org
State New
Headers show
Series
  • btrfs: async discard support
Related show

Commit Message

Dennis Zhou Dec. 14, 2019, 12:22 a.m. UTC
Throttle the maximum size of a discard so that we can provide an upper
bound for the rate of async discard. While the block layer is able to
split discards into the appropriate sized discards, we want to be able
to account more accurately the rate at which we are consuming ncq slots
as well as limit the upper bound of work for a discard.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/discard.h          |  5 ++++
 fs/btrfs/free-space-cache.c | 48 +++++++++++++++++++++++++++----------
 2 files changed, 41 insertions(+), 12 deletions(-)

Comments

David Sterba Dec. 30, 2019, 6 p.m. UTC | #1
On Fri, Dec 13, 2019 at 04:22:24PM -0800, Dennis Zhou wrote:
> Throttle the maximum size of a discard so that we can provide an upper
> bound for the rate of async discard. While the block layer is able to
> split discards into the appropriate sized discards, we want to be able
> to account more accurately the rate at which we are consuming ncq slots
> as well as limit the upper bound of work for a discard.
> 
> Signed-off-by: Dennis Zhou <dennis@kernel.org>
> Reviewed-by: Josef Bacik <josef@toxicpanda.com>
> ---
>  fs/btrfs/discard.h          |  5 ++++
>  fs/btrfs/free-space-cache.c | 48 +++++++++++++++++++++++++++----------
>  2 files changed, 41 insertions(+), 12 deletions(-)
> 
> diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> index 3ed6855e24da..cb6ef0ab879d 100644
> --- a/fs/btrfs/discard.h
> +++ b/fs/btrfs/discard.h
> @@ -3,10 +3,15 @@
>  #ifndef BTRFS_DISCARD_H
>  #define BTRFS_DISCARD_H
>  
> +#include <linux/sizes.h>
> +
>  struct btrfs_fs_info;
>  struct btrfs_discard_ctl;
>  struct btrfs_block_group;
>  
> +/* Discard size limits. */
> +#define BTRFS_ASYNC_DISCARD_MAX_SIZE	(SZ_64M)
> +
>  /* Work operations. */
>  void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
>  			       struct btrfs_block_group *block_group);
> diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> index 57df34480b93..0dbcea6c59f9 100644
> --- a/fs/btrfs/free-space-cache.c
> +++ b/fs/btrfs/free-space-cache.c
> @@ -3466,19 +3466,40 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
>  		if (entry->offset >= end)
>  			goto out_unlock;
>  
> -		extent_start = entry->offset;
> -		extent_bytes = entry->bytes;
> -		extent_trim_state = entry->trim_state;
> -		start = max(start, extent_start);
> -		bytes = min(extent_start + extent_bytes, end) - start;
> -		if (bytes < minlen) {
> -			spin_unlock(&ctl->tree_lock);
> -			mutex_unlock(&ctl->cache_writeout_mutex);
> -			goto next;
> -		}
> +		if (async) {
> +			start = extent_start = entry->offset;
> +			bytes = extent_bytes = entry->bytes;

Please avoid chained initializations; I'll fix that unless there are more
updates to this patch.

> +			extent_trim_state = entry->trim_state;
> +			if (bytes < minlen) {
> +				spin_unlock(&ctl->tree_lock);
> +				mutex_unlock(&ctl->cache_writeout_mutex);
> +				goto next;
> +			}
> +			unlink_free_space(ctl, entry);
> +			if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
> +				bytes = extent_bytes =
> +					BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				link_free_space(ctl, entry);
> +			} else {
> +				kmem_cache_free(btrfs_free_space_cachep, entry);
> +			}
> +		} else {
> +			extent_start = entry->offset;
> +			extent_bytes = entry->bytes;
> +			extent_trim_state = entry->trim_state;
> +			start = max(start, extent_start);
> +			bytes = min(extent_start + extent_bytes, end) - start;
> +			if (bytes < minlen) {
> +				spin_unlock(&ctl->tree_lock);
> +				mutex_unlock(&ctl->cache_writeout_mutex);
> +				goto next;
> +			}
>  
> -		unlink_free_space(ctl, entry);
> -		kmem_cache_free(btrfs_free_space_cachep, entry);
> +			unlink_free_space(ctl, entry);
> +			kmem_cache_free(btrfs_free_space_cachep, entry);
> +		}
>  
>  		spin_unlock(&ctl->tree_lock);
>  		trim_entry.start = extent_start;
> @@ -3643,6 +3664,9 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
>  			goto next;
>  		}
>  
> +		if (async && bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE)
> +			bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +
>  		bitmap_clear_bits(ctl, entry, start, bytes);
>  		if (entry->bytes == 0)
>  			free_bitmap(ctl, entry);
> -- 
> 2.17.1
David Sterba Dec. 30, 2019, 6:08 p.m. UTC | #2
On Fri, Dec 13, 2019 at 04:22:24PM -0800, Dennis Zhou wrote:
> Throttle the maximum size of a discard so that we can provide an upper
> bound for the rate of async discard. While the block layer is able to
> split discards into the appropriate sized discards, we want to be able
> to account more accurately the rate at which we are consuming ncq slots
> as well as limit the upper bound of work for a discard.
> 
> Signed-off-by: Dennis Zhou <dennis@kernel.org>
> Reviewed-by: Josef Bacik <josef@toxicpanda.com>
> ---
>  fs/btrfs/discard.h          |  5 ++++
>  fs/btrfs/free-space-cache.c | 48 +++++++++++++++++++++++++++----------
>  2 files changed, 41 insertions(+), 12 deletions(-)
> 
> diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> index 3ed6855e24da..cb6ef0ab879d 100644
> --- a/fs/btrfs/discard.h
> +++ b/fs/btrfs/discard.h
> @@ -3,10 +3,15 @@
>  #ifndef BTRFS_DISCARD_H
>  #define BTRFS_DISCARD_H
>  
> +#include <linux/sizes.h>
> +
>  struct btrfs_fs_info;
>  struct btrfs_discard_ctl;
>  struct btrfs_block_group;
>  
> +/* Discard size limits. */
> +#define BTRFS_ASYNC_DISCARD_MAX_SIZE	(SZ_64M)
> +
>  /* Work operations. */
>  void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
>  			       struct btrfs_block_group *block_group);
> diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> index 57df34480b93..0dbcea6c59f9 100644
> --- a/fs/btrfs/free-space-cache.c
> +++ b/fs/btrfs/free-space-cache.c
> @@ -3466,19 +3466,40 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
>  		if (entry->offset >= end)
>  			goto out_unlock;
>  
> -		extent_start = entry->offset;
> -		extent_bytes = entry->bytes;
> -		extent_trim_state = entry->trim_state;
> -		start = max(start, extent_start);
> -		bytes = min(extent_start + extent_bytes, end) - start;
> -		if (bytes < minlen) {
> -			spin_unlock(&ctl->tree_lock);
> -			mutex_unlock(&ctl->cache_writeout_mutex);
> -			goto next;
> -		}
> +		if (async) {
> +			start = extent_start = entry->offset;
> +			bytes = extent_bytes = entry->bytes;
> +			extent_trim_state = entry->trim_state;
> +			if (bytes < minlen) {
> +				spin_unlock(&ctl->tree_lock);
> +				mutex_unlock(&ctl->cache_writeout_mutex);
> +				goto next;
> +			}
> +			unlink_free_space(ctl, entry);
> +			if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
> +				bytes = extent_bytes =
> +					BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
> +				link_free_space(ctl, entry);
> +			} else {
> +				kmem_cache_free(btrfs_free_space_cachep, entry);
> +			}
> +		} else {

> +			extent_start = entry->offset;
> +			extent_bytes = entry->bytes;
> +			extent_trim_state = entry->trim_state;

This is common initialization for both the async and sync cases, so it could
be merged into a common block.

> +			start = max(start, extent_start);
> +			bytes = min(extent_start + extent_bytes, end) - start;
> +			if (bytes < minlen) {
> +				spin_unlock(&ctl->tree_lock);
> +				mutex_unlock(&ctl->cache_writeout_mutex);
> +				goto next;
> +			}
Dennis Zhou Jan. 2, 2020, 4:48 p.m. UTC | #3
On Mon, Dec 30, 2019 at 07:08:03PM +0100, David Sterba wrote:
> On Fri, Dec 13, 2019 at 04:22:24PM -0800, Dennis Zhou wrote:
> > Throttle the maximum size of a discard so that we can provide an upper
> > bound for the rate of async discard. While the block layer is able to
> > split discards into the appropriate sized discards, we want to be able
> > to account more accurately the rate at which we are consuming ncq slots
> > as well as limit the upper bound of work for a discard.
> > 
> > Signed-off-by: Dennis Zhou <dennis@kernel.org>
> > Reviewed-by: Josef Bacik <josef@toxicpanda.com>
> > ---
> >  fs/btrfs/discard.h          |  5 ++++
> >  fs/btrfs/free-space-cache.c | 48 +++++++++++++++++++++++++++----------
> >  2 files changed, 41 insertions(+), 12 deletions(-)
> > 
> > diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> > index 3ed6855e24da..cb6ef0ab879d 100644
> > --- a/fs/btrfs/discard.h
> > +++ b/fs/btrfs/discard.h
> > @@ -3,10 +3,15 @@
> >  #ifndef BTRFS_DISCARD_H
> >  #define BTRFS_DISCARD_H
> >  
> > +#include <linux/sizes.h>
> > +
> >  struct btrfs_fs_info;
> >  struct btrfs_discard_ctl;
> >  struct btrfs_block_group;
> >  
> > +/* Discard size limits. */
> > +#define BTRFS_ASYNC_DISCARD_MAX_SIZE	(SZ_64M)
> > +
> >  /* Work operations. */
> >  void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
> >  			       struct btrfs_block_group *block_group);
> > diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> > index 57df34480b93..0dbcea6c59f9 100644
> > --- a/fs/btrfs/free-space-cache.c
> > +++ b/fs/btrfs/free-space-cache.c
> > @@ -3466,19 +3466,40 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
> >  		if (entry->offset >= end)
> >  			goto out_unlock;
> >  
> > -		extent_start = entry->offset;
> > -		extent_bytes = entry->bytes;
> > -		extent_trim_state = entry->trim_state;
> > -		start = max(start, extent_start);
> > -		bytes = min(extent_start + extent_bytes, end) - start;
> > -		if (bytes < minlen) {
> > -			spin_unlock(&ctl->tree_lock);
> > -			mutex_unlock(&ctl->cache_writeout_mutex);
> > -			goto next;
> > -		}
> > +		if (async) {
> > +			start = extent_start = entry->offset;
> > +			bytes = extent_bytes = entry->bytes;
> > +			extent_trim_state = entry->trim_state;
> > +			if (bytes < minlen) {
> > +				spin_unlock(&ctl->tree_lock);
> > +				mutex_unlock(&ctl->cache_writeout_mutex);
> > +				goto next;
> > +			}
> > +			unlink_free_space(ctl, entry);
> > +			if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
> > +				bytes = extent_bytes =
> > +					BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > +				entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > +				entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > +				link_free_space(ctl, entry);
> > +			} else {
> > +				kmem_cache_free(btrfs_free_space_cachep, entry);
> > +			}
> > +		} else {
> 
> > +			extent_start = entry->offset;
> > +			extent_bytes = entry->bytes;
> > +			extent_trim_state = entry->trim_state;
> 
> This is common initialization for both async and sync cases so it could
> be merged to a common block.
> 

I removed the chained initialization and moved the common code out.

> > +			start = max(start, extent_start);
> > +			bytes = min(extent_start + extent_bytes, end) - start;
> > +			if (bytes < minlen) {
> > +				spin_unlock(&ctl->tree_lock);
> > +				mutex_unlock(&ctl->cache_writeout_mutex);
> > +				goto next;
> > +			}

Patch
diff mbox series

diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 3ed6855e24da..cb6ef0ab879d 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -3,10 +3,15 @@ 
 #ifndef BTRFS_DISCARD_H
 #define BTRFS_DISCARD_H
 
+#include <linux/sizes.h>
+
 struct btrfs_fs_info;
 struct btrfs_discard_ctl;
 struct btrfs_block_group;
 
+/* Discard size limits. */
+#define BTRFS_ASYNC_DISCARD_MAX_SIZE	(SZ_64M)
+
 /* Work operations. */
 void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
 			       struct btrfs_block_group *block_group);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 57df34480b93..0dbcea6c59f9 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -3466,19 +3466,40 @@  static int trim_no_bitmap(struct btrfs_block_group *block_group,
 		if (entry->offset >= end)
 			goto out_unlock;
 
-		extent_start = entry->offset;
-		extent_bytes = entry->bytes;
-		extent_trim_state = entry->trim_state;
-		start = max(start, extent_start);
-		bytes = min(extent_start + extent_bytes, end) - start;
-		if (bytes < minlen) {
-			spin_unlock(&ctl->tree_lock);
-			mutex_unlock(&ctl->cache_writeout_mutex);
-			goto next;
-		}
+		if (async) {
+			start = extent_start = entry->offset;
+			bytes = extent_bytes = entry->bytes;
+			extent_trim_state = entry->trim_state;
+			if (bytes < minlen) {
+				spin_unlock(&ctl->tree_lock);
+				mutex_unlock(&ctl->cache_writeout_mutex);
+				goto next;
+			}
+			unlink_free_space(ctl, entry);
+			if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
+				bytes = extent_bytes =
+					BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				link_free_space(ctl, entry);
+			} else {
+				kmem_cache_free(btrfs_free_space_cachep, entry);
+			}
+		} else {
+			extent_start = entry->offset;
+			extent_bytes = entry->bytes;
+			extent_trim_state = entry->trim_state;
+			start = max(start, extent_start);
+			bytes = min(extent_start + extent_bytes, end) - start;
+			if (bytes < minlen) {
+				spin_unlock(&ctl->tree_lock);
+				mutex_unlock(&ctl->cache_writeout_mutex);
+				goto next;
+			}
 
-		unlink_free_space(ctl, entry);
-		kmem_cache_free(btrfs_free_space_cachep, entry);
+			unlink_free_space(ctl, entry);
+			kmem_cache_free(btrfs_free_space_cachep, entry);
+		}
 
 		spin_unlock(&ctl->tree_lock);
 		trim_entry.start = extent_start;
@@ -3643,6 +3664,9 @@  static int trim_bitmaps(struct btrfs_block_group *block_group,
 			goto next;
 		}
 
+		if (async && bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE)
+			bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE;
+
 		bitmap_clear_bits(ctl, entry, start, bytes);
 		if (entry->bytes == 0)
 			free_bitmap(ctl, entry);