
[4/8] btrfs: cleanup btrfs_discard_update_discardable usage

Message ID afb3c72b04191707f96001bc3698e14b4d3400a8.1603460665.git.josef@toxicpanda.com (mailing list archive)
State New, archived
Series Block group caching fixes

Commit Message

Josef Bacik Oct. 23, 2020, 1:58 p.m. UTC
This passes in the block_group and the free_space_ctl, but we can get
this from the block group itself.  Part of this is because we call it
from __load_free_space_cache, which can be called for the inode cache as
well.  Move that call into the block group specific load section, wrap
it in the right lock that we need, and fix up the arguments to only take
the block group.  Add a lockdep_assert as well for good measure to make
sure we don't mess up the locking again.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/discard.c          |  7 ++++---
 fs/btrfs/discard.h          |  3 +--
 fs/btrfs/free-space-cache.c | 14 ++++++++------
 3 files changed, 13 insertions(+), 11 deletions(-)
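
For quick reference, this is roughly the shape of the function after the change, condensed from the hunks in this patch (the early-return checks and the counter-propagation body are unchanged and elided here):

void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl;
	struct btrfs_discard_ctl *discard_ctl;

	/* ... unchanged early-return checks elided ... */

	/* The ctl is now derived from the block group instead of passed in. */
	ctl = block_group->free_space_ctl;
	discard_ctl = &block_group->fs_info->discard_ctl;

	/* Every caller now holds ctl->tree_lock; lockdep enforces that. */
	lockdep_assert_held(&ctl->tree_lock);

	/* ... propagate discardable extents/bytes deltas to discard_ctl ... */
}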

Comments

Filipe Manana Nov. 4, 2020, 3:54 p.m. UTC | #1
On Fri, Oct 23, 2020 at 5:12 PM Josef Bacik <josef@toxicpanda.com> wrote:
>
> This passes in the block_group and the free_space_ctl, but we can get
> this from the block group itself.  Part of this is because we call it
> from __load_free_space_cache, which can be called for the inode cache as
> well.  Move that call into the block group specific load section, wrap
> it in the right lock that we need, and fix up the arguments to only take
> the block group.  Add a lockdep_assert as well for good measure to make
> sure we don't mess up the locking again.

So this is actually 2 different things in one patch:

1) A cleanup to remove an unnecessary argument to
btrfs_discard_update_discardable();

2) A bug fix, because btrfs_discard_update_discardable() is not being
called with ->tree_lock held in one specific context.

Shouldn't we really have this split in two patches?

Other than that, it looks good. Thanks.
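
For context on point 2, condensed from the free-space-cache.c hunks: the update used to run at the end of __load_free_space_cache(), where ->tree_lock is not held, and is now done from load_free_space_cache() under the lock.

Before (at the out: label of __load_free_space_cache(), ->tree_lock not held):

	btrfs_discard_update_discardable(ctl->private, ctl);

After (in load_free_space_cache(), under the lock):

	spin_lock(&ctl->tree_lock);
	btrfs_discard_update_discardable(block_group);
	spin_unlock(&ctl->tree_lock);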

>
> Signed-off-by: Josef Bacik <josef@toxicpanda.com>
> ---
>  fs/btrfs/discard.c          |  7 ++++---
>  fs/btrfs/discard.h          |  3 +--
>  fs/btrfs/free-space-cache.c | 14 ++++++++------
>  3 files changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
> index 741c7e19c32f..5a88b584276f 100644
> --- a/fs/btrfs/discard.c
> +++ b/fs/btrfs/discard.c
> @@ -563,15 +563,14 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
>  /**
>   * btrfs_discard_update_discardable - propagate discard counters
>   * @block_group: block_group of interest
> - * @ctl: free_space_ctl of @block_group
>   *
>   * This propagates deltas of counters up to the discard_ctl.  It maintains a
>   * current counter and a previous counter passing the delta up to the global
>   * stat.  Then the current counter value becomes the previous counter value.
>   */
> -void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
> -                                     struct btrfs_free_space_ctl *ctl)
> +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
>  {
> +       struct btrfs_free_space_ctl *ctl;
>         struct btrfs_discard_ctl *discard_ctl;
>         s32 extents_delta;
>         s64 bytes_delta;
> @@ -581,8 +580,10 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
>             !btrfs_is_block_group_data_only(block_group))
>                 return;
>
> +       ctl = block_group->free_space_ctl;
>         discard_ctl = &block_group->fs_info->discard_ctl;
>
> +       lockdep_assert_held(&ctl->tree_lock);
>         extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
>                         ctl->discardable_extents[BTRFS_STAT_PREV];
>         if (extents_delta) {
> diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> index 353228d62f5a..57b9202f427f 100644
> --- a/fs/btrfs/discard.h
> +++ b/fs/btrfs/discard.h
> @@ -28,8 +28,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
>
>  /* Update operations */
>  void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
> -void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
> -                                     struct btrfs_free_space_ctl *ctl);
> +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
>
>  /* Setup/cleanup operations */
>  void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
> diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> index 5ea36a06e514..0787339c7b93 100644
> --- a/fs/btrfs/free-space-cache.c
> +++ b/fs/btrfs/free-space-cache.c
> @@ -828,7 +828,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
>         merge_space_tree(ctl);
>         ret = 1;
>  out:
> -       btrfs_discard_update_discardable(ctl->private, ctl);
>         io_ctl_free(&io_ctl);
>         return ret;
>  free_cache:
> @@ -929,6 +928,9 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
>                            block_group->start);
>         }
>
> +       spin_lock(&ctl->tree_lock);
> +       btrfs_discard_update_discardable(block_group);
> +       spin_unlock(&ctl->tree_lock);
>         iput(inode);
>         return ret;
>  }
> @@ -2508,7 +2510,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
>         if (ret)
>                 kmem_cache_free(btrfs_free_space_cachep, info);
>  out:
> -       btrfs_discard_update_discardable(block_group, ctl);
> +       btrfs_discard_update_discardable(block_group);
>         spin_unlock(&ctl->tree_lock);
>
>         if (ret) {
> @@ -2643,7 +2645,7 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
>                 goto again;
>         }
>  out_lock:
> -       btrfs_discard_update_discardable(block_group, ctl);
> +       btrfs_discard_update_discardable(block_group);
>         spin_unlock(&ctl->tree_lock);
>  out:
>         return ret;
> @@ -2779,7 +2781,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
>         spin_lock(&ctl->tree_lock);
>         __btrfs_remove_free_space_cache_locked(ctl);
>         if (ctl->private)
> -               btrfs_discard_update_discardable(ctl->private, ctl);
> +               btrfs_discard_update_discardable(ctl->private);
>         spin_unlock(&ctl->tree_lock);
>  }
>
> @@ -2801,7 +2803,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
>                 cond_resched_lock(&ctl->tree_lock);
>         }
>         __btrfs_remove_free_space_cache_locked(ctl);
> -       btrfs_discard_update_discardable(block_group, ctl);
> +       btrfs_discard_update_discardable(block_group);
>         spin_unlock(&ctl->tree_lock);
>
>  }
> @@ -2885,7 +2887,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
>                         link_free_space(ctl, entry);
>         }
>  out:
> -       btrfs_discard_update_discardable(block_group, ctl);
> +       btrfs_discard_update_discardable(block_group);
>         spin_unlock(&ctl->tree_lock);
>
>         if (align_gap_len)
> --
> 2.26.2
>
Amy Parker Nov. 4, 2020, 5:36 p.m. UTC | #2
On Wed, Nov 4, 2020 at 7:56 AM Filipe Manana <fdmanana@gmail.com> wrote:
>
> On Fri, Oct 23, 2020 at 5:12 PM Josef Bacik <josef@toxicpanda.com> wrote:
> >
> > This passes in the block_group and the free_space_ctl, but we can get
> > this from the block group itself.  Part of this is because we call it
> > from __load_free_space_cache, which can be called for the inode cache as
> > well.  Move that call into the block group specific load section, wrap
> > it in the right lock that we need, and fix up the arguments to only take
> > the block group.  Add a lockdep_assert as well for good measure to make
> > sure we don't mess up the locking again.
>
> So this is actually 2 different things in one patch:
>
> 1) A cleanup to remove an unnecessary argument to
> btrfs_discard_update_discardable();
>
> 2) A bug fix, because btrfs_discard_update_discardable() is not being
> called with ->tree_lock held in one specific context.
>
> Shouldn't we really have this split in two patches?

Absolutely, yes. We should split this into two patches.

>
> Other than that, it looks good. Thanks.
>
> >
> > Signed-off-by: Josef Bacik <josef@toxicpanda.com>
> > ---
> >  fs/btrfs/discard.c          |  7 ++++---
> >  fs/btrfs/discard.h          |  3 +--
> >  fs/btrfs/free-space-cache.c | 14 ++++++++------
> >  3 files changed, 13 insertions(+), 11 deletions(-)
> >
> > diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
> > index 741c7e19c32f..5a88b584276f 100644
> > --- a/fs/btrfs/discard.c
> > +++ b/fs/btrfs/discard.c
> > @@ -563,15 +563,14 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
> >  /**
> >   * btrfs_discard_update_discardable - propagate discard counters
> >   * @block_group: block_group of interest
> > - * @ctl: free_space_ctl of @block_group
> >   *
> >   * This propagates deltas of counters up to the discard_ctl.  It maintains a
> >   * current counter and a previous counter passing the delta up to the global
> >   * stat.  Then the current counter value becomes the previous counter value.
> >   */
> > -void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
> > -                                     struct btrfs_free_space_ctl *ctl)
> > +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
> >  {
> > +       struct btrfs_free_space_ctl *ctl;
> >         struct btrfs_discard_ctl *discard_ctl;
> >         s32 extents_delta;
> >         s64 bytes_delta;
> > @@ -581,8 +580,10 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
> >             !btrfs_is_block_group_data_only(block_group))
> >                 return;
> >
> > +       ctl = block_group->free_space_ctl;
> >         discard_ctl = &block_group->fs_info->discard_ctl;
> >
> > +       lockdep_assert_held(&ctl->tree_lock);
> >         extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
> >                         ctl->discardable_extents[BTRFS_STAT_PREV];
> >         if (extents_delta) {
> > diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> > index 353228d62f5a..57b9202f427f 100644
> > --- a/fs/btrfs/discard.h
> > +++ b/fs/btrfs/discard.h
> > @@ -28,8 +28,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
> >
> >  /* Update operations */
> >  void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
> > -void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
> > -                                     struct btrfs_free_space_ctl *ctl);
> > +void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
> >
> >  /* Setup/cleanup operations */
> >  void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
> > diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> > index 5ea36a06e514..0787339c7b93 100644
> > --- a/fs/btrfs/free-space-cache.c
> > +++ b/fs/btrfs/free-space-cache.c
> > @@ -828,7 +828,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
> >         merge_space_tree(ctl);
> >         ret = 1;
> >  out:
> > -       btrfs_discard_update_discardable(ctl->private, ctl);
> >         io_ctl_free(&io_ctl);
> >         return ret;
> >  free_cache:
> > @@ -929,6 +928,9 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
> >                            block_group->start);
> >         }
> >
> > +       spin_lock(&ctl->tree_lock);
> > +       btrfs_discard_update_discardable(block_group);
> > +       spin_unlock(&ctl->tree_lock);
> >         iput(inode);
> >         return ret;
> >  }
> > @@ -2508,7 +2510,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
> >         if (ret)
> >                 kmem_cache_free(btrfs_free_space_cachep, info);
> >  out:
> > -       btrfs_discard_update_discardable(block_group, ctl);
> > +       btrfs_discard_update_discardable(block_group);
> >         spin_unlock(&ctl->tree_lock);
> >
> >         if (ret) {
> > @@ -2643,7 +2645,7 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
> >                 goto again;
> >         }
> >  out_lock:
> > -       btrfs_discard_update_discardable(block_group, ctl);
> > +       btrfs_discard_update_discardable(block_group);
> >         spin_unlock(&ctl->tree_lock);
> >  out:
> >         return ret;
> > @@ -2779,7 +2781,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
> >         spin_lock(&ctl->tree_lock);
> >         __btrfs_remove_free_space_cache_locked(ctl);
> >         if (ctl->private)
> > -               btrfs_discard_update_discardable(ctl->private, ctl);
> > +               btrfs_discard_update_discardable(ctl->private);
> >         spin_unlock(&ctl->tree_lock);
> >  }
> >
> > @@ -2801,7 +2803,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
> >                 cond_resched_lock(&ctl->tree_lock);
> >         }
> >         __btrfs_remove_free_space_cache_locked(ctl);
> > -       btrfs_discard_update_discardable(block_group, ctl);
> > +       btrfs_discard_update_discardable(block_group);
> >         spin_unlock(&ctl->tree_lock);
> >
> >  }
> > @@ -2885,7 +2887,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
> >                         link_free_space(ctl, entry);
> >         }
> >  out:
> > -       btrfs_discard_update_discardable(block_group, ctl);
> > +       btrfs_discard_update_discardable(block_group);
> >         spin_unlock(&ctl->tree_lock);
> >
> >         if (align_gap_len)
> > --
> > 2.26.2
> >
>
>
> --
> Filipe David Manana,
>
> “Whether you think you can, or you think you can't — you're right.”

Best regards,
Amy Parker
(they/them)
Josef Bacik Nov. 4, 2020, 6:21 p.m. UTC | #3
On 11/4/20 10:54 AM, Filipe Manana wrote:
> On Fri, Oct 23, 2020 at 5:12 PM Josef Bacik <josef@toxicpanda.com> wrote:
>>
>> This passes in the block_group and the free_space_ctl, but we can get
>> this from the block group itself.  Part of this is because we call it
>> from __load_free_space_cache, which can be called for the inode cache as
>> well.  Move that call into the block group specific load section, wrap
>> it in the right lock that we need, and fix up the arguments to only take
>> the block group.  Add a lockdep_assert as well for good measure to make
>> sure we don't mess up the locking again.
> 
> So this is actually 2 different things in one patch:
> 
> 1) A cleanup to remove an unnecessary argument to
> btrfs_discard_update_discardable();
> 
> 2) A bug fix, because btrfs_discard_update_discardable() is not being
> called with ->tree_lock held in one specific context.

Yeah but the specific context is on load, so we won't have concurrent modifiers 
to the tree until _after_ the cache is successfully loaded.  Of course this 
patchset changes that so it's important now, but prior to this we didn't 
necessarily need the lock, so it's not really a bug fix, just an adjustment.

However I'm always happy to inflate my patch counts, makes me look good at 
performance review time ;).  I'm happy to respin with it broken out.  Thanks,

Josef
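
To make the "prevention, not a bug fix" distinction concrete, here is a minimal, self-contained illustration of what lockdep_assert_held() buys (toy code, not from btrfs; the demo_* names are made up for illustration):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(demo_lock);

/* Callers are expected to hold demo_lock; lockdep enforces the rule. */
static void demo_update_counters(void)
{
	/*
	 * With CONFIG_PROVE_LOCKING this warns at runtime if the caller
	 * does not hold demo_lock; without lockdep it compiles to nothing,
	 * so there is no cost in production configurations.
	 */
	lockdep_assert_held(&demo_lock);

	/* ... update state that demo_lock protects ... */
}

static void demo_caller(void)
{
	/*
	 * Even in a context where no concurrent access is possible (like
	 * the load path discussed above), the lock still has to be taken
	 * to keep the assertion quiet, which is exactly the "adjustment"
	 * rather than a bug fix.
	 */
	spin_lock(&demo_lock);
	demo_update_counters();
	spin_unlock(&demo_lock);
}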
Filipe Manana Nov. 4, 2020, 6:28 p.m. UTC | #4
On Wed, Nov 4, 2020 at 6:21 PM Josef Bacik <josef@toxicpanda.com> wrote:
>
> On 11/4/20 10:54 AM, Filipe Manana wrote:
> > On Fri, Oct 23, 2020 at 5:12 PM Josef Bacik <josef@toxicpanda.com> wrote:
> >>
> >> This passes in the block_group and the free_space_ctl, but we can get
> >> this from the block group itself.  Part of this is because we call it
> >> from __load_free_space_cache, which can be called for the inode cache as
> >> well.  Move that call into the block group specific load section, wrap
> >> it in the right lock that we need, and fix up the arguments to only take
> >> the block group.  Add a lockdep_assert as well for good measure to make
> >> sure we don't mess up the locking again.
> >
> > So this is actually 2 different things in one patch:
> >
> > 1) A cleanup to remove an unnecessary argument to
> > btrfs_discard_update_discardable();
> >
> > 2) A bug because btrfs_discard_update_discardable() is not being
> > called with the lock ->tree_lock held in one specific context.
>
> Yeah but the specific context is on load, so we won't have concurrent modifiers
> to the tree until _after_ the cache is successfully loaded.  Of course this
> patchset changes that so it's important now, but prior to this we didn't
> necessarily need the lock, so it's not really a bug fix, just an adjustment.
>
> However I'm always happy to inflate my patch counts, makes me look good at
> performance review time ;).  I'm happy to respin with it broken out.  Thanks,

Then make it 3! One more just to add the assertion!

I'm fine with it as it is, maybe just add an explicit note that we
can't have concurrent access in the load case, so it's not fixing any
bug, but just prevention.

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Thanks.

>
> Josef
David Sterba Nov. 5, 2020, 1:22 p.m. UTC | #5
On Wed, Nov 04, 2020 at 06:28:13PM +0000, Filipe Manana wrote:
> On Wed, Nov 4, 2020 at 6:21 PM Josef Bacik <josef@toxicpanda.com> wrote:
> >
> > On 11/4/20 10:54 AM, Filipe Manana wrote:
> > > On Fri, Oct 23, 2020 at 5:12 PM Josef Bacik <josef@toxicpanda.com> wrote:
> > >>
> > >> This passes in the block_group and the free_space_ctl, but we can get
> > >> this from the block group itself.  Part of this is because we call it
> > >> from __load_free_space_cache, which can be called for the inode cache as
> > >> well.  Move that call into the block group specific load section, wrap
> > >> it in the right lock that we need, and fix up the arguments to only take
> > >> the block group.  Add a lockdep_assert as well for good measure to make
> > >> sure we don't mess up the locking again.
> > >
> > > So this is actually 2 different things in one patch:
> > >
> > > 1) A cleanup to remove an unnecessary argument to
> > > btrfs_discard_update_discardable();
> > >
> > > 2) A bug fix, because btrfs_discard_update_discardable() is not being
> > > called with ->tree_lock held in one specific context.
> >
> > Yeah but the specific context is on load, so we won't have concurrent modifiers
> > to the tree until _after_ the cache is successfully loaded.  Of course this
> > patchset changes that so it's important now, but prior to this we didn't
> > necessarily need the lock, so it's not really a bug fix, just an adjustment.
> >
> > However I'm always happy to inflate my patch counts, makes me look good at
> > performance review time ;).  I'm happy to respin with it broken out.  Thanks,
> 
> Then make it 3! One more just to add the assertion!
> 
> I'm fine with it as it is, maybe just add an explicit note that we
> can't have concurrent access in the load case, so it's not fixing any
> bug, but just prevention.

Changelog updated to reflect that:

This passes in the block_group and the free_space_ctl, but we can get
this from the block group itself.  Part of this is because we call it
from __load_free_space_cache, which can be called for the inode cache as
well.

Move that call into the block group specific load section, wrap it in
the right lock that we need for the assertion (but otherwise this is
safe without the lock because this happens in single-thread context).

Fix up the arguments to only take the block group.  Add a lockdep_assert
as well for good measure to make sure we don't mess up the locking
again.
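
An annotated view of the resulting load-time call site (the three code lines are taken from the diff below; the comment paraphrases the updated changelog and is not part of the actual patch):

	/*
	 * Taken to satisfy the lockdep_assert_held() in
	 * btrfs_discard_update_discardable(); nothing else can touch the
	 * free space tree yet at this point in the load path.
	 */
	spin_lock(&ctl->tree_lock);
	btrfs_discard_update_discardable(block_group);
	spin_unlock(&ctl->tree_lock);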

Patch

diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 741c7e19c32f..5a88b584276f 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -563,15 +563,14 @@  void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
 /**
  * btrfs_discard_update_discardable - propagate discard counters
  * @block_group: block_group of interest
- * @ctl: free_space_ctl of @block_group
  *
  * This propagates deltas of counters up to the discard_ctl.  It maintains a
  * current counter and a previous counter passing the delta up to the global
  * stat.  Then the current counter value becomes the previous counter value.
  */
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-				      struct btrfs_free_space_ctl *ctl)
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
 {
+	struct btrfs_free_space_ctl *ctl;
 	struct btrfs_discard_ctl *discard_ctl;
 	s32 extents_delta;
 	s64 bytes_delta;
@@ -581,8 +580,10 @@  void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
 	    !btrfs_is_block_group_data_only(block_group))
 		return;
 
+	ctl = block_group->free_space_ctl;
 	discard_ctl = &block_group->fs_info->discard_ctl;
 
+	lockdep_assert_held(&ctl->tree_lock);
 	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
 			ctl->discardable_extents[BTRFS_STAT_PREV];
 	if (extents_delta) {
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 353228d62f5a..57b9202f427f 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -28,8 +28,7 @@  bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
 
 /* Update operations */
 void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-				      struct btrfs_free_space_ctl *ctl);
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
 
 /* Setup/cleanup operations */
 void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5ea36a06e514..0787339c7b93 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -828,7 +828,6 @@  static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	merge_space_tree(ctl);
 	ret = 1;
 out:
-	btrfs_discard_update_discardable(ctl->private, ctl);
 	io_ctl_free(&io_ctl);
 	return ret;
 free_cache:
@@ -929,6 +928,9 @@  int load_free_space_cache(struct btrfs_block_group *block_group)
 			   block_group->start);
 	}
 
+	spin_lock(&ctl->tree_lock);
+	btrfs_discard_update_discardable(block_group);
+	spin_unlock(&ctl->tree_lock);
 	iput(inode);
 	return ret;
 }
@@ -2508,7 +2510,7 @@  int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
 	if (ret)
 		kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
 	if (ret) {
@@ -2643,7 +2645,7 @@  int btrfs_remove_free_space(struct btrfs_block_group *block_group,
 		goto again;
 	}
 out_lock:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 out:
 	return ret;
@@ -2779,7 +2781,7 @@  void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
 	spin_lock(&ctl->tree_lock);
 	__btrfs_remove_free_space_cache_locked(ctl);
 	if (ctl->private)
-		btrfs_discard_update_discardable(ctl->private, ctl);
+		btrfs_discard_update_discardable(ctl->private);
 	spin_unlock(&ctl->tree_lock);
 }
 
@@ -2801,7 +2803,7 @@  void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
 		cond_resched_lock(&ctl->tree_lock);
 	}
 	__btrfs_remove_free_space_cache_locked(ctl);
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
 }
@@ -2885,7 +2887,7 @@  u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
 			link_free_space(ctl, entry);
 	}
 out:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
 	if (align_gap_len)