Message ID | 20180830174225.2200-5-josef@toxicpanda.com (mailing list archive) |
---|---|
State | New, archived |
Series | My current patch queue |
On 30.08.2018 20:41, Josef Bacik wrote:
> From: Josef Bacik <jbacik@fb.com>
>
> We use this number to figure out how many delayed refs to run, but
> __btrfs_run_delayed_refs really only checks every time we need a new
> delayed ref head, so we always run at least one ref head completely no
> matter what the number of items on it. So instead track only the ref
> heads added by this trans handle and adjust the counting appropriately
> in __btrfs_run_delayed_refs.
>
> Signed-off-by: Josef Bacik <jbacik@fb.com>
> ---
>  fs/btrfs/delayed-ref.c | 3 ---
>  fs/btrfs/extent-tree.c | 5 +----
>  2 files changed, 1 insertion(+), 7 deletions(-)
>
> diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> index 3a9e4ac21794..27f7dd4e3d52 100644
> --- a/fs/btrfs/delayed-ref.c
> +++ b/fs/btrfs/delayed-ref.c
> @@ -234,8 +234,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
>          ref->in_tree = 0;
>          btrfs_put_delayed_ref(ref);
>          atomic_dec(&delayed_refs->num_entries);
> -        if (trans->delayed_ref_updates)
> -                trans->delayed_ref_updates--;

There was feedback on this particular hunk and you've completely ignored
it, that's not nice:

https://www.spinics.net/lists/linux-btrfs/msg80514.html

>  }
>
>  static bool merge_ref(struct btrfs_trans_handle *trans,
> @@ -460,7 +458,6 @@ static int insert_delayed_ref(struct btrfs_trans_handle *trans,
>          if (ref->action == BTRFS_ADD_DELAYED_REF)
>                  list_add_tail(&ref->add_list, &href->ref_add_list);
>          atomic_inc(&root->num_entries);
> -        trans->delayed_ref_updates++;
>          spin_unlock(&href->lock);
>          return ret;
>  }
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index 87c42a2c45b1..20531389a20a 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -2583,6 +2583,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
>                                  spin_unlock(&delayed_refs->lock);
>                                  break;
>                          }
> +                        count++;
>
>                          /* grab the lock that says we are going to process
>                           * all the refs for this head */
> @@ -2596,7 +2597,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
>                           */
>                          if (ret == -EAGAIN) {
>                                  locked_ref = NULL;
> -                                count++;
>                                  continue;
>                          }
>                  }
> @@ -2624,7 +2624,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
>                          unselect_delayed_ref_head(delayed_refs, locked_ref);
>                          locked_ref = NULL;
>                          cond_resched();
> -                        count++;
>                          continue;
>                  }
>
> @@ -2642,7 +2641,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
>                                  return ret;
>                          }
>                          locked_ref = NULL;
> -                        count++;
>                          continue;
>                  }
>
> @@ -2693,7 +2691,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
>                  }
>
>                  btrfs_put_delayed_ref(ref);
> -                count++;
>                  cond_resched();
>          }
>
On Fri, Aug 31, 2018 at 10:52:55AM +0300, Nikolay Borisov wrote:
>
>
> On 30.08.2018 20:41, Josef Bacik wrote:
> > From: Josef Bacik <jbacik@fb.com>
> >
> > We use this number to figure out how many delayed refs to run, but
> > __btrfs_run_delayed_refs really only checks every time we need a new
> > delayed ref head, so we always run at least one ref head completely no
> > matter what the number of items on it. So instead track only the ref
> > heads added by this trans handle and adjust the counting appropriately
> > in __btrfs_run_delayed_refs.
> >
> > Signed-off-by: Josef Bacik <jbacik@fb.com>
> > ---
> >  fs/btrfs/delayed-ref.c | 3 ---
> >  fs/btrfs/extent-tree.c | 5 +----
> >  2 files changed, 1 insertion(+), 7 deletions(-)
> >
> > diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> > index 3a9e4ac21794..27f7dd4e3d52 100644
> > --- a/fs/btrfs/delayed-ref.c
> > +++ b/fs/btrfs/delayed-ref.c
> > @@ -234,8 +234,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
> >          ref->in_tree = 0;
> >          btrfs_put_delayed_ref(ref);
> >          atomic_dec(&delayed_refs->num_entries);
> > -        if (trans->delayed_ref_updates)
> > -                trans->delayed_ref_updates--;
>
> There was feedback on this particular hunk and you've completely ignored
> it, that's not nice:
>
> https://www.spinics.net/lists/linux-btrfs/msg80514.html

I just missed it in the last go around (as is the case for the other ones).
I'm not sure what part is confusing, we only want delayed_ref_updates to be
how many delayed ref heads there are, which is what this patch is changing.
I could probably split this between these two changes and the count changing
below since they are slightly different things, I'll do that. Thanks,

Josef
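The bookkeeping Josef describes, delayed_ref_updates reflecting delayed ref
heads rather than every individual ref queued on a head, can be summarized
with a small userspace sketch. This is not btrfs code: the toy_trans and
toy_head structures and the add_ref() helper are invented for illustration,
and where the per-head increment actually lives in the kernel is outside the
hunks quoted in this thread.

/*
 * Minimal userspace sketch (not btrfs code): model the intent that
 * trans->delayed_ref_updates tracks delayed ref *heads* touched by a
 * transaction handle, not every individual ref queued on a head.
 * All names here are invented for illustration.
 */
#include <stdio.h>

struct toy_trans {
        int delayed_ref_updates;        /* heads, not individual refs */
};

struct toy_head {
        int nr_refs;                    /* refs queued on this head */
        int counted;                    /* already reflected in the trans? */
};

static void add_ref(struct toy_trans *trans, struct toy_head *head)
{
        head->nr_refs++;
        /* Count the head once, the first time a ref lands on it ... */
        if (!head->counted) {
                head->counted = 1;
                trans->delayed_ref_updates++;
        }
        /* ... instead of once per ref, which is what the removed hunks did. */
}

int main(void)
{
        struct toy_trans trans = { 0 };
        struct toy_head head = { 0 };
        int i;

        for (i = 0; i < 4; i++)
                add_ref(&trans, &head);

        /* Four refs on one head -> delayed_ref_updates is 1, not 4. */
        printf("refs=%d delayed_ref_updates=%d\n",
               head.nr_refs, trans.delayed_ref_updates);
        return 0;
}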
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 3a9e4ac21794..27f7dd4e3d52 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -234,8 +234,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
         ref->in_tree = 0;
         btrfs_put_delayed_ref(ref);
         atomic_dec(&delayed_refs->num_entries);
-        if (trans->delayed_ref_updates)
-                trans->delayed_ref_updates--;
 }
 
 static bool merge_ref(struct btrfs_trans_handle *trans,
@@ -460,7 +458,6 @@ static int insert_delayed_ref(struct btrfs_trans_handle *trans,
         if (ref->action == BTRFS_ADD_DELAYED_REF)
                 list_add_tail(&ref->add_list, &href->ref_add_list);
         atomic_inc(&root->num_entries);
-        trans->delayed_ref_updates++;
         spin_unlock(&href->lock);
         return ret;
 }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 87c42a2c45b1..20531389a20a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2583,6 +2583,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                                 spin_unlock(&delayed_refs->lock);
                                 break;
                         }
+                        count++;
 
                         /* grab the lock that says we are going to process
                          * all the refs for this head */
@@ -2596,7 +2597,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                          */
                         if (ret == -EAGAIN) {
                                 locked_ref = NULL;
-                                count++;
                                 continue;
                         }
                 }
@@ -2624,7 +2624,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                         unselect_delayed_ref_head(delayed_refs, locked_ref);
                         locked_ref = NULL;
                         cond_resched();
-                        count++;
                         continue;
                 }
 
@@ -2642,7 +2641,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                                 return ret;
                         }
                         locked_ref = NULL;
-                        count++;
                         continue;
                 }
 
@@ -2693,7 +2691,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                 }
 
                 btrfs_put_delayed_ref(ref);
-                count++;
                 cond_resched();
         }
 
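As a companion to the extent-tree.c hunks above, here is a small standalone
sketch of what moving count++ to head selection means: the nr cutoff in
__btrfs_run_delayed_refs is measured in ref heads, and every ref on a
selected head still runs to completion. This is not the kernel loop; the
run_heads() helper, the toy structures, and the numbers are made up for
illustration.

/*
 * Standalone sketch of the counting change in __btrfs_run_delayed_refs:
 * "count" is bumped once per selected ref head, so the "count >= nr"
 * cutoff is expressed in heads, while every ref on a selected head is
 * still processed.  Toy structures and numbers, not kernel code.
 */
#include <stdio.h>

struct toy_head {
        int nr_refs;    /* refs queued on this head */
};

/* Run delayed ref heads until at least "nr" of them have been handled. */
static unsigned long run_heads(struct toy_head *heads, int nr_heads,
                               unsigned long nr)
{
        unsigned long count = 0;
        int i;

        for (i = 0; i < nr_heads; i++) {
                if (count >= nr)
                        break;
                count++;        /* one increment per head ... */

                /* ... even though all of the head's refs are run. */
                printf("head %d: running %d refs\n", i, heads[i].nr_refs);
        }
        return count;
}

int main(void)
{
        struct toy_head heads[] = { { 5 }, { 1 }, { 12 }, { 3 } };

        /* Ask for two heads' worth of work: heads 0 and 1 run completely. */
        unsigned long done = run_heads(heads, 4, 2);

        printf("ran %lu ref heads\n", done);
        return 0;
}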