Message ID | 20181203152038.21388-2-josef@toxicpanda.com |
---|---|
State | New, archived |
Series | Delayed refs rsv |
On 3.12.18 at 17:20, Josef Bacik wrote:
> From: Josef Bacik <jbacik@fb.com>
>
> We do this dance in cleanup_ref_head and check_ref_cleanup, unify it
> into a helper and cleanup the calling functions.
>
> Signed-off-by: Josef Bacik <jbacik@fb.com>
> Reviewed-by: Omar Sandoval <osandov@fb.com>

Reviewed-by: Nikolay Borisov <nborisov@suse.com>

> ---
>  fs/btrfs/delayed-ref.c | 14 ++++++++++++++
>  fs/btrfs/delayed-ref.h |  3 ++-
>  fs/btrfs/extent-tree.c | 22 +++-------------------
>  3 files changed, 19 insertions(+), 20 deletions(-)
>
> diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> index 9301b3ad9217..b3e4c9fcb664 100644
> --- a/fs/btrfs/delayed-ref.c
> +++ b/fs/btrfs/delayed-ref.c
> @@ -400,6 +400,20 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
>  	return head;
>  }
>
> +void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
> +			   struct btrfs_delayed_ref_head *head)
> +{
> +	lockdep_assert_held(&delayed_refs->lock);
> +	lockdep_assert_held(&head->lock);
> +
> +	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
> +	RB_CLEAR_NODE(&head->href_node);
> +	atomic_dec(&delayed_refs->num_entries);
> +	delayed_refs->num_heads--;
> +	if (head->processing == 0)
> +		delayed_refs->num_heads_ready--;
> +}
> +
>  /*
>   * Helper to insert the ref_node to the tail or merge with tail.
>   *
> diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
> index 8e20c5cb5404..d2af974f68a1 100644
> --- a/fs/btrfs/delayed-ref.h
> +++ b/fs/btrfs/delayed-ref.h
> @@ -261,7 +261,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
>  {
>  	mutex_unlock(&head->mutex);
>  }
> -
> +void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
> +			   struct btrfs_delayed_ref_head *head);
>
>  struct btrfs_delayed_ref_head *btrfs_select_ref_head(
>  		struct btrfs_delayed_ref_root *delayed_refs);
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index d242a1174e50..c36b3a42f2bb 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -2474,12 +2474,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
>  		spin_unlock(&delayed_refs->lock);
>  		return 1;
>  	}
> -	delayed_refs->num_heads--;
> -	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
> -	RB_CLEAR_NODE(&head->href_node);
> +	btrfs_delete_ref_head(delayed_refs, head);
>  	spin_unlock(&head->lock);
>  	spin_unlock(&delayed_refs->lock);
> -	atomic_dec(&delayed_refs->num_entries);
>
>  	trace_run_delayed_ref_head(fs_info, head, 0);
>
> @@ -6984,22 +6981,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>  	if (!mutex_trylock(&head->mutex))
>  		goto out;
>
> -	/*
> -	 * at this point we have a head with no other entries. Go
> -	 * ahead and process it.
> -	 */
> -	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
> -	RB_CLEAR_NODE(&head->href_node);
> -	atomic_dec(&delayed_refs->num_entries);
> -
> -	/*
> -	 * we don't take a ref on the node because we're removing it from the
> -	 * tree, so we just steal the ref the tree was holding.
> -	 */
> -	delayed_refs->num_heads--;
> -	if (head->processing == 0)
> -		delayed_refs->num_heads_ready--;
> +	btrfs_delete_ref_head(delayed_refs, head);
>  	head->processing = 0;
> +
>  	spin_unlock(&head->lock);
>  	spin_unlock(&delayed_refs->lock);
>
>
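A note on the new helper's contract: btrfs_delete_ref_head() expects both locks to already be held, which the added lockdep_assert_held() calls make explicit. A minimal, illustrative-only sketch of a caller (not verbatim kernel code; the lock order is inferred from the two call sites touched by the patch) would be:

	/*
	 * Illustrative sketch only, not actual kernel code. Lock order follows
	 * the call sites in the patch: delayed_refs->lock first, then head->lock.
	 */
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);

	/* Unhooks the head from the rbtree and updates all the counters in one place. */
	btrfs_delete_ref_head(delayed_refs, head);

	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);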
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9301b3ad9217..b3e4c9fcb664 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -400,6 +400,20 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 	return head;
 }
 
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head)
+{
+	lockdep_assert_held(&delayed_refs->lock);
+	lockdep_assert_held(&head->lock);
+
+	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+	RB_CLEAR_NODE(&head->href_node);
+	atomic_dec(&delayed_refs->num_entries);
+	delayed_refs->num_heads--;
+	if (head->processing == 0)
+		delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
  *
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 8e20c5cb5404..d2af974f68a1 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -261,7 +261,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
 	mutex_unlock(&head->mutex);
 }
-
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head);
 
 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 		struct btrfs_delayed_ref_root *delayed_refs);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d242a1174e50..c36b3a42f2bb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2474,12 +2474,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 		spin_unlock(&delayed_refs->lock);
 		return 1;
 	}
-	delayed_refs->num_heads--;
-	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-	RB_CLEAR_NODE(&head->href_node);
+	btrfs_delete_ref_head(delayed_refs, head);
 	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);
-	atomic_dec(&delayed_refs->num_entries);
 
 	trace_run_delayed_ref_head(fs_info, head, 0);
 
@@ -6984,22 +6981,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	if (!mutex_trylock(&head->mutex))
 		goto out;
 
-	/*
-	 * at this point we have a head with no other entries. Go
-	 * ahead and process it.
-	 */
-	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-	RB_CLEAR_NODE(&head->href_node);
-	atomic_dec(&delayed_refs->num_entries);
-
-	/*
-	 * we don't take a ref on the node because we're removing it from the
-	 * tree, so we just steal the ref the tree was holding.
-	 */
-	delayed_refs->num_heads--;
-	if (head->processing == 0)
-		delayed_refs->num_heads_ready--;
+	btrfs_delete_ref_head(delayed_refs, head);
 	head->processing = 0;
+
 	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);
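For readers skimming the diff, the tail of check_ref_cleanup() after this patch, condensed from the last hunk above (everything outside the hunk elided), reads roughly as follows; note that head->processing is cleared only after the helper has run:

	if (!mutex_trylock(&head->mutex))
		goto out;

	/* One helper now does the rb_erase and all the counter bookkeeping. */
	btrfs_delete_ref_head(delayed_refs, head);
	head->processing = 0;

	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);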