Message ID | 160b0452ecb4a810b819e0eae68bd9ef507cc813.1618494550.git.johannes.thumshirn@wdc.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | btrfs: zoned: automatic BG reclaim | expand |
On 4/15/21 9:58 AM, Johannes Thumshirn wrote: > As a preparation for another user, rename the unused_bgs_mutex into > reclaim_bgs_lock. > > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Reviewed-by: Josef Bacik <josef@toxicpanda.com> Thanks, Josef
On Thu, Apr 15, 2021 at 3:00 PM Johannes Thumshirn <johannes.thumshirn@wdc.com> wrote: > > As a preparation for another user, rename the unused_bgs_mutex into > reclaim_bgs_lock. > > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Reviewed-by: Filipe Manana <fdmanana@suse.com> Looks good, thanks. > --- > fs/btrfs/block-group.c | 6 +++--- > fs/btrfs/ctree.h | 2 +- > fs/btrfs/disk-io.c | 6 +++--- > fs/btrfs/volumes.c | 46 +++++++++++++++++++++--------------------- > 4 files changed, 30 insertions(+), 30 deletions(-) > > diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c > index 293f3169be80..bbb5a6e170c7 100644 > --- a/fs/btrfs/block-group.c > +++ b/fs/btrfs/block-group.c > @@ -1289,7 +1289,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) > * Long running balances can keep us blocked here for eternity, so > * simply skip deletion if we're unable to get the mutex. > */ > - if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex)) > + if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) > return; > > spin_lock(&fs_info->unused_bgs_lock); > @@ -1462,12 +1462,12 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) > spin_lock(&fs_info->unused_bgs_lock); > } > spin_unlock(&fs_info->unused_bgs_lock); > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > return; > > flip_async: > btrfs_end_transaction(trans); > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > btrfs_put_block_group(block_group); > btrfs_discard_punt_unused_bgs_list(fs_info); > } > diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h > index 2c858d5349c8..c80302564e6b 100644 > --- a/fs/btrfs/ctree.h > +++ b/fs/btrfs/ctree.h > @@ -957,7 +957,7 @@ struct btrfs_fs_info { > spinlock_t unused_bgs_lock; > struct list_head unused_bgs; > struct mutex unused_bg_unpin_mutex; > - struct mutex delete_unused_bgs_mutex; > + struct mutex reclaim_bgs_lock; > > /* Cached block sizes */ > u32 
nodesize; > diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c > index 0a1182694f48..e52b89ad0a61 100644 > --- a/fs/btrfs/disk-io.c > +++ b/fs/btrfs/disk-io.c > @@ -1890,10 +1890,10 @@ static int cleaner_kthread(void *arg) > btrfs_run_defrag_inodes(fs_info); > > /* > - * Acquires fs_info->delete_unused_bgs_mutex to avoid racing > + * Acquires fs_info->reclaim_bgs_lock to avoid racing > * with relocation (btrfs_relocate_chunk) and relocation > * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) > - * after acquiring fs_info->delete_unused_bgs_mutex. So we > + * after acquiring fs_info->reclaim_bgs_lock. So we > * can't hold, nor need to, fs_info->cleaner_mutex when deleting > * unused block groups. > */ > @@ -2876,7 +2876,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) > spin_lock_init(&fs_info->treelog_bg_lock); > rwlock_init(&fs_info->tree_mod_log_lock); > mutex_init(&fs_info->unused_bg_unpin_mutex); > - mutex_init(&fs_info->delete_unused_bgs_mutex); > + mutex_init(&fs_info->reclaim_bgs_lock); > mutex_init(&fs_info->reloc_mutex); > mutex_init(&fs_info->delalloc_root_mutex); > mutex_init(&fs_info->zoned_meta_io_lock); > diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c > index b1bab75ec12a..a2a7f5ab0a3e 100644 > --- a/fs/btrfs/volumes.c > +++ b/fs/btrfs/volumes.c > @@ -3118,7 +3118,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) > * we release the path used to search the chunk/dev tree and before > * the current task acquires this mutex and calls us. 
> */ > - lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); > + lockdep_assert_held(&fs_info->reclaim_bgs_lock); > > /* step one, relocate all the extents inside this chunk */ > btrfs_scrub_pause(fs_info); > @@ -3185,10 +3185,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) > key.type = BTRFS_CHUNK_ITEM_KEY; > > while (1) { > - mutex_lock(&fs_info->delete_unused_bgs_mutex); > + mutex_lock(&fs_info->reclaim_bgs_lock); > ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); > if (ret < 0) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto error; > } > BUG_ON(ret == 0); /* Corruption */ > @@ -3196,7 +3196,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) > ret = btrfs_previous_item(chunk_root, path, key.objectid, > key.type); > if (ret) > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > if (ret < 0) > goto error; > if (ret > 0) > @@ -3217,7 +3217,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) > else > BUG_ON(ret); > } > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > > if (found_key.offset == 0) > break; > @@ -3757,10 +3757,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > goto error; > } > > - mutex_lock(&fs_info->delete_unused_bgs_mutex); > + mutex_lock(&fs_info->reclaim_bgs_lock); > ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); > if (ret < 0) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto error; > } > > @@ -3774,7 +3774,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > ret = btrfs_previous_item(chunk_root, path, 0, > BTRFS_CHUNK_ITEM_KEY); > if (ret) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > ret = 0; > break; > } > @@ -3784,7 +3784,7 @@ static int 
__btrfs_balance(struct btrfs_fs_info *fs_info) > btrfs_item_key_to_cpu(leaf, &found_key, slot); > > if (found_key.objectid != key.objectid) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > break; > } > > @@ -3801,12 +3801,12 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > > btrfs_release_path(path); > if (!ret) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto loop; > } > > if (counting) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > spin_lock(&fs_info->balance_lock); > bctl->stat.expected++; > spin_unlock(&fs_info->balance_lock); > @@ -3831,7 +3831,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > count_meta < bctl->meta.limit_min) > || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && > count_sys < bctl->sys.limit_min)) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto loop; > } > > @@ -3845,7 +3845,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > ret = btrfs_may_alloc_data_chunk(fs_info, > found_key.offset); > if (ret < 0) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto error; > } else if (ret == 1) { > chunk_reserved = 1; > @@ -3853,7 +3853,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) > } > > ret = btrfs_relocate_chunk(fs_info, found_key.offset); > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > if (ret == -ENOSPC) { > enospc_errors++; > } else if (ret == -ETXTBSY) { > @@ -4738,16 +4738,16 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) > key.type = BTRFS_DEV_EXTENT_KEY; > > do { > - mutex_lock(&fs_info->delete_unused_bgs_mutex); > + mutex_lock(&fs_info->reclaim_bgs_lock); > ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); > if (ret < 0) { > - 
mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto done; > } > > ret = btrfs_previous_item(root, path, 0, key.type); > if (ret) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > if (ret < 0) > goto done; > ret = 0; > @@ -4760,7 +4760,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) > btrfs_item_key_to_cpu(l, &key, path->slots[0]); > > if (key.objectid != device->devid) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > btrfs_release_path(path); > break; > } > @@ -4769,7 +4769,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) > length = btrfs_dev_extent_length(l, dev_extent); > > if (key.offset + length <= new_size) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > btrfs_release_path(path); > break; > } > @@ -4785,12 +4785,12 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) > */ > ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); > if (ret < 0) { > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > goto done; > } > > ret = btrfs_relocate_chunk(fs_info, chunk_offset); > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > if (ret == -ENOSPC) { > failed++; > } else if (ret) { > @@ -8016,7 +8016,7 @@ static int relocating_repair_kthread(void *data) > return -EBUSY; > } > > - mutex_lock(&fs_info->delete_unused_bgs_mutex); > + mutex_lock(&fs_info->reclaim_bgs_lock); > > /* Ensure block group still exists */ > cache = btrfs_lookup_block_group(fs_info, target); > @@ -8038,7 +8038,7 @@ static int relocating_repair_kthread(void *data) > out: > if (cache) > btrfs_put_block_group(cache); > - mutex_unlock(&fs_info->delete_unused_bgs_mutex); > + mutex_unlock(&fs_info->reclaim_bgs_lock); > 
btrfs_exclop_finish(fs_info); > > return ret; > -- > 2.30.0 >
On Thu, Apr 15, 2021 at 10:58:34PM +0900, Johannes Thumshirn wrote: > --- a/fs/btrfs/ctree.h > +++ b/fs/btrfs/ctree.h > @@ -957,7 +957,7 @@ struct btrfs_fs_info { > spinlock_t unused_bgs_lock; > struct list_head unused_bgs; > struct mutex unused_bg_unpin_mutex; > - struct mutex delete_unused_bgs_mutex; > + struct mutex reclaim_bgs_lock; Please write a comment what the mutex protects.
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 293f3169be80..bbb5a6e170c7 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1289,7 +1289,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Long running balances can keep us blocked here for eternity, so * simply skip deletion if we're unable to get the mutex. */ - if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex)) + if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) return; spin_lock(&fs_info->unused_bgs_lock); @@ -1462,12 +1462,12 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_lock(&fs_info->unused_bgs_lock); } spin_unlock(&fs_info->unused_bgs_lock); - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); return; flip_async: btrfs_end_transaction(trans); - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); btrfs_put_block_group(block_group); btrfs_discard_punt_unused_bgs_list(fs_info); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c858d5349c8..c80302564e6b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -957,7 +957,7 @@ struct btrfs_fs_info { spinlock_t unused_bgs_lock; struct list_head unused_bgs; struct mutex unused_bg_unpin_mutex; - struct mutex delete_unused_bgs_mutex; + struct mutex reclaim_bgs_lock; /* Cached block sizes */ u32 nodesize; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0a1182694f48..e52b89ad0a61 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1890,10 +1890,10 @@ static int cleaner_kthread(void *arg) btrfs_run_defrag_inodes(fs_info); /* - * Acquires fs_info->delete_unused_bgs_mutex to avoid racing + * Acquires fs_info->reclaim_bgs_lock to avoid racing * with relocation (btrfs_relocate_chunk) and relocation * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) - * after acquiring fs_info->delete_unused_bgs_mutex. So we + * after acquiring fs_info->reclaim_bgs_lock. 
So we * can't hold, nor need to, fs_info->cleaner_mutex when deleting * unused block groups. */ @@ -2876,7 +2876,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) spin_lock_init(&fs_info->treelog_bg_lock); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->unused_bg_unpin_mutex); - mutex_init(&fs_info->delete_unused_bgs_mutex); + mutex_init(&fs_info->reclaim_bgs_lock); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); mutex_init(&fs_info->zoned_meta_io_lock); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b1bab75ec12a..a2a7f5ab0a3e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3118,7 +3118,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) * we release the path used to search the chunk/dev tree and before * the current task acquires this mutex and calls us. */ - lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); + lockdep_assert_held(&fs_info->reclaim_bgs_lock); /* step one, relocate all the extents inside this chunk */ btrfs_scrub_pause(fs_info); @@ -3185,10 +3185,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { - mutex_lock(&fs_info->delete_unused_bgs_mutex); + mutex_lock(&fs_info->reclaim_bgs_lock); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto error; } BUG_ON(ret == 0); /* Corruption */ @@ -3196,7 +3196,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) ret = btrfs_previous_item(chunk_root, path, key.objectid, key.type); if (ret) - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret < 0) goto error; if (ret > 0) @@ -3217,7 +3217,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) else BUG_ON(ret); } - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + 
mutex_unlock(&fs_info->reclaim_bgs_lock); if (found_key.offset == 0) break; @@ -3757,10 +3757,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) goto error; } - mutex_lock(&fs_info->delete_unused_bgs_mutex); + mutex_lock(&fs_info->reclaim_bgs_lock); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto error; } @@ -3774,7 +3774,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) ret = btrfs_previous_item(chunk_root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); ret = 0; break; } @@ -3784,7 +3784,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid != key.objectid) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); break; } @@ -3801,12 +3801,12 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) btrfs_release_path(path); if (!ret) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto loop; } if (counting) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); spin_lock(&fs_info->balance_lock); bctl->stat.expected++; spin_unlock(&fs_info->balance_lock); @@ -3831,7 +3831,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) count_meta < bctl->meta.limit_min) || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && count_sys < bctl->sys.limit_min)) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto loop; } @@ -3845,7 +3845,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset); if (ret < 0) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto error; } 
else if (ret == 1) { chunk_reserved = 1; @@ -3853,7 +3853,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) } ret = btrfs_relocate_chunk(fs_info, found_key.offset); - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret == -ENOSPC) { enospc_errors++; } else if (ret == -ETXTBSY) { @@ -4738,16 +4738,16 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) key.type = BTRFS_DEV_EXTENT_KEY; do { - mutex_lock(&fs_info->delete_unused_bgs_mutex); + mutex_lock(&fs_info->reclaim_bgs_lock); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto done; } ret = btrfs_previous_item(root, path, 0, key.type); if (ret) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret < 0) goto done; ret = 0; @@ -4760,7 +4760,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); btrfs_release_path(path); break; } @@ -4769,7 +4769,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) length = btrfs_dev_extent_length(l, dev_extent); if (key.offset + length <= new_size) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); btrfs_release_path(path); break; } @@ -4785,12 +4785,12 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) */ ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset); if (ret < 0) { - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); goto done; } ret = btrfs_relocate_chunk(fs_info, chunk_offset); - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); if (ret == -ENOSPC) { failed++; } 
else if (ret) { @@ -8016,7 +8016,7 @@ static int relocating_repair_kthread(void *data) return -EBUSY; } - mutex_lock(&fs_info->delete_unused_bgs_mutex); + mutex_lock(&fs_info->reclaim_bgs_lock); /* Ensure block group still exists */ cache = btrfs_lookup_block_group(fs_info, target); @@ -8038,7 +8038,7 @@ static int relocating_repair_kthread(void *data) out: if (cache) btrfs_put_block_group(cache); - mutex_unlock(&fs_info->delete_unused_bgs_mutex); + mutex_unlock(&fs_info->reclaim_bgs_lock); btrfs_exclop_finish(fs_info); return ret;
As a preparation for another user, rename the delete_unused_bgs_mutex to reclaim_bgs_lock. Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> --- fs/btrfs/block-group.c | 6 +++--- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/volumes.c | 46 +++++++++++++++++++++--------------------- 4 files changed, 30 insertions(+), 30 deletions(-)