
[5/5] fs_btrfs_block-group: code cleanup

Message ID 20191210071357.5323-6-sebastian.scherbel@fau.de (mailing list archive)
State New, archived
Series btrfs: code cleanup

Commit Message

Sebastian Dec. 10, 2019, 7:13 a.m. UTC
From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several places in block-group.c and block-group.h where
the coding style does not follow the Linux kernel guidelines, in order to
improve readability.

1. bare use of 'unsigned' replaced by 'unsigned int' (see the short
illustration after this list)
2. code indentation fixed
3. lines with more than 80 characters are broken into sensible chunks,
unless exceeding the limit significantly increases readability
4. tabs are used for indentation where possible
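
For illustration only, a minimal sketch of what items 1 and 3 amount to.
The struct, function, and variable names below are hypothetical and do not
come from this patch or the kernel tree; only the style rules they
demonstrate do.

	/* Hypothetical type, used only to keep the sketch self-contained. */
	struct example_ctx {
		unsigned int seq;
	};

	/*
	 * Item 1: spell the type out as 'unsigned int' instead of bare
	 * 'unsigned'.
	 * Item 3: a parameter list that would pass 80 columns is wrapped,
	 * with the continuation aligned under the opening parenthesis.
	 */
	static unsigned int example_read_seq(const struct example_ctx *ctx,
					     unsigned int fallback_seq)
	{
		unsigned int seq;	/* was: unsigned seq; */

		seq = ctx ? ctx->seq : fallback_seq;
		return seq;
	}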

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/block-group.c | 21 +++++++++++++--------
 fs/btrfs/block-group.h |  8 ++++----
 2 files changed, 17 insertions(+), 12 deletions(-)

Patch

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 6934a5b8708f..22bc97515e96 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -97,7 +97,7 @@  static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
 
 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 {
-	unsigned seq;
+	unsigned int seq;
 	u64 flags;
 
 	do {
@@ -259,7 +259,8 @@  struct btrfs_block_group *btrfs_next_block_group(
 
 		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+		return cache;
 	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
@@ -447,7 +448,8 @@  static void fragment_free_space(struct btrfs_block_group *block_group)
  * used yet since their free space will be released as soon as the transaction
  * commits.
  */
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start,
+		       u64 end)
 {
 	struct btrfs_fs_info *info = block_group->fs_info;
 	u64 extent_start, extent_end, size, total_added = 0;
@@ -670,7 +672,8 @@  static noinline void caching_thread(struct btrfs_work *work)
 	btrfs_put_block_group(block_group);
 }
 
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
+			    int load_cache_only)
 {
 	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -1696,7 +1699,8 @@  static int read_one_block_group(struct btrfs_fs_info *info,
 
 	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
 
-	cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+	cache = btrfs_create_block_group_cache(info, key->objectid,
+					       key->offset);
 	if (!cache)
 		return -ENOMEM;
 
@@ -2023,8 +2027,8 @@  static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
  *
  * @cache:		the destination block group
  * @do_chunk_alloc:	whether need to do chunk pre-allocation, this is to
- * 			ensure we still have some free space after marking this
- * 			block group RO.
+ *			ensure we still have some free space after marking this
+ *			block group RO.
  */
 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 			     bool do_chunk_alloc)
@@ -2082,7 +2086,8 @@  int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 		goto unlock_out;
 	if (!ret)
 		goto out;
-	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
+	alloc_flags = btrfs_get_alloc_profile(fs_info,
+					      cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 9b409676c4b2..d4e9d2d88542 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -139,9 +139,9 @@  struct btrfs_block_group {
 	 * Incremented while holding the spinlock *lock* by a task checking if
 	 * it can perform a nocow write (incremented if the value for the *ro*
 	 * field is 0). Decremented by such tasks once they create an ordered
-	 * extent or before that if some error happens before reaching that step.
-	 * This is to prevent races between block group relocation and nocow
-	 * writes through direct IO.
+	 * extent or before that if some error happens before reaching that
+	 * step. This is to prevent races between block group relocation and
+	 * nocow writes through direct IO.
 	 */
 	atomic_t nocow_writers;
 
@@ -186,7 +186,7 @@  bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
-				           u64 num_bytes);
+					   u64 num_bytes);
 int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
 int btrfs_cache_block_group(struct btrfs_block_group *cache,
 			    int load_cache_only);