diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -97,7 +97,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
- unsigned seq;
+ unsigned int seq;
u64 flags;
do {
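The seq/do-while pair here is the read side of a seqlock: get_alloc_profile() snapshots the available allocation bits under fs_info->profiles_lock and retries if a writer raced with it. Below is a minimal userspace sketch of that retry pattern using C11 atomics in place of read_seqbegin()/read_seqretry(); the toy seqlock type and its avail_bits field are illustrative stand-ins, not btrfs structures.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy seqlock: even sequence = stable, odd = writer in progress. */
struct toy_seqlock {
        atomic_uint seq;
        uint64_t avail_bits;            /* data guarded by the seqlock */
};

static unsigned int toy_read_seqbegin(struct toy_seqlock *sl)
{
        unsigned int s;

        /* Wait for any in-progress writer (odd sequence) to finish. */
        while ((s = atomic_load_explicit(&sl->seq, memory_order_acquire)) & 1)
                ;
        return s;
}

static int toy_read_seqretry(struct toy_seqlock *sl, unsigned int start)
{
        /* Order the data reads above before re-checking the sequence. */
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&sl->seq, memory_order_relaxed) != start;
}

static uint64_t get_profile(struct toy_seqlock *sl, uint64_t orig_flags)
{
        uint64_t flags;
        unsigned int seq;

        do {
                seq = toy_read_seqbegin(sl);
                flags = orig_flags | sl->avail_bits;    /* lockless snapshot */
        } while (toy_read_seqretry(sl, seq));
        return flags;
}

int main(void)
{
        struct toy_seqlock sl = { .seq = 0, .avail_bits = 0x8 };

        printf("profile: 0x%llx\n", (unsigned long long)get_profile(&sl, 0x1));
        return 0;
}

The hunk itself only changes `unsigned` to `unsigned int` to match kernel style; the retry logic is untouched.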
@@ -259,7 +259,8 @@ struct btrfs_block_group *btrfs_next_block_group(
spin_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
- cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+ cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+ return cache;
}
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
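btrfs_next_block_group() advances through the red-black tree of block groups hand over hand: take a reference on the successor before dropping the one held on the current group, so neither can be freed mid-walk. A self-contained userspace model of that get/put discipline over a linked list follows; the node type and helpers are stand-ins, not the btrfs structures.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        atomic_int refs;
        int value;
        struct node *next;
};

static struct node *node_get(struct node *n)
{
        if (n)
                atomic_fetch_add(&n->refs, 1);
        return n;
}

static void node_put(struct node *n)
{
        if (n && atomic_fetch_sub(&n->refs, 1) == 1)
                free(n);
}

/* Return the successor with a reference held, dropping the caller's
 * reference on @n, as btrfs_next_block_group() does for block groups. */
static struct node *next_node(struct node *n)
{
        struct node *next = node_get(n->next);

        node_put(n);
        return next;
}

int main(void)
{
        struct node *head = NULL, *cur;
        int i;

        for (i = 3; i >= 1; i--) {
                struct node *n = malloc(sizeof(*n));

                atomic_init(&n->refs, 1);       /* the list's reference */
                n->value = i;
                n->next = head;
                head = n;
        }

        for (cur = node_get(head); cur; cur = next_node(cur))
                printf("visiting %d\n", cur->value);

        while (head) {                          /* drop the list's refs */
                struct node *n = head;

                head = head->next;
                node_put(n);
        }
        return 0;
}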
@@ -447,7 +448,8 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
* used yet since their free space will be released as soon as the transaction
* commits.
*/
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start,
+ u64 end)
{
struct btrfs_fs_info *info = block_group->fs_info;
u64 extent_start, extent_end, size, total_added = 0;
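The comment above (and the function it documents) is about accounting: while caching a block group, only the holes between allocated or pinned extents are added to the free space cache. The following standalone model works that arithmetic for a sorted list of in-use ranges; the range type and the add_gaps() helper are hypothetical, chosen to mirror the start/end walk in add_new_free_space().

#include <stdint.h>
#include <stdio.h>

struct range {
        uint64_t start;
        uint64_t end;                   /* exclusive */
};

/* Walk the in-use extents inside [start, end) in order and count only
 * the holes between them as free space.  @used is sorted, non-overlapping. */
static uint64_t add_gaps(uint64_t start, uint64_t end,
                         const struct range *used, size_t n)
{
        uint64_t total_added = 0;
        size_t i;

        for (i = 0; i < n && start < end; i++) {
                if (used[i].end <= start)
                        continue;       /* extent entirely before cursor */
                if (used[i].start >= end)
                        break;          /* extent beyond the block group */
                if (used[i].start > start) {
                        uint64_t size = used[i].start - start;

                        total_added += size;
                        printf("free: [%llu, %llu)\n",
                               (unsigned long long)start,
                               (unsigned long long)(start + size));
                }
                if (used[i].end > start)
                        start = used[i].end;
        }
        if (start < end) {              /* tail hole after the last extent */
                total_added += end - start;
                printf("free: [%llu, %llu)\n", (unsigned long long)start,
                       (unsigned long long)end);
        }
        return total_added;
}

int main(void)
{
        const struct range used[] = {
                { 4096, 8192 }, { 16384, 20480 },
        };

        /* block group [0, 32768): holes are [0,4096), [8192,16384) and
         * [20480,32768), so 4096 + 8192 + 12288 = 24576 bytes of free space */
        printf("total added: %llu\n",
               (unsigned long long)add_gaps(0, 32768, used, 2));
        return 0;
}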
@@ -670,7 +672,8 @@ static noinline void caching_thread(struct btrfs_work *work)
btrfs_put_block_group(block_group);
}
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
+ int load_cache_only)
{
DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
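DEFINE_WAIT(wait) sets up the canonical open-coded wait-queue sleep: the caller parks itself on the caching control's queue and the caching thread wakes it as progress is made. A sketch of that idiom is below; the wq parameter and cache_done() predicate are generic stand-ins, not the verbatim btrfs_cache_block_group() body.

#include <linux/sched.h>
#include <linux/wait.h>

static void wait_for_caching(wait_queue_head_t *wq, bool (*cache_done)(void))
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                if (cache_done())
                        break;
                schedule();             /* sleep until a wake_up(wq) */
        }
        finish_wait(wq, &wait);
}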
@@ -1696,7 +1699,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
- cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+ cache = btrfs_create_block_group_cache(info, key->objectid,
+ key->offset);
if (!cache)
return -ENOMEM;
@@ -2023,8 +2027,8 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
*
* @cache: the destination block group
* @do_chunk_alloc: whether need to do chunk pre-allocation, this is to
- * ensure we still have some free space after marking this
- * block group RO.
+ * ensure we still have some free space after marking this
+ * block group RO.
*/
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc)
@@ -2082,7 +2086,8 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
goto unlock_out;
if (!ret)
goto out;
- alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
+ alloc_flags = btrfs_get_alloc_profile(fs_info,
+ cache->space_info->flags);
ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
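For context, the lines above sit in the fallback path that the do_chunk_alloc comment describes: try to flip the group read-only, and if that would leave the space_info without room, force-allocate a fresh chunk with the same profile and retry. A condensed paraphrase of the surrounding control flow (locking and the remaining error paths trimmed; not the verbatim function):

        ret = inc_block_group_ro(cache, 0);
        if (!ret)
                goto out;               /* marked RO, enough space left */

        /* Not enough free space elsewhere: force a new chunk with the
         * same profile, then retry marking the block group read-only. */
        alloc_flags = btrfs_get_alloc_profile(fs_info,
                                              cache->space_info->flags);
        ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
        ret = inc_block_group_ro(cache, 0);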
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -139,9 +139,9 @@ struct btrfs_block_group {
* Incremented while holding the spinlock *lock* by a task checking if
* it can perform a nocow write (incremented if the value for the *ro*
* field is 0). Decremented by such tasks once they create an ordered
- * extent or before that if some error happens before reaching that step.
- * This is to prevent races between block group relocation and nocow
- * writes through direct IO.
+ * extent or before that if some error happens before reaching that
+ * step. This is to prevent races between block group relocation and
+ * nocow writes through direct IO.
*/
atomic_t nocow_writers;
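The rewrapped comment documents a counted-guard handshake on struct btrfs_block_group: nocow writers may take the counter only while ro is 0, and relocation flips ro first and then waits for the counter to drain, all serialized by the group's spinlock. Below is a minimal pthread model of that handshake; the group type and helper names are illustrative, not the btrfs API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
        pthread_mutex_t lock;           /* plays the role of *lock* */
        pthread_cond_t drained;
        int nocow_writers;
        bool ro;
};

/* Writer side: may only start a nocow write while the group is rw. */
static bool inc_nocow_writers(struct group *g)
{
        bool ok;

        pthread_mutex_lock(&g->lock);
        ok = !g->ro;
        if (ok)
                g->nocow_writers++;
        pthread_mutex_unlock(&g->lock);
        return ok;
}

static void dec_nocow_writers(struct group *g)
{
        pthread_mutex_lock(&g->lock);
        if (--g->nocow_writers == 0)
                pthread_cond_broadcast(&g->drained);
        pthread_mutex_unlock(&g->lock);
}

/* Relocation side: flip ro first, then wait for in-flight writers. */
static void set_ro_and_wait(struct group *g)
{
        pthread_mutex_lock(&g->lock);
        g->ro = true;
        while (g->nocow_writers > 0)
                pthread_cond_wait(&g->drained, &g->lock);
        pthread_mutex_unlock(&g->lock);
}

int main(void)
{
        struct group g = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .drained = PTHREAD_COND_INITIALIZER,
        };

        if (inc_nocow_writers(&g)) {
                /* ... the nocow write creates its ordered extent ... */
                dec_nocow_writers(&g);
        }
        set_ro_and_wait(&g);
        printf("group is RO with no nocow writers in flight\n");
        return 0;
}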
@@ -186,7 +186,7 @@ bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
- u64 num_bytes);
+ u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
int load_cache_only);
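In call order, the nocow declarations above pair up roughly as sketched below; the surrounding write and relocation paths are illustrative, not lifted from the source.

        /* nocow write path (e.g. direct IO into an existing extent) */
        if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
                /* ... set up the write and create the ordered extent ... */
                btrfs_dec_nocow_writers(fs_info, bytenr);
        } else {
                /* block group is RO (relocation); fall back to COW */
        }

        /* relocation path, after marking the block group read-only */
        btrfs_wait_nocow_writers(bg);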