@@ -7595,9 +7595,9 @@ static int find_possible_backrefs(struct btrfs_path *path,
cache = lookup_cache_extent(extent_cache, bytenr, 1);
if (cache) {
- struct extent_record *tmp;
+ struct extent_record *extent;
- tmp = container_of(cache, struct extent_record, cache);
+ extent = container_of(cache, struct extent_record, cache);
/*
* If we found an extent record for the bytenr for this
@@ -7607,7 +7607,7 @@ static int find_possible_backrefs(struct btrfs_path *path,
* extent tree since they likely belong to this record
* and we need to fix it if it doesn't match bytenrs.
*/
- if (tmp->found_rec)
+ if (extent->found_rec)
continue;
}
@@ -5027,7 +5027,7 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
next = btrfs_find_tree_block(gfs_info, bytenr, gfs_info->nodesize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen, 0)) {
- struct btrfs_tree_parent_check check = {
+ struct btrfs_tree_parent_check tree_check = {
.owner_root = btrfs_header_owner(cur),
.transid = ptr_gen,
.level = *level - 1,
@@ -5035,7 +5035,7 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
free_extent_buffer(next);
reada_walk_down(root, cur, path->slots[*level]);
- next = read_tree_block(gfs_info, bytenr, &check);
+ next = read_tree_block(gfs_info, bytenr, &tree_check);
if (!extent_buffer_uptodate(next)) {
struct btrfs_key node_key;
@@ -406,7 +406,7 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
int ret;
u64 id, nr_roots, nr_refs;
struct qgroup_count *count;
- struct ulist *counts = ulist_alloc(0);
+ struct ulist *local_counts = ulist_alloc(0);
struct ulist *tmp = ulist_alloc(0);
struct ulist_iterator uiter;
struct ulist_iterator tmp_uiter;
@@ -414,8 +414,8 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
struct ulist_node *tmp_unode;
struct btrfs_qgroup_list *glist;
- if (!counts || !tmp) {
- ulist_free(counts);
+ if (!local_counts || !tmp) {
+ ulist_free(local_counts);
ulist_free(tmp);
return ENOMEM;
}
@@ -433,7 +433,7 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
continue;
BUG_ON(!is_fstree(unode->val));
- ret = ulist_add(counts, count->qgroupid, ptr_to_u64(count), 0);
+ ret = ulist_add(local_counts, count->qgroupid, ptr_to_u64(count), 0);
if (ret < 0)
goto out;
@@ -460,7 +460,7 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
BUG_ON(!count);
- ret = ulist_add(counts, id, ptr_to_u64(parent),
+ ret = ulist_add(local_counts, id, ptr_to_u64(parent),
0);
if (ret < 0)
goto out;
@@ -478,7 +478,7 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
*/
nr_roots = roots->nnodes;
ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(counts, &uiter))) {
+ while ((unode = ulist_next(local_counts, &uiter))) {
count = u64_to_ptr(unode->aux);
nr_refs = group_get_cur_refcnt(count);
@@ -504,7 +504,7 @@ static int account_one_extent(struct ulist *roots, u64 bytenr, u64 num_bytes)
inc_qgroup_seq(roots->nnodes);
ret = 0;
out:
- ulist_free(counts);
+ ulist_free(local_counts);
ulist_free(tmp);
return ret;
}
@@ -920,7 +920,7 @@ static int add_qgroup_relation(u64 memberid, u64 parentid)
}
static void read_qgroup_status(struct extent_buffer *eb, int slot,
- struct counts_tree *counts)
+ struct counts_tree *ct)
{
struct btrfs_qgroup_status_item *status_item;
u64 flags;
@@ -931,10 +931,9 @@ static void read_qgroup_status(struct extent_buffer *eb, int slot,
* Since qgroup_inconsist/rescan_running is just one bit,
* assign value directly won't work.
*/
- counts->qgroup_inconsist = !!(flags &
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT);
- counts->rescan_running = !!(flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN);
- counts->scan_progress = btrfs_qgroup_status_rescan(eb, status_item);
+ ct->qgroup_inconsist = !!(flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT);
+ ct->rescan_running = !!(flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN);
+ ct->scan_progress = btrfs_qgroup_status_rescan(eb, status_item);
}
static int load_quota_info(struct btrfs_fs_info *info)
@@ -169,10 +169,10 @@ static int traverse_tree_blocks(struct extent_io_tree *tree,
if (ret)
return ret;
} else {
- u64 end;
+ u64 child_end;
bytenr = btrfs_node_blockptr(eb, i);
- end = bytenr + fs_info->nodesize - 1;
+ child_end = bytenr + fs_info->nodesize - 1;
/* If we aren't the tree root don't read the block */
if (level == 1 && !tree_root) {
@@ -180,7 +180,8 @@ static int traverse_tree_blocks(struct extent_io_tree *tree,
btrfs_pin_extent(fs_info, bytenr,
fs_info->nodesize);
else
- set_extent_dirty(tree, bytenr, end, GFP_NOFS);
+ set_extent_dirty(tree, bytenr,
+ child_end, GFP_NOFS);
continue;
}
@@ -537,11 +537,11 @@ static int print_filesystem_usage_overall(int fd, struct chunk_info *chunkinfo,
* As mixed mode is not supported in zoned mode, this
* will account for all profile types
*/
- u64 tmp;
+ u64 unusable;
- tmp = device_get_zone_unusable(fd, flags);
- if (tmp != DEVICE_ZONE_UNUSABLE_UNKNOWN)
- zone_unusable += tmp;
+ unusable = device_get_zone_unusable(fd, flags);
+ if (unusable != DEVICE_ZONE_UNUSABLE_UNKNOWN)
+ zone_unusable += unusable;
}
if (flags & BTRFS_BLOCK_GROUP_DATA) {
r_data_used += sargs->spaces[i].used_bytes * ratio;
@@ -1411,7 +1411,7 @@ static int btrfs_list_subvols(int fd, struct rb_root *root_lookup)
n = rb_first(root_lookup);
while (n) {
struct root_info *entry;
- int ret;
+
entry = to_root_info(n);
ret = lookup_ino_path(fd, entry);
if (ret && ret != -ENOENT)
@@ -227,15 +227,15 @@ static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
for (i = 0; i < btrfs_header_nritems(eb); i++) {
struct btrfs_chunk *chunk;
struct btrfs_key key;
- u64 type, physical, physical_dup, size = (u64)-1;
+ u64 type, physical, physical_dup;
+ u64 found_size = (u64)-1;
btrfs_item_key_to_cpu(eb, &key, i);
if (key.type != BTRFS_CHUNK_ITEM_KEY)
continue;
- size = 0;
physical = logical_to_physical(mdres, key.offset,
- &size, &physical_dup);
+ &found_size, &physical_dup);
if (!physical_dup)
truncate_item(eb, i, sizeof(*chunk));
@@ -254,9 +254,7 @@ static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
btrfs_set_chunk_num_stripes(eb, chunk, 1);
btrfs_set_chunk_sub_stripes(eb, chunk, 0);
btrfs_set_stripe_devid_nr(eb, chunk, 0, mdres->devid);
- if (size != (u64)-1)
- btrfs_set_stripe_offset_nr(eb, chunk, 0,
- physical);
+ btrfs_set_stripe_offset_nr(eb, chunk, 0, physical);
/* update stripe 2 offset */
if (physical_dup)
btrfs_set_stripe_offset_nr(eb, chunk, 1,
@@ -3768,7 +3768,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- struct btrfs_delayed_tree_ref *ref = btrfs_delayed_node_to_tree_ref(node);
ret = __free_extent(trans, node->bytenr, node->num_bytes,
ref->parent, ref->root, ref->level, 0, 1);
} else {
@@ -248,7 +248,7 @@ static int generate_new_data_csums_range(struct btrfs_fs_info *fs_info, u64 star
}
while (cur < last_csum) {
- u64 start;
+ u64 csum_start;
u64 len;
u32 item_size;
@@ -276,14 +276,14 @@ static int generate_new_data_csums_range(struct btrfs_fs_info *fs_info, u64 star
assert(key.offset >= cur);
item_size = btrfs_item_size(path.nodes[0], path.slots[0]);
- start = key.offset;
+ csum_start = key.offset;
len = item_size / fs_info->csum_size * fs_info->sectorsize;
read_extent_buffer(path.nodes[0], csum_buffer,
btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
item_size);
btrfs_release_path(&path);
- ret = generate_new_csum_range(trans, start, len, new_csum_type,
+ ret = generate_new_csum_range(trans, csum_start, len, new_csum_type,
csum_buffer);
if (ret < 0)
goto out;
@@ -303,7 +303,7 @@ static int generate_new_data_csums_range(struct btrfs_fs_info *fs_info, u64 star
goto out;
}
}
- cur = start + len;
+ cur = csum_start + len;
}
ret = btrfs_commit_transaction(trans, csum_root);
if (inject_error(0x4de02239))
@@ -628,7 +628,7 @@ out:
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
- u64 super_flags = btrfs_super_flags(fs_info->super_copy);
+ super_flags = btrfs_super_flags(fs_info->super_copy);
btrfs_set_super_csum_type(fs_info->super_copy, new_csum_type);
super_flags &= ~(BTRFS_SUPER_FLAG_CHANGING_DATA_CSUM |
There is quite a lot of variable shadowing in btrfs-progs; most cases just reuse some common names like tmp. Those are quite safe, and the shadowed ones are even of different types. But there are some exceptions: - @end in traverse_tree_blocks() There is already an @end with the same type but a different meaning (the end of the current extent buffer passed in). Just rename it to @child_end. - @start in generate_new_data_csums_range() Just rename it to @csum_start. - @size of fixup_chunk_tree_block() This one is particularly bad: we declare a local @size and initialize it to -1, then before we really utilize the variable @size, we immediately reset it to 0 and pass it to logical_to_physical(). Later there is a check on whether @size is not -1, which will always be true. According to the code in logical_to_physical(), @size would be clamped down by its original value, thus our local @size will always be 0. This patch renames the local @size to @found_size and only initializes it to -1. The call site only passes it because logical_to_physical() requires a non-NULL pointer; we don't really need to bother with the returned value. - duplicated @ref declaration in run_delayed_tree_ref() - duplicated @super_flags in change_meta_csums() Just delete the duplicated ones. Signed-off-by: Qu Wenruo <wqu@suse.com> --- check/main.c | 6 +++--- check/mode-lowmem.c | 4 ++-- check/qgroup-verify.c | 23 +++++++++++------------ check/repair.c | 7 ++++--- cmds/filesystem-usage.c | 8 ++++---- cmds/subvolume-list.c | 2 +- image/image-restore.c | 10 ++++------ kernel-shared/extent-tree.c | 1 - tune/change-csum.c | 10 +++++----- 9 files changed, 34 insertions(+), 37 deletions(-)