@@ -914,6 +914,8 @@ static int copy_free_space_cache(struct btrfs_block_group *block_group,
return ret;
}
+static struct lock_class_key btrfs_free_space_inode_key;
+
int load_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -924,6 +926,7 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
int ret = 0;
bool matched;
u64 used = block_group->used;
+ struct address_space *mapping;
/*
* Because we could potentially discard our loaded free space, we want
@@ -983,6 +986,14 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
}
spin_unlock(&block_group->lock);
+ /*
+ * Reinitialize the class of the inode->mapping->invalidate_lock for free
+ * space inodes to prevent false positives related to locks for normal
+ * inodes.
+ */
+ mapping = &inode->i_data;
+ lockdep_set_class(&mapping->invalidate_lock, &btrfs_free_space_inode_key);
+
ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
path, block_group->start);
btrfs_free_path(path);
Reinitialize the class of the lockdep map for inode->mapping->invalidate_lock in the load_free_space_cache() function in fs/btrfs/free-space-cache.c. This will prevent lockdep from producing false positives related to execution paths that make use of free space inodes and paths that make use of normal inodes. Specifically, with this change lockdep will create separate lock dependencies that include the invalidate_lock: one for the case that free space inodes are used, and another for the case that normal inodes are used. Signed-off-by: Ioannis Angelakopoulos <iangelak@fb.com> --- fs/btrfs/free-space-cache.c | 11 +++++++++++ 1 file changed, 11 insertions(+)