diff mbox series

[RFC,1/2] btrfs: Add lockdep wrappers around the extent bits locking and unlocking functions

Message ID 20220812004241.1722846-2-iangelak@fb.com (mailing list archive)
State New, archived
Headers show
Series btrfs: Add a lockdep annotation for the extent bits wait event | expand

Commit Message

Ioannis Angelakopoulos Aug. 12, 2022, 12:42 a.m. UTC
Add wrappers and prototypes that apply lockdep annotations on the extent
bits wait event in fs/btrfs/extent-io-tree.h, mirroring the functions
that lock and unlock the extent bits.

Unfortunately, as it stands, a generic annotation of the extent bits wait
event is not possible with lockdep, since there are cases where the extent
bits are locked in one execution context (lockdep map acquire) and
unlocked in another context (lockdep map release). Lockdep, however,
expects the acquisition and release of the lockdep map to occur in the
same execution context.

An example of such a case is btrfs_read_folio() in fs/btrfs/extent_io.c,
which locks the extent bits by calling
btrfs_lock_and_flush_ordered_range(); however, the extent bits are
unlocked asynchronously by a worker thread executing a submitted bio.

The lockdep wrappers are used to manually annotate places where extent bits
are locked.

Also introduce a new owner bit for the extent io tree related to the free
space inodes. This makes it simple to distinguish whether we are in a
context where free space inodes are used (do not annotate) or where
normal inodes are used (do annotate).

Signed-off-by: Ioannis Angelakopoulos <iangelak@fb.com>
---
 fs/btrfs/extent-io-tree.h    | 32 ++++++++++++++++++++++++++++++++
 fs/btrfs/free-space-cache.c  |  1 +
 include/trace/events/btrfs.h |  1 +
 3 files changed, 34 insertions(+)
diff mbox series

Patch

diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index c3eb52dbe61c..a6dd80f0408c 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -56,6 +56,7 @@  enum {
 	IO_TREE_FS_EXCLUDED_EXTENTS,
 	IO_TREE_BTREE_INODE_IO,
 	IO_TREE_INODE_IO,
+	IO_TREE_FREE_SPACE_INODE_IO,
 	IO_TREE_INODE_IO_FAILURE,
 	IO_TREE_RELOC_BLOCKS,
 	IO_TREE_TRANS_DIRTY_PAGES,
@@ -107,11 +108,20 @@  void extent_io_tree_release(struct extent_io_tree *tree);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		     struct extent_state **cached);
 
+int lock_extent_bits_lockdep(struct extent_io_tree *tree, u64 start, u64 end,
+			     struct extent_state **cached, bool nested);
+
 static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	return lock_extent_bits(tree, start, end, NULL);
 }
 
+static inline int lock_extent_lockdep(struct extent_io_tree *tree, u64 start,
+				      u64 end, bool nested)
+{
+	return lock_extent_bits_lockdep(tree, start, end, NULL, nested);
+}
+
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 
 int __init extent_io_init(void);
@@ -134,11 +144,26 @@  int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     struct extent_state **cached, gfp_t mask,
 		     struct extent_changeset *changeset);
 
+int clear_extent_bit_lockdep(struct extent_io_tree *tree, u64 start, u64 end,
+			     u32 bits, int wake, int delete,
+			     struct extent_state **cached);
+int __clear_extent_bit_lockdep(struct extent_io_tree *tree, u64 start, u64 end,
+			     u32 bits, int wake, int delete,
+			     struct extent_state **cached, gfp_t mask,
+			     struct extent_changeset *changeset);
+
 static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
 }
 
+static inline int unlock_extent_lockdep(struct extent_io_tree *tree, u64 start,
+					u64 end)
+{
+	return clear_extent_bit_lockdep(tree, start, end, EXTENT_LOCKED, 1, 0,
+					NULL);
+}
+
 static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
 		u64 end, struct extent_state **cached)
 {
@@ -146,6 +171,13 @@  static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
 				GFP_NOFS, NULL);
 }
 
+static inline int unlock_extent_cached_lockdep(struct extent_io_tree *tree,
+		u64 start, u64 end, struct extent_state **cached)
+{
+	return __clear_extent_bit_lockdep(tree, start, end, EXTENT_LOCKED, 1, 0,
+				cached, GFP_NOFS, NULL);
+}
+
 static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
 		u64 start, u64 end, struct extent_state **cached)
 {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81d9fe33672f..a93a8b91eda8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -130,6 +130,7 @@  struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
 		block_group->inode = igrab(inode);
 	spin_unlock(&block_group->lock);
 
+	BTRFS_I(inode)->io_tree.owner = IO_TREE_FREE_SPACE_INODE_IO;
 	return inode;
 }
 
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 73df80d462dc..f8c900914474 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -84,6 +84,7 @@  struct raid56_bio_trace_info;
 	EM( IO_TREE_FS_EXCLUDED_EXTENTS,  "EXCLUDED_EXTENTS")	    \
 	EM( IO_TREE_BTREE_INODE_IO,	  "BTREE_INODE_IO")	    \
 	EM( IO_TREE_INODE_IO,		  "INODE_IO")		    \
+	EM( IO_TREE_FREE_SPACE_INODE_IO,  "FREE_SPACE_INODE_IO")    \
 	EM( IO_TREE_INODE_IO_FAILURE,	  "INODE_IO_FAILURE")	    \
 	EM( IO_TREE_RELOC_BLOCKS,	  "RELOC_BLOCKS")	    \
 	EM( IO_TREE_TRANS_DIRTY_PAGES,	  "TRANS_DIRTY_PAGES")      \