[3/4] btrfs: move btrfs_set_path_blocking to other locking functions

Message ID: 25f9e827d1677c9423c630bc948c12629729b2c1.1569345962.git.dsterba@suse.com
State: New
Series: Minor cleanups in locking helpers

Commit Message

David Sterba Sept. 24, 2019, 5:33 p.m. UTC
The function belongs to the family of locking functions, so move it
there. The 'noinline' keyword is dropped as the function is now
exported and does not need it.
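
For context, a minimal sketch of the call pattern this function serves
(the caller and the sleeping operation below are illustrative, not part
of this patch): a task holding spinning tree locks along a path must
convert them to blocking locks before doing anything that can schedule:

	/* Illustrative only, not code added by this patch */
	btrfs_set_path_blocking(path);		/* spinning -> blocking locks */
	ret = operation_that_may_sleep();	/* now safe to schedule */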

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/ctree.c   | 25 -------------------------
 fs/btrfs/locking.c | 26 ++++++++++++++++++++++++++
 fs/btrfs/locking.h |  2 ++
 3 files changed, 28 insertions(+), 25 deletions(-)

Patch

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0231141de289..a55d55e5c913 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -56,31 +56,6 @@ struct btrfs_path *btrfs_alloc_path(void)
 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
 }
 
-/*
- * set all locked nodes in the path to blocking locks.  This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
-{
-	int i;
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-		if (!p->nodes[i] || !p->locks[i])
-			continue;
-		/*
-		 * If we currently have a spinning reader or writer lock this
-		 * will bump the count of blocking holders and drop the
-		 * spinlock.
-		 */
-		if (p->locks[i] == BTRFS_READ_LOCK) {
-			btrfs_set_lock_blocking_read(p->nodes[i]);
-			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-			btrfs_set_lock_blocking_write(p->nodes[i]);
-			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-		}
-	}
-}
-
 /* this also releases the path */
 void btrfs_free_path(struct btrfs_path *p)
 {
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 028513153ac4..f58606887859 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -316,3 +316,29 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 		write_unlock(&eb->lock);
 	}
 }
+
+/*
+ * Set all locked nodes in the path to blocking locks.  This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		if (!p->nodes[i] || !p->locks[i])
+			continue;
+		/*
+		 * If we currently have a spinning reader or writer lock this
+		 * will bump the count of blocking holders and drop the
+		 * spinlock.
+		 */
+		if (p->locks[i] == BTRFS_READ_LOCK) {
+			btrfs_set_lock_blocking_read(p->nodes[i]);
+			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+			btrfs_set_lock_blocking_write(p->nodes[i]);
+			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+		}
+	}
+}
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index ab4020de25e7..98c92222eaf0 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -33,6 +33,8 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
 #endif
 
+void btrfs_set_path_blocking(struct btrfs_path *p);
+
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
 	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)