[1/3] btrfs: Implement btrfs_lock_and_flush_ordered_range

Message ID 20190422074653.13075-2-nborisov@suse.com (mailing list archive)
State New, archived
Series Factor out common ordered extent flushing code

Commit Message

Nikolay Borisov April 22, 2019, 7:46 a.m. UTC
There is a recurring idiom in btrfs' codebase for flushing an ordered
range: lock the extent range, look up any pending ordered extents in
it and, if one is found, unlock, wait for the ordered extent to
complete and retry. Factor this out into a separate function that can
be reused. Future patches will replace the existing open-coded
occurrences with calls to the new function.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 fs/btrfs/ordered-data.c | 32 ++++++++++++++++++++++++++++++++
 fs/btrfs/ordered-data.h |  3 +++
 2 files changed, 35 insertions(+)
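
For context, the idiom being factored out looks roughly like this at
its call sites (a sketch distilled from the function body below; the
exact io tree and surrounding code vary per caller):

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
						     end - start + 1);
		if (!ordered)
			break;
		/* An ordered extent is pending: unlock, wait, retry. */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end,
				     &cached_state);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}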

Comments

David Sterba May 2, 2019, 2:13 p.m. UTC | #1
On Mon, Apr 22, 2019 at 10:46:51AM +0300, Nikolay Borisov wrote:
> There is a recurring idiom in btrfs' codebase for flushing an ordered
> range: lock the extent range, look up any pending ordered extents in
> it and, if one is found, unlock, wait for the ordered extent to
> complete and retry. Factor this out into a separate function that can
> be reused. Future patches will replace the existing open-coded
> occurrences with calls to the new function.
> 
> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
> ---
>  fs/btrfs/ordered-data.c | 32 ++++++++++++++++++++++++++++++++
>  fs/btrfs/ordered-data.h |  3 +++
>  2 files changed, 35 insertions(+)
> 
> diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
> index 4d9bb0dea9af..65f6409c1c9f 100644
> --- a/fs/btrfs/ordered-data.c
> +++ b/fs/btrfs/ordered-data.c
> @@ -954,6 +954,38 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
>  	return index;
>  }
>  
> +/*
> + * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure
> + * all pending ordered extents in it are run to completion.
> + *
> + * @tree:         IO tree used for locking out other users of the range
> + * @inode:        Inode whose ordered tree is to be searched
> + * @start:        Beginning of range to flush
> + * @end:          Last byte of range to lock
> + * @cached_state: If passed, will return the extent state responsible for the
> + * locked range. It's the caller's responsibility to free the cached state.
> + *
> + * This function always returns with the given range locked, ensuring that
> + * after it is called no ordered extent in the range can be pending.
> + */
> +void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
> +					struct inode *inode, u64 start, u64 end,
> +					struct extent_state **cached_state)
> +{

Please use btrfs_inode instead of inode for interfaces that are internal
to btrfs. This is not yet consistent across the codebase, but the plan is
to switch everything to btrfs_inode, so new code should try to follow
that.
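
A hedged sketch of what that request implies for this helper (an
illustration of the reviewer's suggestion only, not the final committed
form):

	void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
						struct btrfs_inode *inode,
						u64 start, u64 end,
						struct extent_state **cached_state)
	{
		struct btrfs_ordered_extent *ordered;

		while (1) {
			lock_extent_bits(tree, start, end, cached_state);
			/* No BTRFS_I() conversion needed any more. */
			ordered = btrfs_lookup_ordered_range(inode, start,
							     end - start + 1);
			if (!ordered)
				break;
			unlock_extent_cached(tree, start, end, cached_state);
			/* btrfs_start_ordered_extent() still takes a VFS inode. */
			btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
		}
	}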

Patch

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 4d9bb0dea9af..65f6409c1c9f 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -954,6 +954,38 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 	return index;
 }
 
+/*
+ * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure
+ * all pending ordered extents in it are run to completion.
+ *
+ * @tree:         IO tree used for locking out other users of the range
+ * @inode:        Inode whose ordered tree is to be searched
+ * @start:        Beginning of range to flush
+ * @end:          Last byte of range to lock
+ * @cached_state: If passed, will return the extent state responsible for the
+ * locked range. It's the caller's responsibility to free the cached state.
+ *
+ * This function always returns with the given range locked, ensuring that
+ * after it is called no ordered extent in the range can be pending.
+ */
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+					struct inode *inode, u64 start, u64 end,
+					struct extent_state **cached_state)
+{
+	struct btrfs_ordered_extent *ordered;
+
+	while (1) {
+		lock_extent_bits(tree, start, end, cached_state);
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
+						     end - start + 1);
+		if (!ordered)
+			break;
+		unlock_extent_cached(tree, start, end, cached_state);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+}
+
 int __init ordered_data_init(void)
 {
 	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 4c5991c3de14..3f6a7d12f435 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -188,6 +188,9 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 			       const u64 range_start, const u64 range_len);
 u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 			      const u64 range_start, const u64 range_len);
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+					struct inode *inode, u64 start, u64 end,
+					struct extent_state **cached_state);
 int __init ordered_data_init(void);
 void __cold ordered_data_exit(void);
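
A usage sketch for call sites once the helper lands (hypothetical
caller context; inode, page_start and page_end are placeholders):

	struct extent_state *cached_state = NULL;

	/* Lock the range and wait out any pending ordered extents in it. */
	btrfs_lock_and_flush_ordered_range(&BTRFS_I(inode)->io_tree, inode,
					   page_start, page_end, &cached_state);

	/* ... operate on the now-quiescent, locked range ... */

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state);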