
[v3,3/6] btrfs: Make compress_file_range take only struct async_chunk

Message ID: 20190221115717.5128-4-nborisov@suse.com
State: New, archived
Series: Compress path cleanups

Commit Message

Nikolay Borisov Feb. 21, 2019, 11:57 a.m. UTC
All the context this function needs is held within struct async_chunk.
Currently we pass not only the struct but also every individual member.
This is redundant, so simplify it by passing only struct async_chunk and
leaving it to compress_file_range to extract the values it requires.
No functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 fs/btrfs/inode.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
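
For reference, struct async_chunk carries the per-chunk context that
compress_file_range now reads directly. The following is only a minimal
sketch of the fields used by this patch, inferred from the accesses in the
diff below; the actual definition in fs/btrfs/inode.c has additional members
and may differ in layout:

	struct async_chunk {
		struct inode *inode;		/* inode this chunk belongs to */
		struct page *locked_page;	/* page locked by the caller */
		u64 start;			/* start of the delalloc range */
		u64 end;			/* end of the delalloc range */
		struct btrfs_work work;		/* work item; async_cow_start()
						   recovers the chunk via
						   container_of(work, ...) */
		/* further members omitted in this sketch */
	};

Passing the context struct instead of each member keeps the signature stable
if more per-chunk state is added later, at the cost of one extra dereference
per field inside the function.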

Comments

Johannes Thumshirn Feb. 21, 2019, 1:07 p.m. UTC | #1
Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>

Patch

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d61dd538d2b4..d566e15f8c58 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -449,14 +449,14 @@  static inline void inode_should_defrag(struct btrfs_inode *inode,
  * are written in the same order that the flusher thread sent them
  * down.
  */
-static noinline void compress_file_range(struct inode *inode,
-					struct page *locked_page,
-					u64 start, u64 end,
-					struct async_chunk *async_cow,
-					int *num_added)
+static noinline void compress_file_range(struct async_chunk *async_cow,
+					 int *num_added)
 {
+	struct inode *inode = async_cow->inode;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 blocksize = fs_info->sectorsize;
+	u64 start = async_cow->start;
+	u64 end = async_cow->end;
 	u64 actual_end;
 	int ret = 0;
 	struct page **pages = NULL;
@@ -675,9 +675,9 @@  static noinline void compress_file_range(struct inode *inode,
 	 * to our extent and set things up for the async work queue to run
 	 * cow_file_range to do the normal delalloc dance.
 	 */
-	if (page_offset(locked_page) >= start &&
-	    page_offset(locked_page) <= end)
-		__set_page_dirty_nobuffers(locked_page);
+	if (page_offset(async_cow->locked_page) >= start &&
+	    page_offset(async_cow->locked_page) <= end)
+		__set_page_dirty_nobuffers(async_cow->locked_page);
 		/* unlocked later on in the async handlers */
 
 	if (redirty)
@@ -1141,9 +1141,7 @@  static noinline void async_cow_start(struct btrfs_work *work)
 	int num_added = 0;
 	async_cow = container_of(work, struct async_chunk, work);
 
-	compress_file_range(async_cow->inode, async_cow->locked_page,
-			    async_cow->start, async_cow->end, async_cow,
-			    &num_added);
+	compress_file_range(async_cow, &num_added);
 	if (num_added == 0) {
 		btrfs_add_delayed_iput(async_cow->inode);
 		async_cow->inode = NULL;