[RFCv5,2/5] iomap: Refactor iop_set_range_uptodate() function

Message ID 203a9e25873f6c94c9de89823439aa1f6a7dc714.1683485700.git.ritesh.list@gmail.com (mailing list archive)
State New, archived
Series iomap: Add support for per-block dirty state to improve write performance

Commit Message

Ritesh Harjani (IBM) May 7, 2023, 7:27 p.m. UTC
This patch moves up and combines the definitions of two functions
(iomap_iop_set_range_uptodate() & iomap_set_range_uptodate()) into
iop_set_range_uptodate() and refactors its arguments a bit.

No functionality change in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 fs/iomap/buffered-io.c | 57 ++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 30 deletions(-)

--
2.39.2

Comments

Brian Foster May 15, 2023, 3:09 p.m. UTC | #1
On Mon, May 08, 2023 at 12:57:57AM +0530, Ritesh Harjani (IBM) wrote:
> This patch moves up and combines the definitions of two functions
> (iomap_iop_set_range_uptodate() & iomap_set_range_uptodate()) into
> iop_set_range_uptodate() and refactors its arguments a bit.
> 
> No functionality change in this patch.
> 
> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
> ---

Hi Ritesh,

I just have a few random and nitty comments/questions on the series...

>  fs/iomap/buffered-io.c | 57 ++++++++++++++++++++----------------------
>  1 file changed, 27 insertions(+), 30 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index cbd945d96584..e732581dc2d4 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -43,6 +43,27 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
> 
>  static struct bio_set iomap_ioend_bioset;
> 
> +static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
> +				   size_t off, size_t len)
> +{

Any particular reason this now takes the inode as a param instead of
continuing to use the folio?

Brian

> +	struct iomap_page *iop = to_iomap_page(folio);
> +	unsigned int first_blk = off >> inode->i_blkbits;
> +	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
> +	unsigned int nr_blks = last_blk - first_blk + 1;
> +	unsigned long flags;
> +
> +	if (iop) {
> +		spin_lock_irqsave(&iop->uptodate_lock, flags);
> +		bitmap_set(iop->uptodate, first_blk, nr_blks);
> +		if (bitmap_full(iop->uptodate,
> +				i_blocks_per_folio(inode, folio)))
> +			folio_mark_uptodate(folio);
> +		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
> +	} else {
> +		folio_mark_uptodate(folio);
> +	}
> +}
> +
>  static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
>  				    unsigned int flags)
>  {
> @@ -145,30 +166,6 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
>  	*lenp = plen;
>  }
> 
> -static void iomap_iop_set_range_uptodate(struct folio *folio,
> -		struct iomap_page *iop, size_t off, size_t len)
> -{
> -	struct inode *inode = folio->mapping->host;
> -	unsigned first = off >> inode->i_blkbits;
> -	unsigned last = (off + len - 1) >> inode->i_blkbits;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&iop->uptodate_lock, flags);
> -	bitmap_set(iop->uptodate, first, last - first + 1);
> -	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
> -		folio_mark_uptodate(folio);
> -	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
> -}
> -
> -static void iomap_set_range_uptodate(struct folio *folio,
> -		struct iomap_page *iop, size_t off, size_t len)
> -{
> -	if (iop)
> -		iomap_iop_set_range_uptodate(folio, iop, off, len);
> -	else
> -		folio_mark_uptodate(folio);
> -}
> -
>  static void iomap_finish_folio_read(struct folio *folio, size_t offset,
>  		size_t len, int error)
>  {
> @@ -178,7 +175,8 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
>  		folio_clear_uptodate(folio);
>  		folio_set_error(folio);
>  	} else {
> -		iomap_set_range_uptodate(folio, iop, offset, len);
> +		iop_set_range_uptodate(folio->mapping->host, folio, offset,
> +				       len);
>  	}
> 
>  	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
> @@ -240,7 +238,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
>  	memcpy(addr, iomap->inline_data, size);
>  	memset(addr + size, 0, PAGE_SIZE - poff - size);
>  	kunmap_local(addr);
> -	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
> +	iop_set_range_uptodate(iter->inode, folio, offset, PAGE_SIZE - poff);
>  	return 0;
>  }
> 
> @@ -277,7 +275,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
> 
>  	if (iomap_block_needs_zeroing(iter, pos)) {
>  		folio_zero_range(folio, poff, plen);
> -		iomap_set_range_uptodate(folio, iop, poff, plen);
> +		iop_set_range_uptodate(iter->inode, folio, poff, plen);
>  		goto done;
>  	}
> 
> @@ -598,7 +596,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
>  			if (status)
>  				return status;
>  		}
> -		iomap_set_range_uptodate(folio, iop, poff, plen);
> +		iop_set_range_uptodate(iter->inode, folio, poff, plen);
>  	} while ((block_start += plen) < block_end);
> 
>  	return 0;
> @@ -705,7 +703,6 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  		size_t copied, struct folio *folio)
>  {
> -	struct iomap_page *iop = to_iomap_page(folio);
>  	flush_dcache_folio(folio);
> 
>  	/*
> @@ -721,7 +718,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  	 */
>  	if (unlikely(copied < len && !folio_test_uptodate(folio)))
>  		return 0;
> -	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
> +	iop_set_range_uptodate(inode, folio, offset_in_folio(folio, pos), len);
>  	filemap_dirty_folio(inode->i_mapping, folio);
>  	return copied;
>  }
> --
> 2.39.2
>
Ritesh Harjani (IBM) May 16, 2023, 10:12 a.m. UTC | #2
Brian Foster <bfoster@redhat.com> writes:

> On Mon, May 08, 2023 at 12:57:57AM +0530, Ritesh Harjani (IBM) wrote:
>> This patch moves up and combines the definitions of two functions
>> (iomap_iop_set_range_uptodate() & iomap_set_range_uptodate()) into
>> iop_set_range_uptodate() and refactors its arguments a bit.
>>
>> No functionality change in this patch.
>>
>> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
>> ---
>
> Hi Ritesh,
>
> I just have a few random and nitty comments/questions on the series...
>

Thanks a lot for reviewing.


>>  fs/iomap/buffered-io.c | 57 ++++++++++++++++++++----------------------
>>  1 file changed, 27 insertions(+), 30 deletions(-)
>>
>> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
>> index cbd945d96584..e732581dc2d4 100644
>> --- a/fs/iomap/buffered-io.c
>> +++ b/fs/iomap/buffered-io.c
>> @@ -43,6 +43,27 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
>>
>>  static struct bio_set iomap_ioend_bioset;
>>
>> +static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
>> +				   size_t off, size_t len)
>> +{
>
> Any particular reason this now takes the inode as a param instead of
> continuing to use the folio?
>

Mainly for blkbits (i.e. the blocksize) and blocks_per_folio.
Earlier we were passing this info explicitly, but after the recent
review comments I felt the API makes more sense taking the inode and
the folio (given that we want to work with the blocksize,
blocks_per_folio and the folio).
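
E.g. (sketch only, not code from the patch): all of the block-granular
math in the helper derives from inode->i_blkbits and the folio:

	/* which blocks of the folio does [off, off + len) cover? */
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

plus i_blocks_per_folio(inode, folio) for the bitmap_full() check, so
passing the inode avoids re-deriving it from folio->mapping->host at
every call site.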

-ritesh

> Brian
>
>> +	struct iomap_page *iop = to_iomap_page(folio);
>> +	unsigned int first_blk = off >> inode->i_blkbits;
>> +	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
>> +	unsigned int nr_blks = last_blk - first_blk + 1;
>> +	unsigned long flags;
>> +
>> +	if (iop) {
>> +		spin_lock_irqsave(&iop->uptodate_lock, flags);
>> +		bitmap_set(iop->uptodate, first_blk, nr_blks);
>> +		if (bitmap_full(iop->uptodate,
>> +				i_blocks_per_folio(inode, folio)))
>> +			folio_mark_uptodate(folio);
>> +		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
>> +	} else {
>> +		folio_mark_uptodate(folio);
>> +	}
>> +}
>> +
>>  static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
>>  				    unsigned int flags)
>>  {
>> @@ -145,30 +166,6 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
>>  	*lenp = plen;
>>  }
>>
>> -static void iomap_iop_set_range_uptodate(struct folio *folio,
>> -		struct iomap_page *iop, size_t off, size_t len)
>> -{
>> -	struct inode *inode = folio->mapping->host;
>> -	unsigned first = off >> inode->i_blkbits;
>> -	unsigned last = (off + len - 1) >> inode->i_blkbits;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&iop->uptodate_lock, flags);
>> -	bitmap_set(iop->uptodate, first, last - first + 1);
>> -	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
>> -		folio_mark_uptodate(folio);
>> -	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
>> -}
>> -
>> -static void iomap_set_range_uptodate(struct folio *folio,
>> -		struct iomap_page *iop, size_t off, size_t len)
>> -{
>> -	if (iop)
>> -		iomap_iop_set_range_uptodate(folio, iop, off, len);
>> -	else
>> -		folio_mark_uptodate(folio);
>> -}
>> -
>>  static void iomap_finish_folio_read(struct folio *folio, size_t offset,
>>  		size_t len, int error)
>>  {
>> @@ -178,7 +175,8 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
>>  		folio_clear_uptodate(folio);
>>  		folio_set_error(folio);
>>  	} else {
>> -		iomap_set_range_uptodate(folio, iop, offset, len);
>> +		iop_set_range_uptodate(folio->mapping->host, folio, offset,
>> +				       len);
>>  	}
>>
>>  	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
>> @@ -240,7 +238,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
>>  	memcpy(addr, iomap->inline_data, size);
>>  	memset(addr + size, 0, PAGE_SIZE - poff - size);
>>  	kunmap_local(addr);
>> -	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
>> +	iop_set_range_uptodate(iter->inode, folio, offset, PAGE_SIZE - poff);
>>  	return 0;
>>  }
>>
>> @@ -277,7 +275,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
>>
>>  	if (iomap_block_needs_zeroing(iter, pos)) {
>>  		folio_zero_range(folio, poff, plen);
>> -		iomap_set_range_uptodate(folio, iop, poff, plen);
>> +		iop_set_range_uptodate(iter->inode, folio, poff, plen);
>>  		goto done;
>>  	}
>>
>> @@ -598,7 +596,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
>>  			if (status)
>>  				return status;
>>  		}
>> -		iomap_set_range_uptodate(folio, iop, poff, plen);
>> +		iop_set_range_uptodate(iter->inode, folio, poff, plen);
>>  	} while ((block_start += plen) < block_end);
>>
>>  	return 0;
>> @@ -705,7 +703,6 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>>  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>>  		size_t copied, struct folio *folio)
>>  {
>> -	struct iomap_page *iop = to_iomap_page(folio);
>>  	flush_dcache_folio(folio);
>>
>>  	/*
>> @@ -721,7 +718,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>>  	 */
>>  	if (unlikely(copied < len && !folio_test_uptodate(folio)))
>>  		return 0;
>> -	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
>> +	iop_set_range_uptodate(inode, folio, offset_in_folio(folio, pos), len);
>>  	filemap_dirty_folio(inode->i_mapping, folio);
>>  	return copied;
>>  }
>> --
>> 2.39.2
>>
Christoph Hellwig May 18, 2023, 6:16 a.m. UTC | #3
> +	if (iop) {
> +		spin_lock_irqsave(&iop->uptodate_lock, flags);
> +		bitmap_set(iop->uptodate, first_blk, nr_blks);
> +		if (bitmap_full(iop->uptodate,
> +				i_blocks_per_folio(inode, folio)))
> +			folio_mark_uptodate(folio);
> +		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
> +	} else {
> +		folio_mark_uptodate(folio);
> +	}

If we did a:

	if (!iop) {
		folio_mark_uptodate(folio);
		return;
	}

we can remove a level of indentation and keep things a bit simpler.
But I can live with either style.
Ritesh Harjani (IBM) May 19, 2023, 3:03 p.m. UTC | #4
Christoph Hellwig <hch@infradead.org> writes:

>> +	if (iop) {
>> +		spin_lock_irqsave(&iop->uptodate_lock, flags);
>> +		bitmap_set(iop->uptodate, first_blk, nr_blks);
>> +		if (bitmap_full(iop->uptodate,
>> +				i_blocks_per_folio(inode, folio)))
>> +			folio_mark_uptodate(folio);
>> +		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
>> +	} else {
>> +		folio_mark_uptodate(folio);
>> +	}
>
> If we did a:
>
> 	if (!iop) {
> 		folio_mark_uptodate(folio);
> 		return;
> 	}
>
> we can remove a level of indentation and keep things a bit simpler.
> But I can live with either style.

Yup, it pleases my eyes too!! I will change it.
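
With that applied, the helper would read something like this (untested
sketch of what the next revision might carry):

	static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
					   size_t off, size_t len)
	{
		struct iomap_page *iop = to_iomap_page(folio);
		unsigned int first_blk = off >> inode->i_blkbits;
		unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
		unsigned int nr_blks = last_blk - first_blk + 1;
		unsigned long flags;

		if (!iop) {
			/* no per-block state; the whole folio is now uptodate */
			folio_mark_uptodate(folio);
			return;
		}

		spin_lock_irqsave(&iop->uptodate_lock, flags);
		bitmap_set(iop->uptodate, first_blk, nr_blks);
		if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
			folio_mark_uptodate(folio);
		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
	}

The spinlock is only needed to keep the bitmap update and the
bitmap_full() check atomic against other sub-folio updates.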

-ritesh

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index cbd945d96584..e732581dc2d4 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -43,6 +43,27 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)

 static struct bio_set iomap_ioend_bioset;

+static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
+				   size_t off, size_t len)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+	unsigned int first_blk = off >> inode->i_blkbits;
+	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+	unsigned int nr_blks = last_blk - first_blk + 1;
+	unsigned long flags;
+
+	if (iop) {
+		spin_lock_irqsave(&iop->uptodate_lock, flags);
+		bitmap_set(iop->uptodate, first_blk, nr_blks);
+		if (bitmap_full(iop->uptodate,
+				i_blocks_per_folio(inode, folio)))
+			folio_mark_uptodate(folio);
+		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+	} else {
+		folio_mark_uptodate(folio);
+	}
+}
+
 static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 				    unsigned int flags)
 {
@@ -145,30 +166,6 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 	*lenp = plen;
 }

-static void iomap_iop_set_range_uptodate(struct folio *folio,
-		struct iomap_page *iop, size_t off, size_t len)
-{
-	struct inode *inode = folio->mapping->host;
-	unsigned first = off >> inode->i_blkbits;
-	unsigned last = (off + len - 1) >> inode->i_blkbits;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iop->uptodate_lock, flags);
-	bitmap_set(iop->uptodate, first, last - first + 1);
-	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
-		folio_mark_uptodate(folio);
-	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
-}
-
-static void iomap_set_range_uptodate(struct folio *folio,
-		struct iomap_page *iop, size_t off, size_t len)
-{
-	if (iop)
-		iomap_iop_set_range_uptodate(folio, iop, off, len);
-	else
-		folio_mark_uptodate(folio);
-}
-
 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
 		size_t len, int error)
 {
@@ -178,7 +175,8 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
 		folio_clear_uptodate(folio);
 		folio_set_error(folio);
 	} else {
-		iomap_set_range_uptodate(folio, iop, offset, len);
+		iop_set_range_uptodate(folio->mapping->host, folio, offset,
+				       len);
 	}

 	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
@@ -240,7 +238,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
 	memcpy(addr, iomap->inline_data, size);
 	memset(addr + size, 0, PAGE_SIZE - poff - size);
 	kunmap_local(addr);
-	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
+	iop_set_range_uptodate(iter->inode, folio, offset, PAGE_SIZE - poff);
 	return 0;
 }

@@ -277,7 +275,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,

 	if (iomap_block_needs_zeroing(iter, pos)) {
 		folio_zero_range(folio, poff, plen);
-		iomap_set_range_uptodate(folio, iop, poff, plen);
+		iop_set_range_uptodate(iter->inode, folio, poff, plen);
 		goto done;
 	}

@@ -598,7 +596,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(folio, iop, poff, plen);
+		iop_set_range_uptodate(iter->inode, folio, poff, plen);
 	} while ((block_start += plen) < block_end);

 	return 0;
@@ -705,7 +703,6 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
-	struct iomap_page *iop = to_iomap_page(folio);
 	flush_dcache_folio(folio);

 	/*
@@ -721,7 +718,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
-	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
+	iop_set_range_uptodate(inode, folio, offset_in_folio(folio, pos), len);
 	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }