[RFC,1/2] iomap: Change uptodate variable name to state

Message ID 82faf435c4e5748e8c6554308f13cac5bc4a8546.1666928993.git.ritesh.list@gmail.com (mailing list archive)
State Superseded, archived
Series iomap: Add support for subpage dirty state tracking to improve write performance

Commit Message

Ritesh Harjani (IBM) Oct. 28, 2022, 4:30 a.m. UTC
This patch just changes the struct iomap_page uptodate & uptodate_lock
member names to state and state_lock to better reflect their purpose for
the upcoming patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 fs/iomap/buffered-io.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
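
For context, this rename is groundwork for the next patch in the series,
which (per the series title) adds sub-folio dirty state tracking. A minimal
sketch of one possible direction, assuming the dirty bits simply share the
renamed state[] bitmap after the uptodate bits; patch 2/2 is not shown here
and its actual layout may differ:

	/*
	 * Sketch only: the first nr_blocks bits of state[] track uptodate
	 * state and the next nr_blocks bits track dirty state, so the
	 * allocation in iomap_page_create() would grow to
	 * BITS_TO_LONGS(2 * nr_blocks) longs. Helper name is illustrative.
	 */
	static inline bool iop_block_dirty(struct iomap_page *iop,
			unsigned int block, unsigned int blks_per_folio)
	{
		return test_bit(block + blks_per_folio, iop->state);
	}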

Comments

Darrick J. Wong Oct. 28, 2022, 4:31 p.m. UTC | #1
On Fri, Oct 28, 2022 at 10:00:32AM +0530, Ritesh Harjani (IBM) wrote:
> This patch just changes the struct iomap_page uptodate & uptodate_lock
> member names to state and state_lock to better reflect their purpose for
> the upcoming patch.
> 
> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
> ---
>  fs/iomap/buffered-io.c | 30 +++++++++++++++---------------
>  1 file changed, 15 insertions(+), 15 deletions(-)
> 
> [...]
> @@ -110,7 +110,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
>  
>  		/* move forward for each leading block marked uptodate */
>  		for (i = first; i <= last; i++) {
> -			if (!test_bit(i, iop->uptodate))
> +			if (!test_bit(i, iop->state))

Hmm... time to add a new predicate helper clarifying that this is
uptodate state that we're checking here.

--D

> [...]
Ritesh Harjani (IBM) Oct. 29, 2022, 3:09 a.m. UTC | #2
On 22/10/28 09:31AM, Darrick J. Wong wrote:
> On Fri, Oct 28, 2022 at 10:00:32AM +0530, Ritesh Harjani (IBM) wrote:
> > This patch just changes the struct iomap_page uptodate & uptodate_lock
> > member names to state and state_lock to better reflect their purpose for
> > the upcoming patch.
> > 
> > Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
> > [...]
> > @@ -110,7 +110,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
> >  
> >  		/* move forward for each leading block marked uptodate */
> >  		for (i = first; i <= last; i++) {
> > -			if (!test_bit(i, iop->uptodate))
> > +			if (!test_bit(i, iop->state))
> 
> Hmm... time to add a new predicate helper clarifying that this is
> uptodate state that we're checking here.

Yup. Willy suggested something like iop_block_**. But to keep it short we can
go with iop_test_uptodate().

-ritesh
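
A minimal sketch of the predicate being discussed, assuming it simply wraps
test_bit() against the renamed bitmap; the name iop_test_uptodate() comes
from the reply above, but the final signature is not settled in this thread:

	/* Sketch only: block is a sub-folio block index. */
	static inline bool iop_test_uptodate(struct iomap_page *iop,
			unsigned int block)
	{
		return test_bit(block, iop->state);
	}

Call sites such as iomap_adjust_read_range() would then read
"if (!iop_test_uptodate(iop, i))" instead of open-coding test_bit().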

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ca5c62901541..255f9f92668c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -25,13 +25,13 @@
 
 /*
  * Structure allocated for each folio when block size < folio size
- * to track sub-folio uptodate status and I/O completions.
+ * to track sub-folio uptodate state and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_bytes_pending;
 	atomic_t		write_bytes_pending;
-	spinlock_t		uptodate_lock;
-	unsigned long		uptodate[];
+	spinlock_t		state_lock;
+	unsigned long		state[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct folio *folio)
@@ -58,12 +58,12 @@ iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
 	else
 		gfp = GFP_NOFS | __GFP_NOFAIL;
 
-	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+	iop = kzalloc(struct_size(iop, state, BITS_TO_LONGS(nr_blocks)),
 		      gfp);
 	if (iop) {
-		spin_lock_init(&iop->uptodate_lock);
+		spin_lock_init(&iop->state_lock);
 		if (folio_test_uptodate(folio))
-			bitmap_fill(iop->uptodate, nr_blocks);
+			bitmap_fill(iop->state, nr_blocks);
 		folio_attach_private(folio, iop);
 	}
 	return iop;
@@ -79,7 +79,7 @@ static void iomap_page_release(struct folio *folio)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
-	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
+	WARN_ON_ONCE(bitmap_full(iop->state, nr_blocks) !=
 			folio_test_uptodate(folio));
 	kfree(iop);
 }
@@ -110,7 +110,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* move forward for each leading block marked uptodate */
 		for (i = first; i <= last; i++) {
-			if (!test_bit(i, iop->uptodate))
+			if (!test_bit(i, iop->state))
 				break;
 			*pos += block_size;
 			poff += block_size;
@@ -120,7 +120,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* truncate len if we find any trailing uptodate block(s) */
 		for ( ; i <= last; i++) {
-			if (test_bit(i, iop->uptodate)) {
+			if (test_bit(i, iop->state)) {
 				plen -= (last - i + 1) * block_size;
 				last = i - 1;
 				break;
@@ -152,11 +152,11 @@ static void iomap_iop_set_range_uptodate(struct folio *folio,
 	unsigned last = (off + len - 1) >> inode->i_blkbits;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iop->uptodate_lock, flags);
-	bitmap_set(iop->uptodate, first, last - first + 1);
-	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
+	spin_lock_irqsave(&iop->state_lock, flags);
+	bitmap_set(iop->state, first, last - first + 1);
+	if (bitmap_full(iop->state, i_blocks_per_folio(inode, folio)))
 		folio_mark_uptodate(folio);
-	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+	spin_unlock_irqrestore(&iop->state_lock, flags);
 }
 
 static void iomap_set_range_uptodate(struct folio *folio,
@@ -451,7 +451,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 	last = (from + count - 1) >> inode->i_blkbits;
 
 	for (i = first; i <= last; i++)
-		if (!test_bit(i, iop->uptodate))
+		if (!test_bit(i, iop->state))
 			return false;
 	return true;
 }
@@ -1354,7 +1354,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * invalid, grab a new one.
 	 */
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
-		if (iop && !test_bit(i, iop->uptodate))
+		if (iop && !test_bit(i, iop->state))
 			continue;
 
 		error = wpc->ops->map_blocks(wpc, inode, pos);