
[f2fs-dev,v4,4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()

Message ID 20241223081044.1126291-5-yi.sun@unisoc.com (mailing list archive)
State New
Series Speed up f2fs truncate

Commit Message

Yi Sun Dec. 23, 2024, 8:10 a.m. UTC
f2fs_invalidate_blocks() can now process a range of consecutive
blocks in a single call, so optimize f2fs_truncate_data_blocks_range()
to collect consecutive block addresses and invalidate them together.

Signed-off-by: Yi Sun <yi.sun@unisoc.com>
---
 fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 68 insertions(+), 4 deletions(-)
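
As a standalone illustration of the batching idea (not f2fs code; the names
below are hypothetical), this small userspace C program walks an array of
block addresses, treats 0 as a hole, and groups consecutive addresses so each
run is "invalidated" with a single call:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t block_t;

/* stand-in for f2fs_invalidate_blocks(): just report each run */
static void invalidate_run(block_t start, unsigned int len)
{
	printf("invalidate %u block(s) starting at %u\n", len, start);
}

int main(void)
{
	/* 0 plays the role of NULL_ADDR: a hole that breaks a run */
	block_t addrs[] = { 100, 101, 102, 0, 200, 201, 205 };
	size_t n = sizeof(addrs) / sizeof(addrs[0]);
	block_t start = 0;
	unsigned int len = 0;

	for (size_t i = 0; i < n; i++) {
		block_t cur = addrs[i];

		if (cur == 0) {
			/* hole: flush the pending run, if any */
			if (len)
				invalidate_run(start, len);
			len = 0;
			continue;
		}
		if (len && start + len == cur) {
			/* contiguous: extend the pending run */
			len++;
		} else {
			/* flush the pending run and start a new one */
			if (len)
				invalidate_run(start, len);
			start = cur;
			len = 1;
		}
	}
	/* flush the final run */
	if (len)
		invalidate_run(start, len);

	return 0;
}

With the sample input it reports three runs (100-102, 200-201, 205), i.e.
three invalidate calls instead of six.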

Comments

Chao Yu Jan. 14, 2025, 4:28 a.m. UTC | #1
On 12/23/24 16:10, Yi Sun wrote:
> f2fs_invalidate_blocks() can now process a range of consecutive
> blocks in a single call, so optimize f2fs_truncate_data_blocks_range()
> to collect consecutive block addresses and invalidate them together.
> 
> Signed-off-by: Yi Sun <yi.sun@unisoc.com>
> ---
>   fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
>   1 file changed, 68 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index 81764b10840b..9980d17ef9f5 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
>   	return finish_preallocate_blocks(inode);
>   }
>   
> +static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
> +					block_t curr, block_t end)

static inline bool is_consecutive_blkaddrs(block_t cur, block_t end)
{
	return cur == end || cur == end + 1;
}

But maybe we don't need to add this function, see below comments.

> +{
> +	if (curr - end == 1 || curr == end)
> +		return true;
> +	else
> +		return false;
> +}
> +
>   void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   {
>   	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> @@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   	int cluster_index = 0, valid_blocks = 0;
>   	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
>   	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
> +	/*
> +	 * Temporary record of the pending block range.
> +	 * When the current @blkaddr and @blkaddr_end can be processed
> +	 * together, update the value of @blkaddr_end.
> +	 * When the current @blkaddr is not contiguous with @blkaddr_end,
> +	 * it is necessary to process the contiguous block

I'd prefer not to add these comments inside the function; what about describing
the details in the commit message instead? Thoughts?

> +	 * range [blkaddr_start, blkaddr_end].
> +	 */
> +	block_t blkaddr_start, blkaddr_end;
> +	/*
> +	 * Avoid processing invalid data blocks.
> +	 * Because @blkaddr_start and @blkaddr_end may hold NULL_ADDR
> +	 * or otherwise invalid block addresses, @last_valid records
> +	 * whether a valid range has been collected.
> +	 */

Ditto,

What about using blkstart & blklen to record the last run of consecutive block
addresses, and using blklen to decide whether we need to call
f2fs_invalidate_blocks()?

> +	bool last_valid = false;
> +	/* Process the last @blkaddr separately? */
> +	bool last_one = true;
>   
>   	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
> +	blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
>   
>   	/* Assumption: truncation starts with cluster */
>   	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
> @@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   		}
>   
>   		if (blkaddr == NULL_ADDR)
> -			continue;
> +			goto next;
>   
>   		f2fs_set_data_blkaddr(dn, NULL_ADDR);
>   
>   		if (__is_valid_data_blkaddr(blkaddr)) {
>   			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
> -				continue;
> +				goto next;
>   			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
>   						DATA_GENERIC_ENHANCE))
> -				continue;
> +				goto next;
>   			if (compressed_cluster)
>   				valid_blocks++;
>   		}
>   
> -		f2fs_invalidate_blocks(sbi, blkaddr, 1);

How about this? Can you rework the patch based on it?

		if (blkstart + blklen == blkaddr) {
			blklen++;
		} else {
			f2fs_invalidate_blocks(sbi, blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}

> +
> +		if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
> +			/*
> +			 * The current block @blkaddr is contiguous with
> +			 * @blkaddr_end, so @blkaddr_end is updated and
> +			 * f2fs_invalidate_blocks() is skipped until a
> +			 * @blkaddr that cannot be processed together
> +			 * is encountered.
> +			 */
> +			blkaddr_end = blkaddr;
> +			if (count == 1)
> +				last_one = false;
> +			else
> +				goto skip_invalid;
> +		}
> +
> +		f2fs_invalidate_blocks(sbi, blkaddr_start,
> +					blkaddr_end - blkaddr_start + 1);
> +		blkaddr_start = blkaddr_end = blkaddr;
> +
> +		if (count == 1 && last_one)
> +			f2fs_invalidate_blocks(sbi, blkaddr, 1);
> +
> +skip_invalid:
> +		last_valid = true;
>   
>   		if (!released || blkaddr != COMPRESS_ADDR)
>   			nr_free++;
> +
> +		continue;
> +
> +next:

next:

		if (blklen) {
			f2fs_invalidate_blocks(sbi, blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}

> +		/* If consecutive blocks have been recorded, we need to process them. */
> +		if (last_valid == true)
> +			f2fs_invalidate_blocks(sbi, blkaddr_start,
> +					blkaddr_end - blkaddr_start + 1);
> +
> +		blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
> +		last_valid = false;
> +
>   	}

	if (blklen)
		f2fs_invalidate_blocks(sbi, blkstart, blklen);

Thanks,

>   
>   	if (compressed_cluster)
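
For reference, here is a minimal sketch of how the suggested snippets above
could fit together in the truncation loop. It is assembled from the review
comments; the initialization of blkstart/blklen, the blklen guard in the merge
path, and the trimmed-out bookkeeping are assumptions, not the final code:

	block_t blkstart = 0;	/* start of the pending contiguous run */
	int blklen = 0;		/* length of the pending run; 0 means none */

	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		/* ... compressed-cluster bookkeeping as in the patch ... */

		if (blkaddr == NULL_ADDR)
			goto next;

		f2fs_set_data_blkaddr(dn, NULL_ADDR);

		/* ... blkaddr validity checks as in the patch, invalid -> goto next ... */

		if (blkstart + blklen == blkaddr) {
			/* contiguous with the pending run: just extend it */
			blklen++;
		} else {
			/* flush the pending run, then start a new one */
			if (blklen)
				f2fs_invalidate_blocks(sbi, blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
		continue;

next:
		/* a hole or invalid address ends the run: flush it */
		if (blklen)
			f2fs_invalidate_blocks(sbi, blkstart, blklen);
		blklen = 0;
	}

	/* flush the run that is still pending when the loop ends */
	if (blklen)
		f2fs_invalidate_blocks(sbi, blkstart, blklen);

This way f2fs_invalidate_blocks() is called once per contiguous run instead of
once per block, which is where the truncate speedup comes from.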
Yi Sun Jan. 15, 2025, 5:09 a.m. UTC | #2
> -----Original Message-----
> From: Chao Yu <chao@kernel.org>
> Sent: January 14, 2025 12:29
> To: Yi Sun <Yi.Sun@unisoc.com>; jaegeuk@kernel.org
> Cc: chao@kernel.org; sunyibuaa@gmail.com; linux-f2fs-
> devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> niuzhiguo84@gmail.com; Hao_hao Wang <Hao_hao.Wang@unisoc.com>;
> Ke Wang <Ke.Wang@unisoc.com>
> Subject: Re: [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()

Hi Chao,
Your suggestion looks great; I will modify the patch accordingly.


Patch

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 81764b10840b..9980d17ef9f5 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -612,6 +612,15 @@  static int f2fs_file_open(struct inode *inode, struct file *filp)
 	return finish_preallocate_blocks(inode);
 }
 
+static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
+					block_t curr, block_t end)
+{
+	if (curr - end == 1 || curr == end)
+		return true;
+	else
+		return false;
+}
+
 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
@@ -621,8 +630,27 @@  void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 	int cluster_index = 0, valid_blocks = 0;
 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+	/*
+	 * Temporary record of the pending block range.
+	 * When the current @blkaddr and @blkaddr_end can be processed
+	 * together, update the value of @blkaddr_end.
+	 * When the current @blkaddr is not contiguous with @blkaddr_end,
+	 * it is necessary to process the contiguous block
+	 * range [blkaddr_start, blkaddr_end].
+	 */
+	block_t blkaddr_start, blkaddr_end;
+	/*
+	 * Avoid processing invalid data blocks.
+	 * Because @blkaddr_start and @blkaddr_end may hold NULL_ADDR
+	 * or otherwise invalid block addresses, @last_valid records
+	 * whether a valid range has been collected.
+	 */
+	bool last_valid = false;
+	/* Process the last @blkaddr separately? */
+	bool last_one = true;
 
 	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+	blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
 
 	/* Assumption: truncation starts with cluster */
 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,24 +666,60 @@  void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 		}
 
 		if (blkaddr == NULL_ADDR)
-			continue;
+			goto next;
 
 		f2fs_set_data_blkaddr(dn, NULL_ADDR);
 
 		if (__is_valid_data_blkaddr(blkaddr)) {
 			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
-				continue;
+				goto next;
 			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
 						DATA_GENERIC_ENHANCE))
-				continue;
+				goto next;
 			if (compressed_cluster)
 				valid_blocks++;
 		}
 
-		f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+		if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
+			/*
+			 * The current block @blkaddr is contiguous with
+			 * @blkaddr_end, so @blkaddr_end is updated and
+			 * f2fs_invalidate_blocks() is skipped until a
+			 * @blkaddr that cannot be processed together
+			 * is encountered.
+			 */
+			blkaddr_end = blkaddr;
+			if (count == 1)
+				last_one = false;
+			else
+				goto skip_invalid;
+		}
+
+		f2fs_invalidate_blocks(sbi, blkaddr_start,
+					blkaddr_end - blkaddr_start + 1);
+		blkaddr_start = blkaddr_end = blkaddr;
+
+		if (count == 1 && last_one)
+			f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+skip_invalid:
+		last_valid = true;
 
 		if (!released || blkaddr != COMPRESS_ADDR)
 			nr_free++;
+
+		continue;
+
+next:
+		/* If consecutive blocks have been recorded, we need to process them. */
+		if (last_valid == true)
+			f2fs_invalidate_blocks(sbi, blkaddr_start,
+					blkaddr_end - blkaddr_start + 1);
+
+		blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
+		last_valid = false;
+
 	}
 
 	if (compressed_cluster)