[f2fs-dev] f2fs: introduce get_available_block_count() for cleanup

Message ID 20240124144342.14612-1-chao@kernel.org
State Superseded

Commit Message

Chao Yu Jan. 24, 2024, 2:43 p.m. UTC
inc_valid_block_count() and inc_valid_node_count() contain very
similar code for calculating the available user block count.

This patch introduces a new helper, get_available_block_count(),
to hold that common code, and uses it to clean up both callers.

Signed-off-by: Chao Yu <chao@kernel.org>
---
 fs/f2fs/f2fs.h | 61 +++++++++++++++++++++++++-------------------------
 1 file changed, 31 insertions(+), 30 deletions(-)
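
[Editorial note] For orientation before the diff: the new helper folds
several adjustments to the raw user block count into one place. Below is
a minimal, userspace-compilable sketch of the same arithmetic, not the
kernel code itself: struct sbi_stub, available_block_count() and the
allow_reserved field are invented stand-ins (the last for the result of
__allow_reserved_blocks()), and the F2FS_IO_ALIGNED segment reservation
is omitted for brevity.

/*
 * Illustrative sketch only, not kernel code: same arithmetic as the
 * helper in the diff below, with the sbi fields stubbed out and the
 * F2FS_IO_ALIGNED segment reservation omitted for brevity.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int block_t;

struct sbi_stub {
	block_t user_block_count;
	block_t current_reserved_blocks;
	block_t root_reserved_blocks;	/* F2FS_OPTION(sbi).root_reserved_blocks */
	block_t unusable_block_count;
	bool cp_disabled;		/* is_sbi_flag_set(sbi, SBI_CP_DISABLED) */
	bool allow_reserved;		/* result of __allow_reserved_blocks() */
};

static block_t available_block_count(const struct sbi_stub *s)
{
	/* Start from the user-visible blocks, minus those already reserved. */
	block_t avail = s->user_block_count - s->current_reserved_blocks;

	/* Callers not allowed to dip into the root reservation lose it too. */
	if (!s->allow_reserved)
		avail -= s->root_reserved_blocks;

	/*
	 * With checkpointing disabled, unusable blocks are subtracted as
	 * well, clamping at zero rather than wrapping the unsigned count.
	 */
	if (s->cp_disabled)
		avail = avail > s->unusable_block_count ?
				avail - s->unusable_block_count : 0;

	return avail;
}

int main(void)
{
	struct sbi_stub s = {
		.user_block_count = 1000, .current_reserved_blocks = 50,
		.root_reserved_blocks = 100, .unusable_block_count = 200,
		.cp_disabled = true, .allow_reserved = false,
	};

	printf("available: %u\n", available_block_count(&s));	/* 650 */
	return 0;
}

With the values in main(): 1000 - 50 (current reserved) - 100 (root
reserved) - 200 (unusable) = 650 available blocks.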

Comments

Chao Yu Feb. 19, 2024, 8:52 a.m. UTC | #1
Ping,

Since there is a conflict when applying this to the latest dev-test
branch, let me rebase the code in v2.

On 2024/1/24 22:43, Chao Yu wrote:
> inc_valid_block_count() and inc_valid_node_count() contain very
> similar code for calculating the available user block count.
> 
> This patch introduces a new helper, get_available_block_count(),
> to hold that common code, and uses it to clean up both callers.
> 
> Signed-off-by: Chao Yu <chao@kernel.org>

Patch

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5d19643a92af..0094a8c85f4a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2253,6 +2253,31 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
 	return false;
 }
 
+static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
+						struct inode *inode, bool cap)
+{
+	block_t avail_user_block_count;
+
+	avail_user_block_count = sbi->user_block_count -
+					sbi->current_reserved_blocks;
+
+	if (!__allow_reserved_blocks(sbi, inode, cap))
+		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+
+	if (F2FS_IO_ALIGNED(sbi))
+		avail_user_block_count -= sbi->blocks_per_seg *
+				SM_I(sbi)->additional_reserved_segments;
+
+	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+		if (avail_user_block_count > sbi->unusable_block_count)
+			avail_user_block_count -= sbi->unusable_block_count;
+		else
+			avail_user_block_count = 0;
+	}
+
+	return avail_user_block_count;
+}
+
 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 				 struct inode *inode, blkcnt_t *count, bool partial)
@@ -2278,22 +2303,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 
 	spin_lock(&sbi->stat_lock);
 	sbi->total_valid_block_count += (block_t)(*count);
-	avail_user_block_count = sbi->user_block_count -
-					sbi->current_reserved_blocks;
-
-	if (!__allow_reserved_blocks(sbi, inode, true))
-		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+	avail_user_block_count = get_available_block_count(sbi, inode, true);
 
-	if (F2FS_IO_ALIGNED(sbi))
-		avail_user_block_count -= sbi->blocks_per_seg *
-				SM_I(sbi)->additional_reserved_segments;
-
-	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
-		if (avail_user_block_count > sbi->unusable_block_count)
-			avail_user_block_count -= sbi->unusable_block_count;
-		else
-			avail_user_block_count = 0;
-	}
 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
 		if (!partial) {
 			spin_unlock(&sbi->stat_lock);
@@ -2609,7 +2620,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
 					struct inode *inode, bool is_inode)
 {
 	block_t	valid_block_count;
-	unsigned int valid_node_count, user_block_count;
+	unsigned int valid_node_count;
+	unsigned int avail_user_block_count;
 	int err;
 
 	if (is_inode) {
@@ -2629,21 +2641,10 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
 
 	spin_lock(&sbi->stat_lock);
 
-	valid_block_count = sbi->total_valid_block_count +
-					sbi->current_reserved_blocks + 1;
-
-	if (!__allow_reserved_blocks(sbi, inode, false))
-		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
-
-	if (F2FS_IO_ALIGNED(sbi))
-		valid_block_count += sbi->blocks_per_seg *
-				SM_I(sbi)->additional_reserved_segments;
-
-	user_block_count = sbi->user_block_count;
-	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
-		user_block_count -= sbi->unusable_block_count;
+	valid_block_count = sbi->total_valid_block_count + 1;
+	avail_user_block_count = get_available_block_count(sbi, inode, false);
 
-	if (unlikely(valid_block_count > user_block_count)) {
+	if (unlikely(valid_block_count > avail_user_block_count)) {
 		spin_unlock(&sbi->stat_lock);
 		goto enospc;
 	}
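
[Editorial note] A closing usage note on the cap flag: after conversion,
the two call sites differ only in the value they forward through the
helper to __allow_reserved_blocks(). The sketch below paraphrases that
shape and is not the kernel code: the _stub names are invented, the stub
helper returns arbitrary demo limits, and locking, quota accounting, the
partial mode and the release paths are all elided.

#include <stdbool.h>

typedef unsigned int block_t;

struct sbi_stub {
	block_t total_valid_block_count;
};

/* Invented stand-in for the helper added by the patch. */
static block_t get_available_stub(struct sbi_stub *sbi, void *inode, bool cap)
{
	(void)sbi;
	(void)inode;
	return cap ? 900 : 800;		/* arbitrary demo limits */
}

/* Shape of inc_valid_block_count() after the patch: cap == true. */
static int bump_data_blocks(struct sbi_stub *sbi, void *inode, block_t count)
{
	sbi->total_valid_block_count += count;
	if (sbi->total_valid_block_count >
			get_available_stub(sbi, inode, true)) {
		sbi->total_valid_block_count -= count;	/* roll back */
		return -1;				/* ENOSPC path */
	}
	return 0;
}

/* Shape of inc_valid_node_count() after the patch: cap == false. */
static int bump_node_block(struct sbi_stub *sbi, void *inode)
{
	if (sbi->total_valid_block_count + 1 >
			get_available_stub(sbi, inode, false))
		return -1;				/* ENOSPC path */
	sbi->total_valid_block_count += 1;
	return 0;
}

int main(void)
{
	struct sbi_stub sbi = { .total_valid_block_count = 0 };

	return bump_data_blocks(&sbi, NULL, 10) | bump_node_block(&sbi, NULL);
}

The flag values preserve the pre-patch behaviour: the data-block path
already passed true to __allow_reserved_blocks() and the node path
false, so the refactor moves where the calculation lives without
changing its policy.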