diff mbox series

[2/2] maple_tree: refactor mas_wr_store_type()

Message ID 20241011214451.7286-2-sidhartha.kumar@oracle.com (mailing list archive)
State New
Headers show
Series [1/2] maple_tree: check for MA_STATE_BULK on setting wr_rebalance | expand

Commit Message

Sid Kumar Oct. 11, 2024, 9:44 p.m. UTC
In mas_wr_store_type(), we check if new_end < mt_slots[wr_mas->type]. If
this check fails, we know that, after this, new_end is >= mt_min_slots.
Checking this again when we detect a wr_node_store later in the function
is redundant. Because this check is part of an OR statement, the statement
will always evaluate to true, therefore we can just get rid of it.

We also refactor mas_wr_store_type() to return the store type rather
than set it directly as it greatly cleans up the function.

Suggested-by: Liam Howlett <liam.howlett@oracle.com>
Suggested-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Sidhartha <sidhartha.kumar@oracle.com>
---

Hi Andrew, could you please drop
"maple_tree: remove conditionals to detect wr_node_store" in mm-unstable
before applying this patch

 lib/maple_tree.c | 72 +++++++++++++++++-------------------------------
 1 file changed, 25 insertions(+), 47 deletions(-)

Comments

Wei Yang Oct. 12, 2024, 3:31 a.m. UTC | #1
On Fri, Oct 11, 2024 at 05:44:51PM -0400, Sidhartha Kumar wrote:
>In mas_wr_store_type(), we check if new_end < mt_slots[wr_mas->type]. If
>this check fails, we know that, after this, new_end is >= mt_min_slots.
>Checking this again when we detect a wr_node_store later in the function
>is redundant. Because this check is part of an OR statement, the statement
>will always evaluate to true, therefore we can just get rid of it.
>
>We also refactor mas_wr_store_type() to return the store type rather
>than set it directly as it greatly cleans up the function.
>
>Suggested-by: Liam Howlett <liam.howlett@oracle.com>
>Suggested-by: Wei Yang <richard.weiyang@gmail.com>
>Signed-off-by: Sidhartha <sidhartha.kumar@oracle.com>

Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Liam R. Howlett Oct. 15, 2024, 2:04 a.m. UTC | #2
* Sidhartha Kumar <sidhartha.kumar@oracle.com> [241011 17:44]:
> In mas_wr_store_type(), we check if new_end < mt_slots[wr_mas->type]. If
> this check fails, we know that, after this, new_end is >= mt_min_slots.
> Checking this again when we detect a wr_node_store later in the function
> is redundant. Because this check is part of an OR statement, the statement
> will always evaluate to true, therefore we can just get rid of it.
> 
> We also refactor mas_wr_store_type() to return the store type rather
> than set it directly as it greatly cleans up the function.
> 
> Suggested-by: Liam Howlett <liam.howlett@oracle.com>
> Suggested-by: Wei Yang <richard.weiyang@gmail.com>
> Signed-off-by: Sidhartha <sidhartha.kumar@oracle.com>

Reviewed-by: Liam Howlett <liam.howlett@oracle.com>

> ---
> 
> Hi Andrew, could you please drop
> "maple_tree: remove conditionals to detect wr_node_store" in mm-unstable
> before applying this patch
> 
>  lib/maple_tree.c | 72 +++++++++++++++++-------------------------------
>  1 file changed, 25 insertions(+), 47 deletions(-)
> 
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index b3b1d4b8126b..a5e982e482dd 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4191,24 +4191,22 @@ static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
>  }
>  
>  /*
> - * mas_wr_store_type() - Set the store type for a given
> + * mas_wr_store_type() - Determine the store type for a given
>   * store operation.
>   * @wr_mas: The maple write state
> + *
> + * Return: the type of store needed for the operation
>   */
> -static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
> +static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
>  {
>  	struct ma_state *mas = wr_mas->mas;
>  	unsigned char new_end;
>  
> -	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
> -		mas->store_type = wr_store_root;
> -		return;
> -	}
> +	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
> +		return wr_store_root;
>  
> -	if (unlikely(!mas_wr_walk(wr_mas))) {
> -		mas->store_type = wr_spanning_store;
> -		return;
> -	}
> +	if (unlikely(!mas_wr_walk(wr_mas)))
> +		return wr_spanning_store;
>  
>  	/* At this point, we are at the leaf node that needs to be altered. */
>  	mas_wr_end_piv(wr_mas);
> @@ -4216,50 +4214,30 @@ static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
>  		mas_wr_extend_null(wr_mas);
>  
>  	new_end = mas_wr_new_end(wr_mas);
> -	if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
> -		mas->store_type = wr_exact_fit;
> -		return;
> -	}
> +	if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last))
> +		return wr_exact_fit;
>  
> -	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
> -		mas->store_type = wr_new_root;
> -		return;
> -	}
> +	if (unlikely(!mas->index && mas->last == ULONG_MAX))
> +		return wr_new_root;
>  
>  	/* Potential spanning rebalance collapsing a node */
>  	if (new_end < mt_min_slots[wr_mas->type]) {
> -		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
> -			mas->store_type = wr_rebalance;
> -			return;
> -		}
> -		mas->store_type = wr_node_store;
> -		return;
> +		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK))
> +			return  wr_rebalance;
> +		return wr_node_store;
>  	}
>  
> -	if (new_end >= mt_slots[wr_mas->type]) {
> -		mas->store_type = wr_split_store;
> -		return;
> -	}
> +	if (new_end >= mt_slots[wr_mas->type])
> +		return wr_split_store;
>  
> -	if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
> -		mas->store_type = wr_append;
> -		return;
> -	}
> +	if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end))
> +		return wr_append;
>  
>  	if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
> -		(wr_mas->offset_end - mas->offset == 1))) {
> -		mas->store_type = wr_slot_store;
> -		return;
> -	}
> -
> -	if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
> -		(mas->mas_flags & MA_STATE_BULK)) {
> -		mas->store_type = wr_node_store;
> -		return;
> -	}
> +		(wr_mas->offset_end - mas->offset == 1)))
> +		return wr_slot_store;
>  
> -	mas->store_type = wr_invalid;
> -	MAS_WARN_ON(mas, 1);
> +	return wr_node_store;
>  }
>  
>  /**
> @@ -4274,7 +4252,7 @@ static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
>  	int request;
>  
>  	mas_wr_prealloc_setup(wr_mas);
> -	mas_wr_store_type(wr_mas);
> +	mas->store_type = mas_wr_store_type(wr_mas);
>  	request = mas_prealloc_calc(mas, entry);
>  	if (!request)
>  		return;
> @@ -5446,7 +5424,7 @@ void *mas_store(struct ma_state *mas, void *entry)
>  	 * overwrite multiple entries within a self-balancing B-Tree.
>  	 */
>  	mas_wr_prealloc_setup(&wr_mas);
> -	mas_wr_store_type(&wr_mas);
> +	mas->store_type = mas_wr_store_type(&wr_mas);
>  	if (mas->mas_flags & MA_STATE_PREALLOC) {
>  		mas_wr_store_entry(&wr_mas);
>  		MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
> @@ -5549,7 +5527,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
>  	int request;
>  
>  	mas_wr_prealloc_setup(&wr_mas);
> -	mas_wr_store_type(&wr_mas);
> +	mas->store_type = mas_wr_store_type(&wr_mas);
>  	request = mas_prealloc_calc(mas, entry);
>  	if (!request)
>  		return ret;
> -- 
> 2.43.0
>
diff mbox series

Patch

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b3b1d4b8126b..a5e982e482dd 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4191,24 +4191,22 @@  static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
 }
 
 /*
- * mas_wr_store_type() - Set the store type for a given
+ * mas_wr_store_type() - Determine the store type for a given
  * store operation.
  * @wr_mas: The maple write state
+ *
+ * Return: the type of store needed for the operation
  */
-static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
+static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 	unsigned char new_end;
 
-	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
-		mas->store_type = wr_store_root;
-		return;
-	}
+	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+		return wr_store_root;
 
-	if (unlikely(!mas_wr_walk(wr_mas))) {
-		mas->store_type = wr_spanning_store;
-		return;
-	}
+	if (unlikely(!mas_wr_walk(wr_mas)))
+		return wr_spanning_store;
 
 	/* At this point, we are at the leaf node that needs to be altered. */
 	mas_wr_end_piv(wr_mas);
@@ -4216,50 +4214,30 @@  static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
 		mas_wr_extend_null(wr_mas);
 
 	new_end = mas_wr_new_end(wr_mas);
-	if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
-		mas->store_type = wr_exact_fit;
-		return;
-	}
+	if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last))
+		return wr_exact_fit;
 
-	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
-		mas->store_type = wr_new_root;
-		return;
-	}
+	if (unlikely(!mas->index && mas->last == ULONG_MAX))
+		return wr_new_root;
 
 	/* Potential spanning rebalance collapsing a node */
 	if (new_end < mt_min_slots[wr_mas->type]) {
-		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
-			mas->store_type = wr_rebalance;
-			return;
-		}
-		mas->store_type = wr_node_store;
-		return;
+		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK))
+			return  wr_rebalance;
+		return wr_node_store;
 	}
 
-	if (new_end >= mt_slots[wr_mas->type]) {
-		mas->store_type = wr_split_store;
-		return;
-	}
+	if (new_end >= mt_slots[wr_mas->type])
+		return wr_split_store;
 
-	if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
-		mas->store_type = wr_append;
-		return;
-	}
+	if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end))
+		return wr_append;
 
 	if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
-		(wr_mas->offset_end - mas->offset == 1))) {
-		mas->store_type = wr_slot_store;
-		return;
-	}
-
-	if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
-		(mas->mas_flags & MA_STATE_BULK)) {
-		mas->store_type = wr_node_store;
-		return;
-	}
+		(wr_mas->offset_end - mas->offset == 1)))
+		return wr_slot_store;
 
-	mas->store_type = wr_invalid;
-	MAS_WARN_ON(mas, 1);
+	return wr_node_store;
 }
 
 /**
@@ -4274,7 +4252,7 @@  static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
 	int request;
 
 	mas_wr_prealloc_setup(wr_mas);
-	mas_wr_store_type(wr_mas);
+	mas->store_type = mas_wr_store_type(wr_mas);
 	request = mas_prealloc_calc(mas, entry);
 	if (!request)
 		return;
@@ -5446,7 +5424,7 @@  void *mas_store(struct ma_state *mas, void *entry)
 	 * overwrite multiple entries within a self-balancing B-Tree.
 	 */
 	mas_wr_prealloc_setup(&wr_mas);
-	mas_wr_store_type(&wr_mas);
+	mas->store_type = mas_wr_store_type(&wr_mas);
 	if (mas->mas_flags & MA_STATE_PREALLOC) {
 		mas_wr_store_entry(&wr_mas);
 		MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
@@ -5549,7 +5527,7 @@  int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 	int request;
 
 	mas_wr_prealloc_setup(&wr_mas);
-	mas_wr_store_type(&wr_mas);
+	mas->store_type = mas_wr_store_type(&wr_mas);
 	request = mas_prealloc_calc(mas, entry);
 	if (!request)
 		return ret;