diff mbox series

[v3,06/10] maple_tree: Add mas_wr_new_end() to calculate new_end accurately

Message ID 20230522050656.96215-7-zhangpeng.00@bytedance.com (mailing list archive)
State New
Headers show
Series Clean ups for maple tree | expand

Commit Message

Peng Zhang May 22, 2023, 5:06 a.m. UTC
The previous new_end calculation is inaccurate because it assumes that
two new pivots must always be added, which is not always the case, so it
sometimes misses the fast path and falls back to the slow path. Add
mas_wr_new_end() to calculate new_end accurately, making the conditions
for entering the fast path more precise.

Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
---
 lib/maple_tree.c | 34 +++++++++++++++++++++++-----------
 1 file changed, 23 insertions(+), 11 deletions(-)

Comments

Liam R. Howlett May 23, 2023, 5:54 p.m. UTC | #1
* Peng Zhang <zhangpeng.00@bytedance.com> [230522 01:07]:
> The previous new_end calculation is inaccurate, because it assumes that
> two new pivots must be added (this is inaccurate), and sometimes it will
> miss the fast path and enter the slow path. Add mas_wr_new_end() to
> accurately calculate new_end to make the conditions for entering the
> fast path more accurate.
> 

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
> ---
>  lib/maple_tree.c | 34 +++++++++++++++++++++++-----------
>  1 file changed, 23 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 562507979a4b..0550a07355d7 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4297,6 +4297,21 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
>  	}
>  }
>  
> +static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
> +{
> +	struct ma_state *mas = wr_mas->mas;
> +	unsigned char new_end = wr_mas->node_end + 2;
> +
> +	new_end -= wr_mas->offset_end - mas->offset;
> +	if (wr_mas->r_min == mas->index)
> +		new_end--;
> +
> +	if (wr_mas->end_piv == mas->last)
> +		new_end--;
> +
> +	return new_end;
> +}
> +
>  static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
>  {
>  	unsigned char end = wr_mas->node_end;
> @@ -4352,9 +4367,8 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
>  
>  static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>  {
> -	unsigned char node_slots;
> -	unsigned char node_size;
>  	struct ma_state *mas = wr_mas->mas;
> +	unsigned char new_end;
>  
>  	/* Direct replacement */
>  	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
> @@ -4364,17 +4378,15 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>  		return;
>  	}
>  
> -	/* Attempt to append */
> -	node_slots = mt_slots[wr_mas->type];
> -	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
> -	if (mas->max == ULONG_MAX)
> -		node_size++;
> -
> -	/* slot and node store will not fit, go to the slow path */
> -	if (unlikely(node_size >= node_slots))
> +	/*
> +	 * new_end exceeds the size of the maple node and cannot enter the fast
> +	 * path.
> +	 */
> +	new_end = mas_wr_new_end(wr_mas);
> +	if (new_end >= mt_slots[wr_mas->type])
>  		goto slow_path;
>  
> -	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
> +	if (wr_mas->entry && (wr_mas->node_end < mt_slots[wr_mas->type] - 1) &&
>  	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
>  		if (!wr_mas->content || !wr_mas->entry)
>  			mas_update_gap(mas);
> -- 
> 2.20.1
>
diff mbox series

Patch

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 562507979a4b..0550a07355d7 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4297,6 +4297,21 @@  static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
 	}
 }
 
+static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
+{
+	struct ma_state *mas = wr_mas->mas;
+	unsigned char new_end = wr_mas->node_end + 2;
+
+	new_end -= wr_mas->offset_end - mas->offset;
+	if (wr_mas->r_min == mas->index)
+		new_end--;
+
+	if (wr_mas->end_piv == mas->last)
+		new_end--;
+
+	return new_end;
+}
+
 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
 {
 	unsigned char end = wr_mas->node_end;
@@ -4352,9 +4367,8 @@  static void mas_wr_bnode(struct ma_wr_state *wr_mas)
 
 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 {
-	unsigned char node_slots;
-	unsigned char node_size;
 	struct ma_state *mas = wr_mas->mas;
+	unsigned char new_end;
 
 	/* Direct replacement */
 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
@@ -4364,17 +4378,15 @@  static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 		return;
 	}
 
-	/* Attempt to append */
-	node_slots = mt_slots[wr_mas->type];
-	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
-	if (mas->max == ULONG_MAX)
-		node_size++;
-
-	/* slot and node store will not fit, go to the slow path */
-	if (unlikely(node_size >= node_slots))
+	/*
+	 * new_end exceeds the size of the maple node and cannot enter the fast
+	 * path.
+	 */
+	new_end = mas_wr_new_end(wr_mas);
+	if (new_end >= mt_slots[wr_mas->type])
 		goto slow_path;
 
-	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
+	if (wr_mas->entry && (wr_mas->node_end < mt_slots[wr_mas->type] - 1) &&
 	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
 		if (!wr_mas->content || !wr_mas->entry)
 			mas_update_gap(mas);