Message ID | 20230522050656.96215-10-zhangpeng.00@bytedance.com |
---|---|
State | New |
Series | Clean ups for maple tree |
* Peng Zhang <zhangpeng.00@bytedance.com> [230522 01:07]:
> Simplify and clean up mas_wr_node_store(), remove unnecessary code.
>
> Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
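The shape of the change, for anyone skimming the thread: the caller now derives the post-write end offset once (elsewhere in this series, via mas_wr_new_end()) and hands it to mas_wr_node_store(), which keeps only the minimum-occupancy check. Below is a minimal userspace sketch of that division of labour; the names, the occupancy numbers and the new-end arithmetic are invented for illustration and are not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

#define SLOT_COUNT	16	/* assumed node capacity */
#define MIN_SLOTS	6	/* assumed minimum occupancy for a non-root node */

struct toy_wr_state {
	unsigned char node_end;		/* last used offset before the write */
	unsigned char offset;		/* first offset touched by the write */
	unsigned char offset_end;	/* last offset touched by the write */
	bool splits_front;		/* write starts inside an existing range */
	bool splits_back;		/* write ends inside an existing range */
};

/* Caller side: work out the last used offset after the write, once. */
static unsigned char toy_new_end(const struct toy_wr_state *w)
{
	unsigned char new_end = w->node_end + 1;	/* slot for the new entry */

	new_end += w->splits_front + w->splits_back;	/* leftover clipped pieces */
	new_end -= w->offset_end - w->offset + 1;	/* ranges overwritten */
	return new_end;
}

/* Callee side: only the "would drop below minimum occupancy" check is left. */
static bool toy_node_store(unsigned char new_end, bool is_root)
{
	if (!is_root && new_end <= MIN_SLOTS)
		return false;	/* too little data left; needs a rebalance */

	printf("stored, node now ends at offset %u\n", new_end);
	return true;
}

int main(void)
{
	struct toy_wr_state w = {
		.node_end = 9, .offset = 3, .offset_end = 5,
		.splits_front = true, .splits_back = false,
	};
	unsigned char new_end = toy_new_end(&w);

	/* The caller, not the store helper, rejects an overfull node. */
	if (new_end >= SLOT_COUNT || !toy_node_store(new_end, false))
		printf("fall back to a slower path\n");
	return 0;
}

The point of the sketch is only the split of responsibilities; the real bookkeeping is done against the node's pivots rather than the flags used here.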
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 1fc872f7683c..aa1472c45757 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4075,52 +4075,27 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
  *
  * Return: True if stored, false otherwise
  */
-static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+                                     unsigned char new_end)
 {
         struct ma_state *mas = wr_mas->mas;
         void __rcu **dst_slots;
         unsigned long *dst_pivots;
-        unsigned char dst_offset;
-        unsigned char new_end = wr_mas->node_end;
-        unsigned char offset;
-        unsigned char node_slots = mt_slots[wr_mas->type];
+        unsigned char dst_offset, offset_end = wr_mas->offset_end;
         struct maple_node reuse, *newnode;
-        unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
+        unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
         bool in_rcu = mt_in_rcu(mas->tree);
 
-        offset = mas->offset;
-        if (mas->last == wr_mas->r_max) {
-                /* runs right to the end of the node */
-                if (mas->last == mas->max)
-                        new_end = offset;
-                /* don't copy this offset */
-                wr_mas->offset_end++;
-        } else if (mas->last < wr_mas->r_max) {
-                /* new range ends in this range */
-                if (unlikely(wr_mas->r_max == ULONG_MAX))
-                        mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
-
-                new_end++;
-        } else {
-                if (wr_mas->end_piv == mas->last)
-                        wr_mas->offset_end++;
-
-                new_end -= wr_mas->offset_end - offset - 1;
-        }
-
-        /* new range starts within a range */
-        if (wr_mas->r_min < mas->index)
-                new_end++;
-
-        /* Not enough room */
-        if (new_end >= node_slots)
-                return false;
-
-        /* Not enough data. */
+        /* Check if there is enough data. The room is enough. */
         if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
             !(mas->mas_flags & MA_STATE_BULK))
                 return false;
 
+        if (mas->last == wr_mas->end_piv)
+                offset_end++; /* don't copy this offset */
+        else if (unlikely(wr_mas->r_max == ULONG_MAX))
+                mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+
         /* set up node. */
         if (in_rcu) {
                 mas_node_count(mas, 1);
@@ -4137,47 +4112,36 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
         dst_pivots = ma_pivots(newnode, wr_mas->type);
         dst_slots = ma_slots(newnode, wr_mas->type);
         /* Copy from start to insert point */
-        memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
-        memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
-        dst_offset = offset;
+        memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
+        memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
 
         /* Handle insert of new range starting after old range */
         if (wr_mas->r_min < mas->index) {
-                mas->offset++;
-                rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
-                dst_pivots[dst_offset++] = mas->index - 1;
+                rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
+                dst_pivots[mas->offset++] = mas->index - 1;
         }
 
         /* Store the new entry and range end. */
-        if (dst_offset < max_piv)
-                dst_pivots[dst_offset] = mas->last;
-        mas->offset = dst_offset;
-        rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
+        if (mas->offset < node_pivots)
+                dst_pivots[mas->offset] = mas->last;
+        rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
 
         /*
          * this range wrote to the end of the node or it overwrote the rest of
          * the data
          */
-        if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
-                new_end = dst_offset;
+        if (offset_end > wr_mas->node_end)
                 goto done;
-        }
 
-        dst_offset++;
+        dst_offset = mas->offset + 1;
         /* Copy to the end of node if necessary. */
-        copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
-        memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
+        copy_size = wr_mas->node_end - offset_end + 1;
+        memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
                sizeof(void *) * copy_size);
-        if (dst_offset < max_piv) {
-                if (copy_size > max_piv - dst_offset)
-                        copy_size = max_piv - dst_offset;
-
-                memcpy(dst_pivots + dst_offset,
-                       wr_mas->pivots + wr_mas->offset_end,
-                       sizeof(unsigned long) * copy_size);
-        }
+        memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
+               sizeof(unsigned long) * (copy_size - 1));
 
-        if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
+        if (new_end < node_pivots)
                 dst_pivots[new_end] = mas->max;
 
 done:
@@ -4379,7 +4343,8 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 
         if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
                 return;
-        else if (mas_wr_node_store(wr_mas))
+
+        if (mas_wr_node_store(wr_mas, new_end))
                 return;
 
         if (mas_is_err(mas))
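To make the surviving copy path easier to follow, here is a self-contained sketch of the same splice on plain arrays: copy the head up to the insert point, keep the clipped front of a partially overwritten range, write the new entry and its end pivot, then copy the tail starting at offset_end, copying one fewer pivot because the final pivot is the node's maximum. The arrays, values and offsets below are made up for illustration; only the copy layout mirrors the patch.

#include <stdio.h>
#include <string.h>

#define SLOTS 16

int main(void)
{
	/* Source node: (0,9]->A, (9,19]->B, (19,29]->C, (29,39]->D */
	unsigned long src_piv[SLOTS] = { 9, 19, 29, 39 };
	const char *src_slot[SLOTS] = { "A", "B", "C", "D" };
	unsigned char node_end = 3;	/* last used offset */

	/* Write "X" over [15, 29]: starts inside B, ends exactly on C's pivot. */
	unsigned long index = 15, last = 29;
	unsigned char offset = 1;	/* first overwritten offset (B) */
	unsigned char offset_end = 3;	/* C's offset + 1: last == C's pivot, so C is not copied */

	unsigned long dst_piv[SLOTS] = { 0 };
	const char *dst_slot[SLOTS] = { NULL };
	unsigned char dst_off, new_end, copy_size;

	/* Head: everything before the first overwritten offset. */
	memcpy(dst_piv, src_piv, sizeof(unsigned long) * offset);
	memcpy(dst_slot, src_slot, sizeof(const char *) * offset);

	/* The write starts inside B, so keep B's clipped front. */
	dst_slot[offset] = src_slot[offset];
	dst_piv[offset++] = index - 1;

	/* The new entry and its end pivot. */
	dst_piv[offset] = last;
	dst_slot[offset] = "X";

	/* Tail: untouched ranges from offset_end to the old end of the node. */
	dst_off = offset + 1;
	copy_size = node_end - offset_end + 1;
	memcpy(dst_slot + dst_off, src_slot + offset_end,
	       sizeof(const char *) * copy_size);
	memcpy(dst_piv + dst_off, src_piv + offset_end,
	       sizeof(unsigned long) * (copy_size - 1));

	new_end = dst_off + copy_size - 1;
	dst_piv[new_end] = 39;		/* last pivot is the node maximum */

	for (unsigned char i = 0; i <= new_end; i++)
		printf("offset %u: pivot %lu -> %s\n", i, dst_piv[i], dst_slot[i]);
	return 0;
}

The result is (0,9]->A, (9,14]->B, (14,29]->X, (29,39]->D. In the kernel function the final pivot is written only when new_end < node_pivots; the toy node always has room, so it simply stores the maximum.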
Simplify and clean up mas_wr_node_store(), remove unnecessary code.

Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
---
 lib/maple_tree.c | 87 +++++++++++++++---------------------------------
 1 file changed, 26 insertions(+), 61 deletions(-)