@@ -4120,12 +4120,18 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
if (dst_offset < max_piv)
dst_pivots[dst_offset] = mas->last;
mas->offset = dst_offset;
- rcu_assign_pointer(dst_slots[dst_offset++], wr_mas->entry);
+ rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
- /* this range wrote to the end of the node. */
- if (wr_mas->offset_end > wr_mas->node_end)
+ /*
+ * this range wrote to the end of the node or it overwrote the rest of
+ * the data
+ */
+ if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
+ new_end = dst_offset;
goto done;
+ }
+ dst_offset++;
/* Copy to the end of node if necessary. */
copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
@@ -4133,14 +4139,16 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
if (dst_offset < max_piv) {
if (copy_size > max_piv - dst_offset)
copy_size = max_piv - dst_offset;
- memcpy(dst_pivots + dst_offset, wr_mas->pivots + wr_mas->offset_end,
+
+ memcpy(dst_pivots + dst_offset,
+ wr_mas->pivots + wr_mas->offset_end,
sizeof(unsigned long) * copy_size);
}
-done:
if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
dst_pivots[new_end] = mas->max;
+done:
mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
if (in_rcu) {
mas->node = mt_mk_node(newnode, wr_mas->type);
@@ -6946,19 +6954,20 @@ static void mas_validate_limits(struct ma_state *mas)
{
int i;
unsigned long prev_piv = 0;
- void __rcu **slots = ma_slots(mte_to_node(mas->node),
- mte_node_type(mas->node));
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mas_mn(mas), type);
/* all limits are fine here. */
if (mte_is_root(mas->node))
return;
- for (i = 0; i < mt_slot_count(mas->node); i++) {
- enum maple_type type = mte_node_type(mas->node);
- unsigned long *pivots = ma_pivots(mas_mn(mas), type);
- unsigned long piv = mas_safe_pivot(mas, pivots, type, i);
+ for (i = 0; i < mt_slots[type]; i++) {
+ unsigned long piv;
+
+ piv = mas_safe_pivot(mas, pivots, i, type);
- if (!piv)
+	if (!piv && (i != 0))
break;
if (!mte_is_leaf(mas->node)) {
@@ -6991,6 +7000,26 @@ static void mas_validate_limits(struct ma_state *mas)
if (piv == mas->max)
break;
}
+ for (i += 1; i < mt_slots[type]; i++) {
+ void *entry = mas_slot(mas, slots, i);
+
+ if (entry && (i != mt_slots[type] - 1)) {
+ pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
+ i, entry);
+ MT_BUG_ON(mas->tree, entry != NULL);
+ }
+
+ if (i < mt_pivots[type]) {
+ unsigned long piv = pivots[i];
+
+ if (!piv)
+ continue;
+
+ pr_err("%p[%u] should not have piv %lu\n",
+ mas_mn(mas), i, piv);
+ MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
+ }
+ }
}
static void mt_validate_nulls(struct maple_tree *mt)
@@ -7023,8 +7052,9 @@ static void mt_validate_nulls(struct maple_tree *mt)
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
mte_node_type(mas.node));
- } else
+ } else {
offset++;
+ }
} while (!mas_is_none(&mas));
}
This is already in v11, but I must have messed up the emailing of this
patch, so I am resending for completeness.

When replacing or reusing a node, it is possible that stale data would
be copied into the new node when a store operation wrote to the node
maximum value but into lower slots.  Fix this by skipping the copy step
when the range being written extends to the node maximum, and by
skipping the setting of the end pivot in that case as well.

Reported-by: syzbot+b707736a1ad47fda6500@syzkaller.appspotmail.com
Fixes: 2ee236fe53a8 ("mm: start tracking VMAs with maple tree")
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 lib/maple_tree.c | 56 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 13 deletions(-)
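
For reference, below is a minimal sketch of the failure mode this
fixes, using the public mtree_* API.  This is my illustration, not the
syzbot reproducer; the function name and stored values are made up.
It fills one leaf with a few ranges, then overwrites from a lower slot
up to ULONG_MAX, the node maximum, which is the case the old
mas_wr_node_store() copy loop mishandled:

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/xarray.h>

static DEFINE_MTREE(demo_tree);

static void demo_store_to_node_max(void)
{
	unsigned long i;

	/* A handful of disjoint ranges, all fitting in a single leaf. */
	for (i = 0; i < 5; i++)
		mtree_store_range(&demo_tree, i * 10, i * 10 + 5,
				  xa_mk_value(i), GFP_KERNEL);

	/*
	 * Overwrite from index 12 up to the node maximum.  The write
	 * lands in a lower slot but ends at mas->max, so none of the
	 * old trailing slots may be copied into the replacement node.
	 */
	mtree_store_range(&demo_tree, 12, ULONG_MAX, xa_mk_value(0xff),
			  GFP_KERNEL);

	/* A stale copy would make loads past index 12 see old entries. */
	WARN_ON(mtree_load(&demo_tree, 30) != xa_mk_value(0xff));
	mtree_destroy(&demo_tree);
}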