| Message ID | 20240812190543.71967-2-sidhartha.kumar@oracle.com |
|---|---|
| State | New |
| Series | [v2,1/2] maple_tree: reset mas->index and mas->last on write retries |
* Sidhartha Kumar <sidhartha.kumar@oracle.com> [240812 15:05]:
> Add new callback fields to the userspace implementation of struct
> kmem_cache. This allows for executing callback functions in order to
> further test low memory scenarios where node allocation is retried.
>
> This callback can help test race conditions by calling a function when a
> low memory event is tested.
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
> v1 -> v2:
>   - change test name to check_nomem_writer_race()
>   - move test down in farmer_tests()
>   - remove mas_destroy() from check_nomem_writer_race() as it's not
>     needed
>   - stop using mas.index and mas.last directly; use mas_set_range()
>     and the MA_STATE() macro instead
>   - remove unneeded mas_reset() in check_nomem_writer_race()
>
>  lib/maple_tree.c                 | 13 +++++++
>  tools/testing/radix-tree/maple.c | 63 ++++++++++++++++++++++++++++++++
>  tools/testing/shared/linux.c     | 26 ++++++++++++-
>  3 files changed, 101 insertions(+), 1 deletion(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b547ff211ac7..14d7864b8d53 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -7005,6 +7005,19 @@ void mt_set_non_kernel(unsigned int val)
 	kmem_cache_set_non_kernel(maple_node_cache, val);
 }
 
+extern void kmem_cache_set_callback(struct kmem_cache *cachep,
+		void (*callback)(void *));
+void mt_set_callback(void (*callback)(void *))
+{
+	kmem_cache_set_callback(maple_node_cache, callback);
+}
+
+extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
+void mt_set_private(void *private)
+{
+	kmem_cache_set_private(maple_node_cache, private);
+}
+
 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
 unsigned long mt_get_alloc_size(void)
 {
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index cd1cf05503b4..ef5b83cf94ea 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -36224,6 +36224,65 @@ static noinline void __init check_mtree_dup(struct maple_tree *mt)
 
 extern void test_kmem_cache_bulk(void);
 
+/* callback function used for check_nomem_writer_race() */
+static void writer2(void *maple_tree)
+{
+	struct maple_tree *mt = (struct maple_tree *)maple_tree;
+	MA_STATE(mas, mt, 6, 10);
+
+	mtree_lock(mas.tree);
+	mas_store(&mas, xa_mk_value(0xC));
+	mas_destroy(&mas);
+	mtree_unlock(mas.tree);
+}
+
+/*
+ * check_nomem_writer_race() - test a possible race in the mas_nomem() path
+ * @mt: The tree to build.
+ *
+ * There is a possible race condition in low memory conditions when mas_nomem()
+ * gives up its lock. A second writer can change the entry that the primary
+ * writer executing the mas_nomem() path is modifying. This test recreates this
+ * scenario to ensure we are handling it correctly.
+ */
+static void check_nomem_writer_race(struct maple_tree *mt)
+{
+	MA_STATE(mas, mt, 0, 5);
+
+	mt_set_non_kernel(0);
+	/* setup root with 2 values with NULL in between */
+	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+	mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL);
+
+	/* setup writer 2 that will trigger the race condition */
+	mt_set_private(mt);
+	mt_set_callback(writer2);
+
+	mtree_lock(mt);
+	/* erase 0-5 */
+	mas_erase(&mas);
+
+	/* index 6-10 should retain the value from writer 2 */
+	check_load(mt, 6, xa_mk_value(0xC));
+	mtree_unlock(mt);
+
+	/* test for the same race but with mas_store_gfp() */
+	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+
+	mas_set_range(&mas, 0, 5);
+	mtree_lock(mt);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+
+	/* ensure write made by writer 2 is retained */
+	check_load(mt, 6, xa_mk_value(0xC));
+
+	mt_set_private(NULL);
+	mt_set_callback(NULL);
+	mtree_unlock(mt);
+}
+
 void farmer_tests(void)
 {
 	struct maple_node *node;
@@ -36257,6 +36316,10 @@ void farmer_tests(void)
 	check_dfs_preorder(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
+	check_nomem_writer_race(&tree);
+	mtree_destroy(&tree);
+
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_prealloc(&tree);
 	mtree_destroy(&tree);
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 4eb442206d01..17263696b5d8 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -26,8 +26,21 @@ struct kmem_cache {
 	unsigned int non_kernel;
 	unsigned long nr_allocated;
 	unsigned long nr_tallocated;
+	bool exec_callback;
+	void (*callback)(void *);
+	void *private;
 };
 
+void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
+{
+	cachep->callback = callback;
+}
+
+void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
+{
+	cachep->private = private;
+}
+
 void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
 {
 	cachep->non_kernel = val;
@@ -58,9 +71,17 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *p;
 
+	if (cachep->exec_callback) {
+		if (cachep->callback)
+			cachep->callback(cachep->private);
+		cachep->exec_callback = false;
+	}
+
 	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
-		if (!cachep->non_kernel)
+		if (!cachep->non_kernel) {
+			cachep->exec_callback = true;
 			return NULL;
+		}
 
 		cachep->non_kernel--;
 	}
@@ -223,6 +244,9 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->objs = NULL;
 	ret->ctor = ctor;
 	ret->non_kernel = 0;
+	ret->exec_callback = false;
+	ret->callback = NULL;
+	ret->private = NULL;
 	return ret;
 }
Add new callback fields to the userspace implementation of struct
kmem_cache. This allows for executing callback functions in order to
further test low memory scenarios where node allocation is retried.

This callback can help test race conditions by calling a function when a
low memory event is tested.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
v1 -> v2:
  - change test name to check_nomem_writer_race()
  - move test down in farmer_tests()
  - remove mas_destroy() from check_nomem_writer_race() as it's not needed
  - stop using mas.index and mas.last directly; use mas_set_range() and
    the MA_STATE() macro instead
  - remove unneeded mas_reset() in check_nomem_writer_race()

 lib/maple_tree.c                 | 13 +++++++
 tools/testing/radix-tree/maple.c | 63 ++++++++++++++++++++++++++++++++
 tools/testing/shared/linux.c     | 26 ++++++++++++-
 3 files changed, 101 insertions(+), 1 deletion(-)
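For context, here is a minimal sketch of how a userspace test could drive the new hooks, assuming this patch is applied to the radix-tree test harness. It only uses functions introduced or already present in the patch (mt_set_non_kernel(), mt_set_private(), mt_set_callback(), mtree_store_range(), mas_store()); the helper names racing_writer() and low_mem_example() and the specific ranges are illustrative, not part of the patch. The harness arms the callback when a non-reclaim node allocation fails, and fires it once on the following allocation attempt, which in the maple tree happens on the mas_nomem() retry after the lock has been dropped.

```c
/* Illustrative sketch; racing_writer() and low_mem_example() are not in the patch. */
static void racing_writer(void *private)
{
	struct maple_tree *mt = private;
	MA_STATE(mas, mt, 6, 10);

	/*
	 * Runs while the primary writer has dropped the lock in
	 * mas_nomem(). Overwrite an existing range so this store does
	 * not itself need a new node (and re-arm the callback).
	 */
	mtree_lock(mas.tree);
	mas_store(&mas, xa_mk_value(0xC));
	mtree_unlock(mas.tree);
}

static void low_mem_example(struct maple_tree *mt)
{
	mt_set_non_kernel(0);           /* next non-reclaim allocation fails */
	mt_set_private(mt);             /* passed through to the callback */
	mt_set_callback(racing_writer); /* fires on the allocation retry */

	/*
	 * A write that needs a new node fails its locked (non-reclaim)
	 * allocation, drops the lock in mas_nomem(), and the GFP_KERNEL
	 * retry invokes racing_writer() before the write completes.
	 */
	mtree_store_range(mt, 0, 5, NULL, GFP_KERNEL);

	mt_set_callback(NULL);
	mt_set_private(NULL);
}
```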