diff mbox series

[RFC,12/15] xarray: Implement migration function for objects

Message ID 20190308041426.16654-13-tobin@kernel.org (mailing list archive)
State New, archived
Headers show
Series mm: Implement Slab Movable Objects (SMO) | expand

Commit Message

Tobin C. Harding March 8, 2019, 4:14 a.m. UTC
Implement functions to migrate objects. This is based on
initial code by Matthew Wilcox and was modified to work with
slab object migration.

Co-developed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
---
 lib/radix-tree.c | 13 +++++++++++++
 lib/xarray.c     | 44 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

Comments

Roman Gushchin March 12, 2019, 12:16 a.m. UTC | #1
On Fri, Mar 08, 2019 at 03:14:23PM +1100, Tobin C. Harding wrote:
> Implement functions to migrate objects. This is based on
> initial code by Matthew Wilcox and was modified to work with
> slab object migration.
> 
> Co-developed-by: Christoph Lameter <cl@linux.com>
> Signed-off-by: Tobin C. Harding <tobin@kernel.org>
> ---
>  lib/radix-tree.c | 13 +++++++++++++
>  lib/xarray.c     | 44 ++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 57 insertions(+)
> 
> diff --git a/lib/radix-tree.c b/lib/radix-tree.c
> index 14d51548bea6..9412c2853726 100644
> --- a/lib/radix-tree.c
> +++ b/lib/radix-tree.c
> @@ -1613,6 +1613,17 @@ static int radix_tree_cpu_dead(unsigned int cpu)
>  	return 0;
>  }
>  
> +extern void xa_object_migrate(void *tree_node, int numa_node);
> +
> +static void radix_tree_migrate(struct kmem_cache *s, void **objects, int nr,
> +			       int node, void *private)
> +{
> +	int i;
> +
> +	for (i = 0; i < nr; i++)
> +		xa_object_migrate(objects[i], node);
> +}
> +
>  void __init radix_tree_init(void)
>  {
>  	int ret;
> @@ -1627,4 +1638,6 @@ void __init radix_tree_init(void)
>  	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
>  					NULL, radix_tree_cpu_dead);
>  	WARN_ON(ret < 0);
> +	kmem_cache_setup_mobility(radix_tree_node_cachep, NULL,
> +				  radix_tree_migrate);
>  }
> diff --git a/lib/xarray.c b/lib/xarray.c
> index 81c3171ddde9..4f6f17c87769 100644
> --- a/lib/xarray.c
> +++ b/lib/xarray.c
> @@ -1950,6 +1950,50 @@ void xa_destroy(struct xarray *xa)
>  }
>  EXPORT_SYMBOL(xa_destroy);
>  
> +void xa_object_migrate(struct xa_node *node, int numa_node)
> +{
> +	struct xarray *xa = READ_ONCE(node->array);
> +	void __rcu **slot;
> +	struct xa_node *new_node;
> +	int i;
> +
> +	/* Freed or not yet in tree then skip */
> +	if (!xa || xa == XA_FREE_MARK)
> +		return;

XA_FREE_MARK is equal to 0, so the second check is redundant.

#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

xa_node_free() sets node->array to XA_RCU_FREE, so maybe it's
what you need. I'm not sure however, Matthew should know better.

> +
> +	new_node = kmem_cache_alloc_node(radix_tree_node_cachep,
> +					 GFP_KERNEL, numa_node);

We need to check here if the allocation was successful.

Thanks!
Tobin Harding March 12, 2019, 1:54 a.m. UTC | #2
On Tue, Mar 12, 2019 at 12:16:07AM +0000, Roman Gushchin wrote:
> On Fri, Mar 08, 2019 at 03:14:23PM +1100, Tobin C. Harding wrote:
> > Implement functions to migrate objects. This is based on
> > initial code by Matthew Wilcox and was modified to work with
> > slab object migration.
> > 
> > Co-developed-by: Christoph Lameter <cl@linux.com>
> > Signed-off-by: Tobin C. Harding <tobin@kernel.org>
> > ---
> >  lib/radix-tree.c | 13 +++++++++++++
> >  lib/xarray.c     | 44 ++++++++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 57 insertions(+)
> > 
> > diff --git a/lib/radix-tree.c b/lib/radix-tree.c
> > index 14d51548bea6..9412c2853726 100644
> > --- a/lib/radix-tree.c
> > +++ b/lib/radix-tree.c
> > @@ -1613,6 +1613,17 @@ static int radix_tree_cpu_dead(unsigned int cpu)
> >  	return 0;
> >  }
> >  
> > +extern void xa_object_migrate(void *tree_node, int numa_node);
> > +
> > +static void radix_tree_migrate(struct kmem_cache *s, void **objects, int nr,
> > +			       int node, void *private)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < nr; i++)
> > +		xa_object_migrate(objects[i], node);
> > +}
> > +
> >  void __init radix_tree_init(void)
> >  {
> >  	int ret;
> > @@ -1627,4 +1638,6 @@ void __init radix_tree_init(void)
> >  	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
> >  					NULL, radix_tree_cpu_dead);
> >  	WARN_ON(ret < 0);
> > +	kmem_cache_setup_mobility(radix_tree_node_cachep, NULL,
> > +				  radix_tree_migrate);
> >  }
> > diff --git a/lib/xarray.c b/lib/xarray.c
> > index 81c3171ddde9..4f6f17c87769 100644
> > --- a/lib/xarray.c
> > +++ b/lib/xarray.c
> > @@ -1950,6 +1950,50 @@ void xa_destroy(struct xarray *xa)
> >  }
> >  EXPORT_SYMBOL(xa_destroy);
> >  
> > +void xa_object_migrate(struct xa_node *node, int numa_node)
> > +{
> > +	struct xarray *xa = READ_ONCE(node->array);
> > +	void __rcu **slot;
> > +	struct xa_node *new_node;
> > +	int i;
> > +
> > +	/* Freed or not yet in tree then skip */
> > +	if (!xa || xa == XA_FREE_MARK)
> > +		return;
> 
> XA_FREE_MARK is equal to 0, so the second check is redundant.
> 
> #define XA_MARK_0		((__force xa_mark_t)0U)
> #define XA_MARK_1		((__force xa_mark_t)1U)
> #define XA_MARK_2		((__force xa_mark_t)2U)
> #define XA_PRESENT		((__force xa_mark_t)8U)
> #define XA_MARK_MAX		XA_MARK_2
> #define XA_FREE_MARK		XA_MARK_0
> 
> xa_node_free() sets node->array to XA_RCU_FREE, so maybe it's
> what you need. I'm not sure however, Matthew should know better.

Cheers, will wait for his input.

> > +
> > +	new_node = kmem_cache_alloc_node(radix_tree_node_cachep,
> > +					 GFP_KERNEL, numa_node);
> 
> We need to check here if the allocation was successful.

Whoops, bad Tobin.  Thanks.


	Tobin.
diff mbox series

Patch

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 14d51548bea6..9412c2853726 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1613,6 +1613,17 @@  static int radix_tree_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
+extern void xa_object_migrate(void *tree_node, int numa_node);
+
+static void radix_tree_migrate(struct kmem_cache *s, void **objects, int nr,
+			       int node, void *private)
+{
+	int i;
+
+	for (i = 0; i < nr; i++)
+		xa_object_migrate(objects[i], node);
+}
+
 void __init radix_tree_init(void)
 {
 	int ret;
@@ -1627,4 +1638,6 @@  void __init radix_tree_init(void)
 	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
 					NULL, radix_tree_cpu_dead);
 	WARN_ON(ret < 0);
+	kmem_cache_setup_mobility(radix_tree_node_cachep, NULL,
+				  radix_tree_migrate);
 }
diff --git a/lib/xarray.c b/lib/xarray.c
index 81c3171ddde9..4f6f17c87769 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1950,6 +1950,50 @@  void xa_destroy(struct xarray *xa)
 }
 EXPORT_SYMBOL(xa_destroy);
 
+void xa_object_migrate(struct xa_node *node, int numa_node)
+{
+	struct xarray *xa = READ_ONCE(node->array);
+	void __rcu **slot;
+	struct xa_node *new_node;
+	int i;
+
+	/* Freed or not yet in tree then skip */
+	if (!xa || xa == XA_FREE_MARK)
+		return;
+
+	new_node = kmem_cache_alloc_node(radix_tree_node_cachep,
+					 GFP_KERNEL, numa_node);
+
+	xa_lock_irq(xa);
+
+	/* Check again..... */
+	if (xa != node->array || !list_empty(&node->private_list)) {
+		node = new_node;
+		goto unlock;
+	}
+
+	memcpy(new_node, node, sizeof(struct xa_node));
+
+	/* Move pointers to new node */
+	INIT_LIST_HEAD(&new_node->private_list);
+	for (i = 0; i < XA_CHUNK_SIZE; i++) {
+		void *x = xa_entry_locked(xa, new_node, i);
+
+		if (xa_is_node(x))
+			rcu_assign_pointer(xa_to_node(x)->parent, new_node);
+	}
+	if (!new_node->parent)
+		slot = &xa->xa_head;
+	else
+		slot = &xa_parent_locked(xa, new_node)->slots[new_node->offset];
+	rcu_assign_pointer(*slot, xa_mk_node(new_node));
+
+unlock:
+	xa_unlock_irq(xa);
+	xa_node_free(node);
+	rcu_barrier();
+}
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {