mm/slab.c: add node spinlock protect in __cache_free_alien

Message ID 20200728095551.30634-1-qiang.zhang@windriver.com (mailing list archive)
State New, archived
Series mm/slab.c: add node spinlock protect in __cache_free_alien

Commit Message

Zhang, Qiang July 28, 2020, 9:55 a.m. UTC
From: Zhang Qiang <qiang.zhang@windriver.com>

We should take the node's "list_lock" spinlock to protect "n->alien",
which may be set to NULL by cpuup_canceled(); otherwise an invalid
address may be dereferenced.
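
A simplified picture of the suspected race (reconstructed for
illustration; the exact upstream interleaving may differ):

    CPU A (__cache_free_alien)              CPU B (cpuup_canceled)
    --------------------------              ----------------------
    n = get_node(cachep, node);
    if (n->alien && n->alien[page_node]) {
                                            spin_lock_irq(&n->list_lock);
                                            alien = n->alien;
                                            n->alien = NULL;
                                            spin_unlock_irq(&n->list_lock);
                                            free_alien_cache(alien);
        alien = n->alien[page_node];
        /* NULL dereference of n->alien, or
         * use-after-free of the alien array */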

Fixes: 18bf854117c6 ("slab: use get_node() and kmem_cache_node() functions")
Signed-off-by: Zhang Qiang <qiang.zhang@windriver.com>
---
 mm/slab.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

Comments

David Rientjes July 28, 2020, 7:46 p.m. UTC | #1
On Tue, 28 Jul 2020, qiang.zhang@windriver.com wrote:

> From: Zhang Qiang <qiang.zhang@windriver.com>
> 
> We should take the node's "list_lock" spinlock to protect "n->alien",
> which may be set to NULL by cpuup_canceled(); otherwise an invalid
> address may be dereferenced.
> 

Hi, do you have an example of a NULL pointer dereference where you have
actually hit this?

This rather looks like something to fix up in cpuup_canceled() since it's 
currently manipulating the alien cache for the canceled cpu's node.
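
For reference, cpuup_canceled() does roughly the following for each cache
on the canceled CPU's node (a simplified sketch with the percpu/shared
arraycache teardown and error paths elided; not the exact upstream code):

	static void cpuup_canceled(long cpu)
	{
		struct kmem_cache *cachep;
		int node = cpu_to_mem(cpu);

		list_for_each_entry(cachep, &slab_caches, list) {
			struct alien_cache **alien;
			struct kmem_cache_node *n = get_node(cachep, node);

			if (!n)
				continue;

			spin_lock_irq(&n->list_lock);
			/* ... drain the dead cpu's arraycache and n->shared ... */
			alien = n->alien;
			n->alien = NULL;	/* the store the patch races with */
			spin_unlock_irq(&n->list_lock);

			if (alien) {
				drain_alien_cache(cachep, alien);
				free_alien_cache(alien);
			}
		}
	}

So n->alien is cleared under n->list_lock, and the alien array is freed
right after the lock is dropped; that is the window the patch is trying
to close.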


Patch

diff --git a/mm/slab.c b/mm/slab.c
index a89633603b2d..290523c90b4e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,8 +759,10 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
+	spin_lock(&n->list_lock);
 	if (n->alien && n->alien[page_node]) {
 		alien = n->alien[page_node];
+		spin_unlock(&n->list_lock);
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
@@ -769,14 +771,15 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 		}
 		ac->entry[ac->avail++] = objp;
 		spin_unlock(&alien->lock);
-		slabs_destroy(cachep, &list);
 	} else {
+		spin_unlock(&n->list_lock);
 		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
 	}
+
+	slabs_destroy(cachep, &list);
 	return 1;
 }
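
For readability, here is how __cache_free_alien() would read with this
patch applied (a sketch reconstructed from the diff above; the two lines
elided between the hunks are filled in from v5.8-era mm/slab.c and should
be treated as assumptions):

	static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				      int node, int page_node)
	{
		struct kmem_cache_node *n;
		struct alien_cache *alien = NULL;
		struct array_cache *ac;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		STATS_INC_NODEFREES(cachep);
		spin_lock(&n->list_lock);	/* added: pins n->alien */
		if (n->alien && n->alien[page_node]) {
			alien = n->alien[page_node];
			spin_unlock(&n->list_lock);
			ac = &alien->ac;
			spin_lock(&alien->lock);
			if (unlikely(ac->avail == ac->limit)) {
				STATS_INC_ACOVERFLOW(cachep);
				__drain_alien_cache(cachep, ac, page_node, &list);
			}
			ac->entry[ac->avail++] = objp;
			spin_unlock(&alien->lock);
		} else {
			spin_unlock(&n->list_lock);
			n = get_node(cachep, page_node);
			spin_lock(&n->list_lock);
			free_block(cachep, &objp, 1, page_node, &list);
			spin_unlock(&n->list_lock);
		}

		slabs_destroy(cachep, &list);	/* now common to both paths */
		return 1;
	}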