mm: list_lru: fix UAF for memory cgroup

Message ID 20240718083607.42068-1-songmuchun@bytedance.com (mailing list archive)
State New
Series mm: list_lru: fix UAF for memory cgroup

Commit Message

Muchun Song July 18, 2024, 8:36 a.m. UTC
mem_cgroup_from_slab_obj() is supposed to be called under the rcu
read lock, cgroup_mutex, or others which could prevent the returned
memcg from being freed. Fix it by adding the missing rcu read lock.

Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/list_lru.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
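
For context, the window being closed looks roughly like this (an
illustrative sketch inferred from the commit message; the exact
interleaving shown is an assumption, not a reported reproducer):

/*
 * CPU0: list_lru_add_obj()                  CPU1:
 *   memcg = mem_cgroup_from_slab_obj(item);
 *                                           last reference to the memcg
 *                                           is dropped and it is freed
 *                                           once an RCU grace period
 *                                           elapses
 *   list_lru_add(lru, item, nid, memcg);    <- use-after-free
 *
 * Holding rcu_read_lock() across both calls keeps the grace period from
 * completing, so the memcg stays valid until rcu_read_unlock().
 */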

Comments

Vlastimil Babka July 18, 2024, 10:30 a.m. UTC | #1
On 7/18/24 10:36 AM, Muchun Song wrote:
> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
> read lock, cgroup_mutex, or others which could prevent the returned
> memcg from being freed. Fix it by adding the missing rcu read lock.

Was the UAF ever observed, or is this due to code review?
Should there be some lockdep_assert somewhere?

> Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
>  mm/list_lru.c | 24 ++++++++++++++++++------
>  1 file changed, 18 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 3fd64736bc458..225da0778a3be 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>  
> +/* The caller must ensure the memcg lifetime. */
>  bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
>  		    struct mem_cgroup *memcg)
>  {
> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>  
>  bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>  {
> +	bool ret;
>  	int nid = page_to_nid(virt_to_page(item));
> -	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
> -		mem_cgroup_from_slab_obj(item) : NULL;
> +	struct mem_cgroup *memcg;
>  
> -	return list_lru_add(lru, item, nid, memcg);
> +	rcu_read_lock();
> +	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
> +	ret = list_lru_add(lru, item, nid, memcg);
> +	rcu_read_unlock();
> +
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(list_lru_add_obj);
>  
> +/* The caller must ensure the memcg lifetime. */
>  bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
>  		    struct mem_cgroup *memcg)
>  {
> @@ -139,11 +146,16 @@ EXPORT_SYMBOL_GPL(list_lru_del);
>  
>  bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
>  {
> +	bool ret;
>  	int nid = page_to_nid(virt_to_page(item));
> -	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
> -		mem_cgroup_from_slab_obj(item) : NULL;
> +	struct mem_cgroup *memcg;
>  
> -	return list_lru_del(lru, item, nid, memcg);
> +	rcu_read_lock();
> +	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
> +	ret = list_lru_del(lru, item, nid, memcg);
> +	rcu_read_unlock();
> +
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(list_lru_del_obj);
>
Muchun Song July 18, 2024, 11:20 a.m. UTC | #2
> On Jul 18, 2024, at 18:30, Vlastimil Babka <vbabka@suse.cz> wrote:
> 
> On 7/18/24 10:36 AM, Muchun Song wrote:
>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>> read lock, cgroup_mutex, or others which could prevent the returned
>> memcg from being freed. Fix it by adding the missing rcu read lock.
> 
> Was the UAF ever observed, or is this due to code review?

Just code review.

Thanks.

> Should there be some lockdep_assert somewhere?
> 

That would be a good way to improve this. Maybe mem_cgroup_from_slab_obj() is a good place.
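
A minimal sketch of what such an assertion could look like, assuming it
covers the two guarantees named in the commit message (illustrative
only; the exact check set is an assumption, and per the follow-up below
the check ultimately went into obj_cgroup_memcg() instead):

struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	/*
	 * Warn once unless the caller holds something that keeps the
	 * returned memcg alive: the rcu read lock or cgroup_mutex.
	 */
	lockdep_assert_once(rcu_read_lock_held() ||
			    lockdep_is_held(&cgroup_mutex));

	/* ... existing lookup via the object's obj_cgroup ... */
}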

>> Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
>> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
>> ---
>> mm/list_lru.c | 24 ++++++++++++++++++------
>> 1 file changed, 18 insertions(+), 6 deletions(-)
>> 
>> diff --git a/mm/list_lru.c b/mm/list_lru.c
>> index 3fd64736bc458..225da0778a3be 100644
>> --- a/mm/list_lru.c
>> +++ b/mm/list_lru.c
>> @@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
>> }
>> #endif /* CONFIG_MEMCG_KMEM */
>> 
>> +/* The caller must ensure the memcg lifetime. */
>> bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
>>            struct mem_cgroup *memcg)
>> {
>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>> 
>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>> {
>> +    bool ret;
>>    int nid = page_to_nid(virt_to_page(item));
>> -    struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>> -        mem_cgroup_from_slab_obj(item) : NULL;
>> +    struct mem_cgroup *memcg;
>> 
>> -    return list_lru_add(lru, item, nid, memcg);
>> +    rcu_read_lock();
>> +    memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>> +    ret = list_lru_add(lru, item, nid, memcg);
>> +    rcu_read_unlock();
>> +
>> +    return ret;
>> }
>> EXPORT_SYMBOL_GPL(list_lru_add_obj);
>> 
>> +/* The caller must ensure the memcg lifetime. */
>> bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
>>            struct mem_cgroup *memcg)
>> {
>> @@ -139,11 +146,16 @@ EXPORT_SYMBOL_GPL(list_lru_del);
>> 
>> bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
>> {
>> +    bool ret;
>>    int nid = page_to_nid(virt_to_page(item));
>> -    struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>> -        mem_cgroup_from_slab_obj(item) : NULL;
>> +    struct mem_cgroup *memcg;
>> 
>> -    return list_lru_del(lru, item, nid, memcg);
>> +    rcu_read_lock();
>> +    memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>> +    ret = list_lru_del(lru, item, nid, memcg);
>> +    rcu_read_unlock();
>> +
>> +    return ret;
>> }
>> EXPORT_SYMBOL_GPL(list_lru_del_obj);
>> 
>
Muchun Song July 23, 2024, 11:23 a.m. UTC | #3
> On Jul 18, 2024, at 19:20, Muchun Song <muchun.song@linux.dev> wrote:
> 
> 
> 
>> On Jul 18, 2024, at 18:30, Vlastimil Babka <vbabka@suse.cz> wrote:
>> 
>> On 7/18/24 10:36 AM, Muchun Song wrote:
>>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>>> read lock, cgroup_mutex, or others which could prevent the returned
>>> memcg from being freed. Fix it by adding the missing rcu read lock.
>> 
>> Was the UAF ever observed, or is this due to code review?
> 
> Just code review.
> 
> Thanks.
> 
>> Should there be some lockdep_assert somewhere?
>> 
> 
> That would be a good way to improve this. Maybe mem_cgroup_from_slab_obj() is a good place.

I added it to obj_cgroup_memcg() [1], and CC'd the memory cgroup maintainers on this thread.


[1] https://lore.kernel.org/linux-mm/20859F67-A80C-4FD0-990C-40C70905E55B@linux.dev/T/

> 
>>> Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
>>> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
>>> ---
>>> mm/list_lru.c | 24 ++++++++++++++++++------
>>> 1 file changed, 18 insertions(+), 6 deletions(-)
>>> 
>>> diff --git a/mm/list_lru.c b/mm/list_lru.c
>>> index 3fd64736bc458..225da0778a3be 100644
>>> --- a/mm/list_lru.c
>>> +++ b/mm/list_lru.c
>>> @@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
>>> }
>>> #endif /* CONFIG_MEMCG_KMEM */
>>> 
>>> +/* The caller must ensure the memcg lifetime. */
>>> bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
>>>           struct mem_cgroup *memcg)
>>> {
>>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>>> 
>>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>>> {
>>> +    bool ret;
>>>   int nid = page_to_nid(virt_to_page(item));
>>> -    struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>>> -        mem_cgroup_from_slab_obj(item) : NULL;
>>> +    struct mem_cgroup *memcg;
>>> 
>>> -    return list_lru_add(lru, item, nid, memcg);
>>> +    rcu_read_lock();
>>> +    memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>>> +    ret = list_lru_add(lru, item, nid, memcg);
>>> +    rcu_read_unlock();
>>> +
>>> +    return ret;
>>> }
>>> EXPORT_SYMBOL_GPL(list_lru_add_obj);
>>> 
>>> +/* The caller must ensure the memcg lifetime. */
>>> bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
>>>           struct mem_cgroup *memcg)
>>> {
>>> @@ -139,11 +146,16 @@ EXPORT_SYMBOL_GPL(list_lru_del);
>>> 
>>> bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
>>> {
>>> +    bool ret;
>>>   int nid = page_to_nid(virt_to_page(item));
>>> -    struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>>> -        mem_cgroup_from_slab_obj(item) : NULL;
>>> +    struct mem_cgroup *memcg;
>>> 
>>> -    return list_lru_del(lru, item, nid, memcg);
>>> +    rcu_read_lock();
>>> +    memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>>> +    ret = list_lru_del(lru, item, nid, memcg);
>>> +    rcu_read_unlock();
>>> +
>>> +    return ret;
>>> }
>>> EXPORT_SYMBOL_GPL(list_lru_del_obj);
>>> 
>>
Shakeel Butt July 23, 2024, 5:52 p.m. UTC | #4
On Thu, Jul 18, 2024 at 04:36:07PM GMT, Muchun Song wrote:
> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
> read lock, cgroup_mutex, or others which could prevent the returned
> memcg from being freed. Fix it by adding the missing rcu read lock.
> 
> Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>

Yup I noticed these as well while reviewing Kairui's patches.

Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Andrew Morton July 24, 2024, 12:45 a.m. UTC | #5
On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:

> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
> read lock, cgroup_mutex, or others which could prevent the returned
> memcg from being freed. Fix it by adding the missing rcu read lock.

"or others" is rather vague.  What others?

> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>  
>  bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>  {
> +	bool ret;
>  	int nid = page_to_nid(virt_to_page(item));
> -	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
> -		mem_cgroup_from_slab_obj(item) : NULL;
> +	struct mem_cgroup *memcg;
>  
> -	return list_lru_add(lru, item, nid, memcg);
> +	rcu_read_lock();
> +	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
> +	ret = list_lru_add(lru, item, nid, memcg);
> +	rcu_read_unlock();

We don't need rcu_read_lock() to evaluate NULL.

	memcg = NULL;
	if (list_lru_memcg_aware(lru)) {
		rcu_read_lock();
		memcg = mem_cgroup_from_slab_obj(item);
		rcu_read_unlock();
	}

Seems worthwhile?
Muchun Song July 24, 2024, 2:23 a.m. UTC | #6
> On Jul 24, 2024, at 08:45, Andrew Morton <akpm@linux-foundation.org> wrote:
> 
> On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:
> 
>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>> read lock, cgroup_mutex, or others which could prevent the returned
>> memcg from being freed. Fix it by adding the missing rcu read lock.
> 
> "or others" is rather vague.  What others?

Like objcg_lock. I have added it to obj_cgroup_memcg().

> 
>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>> 
>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>> {
>> + 	bool ret;
>> 	int nid = page_to_nid(virt_to_page(item));
>> - 	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>> - 		mem_cgroup_from_slab_obj(item) : NULL;
>> + 	struct mem_cgroup *memcg;
>> 
>> - 	return list_lru_add(lru, item, nid, memcg);
>> + 	rcu_read_lock();
>> + 	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>> + 	ret = list_lru_add(lru, item, nid, memcg);
>> + 	rcu_read_unlock();
> 
> We don't need rcu_read_lock() to evaluate NULL.
> 
> 	memcg = NULL;
> 	if (list_lru_memcg_aware(lru)) {
> 		rcu_read_lock();
> 		memcg = mem_cgroup_from_slab_obj(item);
> 		rcu_read_unlock();

Actually, the access to memcg is in list_lru_add(), so the rcu lock should
also cover this function rather than only mem_cgroup_from_slab_obj().
Something like:

memcg = NULL;
if (list_lru_memcg_aware(lru)) {
	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(item);
}
ret = list_lru_add(lru, item, nid, memcg);
if (list_lru_memcg_aware(lru))
	rcu_read_unlock();

Not concise. I don't know if this is good.

> 	}
> 
> Seems worthwhile?
> 
>
Vlastimil Babka (SUSE) July 31, 2024, 10:06 a.m. UTC | #7
On 7/24/24 4:23 AM, Muchun Song wrote:
> 
> 
>> On Jul 24, 2024, at 08:45, Andrew Morton <akpm@linux-foundation.org> wrote:
>> 
>> On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:
>> 
>>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>>> read lock, cgroup_mutex, or others which could prevent the returned
>>> memcg from being freed. Fix it by adding the missing rcu read lock.
>> 
>> "or others" is rather vague.  What others?
> 
> Like objcg_lock. I have added it to obj_cgroup_memcg().
> 
>> 
>>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>>> 
>>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>>> {
>>> + 	bool ret;
>>> 	int nid = page_to_nid(virt_to_page(item));
>>> - 	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>>> - 		mem_cgroup_from_slab_obj(item) : NULL;
>>> + 	struct mem_cgroup *memcg;
>>> 
>>> - 	return list_lru_add(lru, item, nid, memcg);
>>> + 	rcu_read_lock();
>>> + 	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>>> + 	ret = list_lru_add(lru, item, nid, memcg);
>>> + 	rcu_read_unlock();
>> 
>> We don't need rcu_read_lock() to evaluate NULL.
>> 
>> 	memcg = NULL;
>> 	if (list_lru_memcg_aware(lru)) {
>> 		rcu_read_lock();
>> 		memcg = mem_cgroup_from_slab_obj(item);
>> 		rcu_read_unlock();
> 
> Actually, the access to memcg is in list_lru_add(), so the rcu lock should
> also cover this function rather than only mem_cgroup_from_slab_obj().
> Something like:
> 
> memcg = NULL;
> if (list_lru_memcg_aware(lru)) {
> 	rcu_read_lock();
> 	memcg = mem_cgroup_from_slab_obj(item);
> }
> ret = list_lru_add(lru, item, nid, memcg);
> if (list_lru_memcg_aware(lru))
> 	rcu_read_unlock();
> 
> Not concise. I don't know if this is good.

At that point, it's probably best to just:

if (list_lru_memcg_aware(lru)) {
	rcu_read_lock();
	ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
	rcu_read_unlock();
} else {
	ret = list_lru_add(lru, item, nid, NULL);
}

?

> 
>> 	}
>> 
>> Seems worthwhile?
>> 
>> 
> 
>
Andrew Morton July 31, 2024, 8:28 p.m. UTC | #8
On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:

> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
> read lock, cgroup_mutex, or others which could prevent the returned
> memcg from being freed. Fix it by adding the missing rcu read lock.
> 

Well I grabbed this, but the review led me to expect a v2.

Should it have cc:stable?
Muchun Song Aug. 1, 2024, 2:42 a.m. UTC | #9
> On Aug 1, 2024, at 04:28, Andrew Morton <akpm@linux-foundation.org> wrote:
> 
> On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:
> 
>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>> read lock, cgroup_mutex, or others which could prevent the returned
>> memcg from being freed. Fix it by adding the missing rcu read lock.
>> 
> 
> Well I grabbed this, but the review led me to expect a v2.

Will do.

> 
> Should it have cc:stable?

Yes.
Muchun Song Aug. 1, 2024, 2:42 a.m. UTC | #10
> On Jul 31, 2024, at 18:06, Vlastimil Babka (SUSE) <vbabka@kernel.org> wrote:
> 
> On 7/24/24 4:23 AM, Muchun Song wrote:
>> 
>> 
>>> On Jul 24, 2024, at 08:45, Andrew Morton <akpm@linux-foundation.org> wrote:
>>> 
>>> On Thu, 18 Jul 2024 16:36:07 +0800 Muchun Song <songmuchun@bytedance.com> wrote:
>>> 
>>>> mem_cgroup_from_slab_obj() is supposed to be called under the rcu
>>>> read lock, cgroup_mutex, or others which could prevent the returned
>>>> memcg from being freed. Fix it by adding the missing rcu read lock.
>>> 
>>> "or others" is rather vague.  What others?
>> 
>> Like objcg_lock. I have added it to obj_cgroup_memcg().
>> 
>>> 
>>>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>>>> 
>>>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>>>> {
>>>> +  bool ret;
>>>> int nid = page_to_nid(virt_to_page(item));
>>>> -  struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>>>> -    mem_cgroup_from_slab_obj(item) : NULL;
>>>> +  struct mem_cgroup *memcg;
>>>> 
>>>> -  return list_lru_add(lru, item, nid, memcg);
>>>> +  rcu_read_lock();
>>>> +  memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>>>> +  ret = list_lru_add(lru, item, nid, memcg);
>>>> +  rcu_read_unlock();
>>> 
>>> We don't need rcu_read_lock() to evaluate NULL.
>>> 
>>> 	memcg = NULL;
>>> 	if (list_lru_memcg_aware(lru)) {
>>> 		rcu_read_lock();
>>> 		memcg = mem_cgroup_from_slab_obj(item);
>>> 		rcu_read_unlock();
>> 
>> Actually, the access to memcg is in list_lru_add(), so the rcu lock should
>> also cover this function rather than only mem_cgroup_from_slab_obj().
>> Something like:
>> 
>> memcg = NULL;
>> if (list_lru_memcg_aware(lru)) {
>> 	rcu_read_lock();
>> 	memcg = mem_cgroup_from_slab_obj(item);
>> }
>> ret = list_lru_add(lru, item, nid, memcg);
>> if (list_lru_memcg_aware(lru))
>> 	rcu_read_unlock();
>> 
>> Not concise. I don't know if this is good.
> 
> At that point, it's probably best to just:
> 
> if (list_lru_memcg_aware(lru)) {
> 	rcu_read_lock();
> 	ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
> 	rcu_read_unlock();
> } else {
> 	ret = list_lru_add(lru, item, nid, NULL);
> }

Good. Will update v2.

Thanks.

> 
> ?
> 
>> 
>>> }
>>> 
>>> Seems worthwhile?

Patch

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 3fd64736bc458..225da0778a3be 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		    struct mem_cgroup *memcg)
 {
@@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
+	struct mem_cgroup *memcg;
 
-	return list_lru_add(lru, item, nid, memcg);
+	rcu_read_lock();
+	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
+	ret = list_lru_add(lru, item, nid, memcg);
+	rcu_read_unlock();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		    struct mem_cgroup *memcg)
 {
@@ -139,11 +146,16 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
+	struct mem_cgroup *memcg;
 
-	return list_lru_del(lru, item, nid, memcg);
+	rcu_read_lock();
+	memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
+	ret = list_lru_del(lru, item, nid, memcg);
+	rcu_read_unlock();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);
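
With the patch applied, the contract is: list_lru_add_obj() and
list_lru_del_obj() resolve and use the memcg under the rcu read lock
themselves, while the raw list_lru_add()/list_lru_del() leave the memcg
lifetime to the caller, per the comments added above. A hypothetical
caller (the struct and function names below are invented for
illustration, and the entry is assumed to be slab-allocated so that
mem_cgroup_from_slab_obj() applies) might look like:

#include <linux/list_lru.h>
#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

struct my_entry {			/* hypothetical, e.g. kmalloc'd */
	struct list_head lru_node;
	/* ... payload ... */
};

/* The _obj wrapper is safe as-is: it takes the rcu read lock itself. */
static void my_entry_make_reclaimable(struct list_lru *lru,
				      struct my_entry *e)
{
	list_lru_add_obj(lru, &e->lru_node);
}

/*
 * The raw variant requires the caller to keep the memcg alive, e.g. by
 * resolving and using it inside one RCU read-side critical section.
 */
static void my_entry_unqueue(struct list_lru *lru, struct my_entry *e,
			     int nid)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(e);
	list_lru_del(lru, &e->lru_node, nid, memcg);
	rcu_read_unlock();
}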