
mm: memcontrol: fix root_mem_cgroup charging

Message ID 20210421062644.68331-1-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series: mm: memcontrol: fix root_mem_cgroup charging

Commit Message

Muchun Song April 21, 2021, 6:26 a.m. UTC
The below scenario can cause the page counters of the root_mem_cgroup
to be out of balance.

CPU0:                                   CPU1:

objcg = get_obj_cgroup_from_current()
obj_cgroup_charge_pages(objcg)
                                        memcg_reparent_objcgs()
                                            // reparent to root_mem_cgroup
                                            WRITE_ONCE(iter->memcg, parent)
    // memcg == root_mem_cgroup
    memcg = get_mem_cgroup_from_objcg(objcg)
    // do not charge to the root_mem_cgroup
    try_charge(memcg)

obj_cgroup_uncharge_pages(objcg)
    memcg = get_mem_cgroup_from_objcg(objcg)
    // uncharge from the root_mem_cgroup
    page_counter_uncharge(&memcg->memory)

This can cause the page counter to drop below its real value. We do not
display that value (mem_cgroup_usage() does not read the root's page
counter), so there shouldn't be any user-visible problem, but
page_counter_cancel() contains a WARN_ON_ONCE() that this imbalance
could eventually trigger. So it is better to fix it.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/memcontrol.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
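
For reference, a simplified sketch (not part of this patch) of the objcg
lookup that the race diagram refers to, paraphrased from mm/memcontrol.c
of this era; exact details may differ between trees. The objcg->memcg
pointer is what memcg_reparent_objcgs() rewrites, so the charge and the
later uncharge can resolve the same objcg to different memcgs:

static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	/*
	 * memcg_reparent_objcgs() may have switched objcg->memcg to the
	 * parent (ultimately root_mem_cgroup) just before this load.
	 */
	memcg = obj_cgroup_memcg(objcg);
	if (!css_tryget(&memcg->css))
		goto retry;
	rcu_read_unlock();

	return memcg;
}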

Comments

Michal Hocko April 21, 2021, 7:34 a.m. UTC | #1
On Wed 21-04-21 14:26:44, Muchun Song wrote:
> The below scenario can cause the page counters of the root_mem_cgroup
> to be out of balance.
> 
> CPU0:                                   CPU1:
> 
> objcg = get_obj_cgroup_from_current()
> obj_cgroup_charge_pages(objcg)
>                                         memcg_reparent_objcgs()
>                                             // reparent to root_mem_cgroup
>                                             WRITE_ONCE(iter->memcg, parent)
>     // memcg == root_mem_cgroup
>     memcg = get_mem_cgroup_from_objcg(objcg)
>     // do not charge to the root_mem_cgroup
>     try_charge(memcg)
> 
> obj_cgroup_uncharge_pages(objcg)
>     memcg = get_mem_cgroup_from_objcg(objcg)
>     // uncharge from the root_mem_cgroup
>     page_counter_uncharge(&memcg->memory)
> 
> This can cause the page counter to be less than the actual value,
> Although we do not display the value (mem_cgroup_usage) so there
> shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> the page_counter_cancel(). Who knows if it will trigger? So it
> is better to fix it.

The changelog doesn't explain the fix and why you have chosen to charge
kmem objects to root memcg and left all other try_charge users intact.
The reason is likely that those are not reparented now but that just
adds an inconsistency.

Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
to check for the root memcg and bail out early?
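
For illustration, a hypothetical sketch of the alternative being asked
about here (it is not the posted patch); the rest of the thread explains
why a root check on the uncharge side is problematic once pages charged
to a child memcg have been reparented to the root:

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);

	/* hypothetical: bail out early for the root memcg */
	if (!mem_cgroup_is_root(memcg)) {
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			page_counter_uncharge(&memcg->kmem, nr_pages);
		refill_stock(memcg, nr_pages);
	}

	css_put(&memcg->css);
}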

> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
>  mm/memcontrol.c | 17 ++++++++++++-----
>  1 file changed, 12 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 1e68a9992b01..81b54bd9b9e0 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2686,8 +2686,8 @@ void mem_cgroup_handle_over_high(void)
>  	css_put(&memcg->css);
>  }
>  
> -static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> -		      unsigned int nr_pages)
> +static int __try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> +			unsigned int nr_pages)
>  {
>  	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
>  	int nr_retries = MAX_RECLAIM_RETRIES;
> @@ -2699,8 +2699,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	bool drained = false;
>  	unsigned long pflags;
>  
> -	if (mem_cgroup_is_root(memcg))
> -		return 0;
>  retry:
>  	if (consume_stock(memcg, nr_pages))
>  		return 0;
> @@ -2880,6 +2878,15 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	return 0;
>  }
>  
> +static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> +			     unsigned int nr_pages)
> +{
> +	if (mem_cgroup_is_root(memcg))
> +		return 0;
> +
> +	return __try_charge(memcg, gfp_mask, nr_pages);
> +}
> +
>  #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
>  static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>  {
> @@ -3125,7 +3132,7 @@ static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
>  
>  	memcg = get_mem_cgroup_from_objcg(objcg);
>  
> -	ret = try_charge(memcg, gfp, nr_pages);
> +	ret = __try_charge(memcg, gfp, nr_pages);
>  	if (ret)
>  		goto out;
>  
> -- 
> 2.11.0
Muchun Song April 21, 2021, 9:50 a.m. UTC | #2
On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > The below scenario can cause the page counters of the root_mem_cgroup
> > to be out of balance.
> >
> > CPU0:                                   CPU1:
> >
> > objcg = get_obj_cgroup_from_current()
> > obj_cgroup_charge_pages(objcg)
> >                                         memcg_reparent_objcgs()
> >                                             // reparent to root_mem_cgroup
> >                                             WRITE_ONCE(iter->memcg, parent)
> >     // memcg == root_mem_cgroup
> >     memcg = get_mem_cgroup_from_objcg(objcg)
> >     // do not charge to the root_mem_cgroup
> >     try_charge(memcg)
> >
> > obj_cgroup_uncharge_pages(objcg)
> >     memcg = get_mem_cgroup_from_objcg(objcg)
> >     // uncharge from the root_mem_cgroup
> >     page_counter_uncharge(&memcg->memory)
> >
> > This can cause the page counter to be less than the actual value,
> > Although we do not display the value (mem_cgroup_usage) so there
> > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > the page_counter_cancel(). Who knows if it will trigger? So it
> > is better to fix it.
>
> The changelog doesn't explain the fix and why you have chosen to charge
> kmem objects to root memcg and left all other try_charge users intact.

The object cgroup is special (because its pages can be reparented).
Only users of the objcg APIs need to be fixed.

> The reason is likely that those are not reparented now but that just
> adds an inconsistency.
>
> Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> to check for the root memcg and bail out early?

Because obj_cgroup_uncharge_pages() uncharges pages from the
root memcg unconditionally. Why? Some pages can be reparented to
the root memcg, and to keep the root memcg's page counter correct
those pages have to be uncharged from the root memcg. So the
uncharge path does not check whether a page belongs to the root
memcg. Given that, we have to make sure the root memcg's page
counter is also increased when such a page is charged. I think the
diagram in the commit log illustrates this problem well.
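
A rough sketch of the uncharge path being described, assuming the
obj_cgroup_uncharge_pages() shape in linux-next at the time (details
vary by tree); the memcg->memory uncharge happens indirectly through
the per-cpu stock, with no root check anywhere on the way:

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);

	/* cgroup v1 only: the dedicated kmem counter */
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	/*
	 * No mem_cgroup_is_root() check: pages that were charged to a child
	 * memcg and later reparented must still be uncharged from the root,
	 * eventually via drain_stock() -> page_counter_uncharge().
	 */
	refill_stock(memcg, nr_pages);

	css_put(&memcg->css);
}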

Thanks.

>
> > Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> > ---
> >  mm/memcontrol.c | 17 ++++++++++++-----
> >  1 file changed, 12 insertions(+), 5 deletions(-)
> >
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index 1e68a9992b01..81b54bd9b9e0 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -2686,8 +2686,8 @@ void mem_cgroup_handle_over_high(void)
> >       css_put(&memcg->css);
> >  }
> >
> > -static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> > -                   unsigned int nr_pages)
> > +static int __try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> > +                     unsigned int nr_pages)
> >  {
> >       unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
> >       int nr_retries = MAX_RECLAIM_RETRIES;
> > @@ -2699,8 +2699,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> >       bool drained = false;
> >       unsigned long pflags;
> >
> > -     if (mem_cgroup_is_root(memcg))
> > -             return 0;
> >  retry:
> >       if (consume_stock(memcg, nr_pages))
> >               return 0;
> > @@ -2880,6 +2878,15 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> >       return 0;
> >  }
> >
> > +static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
> > +                          unsigned int nr_pages)
> > +{
> > +     if (mem_cgroup_is_root(memcg))
> > +             return 0;
> > +
> > +     return __try_charge(memcg, gfp_mask, nr_pages);
> > +}
> > +
> >  #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
> >  static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
> >  {
> > @@ -3125,7 +3132,7 @@ static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
> >
> >       memcg = get_mem_cgroup_from_objcg(objcg);
> >
> > -     ret = try_charge(memcg, gfp, nr_pages);
> > +     ret = __try_charge(memcg, gfp, nr_pages);
> >       if (ret)
> >               goto out;
> >
> > --
> > 2.11.0
>
> --
> Michal Hocko
> SUSE Labs
Michal Hocko April 21, 2021, 1:03 p.m. UTC | #3
On Wed 21-04-21 17:50:06, Muchun Song wrote:
> On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > The below scenario can cause the page counters of the root_mem_cgroup
> > > to be out of balance.
> > >
> > > CPU0:                                   CPU1:
> > >
> > > objcg = get_obj_cgroup_from_current()
> > > obj_cgroup_charge_pages(objcg)
> > >                                         memcg_reparent_objcgs()
> > >                                             // reparent to root_mem_cgroup
> > >                                             WRITE_ONCE(iter->memcg, parent)
> > >     // memcg == root_mem_cgroup
> > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > >     // do not charge to the root_mem_cgroup
> > >     try_charge(memcg)
> > >
> > > obj_cgroup_uncharge_pages(objcg)
> > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > >     // uncharge from the root_mem_cgroup
> > >     page_counter_uncharge(&memcg->memory)
> > >
> > > This can cause the page counter to be less than the actual value,
> > > Although we do not display the value (mem_cgroup_usage) so there
> > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > is better to fix it.
> >
> > The changelog doesn't explain the fix and why you have chosen to charge
> > kmem objects to root memcg and left all other try_charge users intact.
> 
> The object cgroup is special (because the page can reparent). Only the
> user of objcg APIs should be fixed.
> 
> > The reason is likely that those are not reparented now but that just
> > adds an inconsistency.
> >
> > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > to check for the root memcg and bail out early?
> 
> Because obj_cgroup_uncharge_pages() uncharges pages from the
> root memcg unconditionally. Why? Because some pages can be
> reparented to root memcg, in order to ensure the correctness of
> page counter of root memcg. We have to uncharge pages from
> root memcg. So we do not check whether the page belongs to
> the root memcg when it uncharges.

I am not sure I follow. Let me ask differently. Wouldn't you
achieve the same if you simply didn't uncharge root memcg in
obj_cgroup_charge_pages?

Btw. which tree is this patch based on? The current linux-next doesn't
uncharge from memcg->memory inside obj_cgroup_uncharge_pages (nor does
the Linus tree).
Muchun Song April 21, 2021, 1:39 p.m. UTC | #4
On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > >
> > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > to be out of balance.
> > > >
> > > > CPU0:                                   CPU1:
> > > >
> > > > objcg = get_obj_cgroup_from_current()
> > > > obj_cgroup_charge_pages(objcg)
> > > >                                         memcg_reparent_objcgs()
> > > >                                             // reparent to root_mem_cgroup
> > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > >     // memcg == root_mem_cgroup
> > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > >     // do not charge to the root_mem_cgroup
> > > >     try_charge(memcg)
> > > >
> > > > obj_cgroup_uncharge_pages(objcg)
> > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > >     // uncharge from the root_mem_cgroup
> > > >     page_counter_uncharge(&memcg->memory)
> > > >
> > > > This can cause the page counter to be less than the actual value,
> > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > is better to fix it.
> > >
> > > The changelog doesn't explain the fix and why you have chosen to charge
> > > kmem objects to root memcg and left all other try_charge users intact.
> >
> > The object cgroup is special (because the page can reparent). Only the
> > user of objcg APIs should be fixed.
> >
> > > The reason is likely that those are not reparented now but that just
> > > adds an inconsistency.
> > >
> > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > to check for the root memcg and bail out early?
> >
> > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > root memcg unconditionally. Why? Because some pages can be
> > reparented to root memcg, in order to ensure the correctness of
> > page counter of root memcg. We have to uncharge pages from
> > root memcg. So we do not check whether the page belongs to
> > the root memcg when it uncharges.
>
> I am not sure I follow. Let me ask differently. Wouldn't you
> achieve the same if you simply didn't uncharge root memcg in
> obj_cgroup_charge_pages?

I'm afraid not. Some pages should be uncharged from the root memcg
and some should not, but after reparenting they all belong to the
root memcg, so we cannot distinguish between the two at uncharge
time.

I believe Roman is very familiar with this mechanism (objcg APIs).

Hi Roman,

Any thoughts on this?

>
> Btw. which tree is this patch based on? The current linux-next doesn't
> uncharge from memcg->memory inside obj_cgroup_uncharge_pages (nor does
> the Linus tree).

Sorry, I should have given more details.

obj_cgroup_uncharge_pages
  refill_stock->drain_stock
    page_counter_uncharge  // uncharging is here
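
Roughly, the relevant part of drain_stock(), condensed (css refcounting
and other details omitted; exact code differs between versions):

static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (!old)
		return;

	if (stock->nr_pages) {
		/* no root check here either: this is where the root's
		 * memory counter ends up being decremented */
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		stock->nr_pages = 0;
	}

	stock->cached = NULL;
}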

Thanks.

> --
> Michal Hocko
> SUSE Labs
Roman Gushchin April 22, 2021, 12:57 a.m. UTC | #5
On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > >
> > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > to be out of balance.
> > > > >
> > > > > CPU0:                                   CPU1:
> > > > >
> > > > > objcg = get_obj_cgroup_from_current()
> > > > > obj_cgroup_charge_pages(objcg)
> > > > >                                         memcg_reparent_objcgs()
> > > > >                                             // reparent to root_mem_cgroup
> > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > >     // memcg == root_mem_cgroup
> > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > >     // do not charge to the root_mem_cgroup
> > > > >     try_charge(memcg)
> > > > >
> > > > > obj_cgroup_uncharge_pages(objcg)
> > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > >     // uncharge from the root_mem_cgroup
> > > > >     page_counter_uncharge(&memcg->memory)
> > > > >
> > > > > This can cause the page counter to be less than the actual value,
> > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > is better to fix it.
> > > >
> > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > kmem objects to root memcg and left all other try_charge users intact.
> > >
> > > The object cgroup is special (because the page can reparent). Only the
> > > user of objcg APIs should be fixed.
> > >
> > > > The reason is likely that those are not reparented now but that just
> > > > adds an inconsistency.
> > > >
> > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > to check for the root memcg and bail out early?
> > >
> > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > root memcg unconditionally. Why? Because some pages can be
> > > reparented to root memcg, in order to ensure the correctness of
> > > page counter of root memcg. We have to uncharge pages from
> > > root memcg. So we do not check whether the page belongs to
> > > the root memcg when it uncharges.
> >
> > I am not sure I follow. Let me ask differently. Wouldn't you
> > achieve the same if you simply didn't uncharge root memcg in
> > obj_cgroup_charge_pages?
> 
> I'm afraid not. Some pages should uncharge root memcg, some
> pages should not uncharge root memcg. But all those pages belong
> to the root memcg. We cannot distinguish between the two.
> 
> I believe Roman is very familiar with this mechanism (objcg APIs).
> 
> Hi Roman,
> 
> Any thoughts on this?

First, unfortunately we do export the root's counter on cgroup v1:
/sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
But we don't ignore these counters for the root mem cgroup, so there
are no bugs here. (Otherwise, please, reproduce it). So it's all about
the potential warning in page_counter_cancel().

The patch looks technically correct to me. Not sure about the __try_charge()
naming; we never use the "__" prefix to do something with the root_mem_cgroup.

The commit message should be more clear and mention the following:
get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
so we never explicitly charge the root_mem_cgroup. And it's not
going to change.
It's all about a race when we got an obj_cgroup pointing at some non-root
memcg, but before we were able to charge it, the cgroup was gone, objcg was
reparented to the root and so we're skipping the charging. Then we store the
objcg pointer and later use it to uncharge the root_mem_cgroup.

But honestly I'm not sure the problem is worth the time spent on the fix
and the discussion. It's a small race and it's generally hard to trigger
a kernel allocation racing with a cgroup deletion and then you need *a lot*
of such races and then maybe there will be a single warning printed without
*any* other consequences.

Thanks!
Muchun Song April 22, 2021, 3:47 a.m. UTC | #6
On Thu, Apr 22, 2021 at 8:57 AM Roman Gushchin <guro@fb.com> wrote:
>
> On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> > On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> > >
> > > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > >
> > > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > > to be out of balance.
> > > > > >
> > > > > > CPU0:                                   CPU1:
> > > > > >
> > > > > > objcg = get_obj_cgroup_from_current()
> > > > > > obj_cgroup_charge_pages(objcg)
> > > > > >                                         memcg_reparent_objcgs()
> > > > > >                                             // reparent to root_mem_cgroup
> > > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > > >     // memcg == root_mem_cgroup
> > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > >     // do not charge to the root_mem_cgroup
> > > > > >     try_charge(memcg)
> > > > > >
> > > > > > obj_cgroup_uncharge_pages(objcg)
> > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > >     // uncharge from the root_mem_cgroup
> > > > > >     page_counter_uncharge(&memcg->memory)
> > > > > >
> > > > > > This can cause the page counter to be less than the actual value,
> > > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > > is better to fix it.
> > > > >
> > > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > > kmem objects to root memcg and left all other try_charge users intact.
> > > >
> > > > The object cgroup is special (because the page can reparent). Only the
> > > > user of objcg APIs should be fixed.
> > > >
> > > > > The reason is likely that those are not reparented now but that just
> > > > > adds an inconsistency.
> > > > >
> > > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > > to check for the root memcg and bail out early?
> > > >
> > > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > > root memcg unconditionally. Why? Because some pages can be
> > > > reparented to root memcg, in order to ensure the correctness of
> > > > page counter of root memcg. We have to uncharge pages from
> > > > root memcg. So we do not check whether the page belongs to
> > > > the root memcg when it uncharges.
> > >
> > > I am not sure I follow. Let me ask differently. Wouldn't you
> > > achieve the same if you simply didn't uncharge root memcg in
> > > obj_cgroup_charge_pages?
> >
> > I'm afraid not. Some pages should uncharge root memcg, some
> > pages should not uncharge root memcg. But all those pages belong
> > to the root memcg. We cannot distinguish between the two.
> >
> > I believe Roman is very familiar with this mechanism (objcg APIs).
> >
> > Hi Roman,
> >
> > Any thoughts on this?
>
> First, unfortunately we do export the root's counter on cgroup v1:
> /sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
> But we don't ignore these counters for the root mem cgroup, so there
> are no bugs here. (Otherwise, please, reproduce it). So it's all about
> the potential warning in page_counter_cancel().

Right.

>
> The patch looks technically correct to me. Not sure about __try_charge()
> naming, we never use "__" prefix to do something with the root_mem_cgroup.
>
> The commit message should be more clear and mention the following:
> get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
> so we never explicitly charge the root_mem_cgroup. And it's not
> going to change.
> It's all about a race when we got an obj_cgroup pointing at some non-root
> memcg, but before we were able to charge it, the cgroup was gone, objcg was
> reparented to the root and so we're skipping the charging. Then we store the
> objcg pointer and later use to uncharge the root_mem_cgroup.

Very clear. Thanks.

>
> But honestly I'm not sure the problem is worth the time spent on the fix
> and the discussion. It's a small race and it's generally hard to trigger
> a kernel allocation racing with a cgroup deletion and then you need *a lot*
> of such races and then maybe there will be a single warning printed without
> *any* other consequences.

I agree the race is very small. The fix is easy but a little confusing
to some readers, so I want to hear other people's suggestions on whether
to fix it.

>
> Thanks!
Michal Hocko April 22, 2021, 8:44 a.m. UTC | #7
On Wed 21-04-21 17:57:49, Roman Gushchin wrote:
> On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> > On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> > >
> > > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > >
> > > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > > to be out of balance.
> > > > > >
> > > > > > CPU0:                                   CPU1:
> > > > > >
> > > > > > objcg = get_obj_cgroup_from_current()
> > > > > > obj_cgroup_charge_pages(objcg)
> > > > > >                                         memcg_reparent_objcgs()
> > > > > >                                             // reparent to root_mem_cgroup
> > > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > > >     // memcg == root_mem_cgroup
> > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > >     // do not charge to the root_mem_cgroup
> > > > > >     try_charge(memcg)
> > > > > >
> > > > > > obj_cgroup_uncharge_pages(objcg)
> > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > >     // uncharge from the root_mem_cgroup
> > > > > >     page_counter_uncharge(&memcg->memory)
> > > > > >
> > > > > > This can cause the page counter to be less than the actual value,
> > > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > > is better to fix it.
> > > > >
> > > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > > kmem objects to root memcg and left all other try_charge users intact.
> > > >
> > > > The object cgroup is special (because the page can reparent). Only the
> > > > user of objcg APIs should be fixed.
> > > >
> > > > > The reason is likely that those are not reparented now but that just
> > > > > adds an inconsistency.
> > > > >
> > > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > > to check for the root memcg and bail out early?
> > > >
> > > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > > root memcg unconditionally. Why? Because some pages can be
> > > > reparented to root memcg, in order to ensure the correctness of
> > > > page counter of root memcg. We have to uncharge pages from
> > > > root memcg. So we do not check whether the page belongs to
> > > > the root memcg when it uncharges.
> > >
> > > I am not sure I follow. Let me ask differently. Wouldn't you
> > > achieve the same if you simply didn't uncharge root memcg in
> > > obj_cgroup_charge_pages?
> > 
> > I'm afraid not. Some pages should uncharge root memcg, some
> > pages should not uncharge root memcg. But all those pages belong
> > to the root memcg. We cannot distinguish between the two.
> > 
> > I believe Roman is very familiar with this mechanism (objcg APIs).
> > 
> > Hi Roman,
> > 
> > Any thoughts on this?
> 
> First, unfortunately we do export the root's counter on cgroup v1:
> /sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
> But we don't ignore these counters for the root mem cgroup, so there
> are no bugs here. (Otherwise, please, reproduce it). So it's all about
> the potential warning in page_counter_cancel().
> 
> The patch looks technically correct to me. Not sure about __try_charge()
> naming, we never use "__" prefix to do something with the root_mem_cgroup.
> 
> The commit message should be more clear and mention the following:
> get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
> so we never explicitly charge the root_mem_cgroup. And it's not
> going to change.
> It's all about a race when we got an obj_cgroup pointing at some non-root
> memcg, but before we were able to charge it, the cgroup was gone, objcg was
> reparented to the root and so we're skipping the charging. Then we store the
> objcg pointer and later use to uncharge the root_mem_cgroup.
> 
> But honestly I'm not sure the problem is worth the time spent on the fix
> and the discussion. It's a small race and it's generally hard to trigger
> a kernel allocation racing with a cgroup deletion and then you need *a lot*
> of such races and then maybe there will be a single warning printed without
> *any* other consequences.

Thanks for the clarification, Roman! As I've said, I am not an obj-cgroup
accounting insider, but it would make some sense to opt out of accounting
in the uncharge path, just from a clarity point of view, to match the
charging path (rather than what the patch proposes, which special-cases
the charging path and makes it inconsistent with non-obj-cgroup tracking).
What do you think?
Roman Gushchin April 22, 2021, 6:37 p.m. UTC | #8
On Thu, Apr 22, 2021 at 10:44:43AM +0200, Michal Hocko wrote:
> On Wed 21-04-21 17:57:49, Roman Gushchin wrote:
> > On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> > > On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> > > >
> > > > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > > >
> > > > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > > > to be out of balance.
> > > > > > >
> > > > > > > CPU0:                                   CPU1:
> > > > > > >
> > > > > > > objcg = get_obj_cgroup_from_current()
> > > > > > > obj_cgroup_charge_pages(objcg)
> > > > > > >                                         memcg_reparent_objcgs()
> > > > > > >                                             // reparent to root_mem_cgroup
> > > > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > > > >     // memcg == root_mem_cgroup
> > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > >     // do not charge to the root_mem_cgroup
> > > > > > >     try_charge(memcg)
> > > > > > >
> > > > > > > obj_cgroup_uncharge_pages(objcg)
> > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > >     // uncharge from the root_mem_cgroup
> > > > > > >     page_counter_uncharge(&memcg->memory)
> > > > > > >
> > > > > > > This can cause the page counter to be less than the actual value,
> > > > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > > > is better to fix it.
> > > > > >
> > > > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > > > kmem objects to root memcg and left all other try_charge users intact.
> > > > >
> > > > > The object cgroup is special (because the page can reparent). Only the
> > > > > user of objcg APIs should be fixed.
> > > > >
> > > > > > The reason is likely that those are not reparented now but that just
> > > > > > adds an inconsistency.
> > > > > >
> > > > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > > > to check for the root memcg and bail out early?
> > > > >
> > > > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > > > root memcg unconditionally. Why? Because some pages can be
> > > > > reparented to root memcg, in order to ensure the correctness of
> > > > > page counter of root memcg. We have to uncharge pages from
> > > > > root memcg. So we do not check whether the page belongs to
> > > > > the root memcg when it uncharges.
> > > >
> > > > I am not sure I follow. Let me ask differently. Wouldn't you
> > > > achieve the same if you simply didn't uncharge root memcg in
> > > > obj_cgroup_charge_pages?
> > > 
> > > I'm afraid not. Some pages should uncharge root memcg, some
> > > pages should not uncharge root memcg. But all those pages belong
> > > to the root memcg. We cannot distinguish between the two.
> > > 
> > > I believe Roman is very familiar with this mechanism (objcg APIs).
> > > 
> > > Hi Roman,
> > > 
> > > Any thoughts on this?
> > 
> > First, unfortunately we do export the root's counter on cgroup v1:
> > /sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
> > But we don't ignore these counters for the root mem cgroup, so there
> > are no bugs here. (Otherwise, please, reproduce it). So it's all about
> > the potential warning in page_counter_cancel().
> > 
> > The patch looks technically correct to me. Not sure about __try_charge()
> > naming, we never use "__" prefix to do something with the root_mem_cgroup.
> > 
> > The commit message should be more clear and mention the following:
> > get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
> > so we never explicitly charge the root_mem_cgroup. And it's not
> > going to change.
> > It's all about a race when we got an obj_cgroup pointing at some non-root
> > memcg, but before we were able to charge it, the cgroup was gone, objcg was
> > reparented to the root and so we're skipping the charging. Then we store the
> > objcg pointer and later use to uncharge the root_mem_cgroup.
> > 
> > But honestly I'm not sure the problem is worth the time spent on the fix
> > and the discussion. It's a small race and it's generally hard to trigger
> > a kernel allocation racing with a cgroup deletion and then you need *a lot*
> > of such races and then maybe there will be a single warning printed without
> > *any* other consequences.
> 
> Thanks for the clarification Roman! As I've said I am not a obj-cgroup
> accounting insider but it would make some sense to opt out from
> accounting in the uncharge path just from clarity point of view to match
> the charging path (rather than what the patch is proposing and special
> case the charging path and make it inconsistent with non obj-cgroup
> tracking. What do you think?

I don't see how it's possible to opt out just for these bytes, but what we can
do is to stop propagating charges to the root mem cgroup in general. Not only
objcg-related, but all. That would even likely have some performance benefit.

The only downside is that we'll still need to propagate charges for the
cgroup v1 dedicated kmem and tcpmem counters, because those are exported
to userspace (for the root cgroup). So it will make the page counter code
more complicated.
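
For context, a condensed sketch of why charges reach the root at all:
page_counter_charge() (mm/page_counter.c) walks the counter's parent
chain, so every ancestor up to and including the root's counter is
updated. "Not propagating to the root" would mean stopping this walk one
level early (or not parenting counters to the root's), hence the extra
complexity for the v1 kmem/tcpmem counters that must keep doing it.
Watermark and protection updates are omitted here:

void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	/* every level of the hierarchy, root included, sees the charge */
	for (c = counter; c; c = c->parent)
		atomic_long_add(nr_pages, &c->usage);	/* the real code also
							   tracks watermarks */
}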
Roman Gushchin April 22, 2021, 6:53 p.m. UTC | #9
On Thu, Apr 22, 2021 at 11:47:05AM +0800, Muchun Song wrote:
> On Thu, Apr 22, 2021 at 8:57 AM Roman Gushchin <guro@fb.com> wrote:
> >
> > On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> > > On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> > > >
> > > > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > > >
> > > > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > > > to be out of balance.
> > > > > > >
> > > > > > > CPU0:                                   CPU1:
> > > > > > >
> > > > > > > objcg = get_obj_cgroup_from_current()
> > > > > > > obj_cgroup_charge_pages(objcg)
> > > > > > >                                         memcg_reparent_objcgs()
> > > > > > >                                             // reparent to root_mem_cgroup
> > > > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > > > >     // memcg == root_mem_cgroup
> > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > >     // do not charge to the root_mem_cgroup
> > > > > > >     try_charge(memcg)
> > > > > > >
> > > > > > > obj_cgroup_uncharge_pages(objcg)
> > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > >     // uncharge from the root_mem_cgroup
> > > > > > >     page_counter_uncharge(&memcg->memory)
> > > > > > >
> > > > > > > This can cause the page counter to be less than the actual value,
> > > > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > > > is better to fix it.
> > > > > >
> > > > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > > > kmem objects to root memcg and left all other try_charge users intact.
> > > > >
> > > > > The object cgroup is special (because the page can reparent). Only the
> > > > > user of objcg APIs should be fixed.
> > > > >
> > > > > > The reason is likely that those are not reparented now but that just
> > > > > > adds an inconsistency.
> > > > > >
> > > > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > > > to check for the root memcg and bail out early?
> > > > >
> > > > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > > > root memcg unconditionally. Why? Because some pages can be
> > > > > reparented to root memcg, in order to ensure the correctness of
> > > > > page counter of root memcg. We have to uncharge pages from
> > > > > root memcg. So we do not check whether the page belongs to
> > > > > the root memcg when it uncharges.
> > > >
> > > > I am not sure I follow. Let me ask differently. Wouldn't you
> > > > achieve the same if you simply didn't uncharge root memcg in
> > > > obj_cgroup_charge_pages?
> > >
> > > I'm afraid not. Some pages should uncharge root memcg, some
> > > pages should not uncharge root memcg. But all those pages belong
> > > to the root memcg. We cannot distinguish between the two.
> > >
> > > I believe Roman is very familiar with this mechanism (objcg APIs).
> > >
> > > Hi Roman,
> > >
> > > Any thoughts on this?
> >
> > First, unfortunately we do export the root's counter on cgroup v1:
> > /sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
> > But we don't ignore these counters for the root mem cgroup, so there
> > are no bugs here. (Otherwise, please, reproduce it). So it's all about
> > the potential warning in page_counter_cancel().
> 
> Right.
> 
> >
> > The patch looks technically correct to me. Not sure about __try_charge()
> > naming, we never use "__" prefix to do something with the root_mem_cgroup.
> >
> > The commit message should be more clear and mention the following:
> > get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
> > so we never explicitly charge the root_mem_cgroup. And it's not
> > going to change.
> > It's all about a race when we got an obj_cgroup pointing at some non-root
> > memcg, but before we were able to charge it, the cgroup was gone, objcg was
> > reparented to the root and so we're skipping the charging. Then we store the
> > objcg pointer and later use to uncharge the root_mem_cgroup.
> 
> Very clear. Thanks.
> 
> >
> > But honestly I'm not sure the problem is worth the time spent on the fix
> > and the discussion. It's a small race and it's generally hard to trigger
> > a kernel allocation racing with a cgroup deletion and then you need *a lot*
> > of such races and then maybe there will be a single warning printed without
> > *any* other consequences.
> 
> I agree the race is very small. Since the fix is easy, but a little confusing
> to someone. I want to hear other people's suggestions on whether to fix it.

I'm not opposed to fixing this issue. But, __please__, make sure you
include all the necessary information in the commit log.

Thanks!
Muchun Song April 23, 2021, 8:20 a.m. UTC | #10
On Fri, Apr 23, 2021 at 2:53 AM Roman Gushchin <guro@fb.com> wrote:
>
> On Thu, Apr 22, 2021 at 11:47:05AM +0800, Muchun Song wrote:
> > On Thu, Apr 22, 2021 at 8:57 AM Roman Gushchin <guro@fb.com> wrote:
> > >
> > > On Wed, Apr 21, 2021 at 09:39:03PM +0800, Muchun Song wrote:
> > > > On Wed, Apr 21, 2021 at 9:03 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > >
> > > > > On Wed 21-04-21 17:50:06, Muchun Song wrote:
> > > > > > On Wed, Apr 21, 2021 at 3:34 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > > > >
> > > > > > > On Wed 21-04-21 14:26:44, Muchun Song wrote:
> > > > > > > > The below scenario can cause the page counters of the root_mem_cgroup
> > > > > > > > to be out of balance.
> > > > > > > >
> > > > > > > > CPU0:                                   CPU1:
> > > > > > > >
> > > > > > > > objcg = get_obj_cgroup_from_current()
> > > > > > > > obj_cgroup_charge_pages(objcg)
> > > > > > > >                                         memcg_reparent_objcgs()
> > > > > > > >                                             // reparent to root_mem_cgroup
> > > > > > > >                                             WRITE_ONCE(iter->memcg, parent)
> > > > > > > >     // memcg == root_mem_cgroup
> > > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > > >     // do not charge to the root_mem_cgroup
> > > > > > > >     try_charge(memcg)
> > > > > > > >
> > > > > > > > obj_cgroup_uncharge_pages(objcg)
> > > > > > > >     memcg = get_mem_cgroup_from_objcg(objcg)
> > > > > > > >     // uncharge from the root_mem_cgroup
> > > > > > > >     page_counter_uncharge(&memcg->memory)
> > > > > > > >
> > > > > > > > This can cause the page counter to be less than the actual value,
> > > > > > > > Although we do not display the value (mem_cgroup_usage) so there
> > > > > > > > shouldn't be any actual problem, but there is a WARN_ON_ONCE in
> > > > > > > > the page_counter_cancel(). Who knows if it will trigger? So it
> > > > > > > > is better to fix it.
> > > > > > >
> > > > > > > The changelog doesn't explain the fix and why you have chosen to charge
> > > > > > > kmem objects to root memcg and left all other try_charge users intact.
> > > > > >
> > > > > > The object cgroup is special (because the page can reparent). Only the
> > > > > > user of objcg APIs should be fixed.
> > > > > >
> > > > > > > The reason is likely that those are not reparented now but that just
> > > > > > > adds an inconsistency.
> > > > > > >
> > > > > > > Is there any reason you haven't simply matched obj_cgroup_uncharge_pages
> > > > > > > to check for the root memcg and bail out early?
> > > > > >
> > > > > > Because obj_cgroup_uncharge_pages() uncharges pages from the
> > > > > > root memcg unconditionally. Why? Because some pages can be
> > > > > > reparented to root memcg, in order to ensure the correctness of
> > > > > > page counter of root memcg. We have to uncharge pages from
> > > > > > root memcg. So we do not check whether the page belongs to
> > > > > > the root memcg when it uncharges.
> > > > >
> > > > > I am not sure I follow. Let me ask differently. Wouldn't you
> > > > > achieve the same if you simply didn't uncharge root memcg in
> > > > > obj_cgroup_charge_pages?
> > > >
> > > > I'm afraid not. Some pages should uncharge root memcg, some
> > > > pages should not uncharge root memcg. But all those pages belong
> > > > to the root memcg. We cannot distinguish between the two.
> > > >
> > > > I believe Roman is very familiar with this mechanism (objcg APIs).
> > > >
> > > > Hi Roman,
> > > >
> > > > Any thoughts on this?
> > >
> > > First, unfortunately we do export the root's counter on cgroup v1:
> > > /sys/fs/cgroup/memory/memory.kmem.usage_in_bytes
> > > But we don't ignore these counters for the root mem cgroup, so there
> > > are no bugs here. (Otherwise, please, reproduce it). So it's all about
> > > the potential warning in page_counter_cancel().
> >
> > Right.
> >
> > >
> > > The patch looks technically correct to me. Not sure about __try_charge()
> > > naming, we never use "__" prefix to do something with the root_mem_cgroup.
> > >
> > > The commit message should be more clear and mention the following:
> > > get_obj_cgroup_from_current() never returns a root_mem_cgroup's objcg,
> > > so we never explicitly charge the root_mem_cgroup. And it's not
> > > going to change.
> > > It's all about a race when we got an obj_cgroup pointing at some non-root
> > > memcg, but before we were able to charge it, the cgroup was gone, objcg was
> > > reparented to the root and so we're skipping the charging. Then we store the
> > > objcg pointer and later use to uncharge the root_mem_cgroup.
> >
> > Very clear. Thanks.
> >
> > >
> > > But honestly I'm not sure the problem is worth the time spent on the fix
> > > and the discussion. It's a small race and it's generally hard to trigger
> > > a kernel allocation racing with a cgroup deletion and then you need *a lot*
> > > of such races and then maybe there will be a single warning printed without
> > > *any* other consequences.
> >
> > I agree the race is very small. Since the fix is easy, but a little confusing
> > to someone. I want to hear other people's suggestions on whether to fix it.
>
> I'm not opposing the idea to fix this issue. But, __please__, make sure you
> include all necessary information into the commit log.

Got it. Thanks Roman.

>
> Thanks!

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1e68a9992b01..81b54bd9b9e0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2686,8 +2686,8 @@  void mem_cgroup_handle_over_high(void)
 	css_put(&memcg->css);
 }
 
-static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
-		      unsigned int nr_pages)
+static int __try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+			unsigned int nr_pages)
 {
 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
 	int nr_retries = MAX_RECLAIM_RETRIES;
@@ -2699,8 +2699,6 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	bool drained = false;
 	unsigned long pflags;
 
-	if (mem_cgroup_is_root(memcg))
-		return 0;
 retry:
 	if (consume_stock(memcg, nr_pages))
 		return 0;
@@ -2880,6 +2878,15 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	return 0;
 }
 
+static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+			     unsigned int nr_pages)
+{
+	if (mem_cgroup_is_root(memcg))
+		return 0;
+
+	return __try_charge(memcg, gfp_mask, nr_pages);
+}
+
 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
@@ -3125,7 +3132,7 @@  static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
 
 	memcg = get_mem_cgroup_from_objcg(objcg);
 
-	ret = try_charge(memcg, gfp, nr_pages);
+	ret = __try_charge(memcg, gfp, nr_pages);
 	if (ret)
 		goto out;