
[v2,1/2] hugetlb: memcg: account hugetlb-backed memory in memory controller

Message ID 20230928005723.1709119-2-nphamcs@gmail.com
State New
Series hugetlb memcg accounting

Commit Message

Nhat Pham Sept. 28, 2023, 12:57 a.m. UTC
Currently, hugetlb memory usage is not accounted for in the memory
controller, which could lead to memory overprotection for cgroups with
hugetlb-backed memory. This has been observed in our production system.

This patch rectifies this issue by charging the memcg when the hugetlb
folio is allocated, and uncharging when the folio is freed (analogous to
the hugetlb controller).

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 Documentation/admin-guide/cgroup-v2.rst |  9 ++++++
 fs/hugetlbfs/inode.c                    |  2 +-
 include/linux/cgroup-defs.h             |  5 +++
 include/linux/hugetlb.h                 |  6 ++--
 include/linux/memcontrol.h              |  8 +++++
 kernel/cgroup/cgroup.c                  | 15 ++++++++-
 mm/hugetlb.c                            | 23 ++++++++++----
 mm/memcontrol.c                         | 41 +++++++++++++++++++++++++
 8 files changed, 99 insertions(+), 10 deletions(-)
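
As a quick illustration of the intended behavior (a user-space sketch, not part of the patch; it assumes cgroup v2 is mounted with the new memory_hugetlb_accounting option, a 2MiB default hugepage size, and an available hugetlb pool):

/*
 * Illustration only (not from the patch): with memory_hugetlb_accounting
 * enabled, the hugetlb page faulted in below is charged to this task's
 * memcg (visible in memory.current) and uncharged again when the mapping
 * is torn down and the folio is freed.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;	/* one 2MiB hugetlb page (assumed default size) */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");		/* e.g. no hugetlb pages reserved */
		return 1;
	}
	memset(p, 0, len);	/* fault -> alloc_hugetlb_folio() -> memcg charge */
	getchar();		/* inspect memory.current of this cgroup now */
	munmap(p, len);		/* free_huge_folio() -> mem_cgroup_uncharge() */
	return 0;
}

Without the memory_hugetlb_accounting mount option, the same fault is not charged, which matches the documentation hunk quoted below.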

Comments

Frank van der Linden Sept. 28, 2023, 10:59 p.m. UTC | #1
On Wed, Sep 27, 2023 at 5:57 PM Nhat Pham <nphamcs@gmail.com> wrote:

> Currently, hugetlb memory usage is not acounted for in the memory
> controller, which could lead to memory overprotection for cgroups with
> hugetlb-backed memory. This has been observed in our production system.
>
> This patch rectifies this issue by charging the memcg when the hugetlb
> folio is allocated, and uncharging when the folio is freed (analogous to
> the hugetlb controller).
>
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> ---
>  Documentation/admin-guide/cgroup-v2.rst |  9 ++++++
>  fs/hugetlbfs/inode.c                    |  2 +-
>  include/linux/cgroup-defs.h             |  5 +++
>  include/linux/hugetlb.h                 |  6 ++--
>  include/linux/memcontrol.h              |  8 +++++
>  kernel/cgroup/cgroup.c                  | 15 ++++++++-
>  mm/hugetlb.c                            | 23 ++++++++++----
>  mm/memcontrol.c                         | 41 +++++++++++++++++++++++++
>  8 files changed, 99 insertions(+), 10 deletions(-)
>
> diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
> index 622a7f28db1f..e6267b8cbd1d 100644
> --- a/Documentation/admin-guide/cgroup-v2.rst
> +++ b/Documentation/admin-guide/cgroup-v2.rst
> @@ -210,6 +210,15 @@ cgroup v2 currently supports the following mount options.
>          relying on the original semantics (e.g. specifying bogusly
>          high 'bypass' protection values at higher tree levels).
>
> +  memory_hugetlb_accounting
> +        Count hugetlb memory usage towards the cgroup's overall
> +        memory usage for the memory controller. This is a new behavior
> +        that could regress existing setups, so it must be explicitly
> +        opted in with this mount option. Note that hugetlb pages
> +        allocated while this option is not selected will not be
> +        tracked by the memory controller (even if cgroup v2 is
> +        remounted later on).
> +
>
>  Organizing Processes and Threads
>  --------------------------------
> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> index 60fce26ff937..034967319955 100644
> --- a/fs/hugetlbfs/inode.c
> +++ b/fs/hugetlbfs/inode.c
> @@ -902,7 +902,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
>                  * to keep reservation accounting consistent.
>                  */
>                 hugetlb_set_vma_policy(&pseudo_vma, inode, index);
> -               folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
> +               folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0, true);
>                 hugetlb_drop_vma_policy(&pseudo_vma);
>                 if (IS_ERR(folio)) {
>                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
> index f1b3151ac30b..8641f4320c98 100644
> --- a/include/linux/cgroup-defs.h
> +++ b/include/linux/cgroup-defs.h
> @@ -115,6 +115,11 @@ enum {
>          * Enable recursive subtree protection
>          */
>         CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
> +
> +       /*
> +        * Enable hugetlb accounting for the memory controller.
> +        */
> +        CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
>  };
>
>  /* cftype->flags */
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index a30686e649f7..9b73db1605a2 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -713,7 +713,8 @@ struct huge_bootmem_page {
>
>  int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
>  struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
> -                               unsigned long addr, int avoid_reserve);
> +                               unsigned long addr, int avoid_reserve,
> +                               bool restore_reserve_on_memcg_failure);
>  struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
>                                 nodemask_t *nmask, gfp_t gfp_mask);
>  struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
> @@ -1016,7 +1017,8 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
>
>  static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>                                            unsigned long addr,
> -                                          int avoid_reserve)
> +                                          int avoid_reserve,
> +                                          bool restore_reserve_on_memcg_failure)
>  {
>         return NULL;
>  }
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index e0cfab58ab71..8094679c99dd 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -677,6 +677,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
>         return __mem_cgroup_charge(folio, mm, gfp);
>  }
>
> +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp);
> +
>  int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
>                                   gfp_t gfp, swp_entry_t entry);
>  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
> @@ -1251,6 +1253,12 @@ static inline int mem_cgroup_charge(struct folio *folio,
>         return 0;
>  }
>
> +static inline int mem_cgroup_hugetlb_charge_folio(struct folio *folio,
> +               gfp_t gfp)
> +{
> +       return 0;
> +}
> +
>  static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
>                         struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
>  {
> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
> index 1fb7f562289d..f11488b18ceb 100644
> --- a/kernel/cgroup/cgroup.c
> +++ b/kernel/cgroup/cgroup.c
> @@ -1902,6 +1902,7 @@ enum cgroup2_param {
>         Opt_favordynmods,
>         Opt_memory_localevents,
>         Opt_memory_recursiveprot,
> +       Opt_memory_hugetlb_accounting,
>         nr__cgroup2_params
>  };
>
> @@ -1910,6 +1911,7 @@ static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
>         fsparam_flag("favordynmods",            Opt_favordynmods),
>         fsparam_flag("memory_localevents",      Opt_memory_localevents),
>         fsparam_flag("memory_recursiveprot",    Opt_memory_recursiveprot),
> +       fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
>         {}
>  };
>
> @@ -1936,6 +1938,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
>         case Opt_memory_recursiveprot:
>                 ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
>                 return 0;
> +       case Opt_memory_hugetlb_accounting:
> +               ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
> +               return 0;
>         }
>         return -EINVAL;
>  }
> @@ -1960,6 +1965,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
>                         cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
>                 else
>                         cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
> +
> +               if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
> +                       cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
> +               else
> +                       cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
>         }
>  }
>
> @@ -1973,6 +1983,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
>                 seq_puts(seq, ",memory_localevents");
>         if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
>                 seq_puts(seq, ",memory_recursiveprot");
> +       if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
> +               seq_puts(seq, ",memory_hugetlb_accounting");
>         return 0;
>  }
>
> @@ -7050,7 +7062,8 @@ static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
>                         "nsdelegate\n"
>                         "favordynmods\n"
>                         "memory_localevents\n"
> -                       "memory_recursiveprot\n");
> +                       "memory_recursiveprot\n"
> +                       "memory_hugetlb_accounting\n");
>  }
>  static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index de220e3ff8be..ff88ea4df11a 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1902,6 +1902,7 @@ void free_huge_folio(struct folio *folio)
>                                      pages_per_huge_page(h), folio);
>         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>                                           pages_per_huge_page(h), folio);
> +       mem_cgroup_uncharge(folio);
>         if (restore_reserve)
>                 h->resv_huge_pages++;
>
> @@ -3004,7 +3005,8 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
>  }
>
>  struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
> -                                   unsigned long addr, int avoid_reserve)
> +                                       unsigned long addr, int avoid_reserve,
> +                                       bool restore_reserve_on_memcg_failure)
>  {
>         struct hugepage_subpool *spool = subpool_vma(vma);
>         struct hstate *h = hstate_vma(vma);
> @@ -3119,6 +3121,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>                         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>                                         pages_per_huge_page(h), folio);
>         }
> +
> +       /* undo allocation if memory controller disallows it. */
> +       if (mem_cgroup_hugetlb_charge_folio(folio, GFP_KERNEL)) {
> +               if (restore_reserve_on_memcg_failure)
> +                       restore_reserve_on_error(h, vma, addr, folio);
> +               folio_put(folio);
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
>         return folio;
>
>  out_uncharge_cgroup:
> @@ -5179,7 +5190,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>                                 spin_unlock(src_ptl);
>                                 spin_unlock(dst_ptl);
>                                 /* Do not use reserve as it's private owned */
> -                               new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
> +                               new_folio = alloc_hugetlb_folio(dst_vma, addr, 1, false);
>                                 if (IS_ERR(new_folio)) {
>                                         folio_put(pte_folio);
>                                         ret = PTR_ERR(new_folio);
> @@ -5656,7 +5667,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
>          * be acquired again before returning to the caller, as expected.
>          */
>         spin_unlock(ptl);
> -       new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
> +       new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve, true);
>
>         if (IS_ERR(new_folio)) {
>                 /*
> @@ -5930,7 +5941,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>                                                         VM_UFFD_MISSING);
>                 }
>
> -               folio = alloc_hugetlb_folio(vma, haddr, 0);
> +               folio = alloc_hugetlb_folio(vma, haddr, 0, true);
>                 if (IS_ERR(folio)) {
>                         /*
>                          * Returning error will result in faulting task being
> @@ -6352,7 +6363,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>                         goto out;
>                 }
>
> -               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> +               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, true);
>                 if (IS_ERR(folio)) {
>                         ret = -ENOMEM;
>                         goto out;
> @@ -6394,7 +6405,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>                         goto out;
>                 }
>
> -               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> +               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, false);
>                 if (IS_ERR(folio)) {
>                         folio_put(*foliop);
>                         ret = -ENOMEM;
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index d1a322a75172..d5dfc9b36acb 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -7050,6 +7050,47 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
>         return ret;
>  }
>
> +static struct mem_cgroup *get_mem_cgroup_from_current(void)
> +{
> +       struct mem_cgroup *memcg;
> +
> +again:
> +       rcu_read_lock();
> +       memcg = mem_cgroup_from_task(current);
> +       if (!css_tryget(&memcg->css)) {
> +               rcu_read_unlock();
> +               goto again;
> +       }
> +       rcu_read_unlock();
> +       return memcg;
> +}
> +
> +/**
> + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
> + * @folio: folio to charge.
> + * @gfp: reclaim mode
> + *
> + * This function charges an allocated hugetlbf folio to the memcg of the
> + * current task.
> + *
> + * Returns 0 on success. Otherwise, an error code is returned.
> + */
> +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
> +{
> +       struct mem_cgroup *memcg;
> +       int ret;
> +
> +       if (mem_cgroup_disabled() ||
> +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
> +               return 0;
> +
> +       memcg = get_mem_cgroup_from_current();
> +       ret = charge_memcg(folio, memcg, gfp);
> +       mem_cgroup_put(memcg);
> +
> +       return ret;
> +}
> +
>  /**
>   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
>   * @folio: folio to charge.
> --
> 2.34.1
>
>
With the mount option added, I'm fine with this. There are reasons to want
and reasons not to want this, so everybody's happy!

Out of curiosity: is anyone aware of any code that may behave badly when
folio_memcg(hugetlb_folio) != NULL, not expecting it?

- Frank
Nhat Pham Sept. 29, 2023, 12:33 a.m. UTC | #2
On Thu, Sep 28, 2023 at 3:59 PM Frank van der Linden <fvdl@google.com> wrote:
>
> On Wed, Sep 27, 2023 at 5:57 PM Nhat Pham <nphamcs@gmail.com> wrote:
>>
>> Currently, hugetlb memory usage is not acounted for in the memory
>> controller, which could lead to memory overprotection for cgroups with
>> hugetlb-backed memory. This has been observed in our production system.
>>
>> This patch rectifies this issue by charging the memcg when the hugetlb
>> folio is allocated, and uncharging when the folio is freed (analogous to
>> the hugetlb controller).
>>
>> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
>> ---
>>  Documentation/admin-guide/cgroup-v2.rst |  9 ++++++
>>  fs/hugetlbfs/inode.c                    |  2 +-
>>  include/linux/cgroup-defs.h             |  5 +++
>>  include/linux/hugetlb.h                 |  6 ++--
>>  include/linux/memcontrol.h              |  8 +++++
>>  kernel/cgroup/cgroup.c                  | 15 ++++++++-
>>  mm/hugetlb.c                            | 23 ++++++++++----
>>  mm/memcontrol.c                         | 41 +++++++++++++++++++++++++
>>  8 files changed, 99 insertions(+), 10 deletions(-)
>>
>> diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
>> index 622a7f28db1f..e6267b8cbd1d 100644
>> --- a/Documentation/admin-guide/cgroup-v2.rst
>> +++ b/Documentation/admin-guide/cgroup-v2.rst
>> @@ -210,6 +210,15 @@ cgroup v2 currently supports the following mount options.
>>          relying on the original semantics (e.g. specifying bogusly
>>          high 'bypass' protection values at higher tree levels).
>>
>> +  memory_hugetlb_accounting
>> +        Count hugetlb memory usage towards the cgroup's overall
>> +        memory usage for the memory controller. This is a new behavior
>> +        that could regress existing setups, so it must be explicitly
>> +        opted in with this mount option. Note that hugetlb pages
>> +        allocated while this option is not selected will not be
>> +        tracked by the memory controller (even if cgroup v2 is
>> +        remounted later on).
>> +
>>
>>  Organizing Processes and Threads
>>  --------------------------------
>> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
>> index 60fce26ff937..034967319955 100644
>> --- a/fs/hugetlbfs/inode.c
>> +++ b/fs/hugetlbfs/inode.c
>> @@ -902,7 +902,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
>>                  * to keep reservation accounting consistent.
>>                  */
>>                 hugetlb_set_vma_policy(&pseudo_vma, inode, index);
>> -               folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
>> +               folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0, true);
>>                 hugetlb_drop_vma_policy(&pseudo_vma);
>>                 if (IS_ERR(folio)) {
>>                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>> diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
>> index f1b3151ac30b..8641f4320c98 100644
>> --- a/include/linux/cgroup-defs.h
>> +++ b/include/linux/cgroup-defs.h
>> @@ -115,6 +115,11 @@ enum {
>>          * Enable recursive subtree protection
>>          */
>>         CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
>> +
>> +       /*
>> +        * Enable hugetlb accounting for the memory controller.
>> +        */
>> +        CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
>>  };
>>
>>  /* cftype->flags */
>> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
>> index a30686e649f7..9b73db1605a2 100644
>> --- a/include/linux/hugetlb.h
>> +++ b/include/linux/hugetlb.h
>> @@ -713,7 +713,8 @@ struct huge_bootmem_page {
>>
>>  int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
>>  struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>> -                               unsigned long addr, int avoid_reserve);
>> +                               unsigned long addr, int avoid_reserve,
>> +                               bool restore_reserve_on_memcg_failure);
>>  struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
>>                                 nodemask_t *nmask, gfp_t gfp_mask);
>>  struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
>> @@ -1016,7 +1017,8 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
>>
>>  static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>>                                            unsigned long addr,
>> -                                          int avoid_reserve)
>> +                                          int avoid_reserve,
>> +                                          bool restore_reserve_on_memcg_failure)
>>  {
>>         return NULL;
>>  }
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index e0cfab58ab71..8094679c99dd 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -677,6 +677,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
>>         return __mem_cgroup_charge(folio, mm, gfp);
>>  }
>>
>> +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp);
>> +
>>  int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
>>                                   gfp_t gfp, swp_entry_t entry);
>>  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
>> @@ -1251,6 +1253,12 @@ static inline int mem_cgroup_charge(struct folio *folio,
>>         return 0;
>>  }
>>
>> +static inline int mem_cgroup_hugetlb_charge_folio(struct folio *folio,
>> +               gfp_t gfp)
>> +{
>> +       return 0;
>> +}
>> +
>>  static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
>>                         struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
>>  {
>> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
>> index 1fb7f562289d..f11488b18ceb 100644
>> --- a/kernel/cgroup/cgroup.c
>> +++ b/kernel/cgroup/cgroup.c
>> @@ -1902,6 +1902,7 @@ enum cgroup2_param {
>>         Opt_favordynmods,
>>         Opt_memory_localevents,
>>         Opt_memory_recursiveprot,
>> +       Opt_memory_hugetlb_accounting,
>>         nr__cgroup2_params
>>  };
>>
>> @@ -1910,6 +1911,7 @@ static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
>>         fsparam_flag("favordynmods",            Opt_favordynmods),
>>         fsparam_flag("memory_localevents",      Opt_memory_localevents),
>>         fsparam_flag("memory_recursiveprot",    Opt_memory_recursiveprot),
>> +       fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
>>         {}
>>  };
>>
>> @@ -1936,6 +1938,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
>>         case Opt_memory_recursiveprot:
>>                 ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
>>                 return 0;
>> +       case Opt_memory_hugetlb_accounting:
>> +               ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
>> +               return 0;
>>         }
>>         return -EINVAL;
>>  }
>> @@ -1960,6 +1965,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
>>                         cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
>>                 else
>>                         cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
>> +
>> +               if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
>> +                       cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
>> +               else
>> +                       cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
>>         }
>>  }
>>
>> @@ -1973,6 +1983,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
>>                 seq_puts(seq, ",memory_localevents");
>>         if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
>>                 seq_puts(seq, ",memory_recursiveprot");
>> +       if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
>> +               seq_puts(seq, ",memory_hugetlb_accounting");
>>         return 0;
>>  }
>>
>> @@ -7050,7 +7062,8 @@ static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
>>                         "nsdelegate\n"
>>                         "favordynmods\n"
>>                         "memory_localevents\n"
>> -                       "memory_recursiveprot\n");
>> +                       "memory_recursiveprot\n"
>> +                       "memory_hugetlb_accounting\n");
>>  }
>>  static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
>>
>> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
>> index de220e3ff8be..ff88ea4df11a 100644
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -1902,6 +1902,7 @@ void free_huge_folio(struct folio *folio)
>>                                      pages_per_huge_page(h), folio);
>>         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>>                                           pages_per_huge_page(h), folio);
>> +       mem_cgroup_uncharge(folio);
>>         if (restore_reserve)
>>                 h->resv_huge_pages++;
>>
>> @@ -3004,7 +3005,8 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
>>  }
>>
>>  struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>> -                                   unsigned long addr, int avoid_reserve)
>> +                                       unsigned long addr, int avoid_reserve,
>> +                                       bool restore_reserve_on_memcg_failure)
>>  {
>>         struct hugepage_subpool *spool = subpool_vma(vma);
>>         struct hstate *h = hstate_vma(vma);
>> @@ -3119,6 +3121,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>>                         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>>                                         pages_per_huge_page(h), folio);
>>         }
>> +
>> +       /* undo allocation if memory controller disallows it. */
>> +       if (mem_cgroup_hugetlb_charge_folio(folio, GFP_KERNEL)) {
>> +               if (restore_reserve_on_memcg_failure)
>> +                       restore_reserve_on_error(h, vma, addr, folio);
>> +               folio_put(folio);
>> +               return ERR_PTR(-ENOMEM);
>> +       }
>> +
>>         return folio;
>>
>>  out_uncharge_cgroup:
>> @@ -5179,7 +5190,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>>                                 spin_unlock(src_ptl);
>>                                 spin_unlock(dst_ptl);
>>                                 /* Do not use reserve as it's private owned */
>> -                               new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
>> +                               new_folio = alloc_hugetlb_folio(dst_vma, addr, 1, false);
>>                                 if (IS_ERR(new_folio)) {
>>                                         folio_put(pte_folio);
>>                                         ret = PTR_ERR(new_folio);
>> @@ -5656,7 +5667,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
>>          * be acquired again before returning to the caller, as expected.
>>          */
>>         spin_unlock(ptl);
>> -       new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
>> +       new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve, true);
>>
>>         if (IS_ERR(new_folio)) {
>>                 /*
>> @@ -5930,7 +5941,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>>                                                         VM_UFFD_MISSING);
>>                 }
>>
>> -               folio = alloc_hugetlb_folio(vma, haddr, 0);
>> +               folio = alloc_hugetlb_folio(vma, haddr, 0, true);
>>                 if (IS_ERR(folio)) {
>>                         /*
>>                          * Returning error will result in faulting task being
>> @@ -6352,7 +6363,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>>                         goto out;
>>                 }
>>
>> -               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
>> +               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, true);
>>                 if (IS_ERR(folio)) {
>>                         ret = -ENOMEM;
>>                         goto out;
>> @@ -6394,7 +6405,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>>                         goto out;
>>                 }
>>
>> -               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
>> +               folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, false);
>>                 if (IS_ERR(folio)) {
>>                         folio_put(*foliop);
>>                         ret = -ENOMEM;
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index d1a322a75172..d5dfc9b36acb 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -7050,6 +7050,47 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
>>         return ret;
>>  }
>>
>> +static struct mem_cgroup *get_mem_cgroup_from_current(void)
>> +{
>> +       struct mem_cgroup *memcg;
>> +
>> +again:
>> +       rcu_read_lock();
>> +       memcg = mem_cgroup_from_task(current);
>> +       if (!css_tryget(&memcg->css)) {
>> +               rcu_read_unlock();
>> +               goto again;
>> +       }
>> +       rcu_read_unlock();
>> +       return memcg;
>> +}
>> +
>> +/**
>> + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
>> + * @folio: folio to charge.
>> + * @gfp: reclaim mode
>> + *
>> + * This function charges an allocated hugetlbf folio to the memcg of the
>> + * current task.
>> + *
>> + * Returns 0 on success. Otherwise, an error code is returned.
>> + */
>> +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
>> +{
>> +       struct mem_cgroup *memcg;
>> +       int ret;
>> +
>> +       if (mem_cgroup_disabled() ||
>> +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
>> +               return 0;
>> +
>> +       memcg = get_mem_cgroup_from_current();
>> +       ret = charge_memcg(folio, memcg, gfp);
>> +       mem_cgroup_put(memcg);
>> +
>> +       return ret;
>> +}
>> +
>>  /**
>>   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
>>   * @folio: folio to charge.
>> --
>> 2.34.1
>>
>
> With the mount option added, I'm fine with this. There are reasons to want and reasons not to want this, so everybody's happy!

And the default is no accounting, so this should have no impact on existing setups!

>
> Out of curiosity: is anyone aware of any code that may behave badly when folio_memcg(hugetlb_folio) != NULL, not expecting it?

Good point. My understanding of the memory controller mechanism
is that it should be fine - we're just essentially storing some memcg
metadata in the struct folio, and then charging values towards the
memcg counters. I don't think we fiddle with anything else in
the folio itself that could be ruinous?

I also did my best to trace the code paths that go through
alloc_hugetlb_folio and free_huge_folio (the places where charging
and uncharging happens) to make sure no funny business is going
on, and it seems a lot of these paths have special, dedicated handling
for hugetlb folios. The usual pattern is checking if the folio is a hugetlb
one first, so we're unlikely to even call folio_memcg on a hugetlb
folio in existing code in the first place.
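
A rough sketch of that pattern (illustration only; the function and call site are hypothetical, but folio_test_hugetlb() and folio_memcg() are the existing helpers involved):

#include <linux/memcontrol.h>
#include <linux/page-flags.h>

/*
 * Hypothetical call site: paths that can see both kinds of folios tend to
 * branch on the hugetlb check before any memcg-aware handling, so
 * folio_memcg() is rarely reached for hugetlb folios today.
 */
static void example_handle_folio(struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (folio_test_hugetlb(folio)) {
		/* dedicated hugetlb handling, historically memcg-unaware */
		return;
	}

	/* regular folio path: memcg metadata is expected here */
	memcg = folio_memcg(folio);
	if (memcg) {
		/* per-memcg stats, LRU placement, etc. */
	}
}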

But if anyone knows something I missed, please let me know!
And feel free to loop more people in if there's anyone I missed in
the cc list :)

>
> - Frank
Yosry Ahmed Sept. 29, 2023, 12:38 a.m. UTC | #3
<snip>

>
> +
> +/**
> + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
> + * @folio: folio to charge.
> + * @gfp: reclaim mode
> + *
> + * This function charges an allocated hugetlbf folio to the memcg of the
> + * current task.
> + *
> + * Returns 0 on success. Otherwise, an error code is returned.
> + */
> +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
> +{
> +       struct mem_cgroup *memcg;
> +       int ret;
> +
> +       if (mem_cgroup_disabled() ||
> +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))

What happens if the memory controller is mounted in a cgroup v1
hierarchy? It appears to me that we *will* go through with hugetlb
charging in this case?

>
> +               return 0;
> +
> +       memcg = get_mem_cgroup_from_current();
> +       ret = charge_memcg(folio, memcg, gfp);
> +       mem_cgroup_put(memcg);
> +
> +       return ret;
> +}
> +
>  /**
>   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
>   * @folio: folio to charge.
> --
> 2.34.1
Nhat Pham Sept. 29, 2023, 12:58 a.m. UTC | #4
On Thu, Sep 28, 2023 at 5:38 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> <snip>
>
> >
> > +
> > +/**
> > + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
> > + * @folio: folio to charge.
> > + * @gfp: reclaim mode
> > + *
> > + * This function charges an allocated hugetlbf folio to the memcg of the
> > + * current task.
> > + *
> > + * Returns 0 on success. Otherwise, an error code is returned.
> > + */
> > +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
> > +{
> > +       struct mem_cgroup *memcg;
> > +       int ret;
> > +
> > +       if (mem_cgroup_disabled() ||
> > +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
>
> What happens if the memory controller is mounted in a cgroup v1
> hierarchy? It appears to me that we *will* go through with hugetlb
> charging in this case?

Ah right, cgroup v1. Does it not work with the mount flag guarding?
What's the behavior of cgroup v1 when it comes to, for example, memory
recursive protection (which this mount flag is based on)?

If it doesn't work, we'll have to add a separate knob for v1 -
no biggies.

Other than this concern, I don't have anything against cgroup v1
having this feature per se - everything should still work. But let
me know if it can break cgroup v1 accounting otherwise :)

>
> >
> > +               return 0;
> > +
> > +       memcg = get_mem_cgroup_from_current();
> > +       ret = charge_memcg(folio, memcg, gfp);
> > +       mem_cgroup_put(memcg);
> > +
> > +       return ret;
> > +}
> > +
> >  /**
> >   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
> >   * @folio: folio to charge.
> > --
> > 2.34.1
Nhat Pham Sept. 29, 2023, 1:07 a.m. UTC | #5
On Thu, Sep 28, 2023 at 5:58 PM Nhat Pham <nphamcs@gmail.com> wrote:
>
> On Thu, Sep 28, 2023 at 5:38 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > <snip>
> >
> > >
> > > +
> > > +/**
> > > + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
> > > + * @folio: folio to charge.
> > > + * @gfp: reclaim mode
> > > + *
> > > + * This function charges an allocated hugetlbf folio to the memcg of the
> > > + * current task.
> > > + *
> > > + * Returns 0 on success. Otherwise, an error code is returned.
> > > + */
> > > +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
> > > +{
> > > +       struct mem_cgroup *memcg;
> > > +       int ret;
> > > +
> > > +       if (mem_cgroup_disabled() ||
> > > +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
> >
> > What happens if the memory controller is mounted in a cgroup v1
> > hierarchy? It appears to me that we *will* go through with hugetlb
> > charging in this case?
>
> Ah right, cgroup v1. Does it not work with mount flag guarding?
> What's the behavior of cgroup v1 when it comes to memory
> recursive protection for e.g (which this mount flag is based on)?
>
> If it doesn't work, we'll have to add a separate knob for v1 -
> no biggies.

But to be clear, my intention is that we're not adding this
feature to v1 (which, to my understanding, has been
deprecated).

If it's added by virtue of it sharing infrastructure with v2,
then it's fine, but only if the mount option still works to
guard against unintentional enablement (if not we'll
also short-circuit v1, or add knobs if ppl really want
it in v1 as well).

If it's not added at all, then I don't have any complaints :)

>
> Other than this concern, I don't have anything against cgroup v1
> having this feature per se - everything should still work. But let
> I know if it can break cgroupv1 accounting otherwise :)
>
> >
> > >
> > > +               return 0;
> > > +
> > > +       memcg = get_mem_cgroup_from_current();
> > > +       ret = charge_memcg(folio, memcg, gfp);
> > > +       mem_cgroup_put(memcg);
> > > +
> > > +       return ret;
> > > +}
> > > +
> > >  /**
> > >   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
> > >   * @folio: folio to charge.
> > > --
> > > 2.34.1
Yosry Ahmed Sept. 29, 2023, 1:18 a.m. UTC | #6
On Thu, Sep 28, 2023 at 6:07 PM Nhat Pham <nphamcs@gmail.com> wrote:
>
> On Thu, Sep 28, 2023 at 5:58 PM Nhat Pham <nphamcs@gmail.com> wrote:
> >
> > On Thu, Sep 28, 2023 at 5:38 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> > >
> > > <snip>
> > >
> > > >
> > > > +
> > > > +/**
> > > > + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
> > > > + * @folio: folio to charge.
> > > > + * @gfp: reclaim mode
> > > > + *
> > > > + * This function charges an allocated hugetlbf folio to the memcg of the
> > > > + * current task.
> > > > + *
> > > > + * Returns 0 on success. Otherwise, an error code is returned.
> > > > + */
> > > > +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
> > > > +{
> > > > +       struct mem_cgroup *memcg;
> > > > +       int ret;
> > > > +
> > > > +       if (mem_cgroup_disabled() ||
> > > > +               !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
> > >
> > > What happens if the memory controller is mounted in a cgroup v1
> > > hierarchy? It appears to me that we *will* go through with hugetlb
> > > charging in this case?
> >
> > Ah right, cgroup v1. Does it not work with mount flag guarding?
> > What's the behavior of cgroup v1 when it comes to memory
> > recursive protection for e.g (which this mount flag is based on)?
> >
> > If it doesn't work, we'll have to add a separate knob for v1 -
> > no biggies.
>
> But to be clear, my intention is that we're not adding this
> feature to v1 (which, to my understanding, has been
> deprecated).
>
> If it's added by virtue of it sharing infrastructure with v2,
> then it's fine, but only if the mount option still works to
> guard against unintentional enablement (if not we'll
> also short-circuit v1, or add knobs if ppl really want
> it in v1 as well).
>
> If it's not added at all, then I don't have any complaints :)
>
> >
> > Other than this concern, I don't have anything against cgroup v1
> > having this feature per se - everything should still work. But let
> > I know if it can break cgroupv1 accounting otherwise :)
> >

My concern is the scenario where the memory controller is mounted in
cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.

In this case it seems like the current code will only check whether
memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
the fact that cgroup v1 did not enable hugetlb accounting.

I obviously prefer that any features are also added to cgroup v1,
because we still haven't made it to cgroup v2, especially when the
infrastructure is shared. On the other hand, I am pretty sure the
maintainers will not like what I am saying :)
Nhat Pham Sept. 29, 2023, 1:25 a.m. UTC | #7
On Thu, Sep 28, 2023 at 18:18 Yosry Ahmed <yosryahmed@google.com> wrote:

> On Thu, Sep 28, 2023 at 6:07 PM Nhat Pham <nphamcs@gmail.com> wrote:
> >
> > On Thu, Sep 28, 2023 at 5:58 PM Nhat Pham <nphamcs@gmail.com> wrote:
> > >
> > > On Thu, Sep 28, 2023 at 5:38 PM Yosry Ahmed <yosryahmed@google.com>
> wrote:
> > > >
> > > > <snip>
> > > >
> > > > >
> > > > > +
> > > > > +/**
> > > > > + * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated
> hugetlb folio.
> > > > > + * @folio: folio to charge.
> > > > > + * @gfp: reclaim mode
> > > > > + *
> > > > > + * This function charges an allocated hugetlbf folio to the memcg
> of the
> > > > > + * current task.
> > > > > + *
> > > > > + * Returns 0 on success. Otherwise, an error code is returned.
> > > > > + */
> > > > > +int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t
> gfp)
> > > > > +{
> > > > > +       struct mem_cgroup *memcg;
> > > > > +       int ret;
> > > > > +
> > > > > +       if (mem_cgroup_disabled() ||
> > > > > +               !(cgrp_dfl_root.flags &
> CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
> > > >
> > > > What happens if the memory controller is mounted in a cgroup v1
> > > > hierarchy? It appears to me that we *will* go through with hugetlb
> > > > charging in this case?
> > >
> > > Ah right, cgroup v1. Does it not work with mount flag guarding?
> > > What's the behavior of cgroup v1 when it comes to memory
> > > recursive protection for e.g (which this mount flag is based on)?
> > >
> > > If it doesn't work, we'll have to add a separate knob for v1 -
> > > no biggies.
> >
> > But to be clear, my intention is that we're not adding this
> > feature to v1 (which, to my understanding, has been
> > deprecated).
> >
> > If it's added by virtue of it sharing infrastructure with v2,
> > then it's fine, but only if the mount option still works to
> > guard against unintentional enablement (if not we'll
> > also short-circuit v1, or add knobs if ppl really want
> > it in v1 as well).
> >
> > If it's not added at all, then I don't have any complaints :)
> >
> > >
> > > Other than this concern, I don't have anything against cgroup v1
> > > having this feature per se - everything should still work. But let
> > > I know if it can break cgroupv1 accounting otherwise :)
> > >
>
> My concern is the scenario where the memory controller is mounted in
> cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.


Ohh I see. Lemme do some testing to double
check :)



>
> In this case it seems like the current code will only check whether
> memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> the fact that cgroup v1 did not enable hugetlb accounting.
>
> I obviously prefer that any features are also added to cgroup v1,
> because we still didn't make it to cgroup v2, especially when the
> infrastructure is shared. On the other hand, I am pretty sure the
> maintainers will not like what I am saying :)


I can at least try to not break v1 for a start :)
Thanks for pointing it out tho!

>
Johannes Weiner Sept. 29, 2023, 3:08 p.m. UTC | #8
On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> My concern is the scenario where the memory controller is mounted in
> cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> 
> In this case it seems like the current code will only check whether
> memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> the fact that cgroup v1 did not enable hugetlb accounting.
> 
> I obviously prefer that any features are also added to cgroup v1,
> because we still didn't make it to cgroup v2, especially when the
> infrastructure is shared. On the other hand, I am pretty sure the
> maintainers will not like what I am saying :)

I have a weak preference.

It's definitely a little weird that the v1 controller's behavior
changes based on the v2 mount flag. And that if you want it as an
otherwise exclusive v1 user, you'd have to mount a dummy v2.

But I also don't see a scenario where it would hurt, or where there
would be an unresolvable conflict between v1 and v2 in expressing
desired behavior, since the memory controller is exclusive to one.

While we could eliminate this quirk with a simple
!cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
it would seem almost punitive to add extra code just to take something
away that isn't really a problem and could be useful to some people.

If Tejun doesn't object, I'd say let's just keep implied v1 behavior.
Yosry Ahmed Sept. 29, 2023, 3:11 p.m. UTC | #9
On Fri, Sep 29, 2023 at 8:08 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> > My concern is the scenario where the memory controller is mounted in
> > cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> >
> > In this case it seems like the current code will only check whether
> > memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> > the fact that cgroup v1 did not enable hugetlb accounting.
> >
> > I obviously prefer that any features are also added to cgroup v1,
> > because we still didn't make it to cgroup v2, especially when the
> > infrastructure is shared. On the other hand, I am pretty sure the
> > maintainers will not like what I am saying :)
>
> I have a weak preference.
>
> It's definitely a little weird that the v1 controller's behavior
> changes based on the v2 mount flag. And that if you want it as an
> otherwise exclusive v1 user, you'd have to mount a dummy v2.
>
> But I also don't see a scenario where it would hurt, or where there
> would be an unresolvable conflict between v1 and v2 in expressing
> desired behavior, since the memory controller is exclusive to one.
>
> While we could eliminate this quirk with a simple
> !cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
> it would seem almost punitive to add extra code just to take something
> away that isn't really a problem and could be useful to some people.
>
> If Tejun doesn't object, I'd say let's just keep implied v1 behavior.

I agree that adding extra code to take a feature away from v1 is
probably too much, but I also think relying on a v2 mount option is
weird. Would it be too much to just have a v1-specific flag as well
and use cgroup_subsys_on_dfl(memory_cgrp_subsys) to decide which flag
to read?
Johannes Weiner Sept. 29, 2023, 5:42 p.m. UTC | #10
On Fri, Sep 29, 2023 at 08:11:54AM -0700, Yosry Ahmed wrote:
> On Fri, Sep 29, 2023 at 8:08 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
> >
> > On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> > > My concern is the scenario where the memory controller is mounted in
> > > cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> > >
> > > In this case it seems like the current code will only check whether
> > > memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> > > the fact that cgroup v1 did not enable hugetlb accounting.
> > >
> > > I obviously prefer that any features are also added to cgroup v1,
> > > because we still didn't make it to cgroup v2, especially when the
> > > infrastructure is shared. On the other hand, I am pretty sure the
> > > maintainers will not like what I am saying :)
> >
> > I have a weak preference.
> >
> > It's definitely a little weird that the v1 controller's behavior
> > changes based on the v2 mount flag. And that if you want it as an
> > otherwise exclusive v1 user, you'd have to mount a dummy v2.
> >
> > But I also don't see a scenario where it would hurt, or where there
> > would be an unresolvable conflict between v1 and v2 in expressing
> > desired behavior, since the memory controller is exclusive to one.
> >
> > While we could eliminate this quirk with a simple
> > !cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
> > it would seem almost punitive to add extra code just to take something
> > away that isn't really a problem and could be useful to some people.
> >
> > If Tejun doesn't object, I'd say let's just keep implied v1 behavior.
> 
> I agree that adding extra code to take a feature away from v1 is
> probably too much, but I also think relying on a v2 mount option is
> weird. Would it be too much to just have a v1-specific flag as well
> and use cgroup_subsys_on_dfl(memory_cgrp_subsys) to decide which flag
> to read?

Yeah, let's not preemptively add explicit new features to cgroup1.

Since we agree the incidental support is weird, let's filter hugetlb
charging on cgroup_subsys_on_dfl(memory_cgrp_subsys) after all. If
somebody wants this for v1 - and it doesn't sound like Google is even
in that category according to Frank - they should send a separate
patch and we can go through all the reasons why switching to v2 is not
an option for them.
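
For reference, a minimal sketch of the filter being agreed on here, applied to the charge function from the patch (a sketch of the direction discussed, as it might look in mm/memcontrol.c on top of the posted v2, not the actual follow-up revision):

/*
 * Sketch only: additionally require the memory controller to be on the
 * default (v2) hierarchy, so the v2 mount flag cannot enable charging
 * for a memory controller mounted in cgroup v1.
 */
int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	int ret;

	if (mem_cgroup_disabled() ||
	    !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
	    !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
		return 0;

	memcg = get_mem_cgroup_from_current();
	ret = charge_memcg(folio, memcg, gfp);
	mem_cgroup_put(memcg);

	return ret;
}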
Nhat Pham Sept. 29, 2023, 5:48 p.m. UTC | #11
On Fri, Sep 29, 2023 at 10:42 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Fri, Sep 29, 2023 at 08:11:54AM -0700, Yosry Ahmed wrote:
> > On Fri, Sep 29, 2023 at 8:08 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
> > >
> > > On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> > > > My concern is the scenario where the memory controller is mounted in
> > > > cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> > > >
> > > > In this case it seems like the current code will only check whether
> > > > memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> > > > the fact that cgroup v1 did not enable hugetlb accounting.
> > > >
> > > > I obviously prefer that any features are also added to cgroup v1,
> > > > because we still didn't make it to cgroup v2, especially when the
> > > > infrastructure is shared. On the other hand, I am pretty sure the
> > > > maintainers will not like what I am saying :)
> > >
> > > I have a weak preference.
> > >
> > > It's definitely a little weird that the v1 controller's behavior
> > > changes based on the v2 mount flag. And that if you want it as an
> > > otherwise exclusive v1 user, you'd have to mount a dummy v2.
> > >
> > > But I also don't see a scenario where it would hurt, or where there
> > > would be an unresolvable conflict between v1 and v2 in expressing
> > > desired behavior, since the memory controller is exclusive to one.
> > >
> > > While we could eliminate this quirk with a simple
> > > !cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
> > > it would seem almost punitive to add extra code just to take something
> > > away that isn't really a problem and could be useful to some people.
> > >
> > > If Tejun doesn't object, I'd say let's just keep implied v1 behavior.
> >
> > I agree that adding extra code to take a feature away from v1 is
> > probably too much, but I also think relying on a v2 mount option is
> > weird. Would it be too much to just have a v1-specific flag as well
> > and use cgroup_subsys_on_dfl(memory_cgrp_subsys) to decide which flag
> > to read?
>
> Yeah, let's not preemptively add explicit new features to cgroup1.
>
> Since we agree the incidental support is weird, let's filter hugetlb
> charging on cgroup_subsys_on_dfl(memory_cgrp_subsys) after all. If
> somebody wants this for v1 - and it doesn't sound like Google is even
> in that category according to Frank - they should send a separate
> patch and we can go through all the reasons why switching to v2 is not
> an option for them.

My gut reaction when I became aware of this was to just eliminate it with
!cgroup_subsys_on_dfl(memory_cgrp_subsys) too :)

Yeah, let's just keep it simple + safe and disable it on cgroupv1 for now.
We can have this conversation again in the future when someone
wants this in v1 (and yes, this conversation should include the option
of moving to v2).

Consider it yet another incentive to migrate to cgroupv2 ;)
Frank van der Linden Sept. 29, 2023, 6:07 p.m. UTC | #12
On Fri, Sep 29, 2023 at 10:42 AM Johannes Weiner <hannes@cmpxchg.org> wrote:

> On Fri, Sep 29, 2023 at 08:11:54AM -0700, Yosry Ahmed wrote:
> > On Fri, Sep 29, 2023 at 8:08 AM Johannes Weiner <hannes@cmpxchg.org>
> wrote:
> > >
> > > On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> > > > My concern is the scenario where the memory controller is mounted in
> > > > cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> > > >
> > > > In this case it seems like the current code will only check whether
> > > > memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> > > > the fact that cgroup v1 did not enable hugetlb accounting.
> > > >
> > > > I obviously prefer that any features are also added to cgroup v1,
> > > > because we still didn't make it to cgroup v2, especially when the
> > > > infrastructure is shared. On the other hand, I am pretty sure the
> > > > maintainers will not like what I am saying :)
> > >
> > > I have a weak preference.
> > >
> > > It's definitely a little weird that the v1 controller's behavior
> > > changes based on the v2 mount flag. And that if you want it as an
> > > otherwise exclusive v1 user, you'd have to mount a dummy v2.
> > >
> > > But I also don't see a scenario where it would hurt, or where there
> > > would be an unresolvable conflict between v1 and v2 in expressing
> > > desired behavior, since the memory controller is exclusive to one.
> > >
> > > While we could eliminate this quirk with a simple
> > > !cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
> > > it would seem almost punitive to add extra code just to take something
> > > away that isn't really a problem and could be useful to some people.
> > >
> > > If Tejun doesn't object, I'd say let's just keep implied v1 behavior.
> >
> > I agree that adding extra code to take a feature away from v1 is
> > probably too much, but I also think relying on a v2 mount option is
> > weird. Would it be too much to just have a v1-specific flag as well
> > and use cgroup_subsys_on_dfl(memory_cgrp_subsys) to decide which flag
> > to read?
>
> Yeah, let's not preemptively add explicit new features to cgroup1.
>
> Since we agree the incidental support is weird, let's filter hugetlb
> charging on cgroup_subsys_on_dfl(memory_cgrp_subsys) after all. If
> somebody wants this for v1 - and it doesn't sound like Google is even
> in that category according to Frank - they should send a separate
> patch and we can go through all the reasons why switching to v2 is not
> an option for them.
>
>
Well, we do have it for v1 already in a local change, and it'll be around
for a while longer, so as long as this change at least doesn't break v1 and doesn't
require a number of additional changes to work around it, that'd be good.

Having said that, I know the general response is "do not use v1 anymore",
which I understand - I realize that we're not in a strong position to argue
for changes to accommodate v1.

- Frank
Michal Hocko Oct. 2, 2023, 12:18 p.m. UTC | #13
On Fri 29-09-23 13:42:21, Johannes Weiner wrote:
> On Fri, Sep 29, 2023 at 08:11:54AM -0700, Yosry Ahmed wrote:
> > On Fri, Sep 29, 2023 at 8:08 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
> > >
> > > On Thu, Sep 28, 2023 at 06:18:19PM -0700, Yosry Ahmed wrote:
> > > > My concern is the scenario where the memory controller is mounted in
> > > > cgroup v1, and cgroup v2 is mounted with memory_hugetlb_accounting.
> > > >
> > > > In this case it seems like the current code will only check whether
> > > > memory_hugetlb_accounting was set on cgroup v2 or not, disregarding
> > > > the fact that cgroup v1 did not enable hugetlb accounting.
> > > >
> > > > I obviously prefer that any features are also added to cgroup v1,
> > > > because we still didn't make it to cgroup v2, especially when the
> > > > infrastructure is shared. On the other hand, I am pretty sure the
> > > > maintainers will not like what I am saying :)
> > >
> > > I have a weak preference.
> > >
> > > It's definitely a little weird that the v1 controller's behavior
> > > changes based on the v2 mount flag. And that if you want it as an
> > > otherwise exclusive v1 user, you'd have to mount a dummy v2.
> > >
> > > But I also don't see a scenario where it would hurt, or where there
> > > would be an unresolvable conflict between v1 and v2 in expressing
> > > desired behavior, since the memory controller is exclusive to one.
> > >
> > > While we could eliminate this quirk with a simple
> > > !cgroup_subsys_on_dfl(memory_cgrp_subsys) inside the charge function,
> > > it would seem almost punitive to add extra code just to take something
> > > away that isn't really a problem and could be useful to some people.
> > >
> > > If Tejun doesn't object, I'd say let's just keep implied v1 behavior.
> > 
> > I agree that adding extra code to take a feature away from v1 is
> > probably too much, but I also think relying on a v2 mount option is
> > weird. Would it be too much to just have a v1-specific flag as well
> > and use cgroup_subsys_on_dfl(memory_cgrp_subsys) to decide which flag
> > to read?
> 
> Yeah, let's not preemptively add explicit new features to cgroup1.
> 
> Since we agree the incidental support is weird, let's filter hugetlb
> charging on cgroup_subsys_on_dfl(memory_cgrp_subsys) after all.

Agreed. It would be a bad idea to have an implicit behavior change based
on v2 mounting options. And I really do not think we want to add this
feature to v1. I am not super thrilled about enabling this for v2, to be
completely honest, but I do see a demand so I will not object to that.
Michal Hocko Oct. 2, 2023, 1:43 p.m. UTC | #14
On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> Currently, hugetlb memory usage is not acounted for in the memory
> controller, which could lead to memory overprotection for cgroups with
> hugetlb-backed memory. This has been observed in our production system.
> 
> This patch rectifies this issue by charging the memcg when the hugetlb
> folio is allocated, and uncharging when the folio is freed (analogous to
> the hugetlb controller).

This changelog is missing a lot of information, both about the usecase
(we do not want to fish that out from the archives in the future) and
about the actual implementation and the reasoning behind it.

AFAICS you have decided to charge on the hugetlb use rather than hugetlb
allocation to the pool. I suspect the underlying reasoning is that pool
pages do not belong to anybody. This is a deliberate decision and it
should be documented as such.

It is also very important to describe subtle behavior properties that
might be rather unintuitive to users. Most notably:
- there is no hugetlb pool management involved in the memcg
  controller. One has to use the hugetlb controller for that purpose.
  Also the preallocated pool as such doesn't belong to anybody, so the
  memcg host overcommit management has to consider it when configuring
  hard limits.
- memcg limit reclaim doesn't assist hugetlb page allocation when
  hugetlb overcommit is configured (i.e. pages are not consumed from the
  pool), which means that the page allocation might disrupt workloads
  from other memcgs.
- failure to charge a hugetlb page results in SIGBUS rather
  than the memcg OOM killer. That could be the case even if the
  hugetlb pool still has pages available and there is
  reclaimable memory in the memcg.
- hugetlb pages are contributing to memory reclaim protection
  implicitly. This means that the low/min limit tuning has to consider
  hugetlb memory as well (see the illustrative numbers after this list).
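
As a rough illustration of the last point (made-up numbers): a job that
maps 2G of hugetlb-backed memory and uses 1G of regular anon/page cache
would, with this accounting enabled, need memory.low on the order of 3G
to keep its reclaimable 1G protected, because the unreclaimable hugetlb
pages now count towards the usage that is compared against the
protection; without hugetlb accounting, 1G would have been enough.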

I suspect there is more than the above. To be completely honest I am
still not convinced this is a good idea.

I do recognize that this might work in very limited environments, but
hugetlb management is quite challenging on its own and this just adds
another layer of complexity which is really hard to see through without
an intimate understanding of both memcg and hugetlb. The reason that
hugetlb has been living outside of the core MM (and memcg) is not just
because we like it that way. And yes, I do fully understand that users
shouldn't really care about that because this is just memory to them.

We should also consider the global control for this functionality. I am
especially worried about setups where a mixed bag of workloads
(containers) is executed. While some of them will be ready for the new
accounting mode, many will live in their own world without ever being
modified. How do we deal with that situation?

All that being said, I am not going to ack nor nack this but I really do
prefer to be much more explicit about the motivation and current
implementation specifics so that we can forward users to something
they can digest.

> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
[...]

A minor implementation detail below. I couldn't spot anything obviously
broken with the rest of the hugetlb-specific code. restore_reserve_on_memcg_failure
is rather clumsy and potentially error prone, but I will leave that to
Mike as he is much more familiar with that behavior than me.

> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index de220e3ff8be..ff88ea4df11a 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
[...]
> @@ -3119,6 +3121,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
>  			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>  					pages_per_huge_page(h), folio);
>  	}
> +
> +	/* undo allocation if memory controller disallows it. */
> +	if (mem_cgroup_hugetlb_charge_folio(folio, GFP_KERNEL)) {

htlb_alloc_mask(h) rather than GFP_KERNEL. Ideally with
__GFP_RETRY_MAYFAIL which is a default allocation policy.

> +		if (restore_reserve_on_memcg_failure)
> +			restore_reserve_on_error(h, vma, addr, folio);
> +		folio_put(folio);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
>  	return folio;
>  
>  out_uncharge_cgroup:
Johannes Weiner Oct. 2, 2023, 2:50 p.m. UTC | #15
On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> > Currently, hugetlb memory usage is not acounted for in the memory
> > controller, which could lead to memory overprotection for cgroups with
> > hugetlb-backed memory. This has been observed in our production system.
> > 
> > This patch rectifies this issue by charging the memcg when the hugetlb
> > folio is allocated, and uncharging when the folio is freed (analogous to
> > the hugetlb controller).
> 
> This changelog is missing a lot of information. Both about the usecase
> (we do not want to fish that out from archives in the future) and the
> actual implementation and the reasoning behind that.
> 
> AFAICS you have decided to charge on the hugetlb use rather than hugetlb
> allocation to the pool. I suspect the underlying reasoning is that pool
> pages do not belong to anybody. This is a deliberate decision and it
> should be documented as such.
> 
> It is also very important do describe subtle behavior properties that
> might be rather unintuitive to users. Most notably 
> - there is no hugetlb pool management involved in the memcg
>   controller. One has to use hugetlb controller for that purpose.
>   Also the pre allocated pool as such doesn't belong to anybody so the
>   memcg host overcommit management has to consider it when configuring
>   hard limits.

+1

> - memcg limit reclaim doesn't assist hugetlb pages allocation when
>   hugetlb overcommit is configured (i.e. pages are not consumed from the
>   pool) which means that the page allocation might disrupt workloads
>   from other memcgs.
> - failure to charge a hugetlb page results in SIGBUS rather
>   than memcg oom killer. That could be the case even if the
>   hugetlb pool still has pages available and there is
>   reclaimable memory in the memcg.

Are these actually true? AFAICS, regardless of whether the page comes
from the pool or the buddy allocator, the memcg code will go through
the regular charge path, attempt reclaim, and OOM if that fails.
Michal Hocko Oct. 2, 2023, 3:08 p.m. UTC | #16
On Mon 02-10-23 10:50:26, Johannes Weiner wrote:
> On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> > On Wed 27-09-23 17:57:22, Nhat Pham wrote:
[...]
> > - memcg limit reclaim doesn't assist hugetlb pages allocation when
> >   hugetlb overcommit is configured (i.e. pages are not consumed from the
> >   pool) which means that the page allocation might disrupt workloads
> >   from other memcgs.
> > - failure to charge a hugetlb page results in SIGBUS rather
> >   than memcg oom killer. That could be the case even if the
> >   hugetlb pool still has pages available and there is
> >   reclaimable memory in the memcg.
> 
> Are these actually true? AFAICS, regardless of whether the page comes
> from the pool or the buddy allocator, the memcg code will go through
> the regular charge path, attempt reclaim, and OOM if that fails.

OK, I should have been more explicit. Let me expand. Charges are
accounted only _after_ the actual allocation is done. So the actual
allocation is not constrained by the memcg context. It might reclaim
from the memcg at that time but the disruption could have already
happened. Not really any different from regular memory allocation
attempt but much more visible with GB pages and one could reasonably
expect that memcg should stop such a GB allocation if the local reclaim
would be hopeless to free up enough from its own consumption.

Makes more sense?

With the latter point I meant to say that the memcg OOM killer will not
communicate the hugetlb request failure, so the usual SIGBUS will be
returned to userspace. I can imagine a SIGBUS handler could check
hugetlb availability to retry or something similar.
Johannes Weiner Oct. 2, 2023, 3:25 p.m. UTC | #17
On Mon, Oct 02, 2023 at 05:08:34PM +0200, Michal Hocko wrote:
> On Mon 02-10-23 10:50:26, Johannes Weiner wrote:
> > On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> > > On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> [...]
> > > - memcg limit reclaim doesn't assist hugetlb pages allocation when
> > >   hugetlb overcommit is configured (i.e. pages are not consumed from the
> > >   pool) which means that the page allocation might disrupt workloads
> > >   from other memcgs.
> > > - failure to charge a hugetlb page results in SIGBUS rather
> > >   than memcg oom killer. That could be the case even if the
> > >   hugetlb pool still has pages available and there is
> > >   reclaimable memory in the memcg.
> > 
> > Are these actually true? AFAICS, regardless of whether the page comes
> > from the pool or the buddy allocator, the memcg code will go through
> > the regular charge path, attempt reclaim, and OOM if that fails.
> 
> OK, I should have been more explicit. Let me expand. Charges are
> accounted only _after_ the actual allocation is done. So the actual
> allocation is not constrained by the memcg context. It might reclaim
> from the memcg at that time but the disruption could have already
> happened. Not really any different from regular memory allocation
> attempt but much more visible with GB pages and one could reasonably
> expect that memcg should stop such a GB allocation if the local reclaim
> would be hopeless to free up enough from its own consumption.
> 
> Makes more sense?

Yes, that makes sense.

This should be fairly easy to address by having hugetlb do the split
transaction that charge_memcg() does in one go, similar to what we do
for the hugetlb controller as well. IOW,

alloc_hugetlb_folio()
{
	if (mem_cgroup_hugetlb_try_charge())
		return ERR_PTR(-ENOMEM);

	folio = dequeue();
	if (!folio) {
		folio = alloc_buddy();
		if (!folio)
			goto uncharge;
	}

	mem_cgroup_hugetlb_commit_charge();
}
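
For illustration, the memcg side of that split could look roughly like
the below, reusing the internal try_charge()/commit_charge() helpers
that charge_memcg() is built on, with the caller resolving the memcg via
get_mem_cgroup_from_current() as in the v2 patch. This is only a sketch
of the idea, not a final interface; statistics/event updates are
omitted.

/* Reserve the charge before dequeueing from the pool or hitting buddy. */
int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
				  long nr_pages)
{
	if (mem_cgroup_disabled() ||
	    !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
		return 0;

	return try_charge(memcg, gfp, nr_pages);
}

/* Bind the already-charged memcg to the folio once allocation succeeded. */
void mem_cgroup_hugetlb_commit_charge(struct folio *folio,
				      struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled() ||
	    !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
		return;

	css_get(&memcg->css);
	commit_charge(folio, memcg);
	/* mem_cgroup_charge_statistics()/memcg_check_events() omitted here */
}

On the failure path ("goto uncharge" above), the reserved charge would
simply be returned to the counters before failing with -ENOMEM.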
Johannes Weiner Oct. 2, 2023, 4:21 p.m. UTC | #18
On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> We should also consider the global control for this functionality. I am
> especially worried about setups where a mixed bag of workloads
> (containers) is executed. While some of them will be ready for the new
> accounting mode many will leave in their own world without ever being
> modified. How do we deal with that situation?

It's possible to add more localized control on top of the global flag
should this come up. But this seems like a new and honestly pretty
hypothetical usecase, given the host-level coordination already
involved in real-world hugetlb setups.

The same could be said about other mount options, such as nsdelegate,
memory_localevents, and memory_recursiveprot. Those you'd expect to
have a much broader audience, and nobody has asked for mixed use.

Let's cross this bridge not when but if we have to.
Nhat Pham Oct. 2, 2023, 5:28 p.m. UTC | #19
On Mon, Oct 2, 2023 at 6:43 AM Michal Hocko <mhocko@suse.com> wrote:
>
> On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> > Currently, hugetlb memory usage is not acounted for in the memory
> > controller, which could lead to memory overprotection for cgroups with
> > hugetlb-backed memory. This has been observed in our production system.
> >
> > This patch rectifies this issue by charging the memcg when the hugetlb
> > folio is allocated, and uncharging when the folio is freed (analogous to
> > the hugetlb controller).
>
> This changelog is missing a lot of information. Both about the usecase
> (we do not want to fish that out from archives in the future) and the
> actual implementation and the reasoning behind that.
>
> AFAICS you have decided to charge on the hugetlb use rather than hugetlb
> allocation to the pool. I suspect the underlying reasoning is that pool
> pages do not belong to anybody. This is a deliberate decision and it
> should be documented as such.

Yep that was the intention behind placing the charging of the hugetlb folio
in alloc_hugetlb_folio(). I'll document this in the changelog and/or code.

>
> It is also very important do describe subtle behavior properties that
> might be rather unintuitive to users. Most notably

If you don't mind, I'll summarize these into the next version of
the patch's changelog :)

> - there is no hugetlb pool management involved in the memcg
>   controller. One has to use hugetlb controller for that purpose.
>   Also the pre allocated pool as such doesn't belong to anybody so the
>   memcg host overcommit management has to consider it when configuring
>   hard limits.
> - memcg limit reclaim doesn't assist hugetlb pages allocation when
>   hugetlb overcommit is configured (i.e. pages are not consumed from the
>   pool) which means that the page allocation might disrupt workloads
>   from other memcgs.
> - failure to charge a hugetlb page results in SIGBUS rather
>   than memcg oom killer. That could be the case even if the
>   hugetlb pool still has pages available and there is
>   reclaimable memory in the memcg.

Ah yes that should be documented indeed.

> - hugetlb pages are contributing to memory reclaim protection
>   implicitly. This means that the low,min limits tunning has to consider
>   hugetlb memory as well.

This was the original inspiration for this change. I'll expand on it
in the new version's changelog.

>
> I suspect there is more than the above. To be completely honest I am
> still not convinced this is a good idea.
>
> I do recognize that this might work in a very limited environments but
> hugetlb management is quite challenging on its own and this just adds
> another layer of complexity which is really hard to see through without
> an intimate understanding of both memcg and hugetlb. The reason that
> hugetlb has been living outside of the core MM (and memcg) is not just
> because we like it that way. And yes I do fully understand that users
> shouldn't really care about that because this is just a memory to them.
>
> We should also consider the global control for this functionality. I am
> especially worried about setups where a mixed bag of workloads
> (containers) is executed. While some of them will be ready for the new
> accounting mode many will leave in their own world without ever being
> modified. How do we deal with that situation?

Johannes already responded to this, but I also think this hypothetical
situation isn't super urgent to handle right now. That said, we can
always revisit it if/when it proves to be an issue and add appropriate
memcg-specific control for this feature as a follow-up.

>
> All that being said, I am not going to ack nor nack this but I really do
> prefer to be much more explicit about the motivation and current
> implementation specifics so that we can forward users to something
> they can digest.
>
> > Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> [...]
>
> a minor implementation detail below. I couldn't spot anything obviously
> broken with the rest of the hugetlb specific code. restore_reserve_on_memcg_failure
> is rather clumsy and potentially error prone but I will leave that out
> to Mike as he is much more familiar with that behavior than me.

That part irks me too, but I'm trying to follow the error handling logic
that follows each alloc_hugetlb_folio() call site.

If anyone has any suggestions, I'd be happy to listen!

>
> > diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> > index de220e3ff8be..ff88ea4df11a 100644
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> [...]
> > @@ -3119,6 +3121,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
> >                       hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
> >                                       pages_per_huge_page(h), folio);
> >       }
> > +
> > +     /* undo allocation if memory controller disallows it. */
> > +     if (mem_cgroup_hugetlb_charge_folio(folio, GFP_KERNEL)) {
>
> htlb_alloc_mask(h) rather than GFP_KERNEL. Ideally with
> __GFP_RETRY_MAYFAIL which is a default allocation policy.

Oh I wasn't aware of htlb_alloc_mask(h). So I'll fix this to:

htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL
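
In context, the charge call in the diff would then read roughly as below
(keeping the v2 placement for the moment, before any try/commit rework):

	/* undo allocation if memory controller disallows it. */
	if (mem_cgroup_hugetlb_charge_folio(folio,
				htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL)) {
		if (restore_reserve_on_memcg_failure)
			restore_reserve_on_error(h, vma, addr, folio);
		folio_put(folio);
		return ERR_PTR(-ENOMEM);
	}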

>
> > +             if (restore_reserve_on_memcg_failure)
> > +                     restore_reserve_on_error(h, vma, addr, folio);
> > +             folio_put(folio);
> > +             return ERR_PTR(-ENOMEM);
> > +     }
> > +
> >       return folio;
> >
> >  out_uncharge_cgroup:
>
> --
> Michal Hocko
> SUSE Labs
Nhat Pham Oct. 2, 2023, 5:32 p.m. UTC | #20
On Mon, Oct 2, 2023 at 8:25 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Mon, Oct 02, 2023 at 05:08:34PM +0200, Michal Hocko wrote:
> > On Mon 02-10-23 10:50:26, Johannes Weiner wrote:
> > > On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> > > > On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> > [...]
> > > > - memcg limit reclaim doesn't assist hugetlb pages allocation when
> > > >   hugetlb overcommit is configured (i.e. pages are not consumed from the
> > > >   pool) which means that the page allocation might disrupt workloads
> > > >   from other memcgs.
> > > > - failure to charge a hugetlb page results in SIGBUS rather
> > > >   than memcg oom killer. That could be the case even if the
> > > >   hugetlb pool still has pages available and there is
> > > >   reclaimable memory in the memcg.
> > >
> > > Are these actually true? AFAICS, regardless of whether the page comes
> > > from the pool or the buddy allocator, the memcg code will go through
> > > the regular charge path, attempt reclaim, and OOM if that fails.
> >
> > OK, I should have been more explicit. Let me expand. Charges are
> > accounted only _after_ the actual allocation is done. So the actual
> > allocation is not constrained by the memcg context. It might reclaim
> > from the memcg at that time but the disruption could have already
> > happened. Not really any different from regular memory allocation
> > attempt but much more visible with GB pages and one could reasonably
> > expect that memcg should stop such a GB allocation if the local reclaim
> > would be hopeless to free up enough from its own consumption.
> >
> > Makes more sense?
>
> Yes, that makes sense.
>
> This should be fairly easy to address by having hugetlb do the split
> transaction that charge_memcg() does in one go, similar to what we do
> for the hugetlb controller as well. IOW,
>
> alloc_hugetlb_folio()
> {
>         if (mem_cgroup_hugetlb_try_charge())
>                 return ERR_PTR(-ENOMEM);
>
>         folio = dequeue();
>         if (!folio) {
>                 folio = alloc_buddy();
>                 if (!folio)
>                         goto uncharge;
>         }
>
>         mem_cgroup_hugetlb_commit_charge();
> }

Ah actually, I like this better.
If I do this I can circumvent all the restore_reserve bogosity as well!
Michal Hocko Oct. 3, 2023, 9:17 a.m. UTC | #21
On Mon 02-10-23 11:25:55, Johannes Weiner wrote:
> On Mon, Oct 02, 2023 at 05:08:34PM +0200, Michal Hocko wrote:
> > On Mon 02-10-23 10:50:26, Johannes Weiner wrote:
> > > On Mon, Oct 02, 2023 at 03:43:19PM +0200, Michal Hocko wrote:
> > > > On Wed 27-09-23 17:57:22, Nhat Pham wrote:
> > [...]
> > > > - memcg limit reclaim doesn't assist hugetlb pages allocation when
> > > >   hugetlb overcommit is configured (i.e. pages are not consumed from the
> > > >   pool) which means that the page allocation might disrupt workloads
> > > >   from other memcgs.
> > > > - failure to charge a hugetlb page results in SIGBUS rather
> > > >   than memcg oom killer. That could be the case even if the
> > > >   hugetlb pool still has pages available and there is
> > > >   reclaimable memory in the memcg.
> > > 
> > > Are these actually true? AFAICS, regardless of whether the page comes
> > > from the pool or the buddy allocator, the memcg code will go through
> > > the regular charge path, attempt reclaim, and OOM if that fails.
> > 
> > OK, I should have been more explicit. Let me expand. Charges are
> > accounted only _after_ the actual allocation is done. So the actual
> > allocation is not constrained by the memcg context. It might reclaim
> > from the memcg at that time but the disruption could have already
> > happened. Not really any different from regular memory allocation
> > attempt but much more visible with GB pages and one could reasonably
> > expect that memcg should stop such a GB allocation if the local reclaim
> > would be hopeless to free up enough from its own consumption.
> > 
> > Makes more sense?
> 
> Yes, that makes sense.
> 
> This should be fairly easy to address by having hugetlb do the split
> transaction that charge_memcg() does in one go, similar to what we do
> for the hugetlb controller as well. IOW,
> 
> alloc_hugetlb_folio()
> {
> 	if (mem_cgroup_hugetlb_try_charge())
> 		return ERR_PTR(-ENOMEM);
> 
> 	folio = dequeue();
> 	if (!folio) {
> 		folio = alloc_buddy();
> 		if (!folio)
> 			goto uncharge;
> 	}
> 
> 	mem_cgroup_hugetlb_commit_charge();
> }

Yes, this makes sense. I still suspect we will need better charge
reclaim tuning for GB pages, as those are just too huge and a simple
MAX_RECLAIM_RETRIES * GB worth of reclaim targets might be just overly
aggressive.
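
To put rough numbers on that: a 1G hugetlb page is 262144 base pages,
and the existing retry loop in try_charge_memcg() reclaims with a target
of nr_pages up to MAX_RECLAIM_RETRIES (16) times, so a single failing 1G
charge could ask the memcg to reclaim on the order of 16G before the
charge finally fails - assuming the retry logic stays as it is today.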
diff mbox series

Patch

diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 622a7f28db1f..e6267b8cbd1d 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -210,6 +210,15 @@  cgroup v2 currently supports the following mount options.
         relying on the original semantics (e.g. specifying bogusly
         high 'bypass' protection values at higher tree levels).
 
+  memory_hugetlb_accounting
+        Count hugetlb memory usage towards the cgroup's overall
+        memory usage for the memory controller. This is a new behavior
+        that could regress existing setups, so it must be explicitly
+        opted in with this mount option. Note that hugetlb pages
+        allocated while this option is not selected will not be
+        tracked by the memory controller (even if cgroup v2 is
+        remounted later on).
+
 
 Organizing Processes and Threads
 --------------------------------
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 60fce26ff937..034967319955 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -902,7 +902,7 @@  static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		 * to keep reservation accounting consistent.
 		 */
 		hugetlb_set_vma_policy(&pseudo_vma, inode, index);
-		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
+		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0, true);
 		hugetlb_drop_vma_policy(&pseudo_vma);
 		if (IS_ERR(folio)) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f1b3151ac30b..8641f4320c98 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -115,6 +115,11 @@  enum {
 	 * Enable recursive subtree protection
 	 */
 	CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+	/*
+	 * Enable hugetlb accounting for the memory controller.
+	 */
+	 CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
 };
 
 /* cftype->flags */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a30686e649f7..9b73db1605a2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -713,7 +713,8 @@  struct huge_bootmem_page {
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-				unsigned long addr, int avoid_reserve);
+				unsigned long addr, int avoid_reserve,
+				bool restore_reserve_on_memcg_failure);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
@@ -1016,7 +1017,8 @@  static inline int isolate_or_dissolve_huge_page(struct page *page,
 
 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 					   unsigned long addr,
-					   int avoid_reserve)
+					   int avoid_reserve,
+					   bool restore_reserve_on_memcg_failure)
 {
 	return NULL;
 }
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0cfab58ab71..8094679c99dd 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -677,6 +677,8 @@  static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
 	return __mem_cgroup_charge(folio, mm, gfp);
 }
 
+int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp);
+
 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -1251,6 +1253,12 @@  static inline int mem_cgroup_charge(struct folio *folio,
 	return 0;
 }
 
+static inline int mem_cgroup_hugetlb_charge_folio(struct folio *folio,
+		gfp_t gfp)
+{
+	return 0;
+}
+
 static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
 			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
 {
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 1fb7f562289d..f11488b18ceb 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1902,6 +1902,7 @@  enum cgroup2_param {
 	Opt_favordynmods,
 	Opt_memory_localevents,
 	Opt_memory_recursiveprot,
+	Opt_memory_hugetlb_accounting,
 	nr__cgroup2_params
 };
 
@@ -1910,6 +1911,7 @@  static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
 	fsparam_flag("favordynmods",		Opt_favordynmods),
 	fsparam_flag("memory_localevents",	Opt_memory_localevents),
 	fsparam_flag("memory_recursiveprot",	Opt_memory_recursiveprot),
+	fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
 	{}
 };
 
@@ -1936,6 +1938,9 @@  static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
 	case Opt_memory_recursiveprot:
 		ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
 		return 0;
+	case Opt_memory_hugetlb_accounting:
+		ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
+		return 0;
 	}
 	return -EINVAL;
 }
@@ -1960,6 +1965,11 @@  static void apply_cgroup_root_flags(unsigned int root_flags)
 			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
 		else
 			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
+
+		if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
+			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
+		else
+			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
 	}
 }
 
@@ -1973,6 +1983,8 @@  static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
 		seq_puts(seq, ",memory_localevents");
 	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
 		seq_puts(seq, ",memory_recursiveprot");
+	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
+		seq_puts(seq, ",memory_hugetlb_accounting");
 	return 0;
 }
 
@@ -7050,7 +7062,8 @@  static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
 			"nsdelegate\n"
 			"favordynmods\n"
 			"memory_localevents\n"
-			"memory_recursiveprot\n");
+			"memory_recursiveprot\n"
+			"memory_hugetlb_accounting\n");
 }
 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index de220e3ff8be..ff88ea4df11a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1902,6 +1902,7 @@  void free_huge_folio(struct folio *folio)
 				     pages_per_huge_page(h), folio);
 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 					  pages_per_huge_page(h), folio);
+	mem_cgroup_uncharge(folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
@@ -3004,7 +3005,8 @@  int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 }
 
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-				    unsigned long addr, int avoid_reserve)
+					unsigned long addr, int avoid_reserve,
+					bool restore_reserve_on_memcg_failure)
 {
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
@@ -3119,6 +3121,15 @@  struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 					pages_per_huge_page(h), folio);
 	}
+
+	/* undo allocation if memory controller disallows it. */
+	if (mem_cgroup_hugetlb_charge_folio(folio, GFP_KERNEL)) {
+		if (restore_reserve_on_memcg_failure)
+			restore_reserve_on_error(h, vma, addr, folio);
+		folio_put(folio);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	return folio;
 
 out_uncharge_cgroup:
@@ -5179,7 +5190,7 @@  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				spin_unlock(src_ptl);
 				spin_unlock(dst_ptl);
 				/* Do not use reserve as it's private owned */
-				new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
+				new_folio = alloc_hugetlb_folio(dst_vma, addr, 1, false);
 				if (IS_ERR(new_folio)) {
 					folio_put(pte_folio);
 					ret = PTR_ERR(new_folio);
@@ -5656,7 +5667,7 @@  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * be acquired again before returning to the caller, as expected.
 	 */
 	spin_unlock(ptl);
-	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
+	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve, true);
 
 	if (IS_ERR(new_folio)) {
 		/*
@@ -5930,7 +5941,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 							VM_UFFD_MISSING);
 		}
 
-		folio = alloc_hugetlb_folio(vma, haddr, 0);
+		folio = alloc_hugetlb_folio(vma, haddr, 0, true);
 		if (IS_ERR(folio)) {
 			/*
 			 * Returning error will result in faulting task being
@@ -6352,7 +6363,7 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			goto out;
 		}
 
-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, true);
 		if (IS_ERR(folio)) {
 			ret = -ENOMEM;
 			goto out;
@@ -6394,7 +6405,7 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			goto out;
 		}
 
-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0, false);
 		if (IS_ERR(folio)) {
 			folio_put(*foliop);
 			ret = -ENOMEM;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d1a322a75172..d5dfc9b36acb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7050,6 +7050,47 @@  int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
 	return ret;
 }
 
+static struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+	struct mem_cgroup *memcg;
+
+again:
+	rcu_read_lock();
+	memcg = mem_cgroup_from_task(current);
+	if (!css_tryget(&memcg->css)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+	return memcg;
+}
+
+/**
+ * mem_cgroup_hugetlb_charge_folio - Charge a newly allocated hugetlb folio.
+ * @folio: folio to charge.
+ * @gfp: reclaim mode
+ *
+ * This function charges an allocated hugetlb folio to the memcg of the
+ * current task.
+ *
+ * Returns 0 on success. Otherwise, an error code is returned.
+ */
+int mem_cgroup_hugetlb_charge_folio(struct folio *folio, gfp_t gfp)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	if (mem_cgroup_disabled() ||
+		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
+		return 0;
+
+	memcg = get_mem_cgroup_from_current();
+	ret = charge_memcg(folio, memcg, gfp);
+	mem_cgroup_put(memcg);
+
+	return ret;
+}
+
 /**
  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
  * @folio: folio to charge.