
mm/page_alloc: simplify kmem cgroup charge/uncharge code

Message ID 20201207142204.GA18516@rlk (mailing list archive)
State New, archived
Series mm/page_alloc: simplify kmem cgroup charge/uncharge code

Commit Message

Hui Su Dec. 7, 2020, 2:22 p.m. UTC
Since commit 60cd4bcd6238 ("memcg: localize memcg_kmem_enabled()
check"), we have supplied an API so that users don't have to explicitly
check memcg_kmem_enabled().

Signed-off-by: Hui Su <sh_def@163.com>
---
 mm/page_alloc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
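
For reference, the helpers supplied by commit 60cd4bcd6238 take roughly
the following shape (a sketch of the include/linux/memcontrol.h wrappers
of that era, quoted for context rather than part of this patch): the
memcg_kmem_enabled() static-key test is folded into an inline wrapper so
that callers no longer have to spell it out.

	static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
						 int order)
	{
		if (memcg_kmem_enabled())
			return __memcg_kmem_charge_page(page, gfp, order);
		return 0;
	}

	static inline void memcg_kmem_uncharge_page(struct page *page, int order)
	{
		if (memcg_kmem_enabled())
			__memcg_kmem_uncharge_page(page, order);
	}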

Comments

Muchun Song Dec. 7, 2020, 2:42 p.m. UTC | #1
On Mon, Dec 7, 2020 at 10:22 PM Hui Su <sh_def@163.com> wrote:
>
> Since commit 60cd4bcd6238 ("memcg: localize memcg_kmem_enabled()
> check"), we have supplied an API so that users don't have to explicitly
> check memcg_kmem_enabled().
>
> Signed-off-by: Hui Su <sh_def@163.com>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

> ---
>  mm/page_alloc.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index eaa227a479e4..dc990a899ded 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1214,8 +1214,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
>                  * Do not let hwpoison pages hit pcplists/buddy
>                  * Untie memcg state and reset page's owner
>                  */
> -               if (memcg_kmem_enabled() && PageKmemcg(page))
> -                       __memcg_kmem_uncharge_page(page, order);
> +               if (PageKmemcg(page))
> +                       memcg_kmem_uncharge_page(page, order);
>                 reset_page_owner(page, order);
>                 return false;
>         }
> @@ -1244,8 +1244,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
>         }
>         if (PageMappingFlags(page))
>                 page->mapping = NULL;
> -       if (memcg_kmem_enabled() && PageKmemcg(page))
> -               __memcg_kmem_uncharge_page(page, order);
> +       if (PageKmemcg(page))
> +               memcg_kmem_uncharge_page(page, order);
>         if (check_free)
>                 bad += check_free_page(page);
>         if (bad)
> @@ -4965,8 +4965,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>         page = __alloc_pages_slowpath(alloc_mask, order, &ac);
>
>  out:
> -       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
> -           unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
> +       if ((gfp_mask & __GFP_ACCOUNT) && page &&
> +           unlikely(memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
>                 __free_pages(page, order);
>                 page = NULL;
>         }
> --
> 2.29.2
>
>
Shakeel Butt Dec. 7, 2020, 5:28 p.m. UTC | #2
On Mon, Dec 7, 2020 at 6:22 AM Hui Su <sh_def@163.com> wrote:
>
> Since commit 60cd4bcd6238 ("memcg: localize memcg_kmem_enabled()
> check"), we have supplied an API so that users don't have to explicitly
> check memcg_kmem_enabled().
>
> Signed-off-by: Hui Su <sh_def@163.com>
> ---
>  mm/page_alloc.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index eaa227a479e4..dc990a899ded 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1214,8 +1214,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
>                  * Do not let hwpoison pages hit pcplists/buddy
>                  * Untie memcg state and reset page's owner
>                  */
> -               if (memcg_kmem_enabled() && PageKmemcg(page))
> -                       __memcg_kmem_uncharge_page(page, order);
> +               if (PageKmemcg(page))
> +                       memcg_kmem_uncharge_page(page, order);
>                 reset_page_owner(page, order);
>                 return false;
>         }
> @@ -1244,8 +1244,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
>         }
>         if (PageMappingFlags(page))
>                 page->mapping = NULL;
> -       if (memcg_kmem_enabled() && PageKmemcg(page))
> -               __memcg_kmem_uncharge_page(page, order);
> +       if (PageKmemcg(page))
> +               memcg_kmem_uncharge_page(page, order);
>         if (check_free)
>                 bad += check_free_page(page);
>         if (bad)
> @@ -4965,8 +4965,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>         page = __alloc_pages_slowpath(alloc_mask, order, &ac);
>
>  out:
> -       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
> -           unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
> +       if ((gfp_mask & __GFP_ACCOUNT) && page &&
> +           unlikely(memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
>                 __free_pages(page, order);
>                 page = NULL;
>         }


The reason to keep the __memcg_kmem_[un]charge_page functions is that
they are called in a very hot path. Can you please check the performance
impact of your change, and whether the generated code is actually the
same or different?
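
To make the hot-path concern concrete, the two versions differ in which
test runs first on the free path. A side-by-side sketch, reconstructed
from the hunks above:

	/* Before: the static-key test short-circuits first, so with kmem
	 * accounting disabled the page->flags test is never reached. */
	if (memcg_kmem_enabled() && PageKmemcg(page))
		__memcg_kmem_uncharge_page(page, order);

	/* After: PageKmemcg() tests page->flags unconditionally; the
	 * static-key check only happens inside the inline wrapper. */
	if (PageKmemcg(page))
		memcg_kmem_uncharge_page(page, order);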
Shakeel Butt Dec. 8, 2020, 5:12 p.m. UTC | #3
+Michal Hocko

Message starts at https://lkml.kernel.org/r/20201207142204.GA18516@rlk

On Mon, Dec 7, 2020 at 10:08 PM Hui Su <sh_def@163.com> wrote:
>
> On Mon, Dec 07, 2020 at 09:28:46AM -0800, Shakeel Butt wrote:
> > On Mon, Dec 7, 2020 at 6:22 AM Hui Su <sh_def@163.com> wrote:
> >
> > The reason to keep the __memcg_kmem_[un]charge_page functions is that
> > they are called in a very hot path. Can you please check the performance
> > impact of your change, and whether the generated code is actually the
> > same or different?
>
> Hi, Shakeel:
>
> I objdumped mm/page_alloc.o and compared the two versions; the change
> does alter the assembly. In fact, it changes some code ordering, which I
> personally think won't have an impact on performance. And I ran the LTP
> mm and container tests, and nothing seems abnormal.

Did you run the tests in a memcg? The change is behind the kmem
accounting static key, which is enabled for subcontainers.

>
> BUT I still want to check whether this change has a negative impact on
> performance, since the changed code is called in a very hot path as you
> said, AND sadly I did not find a way to quantify the impact on
> performance. Can you give me some suggestions on how to quantify the
> performance, or some tool?
>

At least I think we can try a simple page allocation in a loop,
i.e. alloc_page(GFP_KERNEL_ACCOUNT). I will try to think of an existing
benchmark which exercises this code path.

Michal, do you have any suggestions?
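
A minimal module along the lines Shakeel suggests might look as follows.
This is an illustrative sketch only: the module name, iteration count,
and ktime-based timing are assumptions rather than anything proposed in
the thread, and per the note above it has to run inside a memcg with
kmem accounting enabled for the charge path to be exercised at all.

	#include <linux/module.h>
	#include <linux/gfp.h>
	#include <linux/ktime.h>

	static int __init alloc_bench_init(void)
	{
		const unsigned long iters = 1000000;
		ktime_t start, end;
		unsigned long i;

		start = ktime_get();
		for (i = 0; i < iters; i++) {
			/* __GFP_ACCOUNT routes the allocation through the
			 * memcg_kmem_charge_page() path under discussion. */
			struct page *page = alloc_page(GFP_KERNEL_ACCOUNT);

			if (page)
				__free_pages(page, 0);
		}
		end = ktime_get();

		pr_info("alloc_bench: %lu iterations in %lld ns\n",
			iters, ktime_to_ns(ktime_sub(end, start)));
		return 0;
	}

	static void __exit alloc_bench_exit(void)
	{
	}

	module_init(alloc_bench_init);
	module_exit(alloc_bench_exit);
	MODULE_LICENSE("GPL");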
Michal Hocko Dec. 9, 2020, 4:29 p.m. UTC | #4
On Tue 08-12-20 09:12:23, Shakeel Butt wrote:
> +Michal Hocko
> 
> Message starts at https://lkml.kernel.org/r/20201207142204.GA18516@rlk
> 
> On Mon, Dec 7, 2020 at 10:08 PM Hui Su <sh_def@163.com> wrote:
> >
> > On Mon, Dec 07, 2020 at 09:28:46AM -0800, Shakeel Butt wrote:
> > > On Mon, Dec 7, 2020 at 6:22 AM Hui Su <sh_def@163.com> wrote:
> > >
> > > The reason to keep the __memcg_kmem_[un]charge_page functions is that
> > > they are called in a very hot path. Can you please check the performance
> > > impact of your change, and whether the generated code is actually the
> > > same or different?
> >
> > Hi, Shakeel:
> >
> > I objdumped mm/page_alloc.o and compared the two versions; the change
> > does alter the assembly. In fact, it changes some code ordering, which I
> > personally think won't have an impact on performance. And I ran the LTP
> > mm and container tests, and nothing seems abnormal.
> 
> Did you run the tests in a memcg? The change is behind the kmem
> accounting static key, which is enabled for subcontainers.
> 
> >
> > BUT I still want to check whether this change has a negative impact on
> > performance, since the changed code is called in a very hot path as you
> > said, AND sadly I did not find a way to quantify the impact on
> > performance. Can you give me some suggestions on how to quantify the
> > performance, or some tool?
> >
> 
> > At least I think we can try a simple page allocation in a loop,
> > i.e. alloc_page(GFP_KERNEL_ACCOUNT). I will try to think of an existing
> > benchmark which exercises this code path.
> 
> Michal, do you have any suggestions?

I have to say I do not see any big benefit from the patch, and it alters
a real hot path to check for the flag even in cases where kmem
accounting is not enabled, unless I am misreading the code.
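
For context, the static key Michal refers to is memcg_kmem_enabled_key,
and the enabled() test compiles down to a runtime-patched branch. A
rough sketch of the helper as it looked in include/linux/memcontrol.h at
the time (quoted for context, not part of the patch):

	extern struct static_key_false memcg_kmem_enabled_key;

	static inline bool memcg_kmem_enabled(void)
	{
		/* With jump labels, this compiles to a runtime-patched nop
		 * or unconditional jump instead of a load-and-compare, which
		 * is why testing it before PageKmemcg() keeps the common
		 * no-kmem-accounting case essentially free. */
		return static_branch_likely(&memcg_kmem_enabled_key);
	}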
Shakeel Butt Dec. 9, 2020, 6:15 p.m. UTC | #5
On Wed, Dec 9, 2020 at 8:29 AM Michal Hocko <mhocko@suse.com> wrote:
>
> On Tue 08-12-20 09:12:23, Shakeel Butt wrote:
> > +Michal Hocko
> >
> > Message starts at https://lkml.kernel.org/r/20201207142204.GA18516@rlk
> >
> > On Mon, Dec 7, 2020 at 10:08 PM Hui Su <sh_def@163.com> wrote:
> > >
> > > On Mon, Dec 07, 2020 at 09:28:46AM -0800, Shakeel Butt wrote:
> > > > On Mon, Dec 7, 2020 at 6:22 AM Hui Su <sh_def@163.com> wrote:
> > > >
> > > > The reason to keep the __memcg_kmem_[un]charge_page functions is that
> > > > they are called in a very hot path. Can you please check the performance
> > > > impact of your change, and whether the generated code is actually the
> > > > same or different?
> > >
> > > Hi, Shakeel:
> > >
> > > I objdumped mm/page_alloc.o and compared the two versions; the change
> > > does alter the assembly. In fact, it changes some code ordering, which I
> > > personally think won't have an impact on performance. And I ran the LTP
> > > mm and container tests, and nothing seems abnormal.
> >
> > Did you run the tests in a memcg? The change is behind the kmem
> > accounting static key, which is enabled for subcontainers.
> >
> > >
> > > BUT I still want to check whether this change has a negative impact on
> > > performance, since the changed code is called in a very hot path as you
> > > said, AND sadly I did not find a way to quantify the impact on
> > > performance. Can you give me some suggestions on how to quantify the
> > > performance, or some tool?
> > >
> >
> > At least I think we can try a simple page allocation in a loop,
> > i.e. alloc_page(GFP_KERNEL_ACCOUNT). I will try to think of an existing
> > benchmark which exercises this code path.
> >
> > Michal, do you have any suggestions?
>
> I have to say I do not see any big benefit from the patch, and it alters
> a real hot path to check for the flag even in cases where kmem
> accounting is not enabled, unless I am misreading the code.
>

Yes, you are right, unless a super-intelligent compiler rearranges
the checks and puts the static key check in front to optimize the
non-kmem-accounting mode.
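
In other words, what the compiler sees after inlining the wrapper is the
nesting below (a sketch, not generated code), with the static-key test
demoted to second position unless it decides to reorder:

	if (PageKmemcg(page)) {			/* page->flags test now runs first */
		if (memcg_kmem_enabled())	/* static branch demoted to second */
			__memcg_kmem_uncharge_page(page, order);
	}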

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa227a479e4..dc990a899ded 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1214,8 +1214,8 @@  static __always_inline bool free_pages_prepare(struct page *page,
 		 * Do not let hwpoison pages hit pcplists/buddy
 		 * Untie memcg state and reset page's owner
 		 */
-		if (memcg_kmem_enabled() && PageKmemcg(page))
-			__memcg_kmem_uncharge_page(page, order);
+		if (PageKmemcg(page))
+			memcg_kmem_uncharge_page(page, order);
 		reset_page_owner(page, order);
 		return false;
 	}
@@ -1244,8 +1244,8 @@  static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
-	if (memcg_kmem_enabled() && PageKmemcg(page))
-		__memcg_kmem_uncharge_page(page, order);
+	if (PageKmemcg(page))
+		memcg_kmem_uncharge_page(page, order);
 	if (check_free)
 		bad += check_free_page(page);
 	if (bad)
@@ -4965,8 +4965,8 @@  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
+	if ((gfp_mask & __GFP_ACCOUNT) && page &&
+	    unlikely(memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
 		__free_pages(page, order);
 		page = NULL;
 	}