
[1/6] mm: kfence: simplify kfence pool initialization

Message ID 20230328095807.7014-2-songmuchun@bytedance.com (mailing list archive)
State New
Series Simplify kfence code

Commit Message

Muchun Song March 28, 2023, 9:58 a.m. UTC
There are three similar loops to initialize the kfence pool. Merge them
into one loop to simplify the code and make it more efficient.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/kfence/core.c | 47 ++++++-----------------------------------------
 1 file changed, 6 insertions(+), 41 deletions(-)
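
For readers unfamiliar with the pool layout the merged loop relies on: the
kfence pool starts with two guard pages, followed by one data page and one
right-redzone (guard) page per object. That is why the loop can advance addr
by 2 * PAGE_SIZE per object and derive the slab directly from
virt_to_page(addr). The stand-alone sketch below (user-space C; PAGE_SIZE and
NUM_OBJECTS are stand-ins for the kernel's PAGE_SIZE and
CONFIG_KFENCE_NUM_OBJECTS, and it is not part of the patch) prints the page
roles that kfence_init_pool() walks under this assumption.

#include <stdio.h>

#define PAGE_SIZE   4096UL   /* stand-in for the kernel's PAGE_SIZE */
#define NUM_OBJECTS 255UL    /* stand-in for CONFIG_KFENCE_NUM_OBJECTS */

int main(void)
{
	unsigned long addr = 0;	/* pretend __kfence_pool starts at address 0 */
	unsigned long i;

	/* The first two pages are guard pages, protected up front. */
	addr += 2 * PAGE_SIZE;

	/* One data page plus one right-redzone page per object. */
	for (i = 0; i < NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE)
		printf("object %3lu: data page %lu, redzone page %lu\n",
		       i, addr / PAGE_SIZE, addr / PAGE_SIZE + 1);
	return 0;
}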

Comments

Marco Elver March 28, 2023, 11:55 a.m. UTC | #1
On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@bytedance.com> wrote:
>
> There are three similar loops to initialize the kfence pool. Merge them
> into one loop to simplify the code and make it more efficient.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>

Reviewed-by: Marco Elver <elver@google.com>

> ---
>  mm/kfence/core.c | 47 ++++++-----------------------------------------
>  1 file changed, 6 insertions(+), 41 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 7d01a2c76e80..de62a84d4830 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -539,35 +539,10 @@ static void rcu_guarded_free(struct rcu_head *h)
>  static unsigned long kfence_init_pool(void)
>  {
>         unsigned long addr = (unsigned long)__kfence_pool;
> -       struct page *pages;
>         int i;
>
>         if (!arch_kfence_init_pool())
>                 return addr;
> -
> -       pages = virt_to_page(__kfence_pool);
> -
> -       /*
> -        * Set up object pages: they must have PG_slab set, to avoid freeing
> -        * these as real pages.
> -        *
> -        * We also want to avoid inserting kfence_free() in the kfree()
> -        * fast-path in SLUB, and therefore need to ensure kfree() correctly
> -        * enters __slab_free() slow-path.
> -        */
> -       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> -               struct slab *slab = page_slab(nth_page(pages, i));
> -
> -               if (!i || (i % 2))
> -                       continue;
> -
> -               __folio_set_slab(slab_folio(slab));
> -#ifdef CONFIG_MEMCG
> -               slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
> -                                  MEMCG_DATA_OBJCGS;
> -#endif
> -       }
> -
>         /*
>          * Protect the first 2 pages. The first page is mostly unnecessary, and
>          * merely serves as an extended guard page. However, adding one
> @@ -581,8 +556,9 @@ static unsigned long kfence_init_pool(void)
>                 addr += PAGE_SIZE;
>         }
>
> -       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
>                 struct kfence_metadata *meta = &kfence_metadata[i];
> +               struct slab *slab = page_slab(virt_to_page(addr));
>
>                 /* Initialize metadata. */
>                 INIT_LIST_HEAD(&meta->list);
> @@ -593,26 +569,15 @@ static unsigned long kfence_init_pool(void)
>
>                 /* Protect the right redzone. */
>                 if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
> -                       goto reset_slab;
> -
> -               addr += 2 * PAGE_SIZE;
> -       }
> -
> -       return 0;
> -
> -reset_slab:
> -       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> -               struct slab *slab = page_slab(nth_page(pages, i));
> +                       return addr;
>
> -               if (!i || (i % 2))
> -                       continue;
> +               __folio_set_slab(slab_folio(slab));
>  #ifdef CONFIG_MEMCG
> -               slab->memcg_data = 0;
> +               slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
>  #endif
> -               __folio_clear_slab(slab_folio(slab));
>         }
>
> -       return addr;
> +       return 0;
>  }
>
>  static bool __init kfence_init_pool_early(void)
> --
> 2.11.0
>
Marco Elver March 28, 2023, 12:05 p.m. UTC | #2
On Tue, 28 Mar 2023 at 13:55, Marco Elver <elver@google.com> wrote:
>
> On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@bytedance.com> wrote:
> >
> > There are three similar loops to initialize the kfence pool. Merge them
> > into one loop to simplify the code and make it more efficient.
> >
> > Signed-off-by: Muchun Song <songmuchun@bytedance.com>
>
> Reviewed-by: Marco Elver <elver@google.com>
>
> > ---
> >  mm/kfence/core.c | 47 ++++++-----------------------------------------
> >  1 file changed, 6 insertions(+), 41 deletions(-)
> >
> > diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> > index 7d01a2c76e80..de62a84d4830 100644
> > --- a/mm/kfence/core.c
> > +++ b/mm/kfence/core.c
> > @@ -539,35 +539,10 @@ static void rcu_guarded_free(struct rcu_head *h)
> >  static unsigned long kfence_init_pool(void)
> >  {
> >         unsigned long addr = (unsigned long)__kfence_pool;
> > -       struct page *pages;
> >         int i;
> >
> >         if (!arch_kfence_init_pool())
> >                 return addr;
> > -
> > -       pages = virt_to_page(__kfence_pool);
> > -
> > -       /*
> > -        * Set up object pages: they must have PG_slab set, to avoid freeing
> > -        * these as real pages.
> > -        *
> > -        * We also want to avoid inserting kfence_free() in the kfree()
> > -        * fast-path in SLUB, and therefore need to ensure kfree() correctly
> > -        * enters __slab_free() slow-path.
> > -        */

Actually: can you retain this comment somewhere?

> > -       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> > -               struct slab *slab = page_slab(nth_page(pages, i));
> > -
> > -               if (!i || (i % 2))
> > -                       continue;
> > -
> > -               __folio_set_slab(slab_folio(slab));
> > -#ifdef CONFIG_MEMCG
> > -               slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
> > -                                  MEMCG_DATA_OBJCGS;
> > -#endif
> > -       }
> > -
> >         /*
> >          * Protect the first 2 pages. The first page is mostly unnecessary, and
> >          * merely serves as an extended guard page. However, adding one
> > @@ -581,8 +556,9 @@ static unsigned long kfence_init_pool(void)
> >                 addr += PAGE_SIZE;
> >         }
> >
> > -       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> > +       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
> >                 struct kfence_metadata *meta = &kfence_metadata[i];
> > +               struct slab *slab = page_slab(virt_to_page(addr));
> >
> >                 /* Initialize metadata. */
> >                 INIT_LIST_HEAD(&meta->list);
> > @@ -593,26 +569,15 @@ static unsigned long kfence_init_pool(void)
> >
> >                 /* Protect the right redzone. */
> >                 if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
> > -                       goto reset_slab;
> > -
> > -               addr += 2 * PAGE_SIZE;
> > -       }
> > -
> > -       return 0;
> > -
> > -reset_slab:
> > -       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> > -               struct slab *slab = page_slab(nth_page(pages, i));
> > +                       return addr;
> >
> > -               if (!i || (i % 2))
> > -                       continue;
> > +               __folio_set_slab(slab_folio(slab));
> >  #ifdef CONFIG_MEMCG
> > -               slab->memcg_data = 0;
> > +               slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
> >  #endif
> > -               __folio_clear_slab(slab_folio(slab));
> >         }
> >
> > -       return addr;
> > +       return 0;
> >  }
> >
> >  static bool __init kfence_init_pool_early(void)
> > --
> > 2.11.0
> >
Muchun Song March 28, 2023, 12:53 p.m. UTC | #3
> On Mar 28, 2023, at 20:05, Marco Elver <elver@google.com> wrote:
> 
> On Tue, 28 Mar 2023 at 13:55, Marco Elver <elver@google.com> wrote:
>> 
>> On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@bytedance.com> wrote:
>>> 
>>> There are three similar loops to initialize the kfence pool. Merge them
>>> into one loop to simplify the code and make it more efficient.
>>> 
>>> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
>> 
>> Reviewed-by: Marco Elver <elver@google.com>
>> 
>>> ---
>>> mm/kfence/core.c | 47 ++++++-----------------------------------------
>>> 1 file changed, 6 insertions(+), 41 deletions(-)
>>> 
>>> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
>>> index 7d01a2c76e80..de62a84d4830 100644
>>> --- a/mm/kfence/core.c
>>> +++ b/mm/kfence/core.c
>>> @@ -539,35 +539,10 @@ static void rcu_guarded_free(struct rcu_head *h)
>>> static unsigned long kfence_init_pool(void)
>>> {
>>>        unsigned long addr = (unsigned long)__kfence_pool;
>>> -       struct page *pages;
>>>        int i;
>>> 
>>>        if (!arch_kfence_init_pool())
>>>                return addr;
>>> -
>>> -       pages = virt_to_page(__kfence_pool);
>>> -
>>> -       /*
>>> -        * Set up object pages: they must have PG_slab set, to avoid freeing
>>> -        * these as real pages.
>>> -        *
>>> -        * We also want to avoid inserting kfence_free() in the kfree()
>>> -        * fast-path in SLUB, and therefore need to ensure kfree() correctly
>>> -        * enters __slab_free() slow-path.
>>> -        */
> 
> Actually: can you retain this comment somewhere?

Sure, I'll move it to the right place.

Thanks.
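
One plausible way to retain the comment (a sketch only; the placement in the
follow-up version may differ) is to keep it next to the PG_slab setup inside
the merged loop, e.g.:

		/*
		 * Object pages must have PG_slab set, to avoid freeing these
		 * as real pages.
		 *
		 * We also want to avoid inserting kfence_free() in the kfree()
		 * fast-path in SLUB, and therefore need to ensure kfree()
		 * correctly enters __slab_free() slow-path.
		 */
		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
#endif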

Patch

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 7d01a2c76e80..de62a84d4830 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -539,35 +539,10 @@  static void rcu_guarded_free(struct rcu_head *h)
 static unsigned long kfence_init_pool(void)
 {
 	unsigned long addr = (unsigned long)__kfence_pool;
-	struct page *pages;
 	int i;
 
 	if (!arch_kfence_init_pool())
 		return addr;
-
-	pages = virt_to_page(__kfence_pool);
-
-	/*
-	 * Set up object pages: they must have PG_slab set, to avoid freeing
-	 * these as real pages.
-	 *
-	 * We also want to avoid inserting kfence_free() in the kfree()
-	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
-	 * enters __slab_free() slow-path.
-	 */
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
-
-		if (!i || (i % 2))
-			continue;
-
-		__folio_set_slab(slab_folio(slab));
-#ifdef CONFIG_MEMCG
-		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
-				   MEMCG_DATA_OBJCGS;
-#endif
-	}
-
 	/*
 	 * Protect the first 2 pages. The first page is mostly unnecessary, and
 	 * merely serves as an extended guard page. However, adding one
@@ -581,8 +556,9 @@  static unsigned long kfence_init_pool(void)
 		addr += PAGE_SIZE;
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
+		struct slab *slab = page_slab(virt_to_page(addr));
 
 		/* Initialize metadata. */
 		INIT_LIST_HEAD(&meta->list);
@@ -593,26 +569,15 @@  static unsigned long kfence_init_pool(void)
 
 		/* Protect the right redzone. */
 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-			goto reset_slab;
-
-		addr += 2 * PAGE_SIZE;
-	}
-
-	return 0;
-
-reset_slab:
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+			return addr;
 
-		if (!i || (i % 2))
-			continue;
+		__folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
-		slab->memcg_data = 0;
+		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
 #endif
-		__folio_clear_slab(slab_folio(slab));
 	}
 
-	return addr;
+	return 0;
 }
 
 static bool __init kfence_init_pool_early(void)