
z3fold: fix memory leak in kmem cache

Message ID: 20190917185352.44cf285d3ebd9e64548de5de@gmail.com
State: New, archived
Series: z3fold: fix memory leak in kmem cache

Commit Message

Vitaly Wool Sept. 17, 2019, 3:53 p.m. UTC
Currently there is a leak in init_z3fold_page(): it allocates handles
from the kmem cache even for headless pages, but those handles are then
never used and never freed, so eventually the kmem cache may get
exhausted. This patch provides a fix for that.

Reported-by: Markus Linnala <markus.linnala@gmail.com>
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
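
For background on why a headless page has no use for these handles: in the
z3fold design introduced by commit 7c2b8baa61fe (see the bisect result later
in this thread), non-headless pages get abstract handles (3 per page) that
point into a per-page z3fold_buddy_slots object, while a headless page's
handle is just the page address itself. A condensed sketch of the handle
encoding, paraphrased from the v5.3-era mm/z3fold.c (helper and field names
are approximate, not a verbatim copy):

	/* Condensed sketch of mm/z3fold.c handle encoding; names approximate. */
	static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
	{
		struct z3fold_buddy_slots *slots;
		unsigned long h = (unsigned long)zhdr;
		int idx;

		/*
		 * A headless page holds a single object starting at the page
		 * boundary: its handle is the page address with a marker bit
		 * set. zhdr->slots is never consulted on this path, so a
		 * slots object allocated for a headless page could never be
		 * reached again, let alone freed -- exactly the leak fixed here.
		 */
		if (bud == HEADLESS)
			return h | (1 << PAGE_HEADLESS);

		/*
		 * Non-headless: record the encoded address in one of the three
		 * slots and hand out a pointer to that slot, so the object can
		 * later be relocated by rewriting the slot.
		 */
		idx = __idx(zhdr, bud);
		h += idx;
		slots = zhdr->slots;
		slots->slot[idx] = h;
		return (unsigned long)&slots->slot[idx];
	}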

Comments

Vlastimil Babka Sept. 18, 2019, 7:31 a.m. UTC | #1
On 9/17/19 5:53 PM, Vitaly Wool wrote:
> Currently there is a leak in init_z3fold_page() -- it allocates
> handles from kmem cache even for headless pages, but then they are
> never used and never freed, so eventually kmem cache may get
> exhausted. This patch provides a fix for that.
> 
> Reported-by: Markus Linnala <markus.linnala@gmail.com>
> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>

Can a Fixes: commit be pinpointed, and CC stable added?

> [...]

Markus Linnala Sept. 19, 2019, 6:47 a.m. UTC | #2
On Wed, 18 Sept 2019 at 10:35, Vlastimil Babka (vbabka@suse.cz) wrote:

> [...]

I have a somewhat extensive test suite for this issue. Effectively:

# Keep near-constant memory pressure: nproc+2 stress VM workers, each sized
# to the currently available memory divided by the number of CPUs.
for tmout in 10 10 10 10 10 10 10 10 10 10 10 10 20 20 20 20 20 20 30 30 30 30 900; do
    stress --vm $(($(nproc)+2)) \
           --vm-bytes $(($(awk '/MemAvail/{print $2}' /proc/meminfo)*1024/$(nproc))) \
           --timeout "$tmout"
done

and then in another session run:

# In another session: repeatedly grow a large bash associative array to push
# the stress workers' memory into swap, then drop it and let swap drain.
while true; do
    bash -c '
        declare -A arr
        b=()
        for a in $(seq 7000); do
            b+=($a)
            arr["$a"]="${b[@]}"
        done
        sleep 60
    '
    sleep 20
done

This keeps the test machine under near-constant memory pressure from stress,
while the other script repeatedly pushes memory into swap and releases it
again. On top of that there is a fair amount of tooling to manage the virtual
machine when it gets stuck, to update kernels, and to collect and analyze logs.
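
While the pressure test runs, the leak itself shows up as unbounded growth of
the z3fold handle kmem cache. A minimal way to watch for it, assuming the
cache is registered under the name z3fold_handle in mm/z3fold.c (verify the
exact name on the kernel under test):

# On an affected kernel the active object count of the handle cache climbs
# steadily even though the number of stored pages stays roughly flat.
watch -n 10 'grep z3fold /proc/slabinfo'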

I run the tests in a virtual machine (Fedora 30) with 4 vCPUs and 1 GiB of memory.

There were still some issues with this patch: I ran my test suite for about
72 hours and hit 5 failures.

Vitaly sent me a patch with additional lines handling the PAGE_CLAIMED bit.
After about 65 hours of running my test suite with it, there have been no
issues so far.

When I first saw the issues with zswap, I did a git bisect run from v5.1
(good) to v5.3-rc4 (bad), and it landed on this:

commit 7c2b8baa61fe578af905342938ad12f8dbaeae79
Author: Vitaly Wool <...>
Date:   Mon May 13 17:22:49 2019 -0700

    mm/z3fold.c: add structure for buddy handles

    For z3fold to be able to move its pages per request of the memory
    subsystem, it should not use direct object addresses in handles.
    Instead, it will create abstract handles (3 per page) which will
    contain pointers to z3fold objects.  Thus, it will be possible to
    change these pointers when z3fold page is moved.
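
A bisect like that can be automated with git bisect run; a minimal sketch,
where test.sh is a hypothetical wrapper that builds and boots the kernel,
runs the workload above, and exits non-zero when the failure reproduces:

git bisect start v5.3-rc4 v5.1   # bad revision first, then good
git bisect run ./test.sh         # hypothetical build-boot-stress wrapper
git bisect reset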
Vitaly Wool Sept. 19, 2019, 7:59 a.m. UTC | #3
On Wed, Sep 18, 2019 at 9:35 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 9/17/19 5:53 PM, Vitaly Wool wrote:
> > Currently there is a leak in init_z3fold_page() -- it allocates
> > handles from kmem cache even for headless pages, but then they are
> > never used and never freed, so eventually kmem cache may get
> > exhausted. This patch provides a fix for that.
> >
> > Reported-by: Markus Linnala <markus.linnala@gmail.com>
> > Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
>
> Can a Fixes: commit be pinpointed, and CC stable added?

Fixes: 7c2b8baa61fe ("mm/z3fold.c: add structure for buddy handles")

Best regards,
   Vitaly
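
With that, the trailer block on the applied commit would presumably read as
follows; the stable version range is an assumption based on 7c2b8baa61fe
having been merged for v5.2:

Fixes: 7c2b8baa61fe ("mm/z3fold.c: add structure for buddy handles")
Cc: <stable@vger.kernel.org> # v5.2+
Reported-by: Markus Linnala <markus.linnala@gmail.com>
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>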


Patch

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 6397725b5ec6..7dffef2599c3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -301,14 +301,11 @@  static void z3fold_unregister_migration(struct z3fold_pool *pool)
  }
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
-static struct z3fold_header *init_z3fold_page(struct page *page,
+static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
 					struct z3fold_pool *pool, gfp_t gfp)
 {
 	struct z3fold_header *zhdr = page_address(page);
-	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
-
-	if (!slots)
-		return NULL;
+	struct z3fold_buddy_slots *slots;
 
 	INIT_LIST_HEAD(&page->lru);
 	clear_bit(PAGE_HEADLESS, &page->private);
@@ -316,6 +313,12 @@  static struct z3fold_header *init_z3fold_page(struct page *page,
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
 	clear_bit(PAGE_CLAIMED, &page->private);
+	if (headless)
+		return zhdr;
+
+	slots = alloc_slots(pool, gfp);
+	if (!slots)
+		return NULL;
 
 	spin_lock_init(&zhdr->page_lock);
 	kref_init(&zhdr->refcount);
@@ -962,7 +965,7 @@  static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	if (!page)
 		return -ENOMEM;
 
-	zhdr = init_z3fold_page(page, pool, gfp);
+	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
 	if (!zhdr) {
 		__free_page(page);
 		return -ENOMEM;