
hugetlb: fix memory leak associated with vma_lock structure

Message ID 20221018233601.282381-1-mike.kravetz@oracle.com (mailing list archive)
State New
Series hugetlb: fix memory leak associated with vma_lock structure

Commit Message

Mike Kravetz Oct. 18, 2022, 11:36 p.m. UTC
The hugetlb vma_lock structure hangs off the vm_private_data pointer
of sharable hugetlb vmas.  The structure is vma specific and cannot
be shared between vmas.  At fork and various other times, vmas are
duplicated via vm_area_dup().  When this happens, the pointer in the
newly created vma must be cleared and the structure reallocated.  Two
hugetlb specific routines deal with this: hugetlb_dup_vma_private and
hugetlb_vm_op_open.  Both routines are called for newly created vmas.
hugetlb_dup_vma_private would always clear the pointer and
hugetlb_vm_op_open would allocate the new vma_lock structure.  This did
not work for the calling sequence pointed out in [1]:
  move_vma
    copy_vma
      new_vma = vm_area_dup(vma);
      new_vma->vm_ops->open(new_vma); --> new_vma has its own vma lock.
    is_vm_hugetlb_page(vma)
      clear_vma_resv_huge_pages
        hugetlb_dup_vma_private --> vma->vm_private_data is set to NULL
When hugetlb_dup_vma_private then clears the pointer, the associated
vma_lock structure is leaked.
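
The move_vma() path above is reachable from userspace via mremap(2).
A hypothetical repro sketch (the mapping size, flags, and destination
address are illustrative and assume 2MB huge pages are available; it
is not taken from the report in [1]):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define LEN	(2UL * 1024 * 1024)	/* one 2MB huge page */

int main(void)
{
	void *p, *q;

	/* Shared hugetlb mapping: vm_private_data holds its vma_lock. */
	p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Moving the mapping drives move_vma() -> copy_vma().  Before
	 * this fix, clearing vm_private_data in hugetlb_dup_vma_private
	 * leaked the old vma's vma_lock structure. */
	q = mremap(p, LEN, LEN, MREMAP_MAYMOVE | MREMAP_FIXED,
		   (char *)p + 2 * LEN);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	munmap(q, LEN);
	return 0;
}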

The vma_lock structure contains a pointer to the associated vma.  This
information can be used in hugetlb_dup_vma_private and hugetlb_vm_op_open
to ensure we only clear the vm_private_data of newly created (copied)
vmas.  In such cases, the vma->vma_lock->vma field will not point to the
vma.
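
For reference, the structure in question looks roughly like the
following (field layout per the hugetlb sources of this era; shown
here for illustration only):

struct hugetlb_vma_lock {
	struct kref refs;		/* lifetime of the lock itself */
	struct rw_semaphore rw_sema;	/* the per-vma semaphore */
	struct vm_area_struct *vma;	/* back-pointer: the vma this
					 * lock was allocated for */
};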

Update hugetlb_dup_vma_private and hugetlb_vm_op_open to not clear
vm_private_data if vma->vma_lock->vma == vma.  Also, log a warning if
hugetlb_vm_op_open ever encounters the case where vma_lock has already
been correctly allocated for the vma.
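
In both routines the test boils down to the same ownership check;
restated here as a hypothetical helper for clarity (not part of the
patch):

/* Hypothetical helper: the lock hanging off vm_private_data belongs
 * to this vma only if its back-pointer matches.  Only then must the
 * pointer be preserved rather than cleared. */
static bool vma_lock_belongs_to_vma(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	return vma_lock && vma_lock->vma == vma;
}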

[1] https://lore.kernel.org/linux-mm/5154292a-4c55-28cd-0935-82441e512fc3@huawei.com/

Fixes: 131a79b474e9 ("hugetlb: fix vma lock handling during split vma and range unmapping")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

Comments

Miaohe Lin Oct. 19, 2022, 8:16 a.m. UTC | #1
On 2022/10/19 7:36, Mike Kravetz wrote:
[snip]
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 02f781624fce..7f74cbff6619 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
>  	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
>  	/*
>  	 * Clear vm_private_data
> +	 * - For shared mappings this is a per-vma semaphore that may be
> +	 *   allocated in a subsequent call to hugetlb_vm_op_open.
> +	 *   Before clearing, make sure pointer is not associated with vma
> +	 *   as this will leak the structure.  This is the case when called
> +	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
> +	 *   been called to allocate a new structure.
>  	 * - For MAP_PRIVATE mappings, this is the reserve map which does
>  	 *   not apply to children.  Faults generated by the children are
>  	 *   not guaranteed to succeed, even if read-only.
> -	 * - For shared mappings this is a per-vma semaphore that may be
> -	 *   allocated in a subsequent call to hugetlb_vm_op_open.
>  	 */
> -	vma->vm_private_data = (void *)0;
> -	if (!(vma->vm_flags & VM_MAYSHARE))
> -		return;
> +	if (vma->vm_flags & VM_MAYSHARE) {
> +		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
> +
> +		if (vma_lock && vma_lock->vma != vma)
> +			vma->vm_private_data = NULL;
> +	} else
> +		vma->vm_private_data = NULL;
>  }
>  
>  /*
> @@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
>  	struct resv_map *resv = vma_resv_map(vma);
>  
>  	/*
> +	 * HPAGE_RESV_OWNER indicates a private mapping.
>  	 * This new VMA should share its siblings reservation map if present.
>  	 * The VMA will only ever have a valid reservation map pointer where
>  	 * it is being copied for another still existing VMA.  As that VMA
> @@ -4616,10 +4625,18 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
>  	/*
>  	 * vma_lock structure for sharable mappings is vma specific.
>  	 * Clear old pointer (if copied via vm_area_dup) and create new.
> +	 * Before clearing, make sure vma_lock is not for this vma.
>  	 */
>  	if (vma->vm_flags & VM_MAYSHARE) {
> -		vma->vm_private_data = NULL;
> -		hugetlb_vma_lock_alloc(vma);
> +		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
> +
> +		if (vma_lock) {

Thanks Mike. It seems the case of "vma_lock == NULL" is missed, i.e. if vma->vm_private_data == NULL,
hugetlb_vm_op_open won't allocate a new vma lock?

Others look good to me.

Thanks,
Miaohe Lin

> +			if (vma_lock->vma != vma) {
> +				vma->vm_private_data = NULL;
> +				hugetlb_vma_lock_alloc(vma);
> +			} else
> +				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
> +		}
>  	}
>  }
>  
>
Mike Kravetz Oct. 19, 2022, 7:57 p.m. UTC | #2
On 10/19/22 16:16, Miaohe Lin wrote:
> On 2022/10/19 7:36, Mike Kravetz wrote:
[snip]
> > +		if (vma_lock) {
> 
> Thanks Mike. It seems the case of "vma_lock == NULL" is missed, i.e. if vma->vm_private_data == NULL,
> hugetlb_vm_op_open won't allocate a new vma lock?

Thank you so much!  Yes, you are correct.

Your review comments have prevented numerous bugs and led to better code.

I will send v2 shortly.
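
For context, a minimal sketch of how the second hunk might look with
the missing NULL case handled (assuming, per the review comment above,
that a vma without a lock should simply fall through to allocation;
the actual v2 may differ):

	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock) {
			if (vma_lock->vma != vma) {
				/* Pointer copied from another vma via
				 * vm_area_dup(): clear it and allocate
				 * this vma's own lock. */
				vma->vm_private_data = NULL;
				hugetlb_vma_lock_alloc(vma);
			} else
				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
		} else {
			/* No lock present at all: allocate one. */
			hugetlb_vma_lock_alloc(vma);
		}
	}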

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 02f781624fce..7f74cbff6619 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	/*
 	 * Clear vm_private_data
+	 * - For shared mappings this is a per-vma semaphore that may be
+	 *   allocated in a subsequent call to hugetlb_vm_op_open.
+	 *   Before clearing, make sure pointer is not associated with vma
+	 *   as this will leak the structure.  This is the case when called
+	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
+	 *   been called to allocate a new structure.
 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
 	 *   not apply to children.  Faults generated by the children are
 	 *   not guaranteed to succeed, even if read-only.
-	 * - For shared mappings this is a per-vma semaphore that may be
-	 *   allocated in a subsequent call to hugetlb_vm_op_open.
 	 */
-	vma->vm_private_data = (void *)0;
-	if (!(vma->vm_flags & VM_MAYSHARE))
-		return;
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock && vma_lock->vma != vma)
+			vma->vm_private_data = NULL;
+	} else
+		vma->vm_private_data = NULL;
 }
 
 /*
@@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	struct resv_map *resv = vma_resv_map(vma);
 
 	/*
+	 * HPAGE_RESV_OWNER indicates a private mapping.
 	 * This new VMA should share its siblings reservation map if present.
 	 * The VMA will only ever have a valid reservation map pointer where
 	 * it is being copied for another still existing VMA.  As that VMA
@@ -4616,10 +4625,18 @@  static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	/*
 	 * vma_lock structure for sharable mappings is vma specific.
 	 * Clear old pointer (if copied via vm_area_dup) and create new.
+	 * Before clearing, make sure vma_lock is not for this vma.
 	 */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		vma->vm_private_data = NULL;
-		hugetlb_vma_lock_alloc(vma);
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock) {
+			if (vma_lock->vma != vma) {
+				vma->vm_private_data = NULL;
+				hugetlb_vma_lock_alloc(vma);
+			} else
+				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
+		}
 	}
 }