[1/4] mm: cma: Don't crash on allocation if CMA area can't be activated

Message ID 1414074828-4488-2-git-send-email-laurent.pinchart+renesas@ideasonboard.com (mailing list archive)
State New, archived

Commit Message

Laurent Pinchart Oct. 23, 2014, 2:33 p.m. UTC
If activation of the CMA area fails, its mutex won't be initialized,
leading to an oops at allocation time when trying to lock the mutex. Fix
this by failing allocation if the area hasn't been successfully activated,
and detect that condition by moving the CMA bitmap allocation after page
block reservation completion.

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 mm/cma.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
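
For context, the oops comes from the allocation path locking the per-area
mutex that only a successful activation initializes. A minimal sketch of the
two sides, assuming the pre-patch layout of mm/cma.c (this is not the verbatim
kernel code: the struct is abridged and cma_reserve_pageblocks() is a made-up
placeholder for the real reservation loop):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mutex.h>

/* Abridged: the real struct cma carries a few more fields. */
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

/* Placeholder for the pageblock reservation loop in the real code. */
static bool cma_reserve_pageblocks(struct cma *cma);

static int __init cma_activate_area(struct cma *cma)
{
	if (!cma_reserve_pageblocks(cma))	/* e.g. pageblocks span two zones */
		return -EINVAL;			/* cma->lock is never initialized */

	mutex_init(&cma->lock);
	return 0;
}

struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	struct page *page = NULL;

	if (!cma || !cma->count)		/* passes even if activation failed */
		return NULL;

	mutex_lock(&cma->lock);			/* oops: uninitialized mutex */
	/* ... search cma->bitmap and call alloc_contig_range() ... */
	mutex_unlock(&cma->lock);

	return page;
}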

Comments

Michał Nazarewicz Oct. 23, 2014, 4:53 p.m. UTC | #1
On Thu, Oct 23 2014, Laurent Pinchart wrote:
> If activation of the CMA area fails its mutex won't be initialized,
> leading to an oops at allocation time when trying to lock the mutex. Fix
> this by failing allocation if the area hasn't been successfully activated,
> and detect that condition by moving the CMA bitmap allocation after page
> block reservation completion.
>
> Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>

Cc: <stable@vger.kernel.org>  # v3.17
Acked-by: Michal Nazarewicz <mina86@mina86.com>

As a matter of fact, this bug is present in kernels earlier than 3.17, but in
3.17 the code was moved from drivers/base/dma-contiguous.c to mm/cma.c, so
this might require a separate stable patch.  I can track this down and
prepare a patch if you want.

> ---
>  mm/cma.c | 17 ++++++-----------
>  1 file changed, 6 insertions(+), 11 deletions(-)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index 963bc4a..16c6650 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -93,11 +93,6 @@ static int __init cma_activate_area(struct cma *cma)
>  	unsigned i = cma->count >> pageblock_order;
>  	struct zone *zone;
>  
> -	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> -
> -	if (!cma->bitmap)
> -		return -ENOMEM;
> -
>  	WARN_ON_ONCE(!pfn_valid(pfn));
>  	zone = page_zone(pfn_to_page(pfn));
>  
> @@ -114,17 +109,17 @@ static int __init cma_activate_area(struct cma *cma)
>  			 * to be in the same zone.
>  			 */
>  			if (page_zone(pfn_to_page(pfn)) != zone)
> -				goto err;
> +				return -EINVAL;
>  		}
>  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
>  	} while (--i);
>  
> +	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> +	if (!cma->bitmap)
> +		return -ENOMEM;
> +
>  	mutex_init(&cma->lock);
>  	return 0;
> -
> -err:
> -	kfree(cma->bitmap);
> -	return -EINVAL;
>  }
>  
>  static int __init cma_init_reserved_areas(void)
> @@ -313,7 +308,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
>  	struct page *page = NULL;
>  	int ret;
>  
> -	if (!cma || !cma->count)
> +	if (!cma || !cma->count || !cma->bitmap)
>  		return NULL;
>  
>  	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
> -- 
> 2.0.4
>
Laurent Pinchart Oct. 23, 2014, 11:42 p.m. UTC | #2
Hi Michal,

On Thursday 23 October 2014 18:53:36 Michal Nazarewicz wrote:
> On Thu, Oct 23 2014, Laurent Pinchart wrote:
> > If activation of the CMA area fails its mutex won't be initialized,
> > leading to an oops at allocation time when trying to lock the mutex. Fix
> > this by failing allocation if the area hasn't been successfully activated,
> > and detect that condition by moving the CMA bitmap allocation after page
> > block reservation completion.
> > 
> > Signed-off-by: Laurent Pinchart
> > <laurent.pinchart+renesas@ideasonboard.com>
> 
> Cc: <stable@vger.kernel.org>  # v3.17
> Acked-by: Michal Nazarewicz <mina86@mina86.com>
> 
> As a matter of fact, this bug is present in kernels earlier than 3.17, but in
> 3.17 the code was moved from drivers/base/dma-contiguous.c to mm/cma.c, so
> this might require a separate stable patch.  I can track this down and
> prepare a patch if you want.

That could be done, but I'm not sure if it's really worth it. The bug only 
occurs when CMA area activation fails. I've run into that case due to a 
bug introduced in v3.18-rc1, but this shouldn't be the case for older kernel 
versions.

If you think the fix should be backported to stable kernels older than v3.17 
please feel free to cook up a patch.

> > ---
> > 
> >  mm/cma.c | 17 ++++++-----------
> >  1 file changed, 6 insertions(+), 11 deletions(-)
> > 
> > diff --git a/mm/cma.c b/mm/cma.c
> > index 963bc4a..16c6650 100644
> > --- a/mm/cma.c
> > +++ b/mm/cma.c
> > @@ -93,11 +93,6 @@ static int __init cma_activate_area(struct cma *cma)
> >  	unsigned i = cma->count >> pageblock_order;
> >  	struct zone *zone;
> > 
> > -	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> > -
> > -	if (!cma->bitmap)
> > -		return -ENOMEM;
> > -
> >  	WARN_ON_ONCE(!pfn_valid(pfn));
> >  	zone = page_zone(pfn_to_page(pfn));
> > 
> > @@ -114,17 +109,17 @@ static int __init cma_activate_area(struct cma *cma)
> >  			 * to be in the same zone.
> >  			 */
> >  			if (page_zone(pfn_to_page(pfn)) != zone)
> > -				goto err;
> > +				return -EINVAL;
> >  		}
> >  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
> >  	} while (--i);
> > 
> > +	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> > +	if (!cma->bitmap)
> > +		return -ENOMEM;
> > +
> >  	mutex_init(&cma->lock);
> >  	return 0;
> > -
> > -err:
> > -	kfree(cma->bitmap);
> > -	return -EINVAL;
> >  }
> >  
> >  static int __init cma_init_reserved_areas(void)
> > @@ -313,7 +308,7 @@ struct page *cma_alloc(struct cma *cma, int count,
> > unsigned int align)
> >  	struct page *page = NULL;
> >  	int ret;
> > 
> > -	if (!cma || !cma->count)
> > +	if (!cma || !cma->count || !cma->bitmap)
> >  		return NULL;
> >  	
> >  	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
Weijie Yang Oct. 24, 2014, 2:02 a.m. UTC | #3
On Fri, Oct 24, 2014 at 7:42 AM, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
> Hi Michal,
>
> On Thursday 23 October 2014 18:53:36 Michal Nazarewicz wrote:
>> On Thu, Oct 23 2014, Laurent Pinchart wrote:
>> > If activation of the CMA area fails its mutex won't be initialized,
>> > leading to an oops at allocation time when trying to lock the mutex. Fix
>> > this by failing allocation if the area hasn't been successfully activated,
>> > and detect that condition by moving the CMA bitmap allocation after page
>> > block reservation completion.
>> >
>> > Signed-off-by: Laurent Pinchart
>> > <laurent.pinchart+renesas@ideasonboard.com>
>>
>> Cc: <stable@vger.kernel.org>  # v3.17
>> Acked-by: Michal Nazarewicz <mina86@mina86.com>

This patch is good, but how about adding an active field to struct cma and
using cma->active to check whether the area was activated successfully?
I think it would make the code clearer and more readable.
Just my opinion.
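
For illustration, a minimal sketch of that alternative, assuming a new flag is
added to the (abridged) struct cma; the field name and the exact placement of
the assignment are hypothetical, not taken from the posted patch:

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
	bool		active;		/* new: true only after successful activation */
};

static int __init cma_activate_area(struct cma *cma)
{
	/* ... bitmap allocation and pageblock reservation as before ... */

	mutex_init(&cma->lock);
	cma->active = true;		/* mark the area usable */
	return 0;
}

struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	if (!cma || !cma->active)	/* reject allocations from a failed area */
		return NULL;
	/* ... bitmap search and allocation ... */
	return NULL;			/* elided */
}

The posted patch gets the same effect without a new field by treating a NULL
cma->bitmap as "area not activated".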


>> As a matter of fact, this bug is present in kernels earlier than 3.17, but in
>> 3.17 the code was moved from drivers/base/dma-contiguous.c to mm/cma.c, so
>> this might require a separate stable patch.  I can track this down and
>> prepare a patch if you want.
>
> That could be done, but I'm not sure if it's really worth it. The bug only
> occurs when CMA area activation fails. I've run into that case due to a
> bug introduced in v3.18-rc1, but this shouldn't be the case for older kernel
> versions.
>
> If you think the fix should be backported to stable kernels older than v3.17
> please feel free to cook up a patch.
>
>> > ---
>> >
>> >  mm/cma.c | 17 ++++++-----------
>> >  1 file changed, 6 insertions(+), 11 deletions(-)
>> >
>> > diff --git a/mm/cma.c b/mm/cma.c
>> > index 963bc4a..16c6650 100644
>> > --- a/mm/cma.c
>> > +++ b/mm/cma.c
>> > @@ -93,11 +93,6 @@ static int __init cma_activate_area(struct cma *cma)
>> >     unsigned i = cma->count >> pageblock_order;
>> >     struct zone *zone;
>> >
>> > -   cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
>> > -
>> > -   if (!cma->bitmap)
>> > -           return -ENOMEM;
>> > -
>> >     WARN_ON_ONCE(!pfn_valid(pfn));
>> >     zone = page_zone(pfn_to_page(pfn));
>> >
>> > @@ -114,17 +109,17 @@ static int __init cma_activate_area(struct cma *cma)
>> >                      * to be in the same zone.
>> >                      */
>> >                     if (page_zone(pfn_to_page(pfn)) != zone)
>> > -                           goto err;
>> > +                           return -EINVAL;
>> >             }
>> >             init_cma_reserved_pageblock(pfn_to_page(base_pfn));
>> >     } while (--i);
>> >
>> > +   cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
>> > +   if (!cma->bitmap)
>> > +           return -ENOMEM;
>> > +
>> >     mutex_init(&cma->lock);
>> >     return 0;
>> > -
>> > -err:
>> > -   kfree(cma->bitmap);
>> > -   return -EINVAL;
>> >  }
>> >
>> >  static int __init cma_init_reserved_areas(void)
>> > @@ -313,7 +308,7 @@ struct page *cma_alloc(struct cma *cma, int count,
>> > unsigned int align)
>> >     struct page *page = NULL;
>> >     int ret;
>> >
>> > -   if (!cma || !cma->count)
>> > +   if (!cma || !cma->count || !cma->bitmap)
>> >             return NULL;
>> >
>> >     pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
>
> --
> Regards,
>
> Laurent Pinchart
>
Laurent Pinchart Oct. 24, 2014, 9:54 a.m. UTC | #4
Hello,

On Friday 24 October 2014 11:50:14 Joonsoo Kim wrote:
> On Fri, Oct 24, 2014 at 10:02:49AM +0800, Weijie Yang wrote:
> > On Fri, Oct 24, 2014 at 7:42 AM, Laurent Pinchart wrote:
> > > On Thursday 23 October 2014 18:53:36 Michal Nazarewicz wrote:
> > >> On Thu, Oct 23 2014, Laurent Pinchart wrote:
> > >> > If activation of the CMA area fails its mutex won't be initialized,
> > >> > leading to an oops at allocation time when trying to lock the mutex.
> > >> > Fix this by failing allocation if the area hasn't been successfully
> > >> > activated, and detect that condition by moving the CMA bitmap
> > >> > allocation after page block reservation completion.
> > >> > 
> > >> > Signed-off-by: Laurent Pinchart
> > >> > <laurent.pinchart+renesas@ideasonboard.com>
> > >> 
> > >> Cc: <stable@vger.kernel.org>  # v3.17
> > >> Acked-by: Michal Nazarewicz <mina86@mina86.com>
> > 
> > This patch is good, but how about adding an active field to struct cma and
> > using cma->active to check whether the area was activated successfully?
> > I think it would make the code clearer and more readable.
> > Just my opinion.
> 
> Or just setting cma->count to 0 would work fine.

I would prefer setting cma->count to 0 to avoid the extra field. I'll modify 
the patch accordingly.
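
A rough sketch of that direction, keeping the error handling inside
cma_activate_area() (the label and the comment wording are illustrative, not
the final patch):

static int __init cma_activate_area(struct cma *cma)
{
	/*
	 * ... bitmap allocation and pageblock reservation; failures in the
	 * reservation loop jump to the err label below ...
	 */

	mutex_init(&cma->lock);
	return 0;

err:
	/*
	 * Mark the area unusable: the existing !cma->count check in
	 * cma_alloc() then refuses allocations, so no new field or extra
	 * !cma->bitmap test is needed.
	 */
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}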
Michał Nazarewicz Oct. 24, 2014, 4:34 p.m. UTC | #5
> On Thursday 23 October 2014 18:53:36 Michal Nazarewicz wrote:
>> As a matter of fact, this bug is present in kernels earlier than 3.17, but in
>> 3.17 the code was moved from drivers/base/dma-contiguous.c to mm/cma.c, so
>> this might require a separate stable patch.

On Fri, Oct 24 2014, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> That could be done, but I'm not sure if it's really worth it. The bug only
> occurs when CMA area activation fails. I've run into that case due to a
> bug introduced in v3.18-rc1, but this shouldn't be the case for older kernel
> versions.

Fair enough.

Patch

diff --git a/mm/cma.c b/mm/cma.c
index 963bc4a..16c6650 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -93,11 +93,6 @@  static int __init cma_activate_area(struct cma *cma)
 	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
 
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-	if (!cma->bitmap)
-		return -ENOMEM;
-
 	WARN_ON_ONCE(!pfn_valid(pfn));
 	zone = page_zone(pfn_to_page(pfn));
 
@@ -114,17 +109,17 @@  static int __init cma_activate_area(struct cma *cma)
 			 * to be in the same zone.
 			 */
 			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto err;
+				return -EINVAL;
 		}
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!cma->bitmap)
+		return -ENOMEM;
+
 	mutex_init(&cma->lock);
 	return 0;
-
-err:
-	kfree(cma->bitmap);
-	return -EINVAL;
 }
 
 static int __init cma_init_reserved_areas(void)
@@ -313,7 +308,7 @@  struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 	struct page *page = NULL;
 	int ret;
 
-	if (!cma || !cma->count)
+	if (!cma || !cma->count || !cma->bitmap)
 		return NULL;
 
 	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,