
mm: change size_t to unsigned int for cma_alloc

Message ID 20210329182553.3129234-1-minchan@kernel.org (mailing list archive)
State New, archived
Series mm: change size_t to unsigned int for cma_alloc

Commit Message

Minchan Kim March 29, 2021, 6:25 p.m. UTC
size_t in cma_alloc is confusing since it makes people think
it's a byte count, not a number of pages. Fix it.

Link: https://lore.kernel.org/linux-mm/20210324043434.GP1719932@casper.infradead.org/
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/cma.h | 2 +-
 mm/cma.c            | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)
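
For context on the rename: cma_alloc() takes a count of pages, so a caller
that starts from a byte size has to convert before calling it. A minimal,
hypothetical caller sketch (not part of this patch; the helper name is made
up, and the usual kernel headers providing SZ_1M, PAGE_SHIFT and struct cma
are assumed):

/* Illustrative only -- not from the tree. */
static struct page *alloc_4m_from_cma(struct cma *cma)
{
	size_t bytes = 4 * SZ_1M;                  /* request expressed in bytes */
	unsigned int count = bytes >> PAGE_SHIFT;  /* cma_alloc() wants pages    */

	/* align order 0, warnings enabled (no_warn = false) */
	return cma_alloc(cma, count, 0, false);
}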

Comments

Matthew Wilcox March 29, 2021, 6:44 p.m. UTC | #1
On Mon, Mar 29, 2021 at 11:25:53AM -0700, Minchan Kim wrote:
> size_t in cma_alloc is confusing since it makes people think
> it's a byte count, not a number of pages. Fix it.

I think it has to be unsigned long.

67a2e213e7e937c41c52ab5bc46bf3f4de469f6e
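
Presumably the point is that a page count stored in 32 bits wraps once an
area reaches 16 TiB with 4 KiB pages; a standalone userspace sketch of the
truncation (page size hard-coded purely for illustration, nothing
kernel-specific):

#include <stdio.h>

int main(void)
{
	unsigned long long bytes = 1ULL << 45;      /* a 32 TiB region         */
	unsigned long long pages = bytes >> 12;     /* in 4 KiB pages: 2^33    */
	unsigned int as_uint = (unsigned int)pages; /* silently truncates to 0 */

	printf("pages = %llu, as unsigned int = %u\n", pages, as_uint);
	return 0;
}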

> Link: https://lore.kernel.org/linux-mm/20210324043434.GP1719932@casper.infradead.org/
> Cc: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  include/linux/cma.h | 2 +-
>  mm/cma.c            | 8 ++++----
>  2 files changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index 217999c8a762..a873edc20ca2 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -44,7 +44,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>  					unsigned int order_per_bit,
>  					const char *name,
>  					struct cma **res_cma);
> -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> +extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
>  			      bool no_warn);
>  extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
>  
> diff --git a/mm/cma.c b/mm/cma.c
> index 08c45157911a..24dc01e26d45 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -423,21 +423,21 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
>   * This function allocates part of contiguous memory on specific
>   * contiguous memory area.
>   */
> -struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> +struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
>  		       bool no_warn)
>  {
>  	unsigned long mask, offset;
>  	unsigned long pfn = -1;
>  	unsigned long start = 0;
>  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
> -	size_t i;
> +	unsigned int i;
>  	struct page *page = NULL;
>  	int ret = -ENOMEM;
>  
>  	if (!cma || !cma->count || !cma->bitmap)
>  		goto out;
>  
> -	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
> +	pr_debug("%s(cma %p, count %u, align %d)\n", __func__, (void *)cma,
>  		 count, align);
>  
>  	if (!count)
> @@ -500,7 +500,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
>  	}
>  
>  	if (ret && !no_warn) {
> -		pr_err_ratelimited("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
> +		pr_err_ratelimited("%s: %s: alloc failed, req-size: %u pages, ret: %d\n",
>  				   __func__, cma->name, count, ret);
>  		cma_debug_show_areas(cma);
>  	}
> -- 
> 2.31.0.291.g576ba9dcdaf-goog
> 
>
Minchan Kim March 29, 2021, 8:12 p.m. UTC | #2
On Mon, Mar 29, 2021 at 07:44:31PM +0100, Matthew Wilcox wrote:
> On Mon, Mar 29, 2021 at 11:25:53AM -0700, Minchan Kim wrote:
> > size_t in cma_alloc is confusing since it makes people think
> > it's a byte count, not a number of pages. Fix it.
> 
> I think it has to be unsigned long.
> 
> 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e

Thanks for the pointer. I wanted to keep the change as small as possible.
That commit leads me to change cma_release, trace_cma_alloc,
cma_clear_bitmap and free_contig_range as well. (Not sure
whether there are more; will check.)

Cc'ing david@redhat.com for the upcoming change to free_contig_range.

> 
> > Link: https://lore.kernel.org/linux-mm/20210324043434.GP1719932@casper.infradead.org/
> > Cc: Matthew Wilcox <willy@infradead.org>
> > Signed-off-by: Minchan Kim <minchan@kernel.org>
> > ---
> >  include/linux/cma.h | 2 +-
> >  mm/cma.c            | 8 ++++----
> >  2 files changed, 5 insertions(+), 5 deletions(-)
> > 
> > diff --git a/include/linux/cma.h b/include/linux/cma.h
> > index 217999c8a762..a873edc20ca2 100644
> > --- a/include/linux/cma.h
> > +++ b/include/linux/cma.h
> > @@ -44,7 +44,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
> >  					unsigned int order_per_bit,
> >  					const char *name,
> >  					struct cma **res_cma);
> > -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> > +extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
> >  			      bool no_warn);
> >  extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
> >  
> > diff --git a/mm/cma.c b/mm/cma.c
> > index 08c45157911a..24dc01e26d45 100644
> > --- a/mm/cma.c
> > +++ b/mm/cma.c
> > @@ -423,21 +423,21 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
> >   * This function allocates part of contiguous memory on specific
> >   * contiguous memory area.
> >   */
> > -struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> > +struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
> >  		       bool no_warn)
> >  {
> >  	unsigned long mask, offset;
> >  	unsigned long pfn = -1;
> >  	unsigned long start = 0;
> >  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
> > -	size_t i;
> > +	unsigned int i;
> >  	struct page *page = NULL;
> >  	int ret = -ENOMEM;
> >  
> >  	if (!cma || !cma->count || !cma->bitmap)
> >  		goto out;
> >  
> > -	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
> > +	pr_debug("%s(cma %p, count %u, align %d)\n", __func__, (void *)cma,
> >  		 count, align);
> >  
> >  	if (!count)
> > @@ -500,7 +500,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> >  	}
> >  
> >  	if (ret && !no_warn) {
> > -		pr_err_ratelimited("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
> > +		pr_err_ratelimited("%s: %s: alloc failed, req-size: %u pages, ret: %d\n",
> >  				   __func__, cma->name, count, ret);
> >  		cma_debug_show_areas(cma);
> >  	}
> > -- 
> > 2.31.0.291.g576ba9dcdaf-goog
> > 
> >
David Hildenbrand March 30, 2021, 7:58 a.m. UTC | #3
On 29.03.21 22:12, Minchan Kim wrote:
> On Mon, Mar 29, 2021 at 07:44:31PM +0100, Matthew Wilcox wrote:
>> On Mon, Mar 29, 2021 at 11:25:53AM -0700, Minchan Kim wrote:
>>> size_t in cma_alloc is confusing since it makes people think
>>> it's a byte count, not a number of pages. Fix it.
>>
>> I think it has to be unsigned long.
>>
>> 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e

Right.

Fortunately, we don't have such large allocations yet via 
CMA/alloc_contig_range

> 
> Thanks for the pointer. I wanted to keep the change as small as possible.
> That commit leads me to change cma_release, trace_cma_alloc,
> cma_clear_bitmap and free_contig_range as well. (Not sure
> whether there are more; will check.)
> 
> Cc'ing david@redhat.com for the upcoming change to free_contig_range.

While at it, we might want to convert free_contig_range() to eat
"unsigned long start, unsigned long end" like alloc_contig_range(), 
instead of "unsigned long pfn, unsigned int nr_pages" like 
alloc_contig_pages() ...
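
For comparison, the two call shapes being weighed, paraphrased from the
parameter lists quoted above rather than copied from the tree (the function
names below are placeholders):

/* Placeholder declarations -- shapes only, not the real prototypes. */
/* range style, like alloc_contig_range(): start and end pfn         */
void free_range_style(unsigned long start, unsigned long end);
/* count style, like alloc_contig_pages(): base pfn and a page count */
void free_count_style(unsigned long pfn, unsigned int nr_pages);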
Minchan Kim March 30, 2021, 3 p.m. UTC | #4
On Tue, Mar 30, 2021 at 09:58:37AM +0200, David Hildenbrand wrote:
> On 29.03.21 22:12, Minchan Kim wrote:
> > On Mon, Mar 29, 2021 at 07:44:31PM +0100, Matthew Wilcox wrote:
> > > On Mon, Mar 29, 2021 at 11:25:53AM -0700, Minchan Kim wrote:
> > > > size_t in cma_alloc is confusing since it makes people think
> > > > it's a byte count, not a number of pages. Fix it.
> > > 
> > > I think it has to be unsigned long.
> > > 
> > > 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e
> 
> Right.
> 
> Fortunately, we don't have such large allocations yet via
> CMA/alloc_contig_range
> 
> > 
> > Thanks for the pointer. I wanted to keep the change as small as possible.
> > That commit leads me to change cma_release, trace_cma_alloc,
> > cma_clear_bitmap and free_contig_range as well. (Not sure
> > whether there are more; will check.)
> > 
> > Cc'ing david@redhat.com for the upcoming change to free_contig_range.
> 
> While at it, we might want to convert free_contig_range() to eat
> "unsigned long start, unsigned long end" like alloc_contig_range(), instead
> of "unsigned long pfn, unsigned int nr_pages" like alloc_contig_pages() ...

Well, I'm personally tempted to change alloc_contig_range, not
free_contig_range, because base_pfn with nr_pages is more
straightforward than base_pfn and end_pfn, in that we don't
need to specify whether end_pfn is inclusive or exclusive.

When I look at the callers of [alloc|free]_contig_range, many of them
already use an nr_pages-based approach rather than start_pfn and
end_pfn. If your suggestion comes from it being a *range* API,
I'd rather rename them to "alloc_contig_pages|free_contig_pages".

Since it's beyond the goal of this patch and might be controversial, 
I will not deal with it in this patch.
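
To spell out the inclusive/exclusive ambiguity with made-up numbers, a small
standalone sketch:

#include <stdio.h>

int main(void)
{
	/* Made-up numbers: the same pfn range expressed both ways. */
	unsigned long base_pfn = 0x100000UL;
	unsigned long nr_pages = 512UL;
	unsigned long end_excl = base_pfn + nr_pages;     /* one past the last pfn */
	unsigned long end_incl = base_pfn + nr_pages - 1; /* the last pfn itself   */

	/* (base_pfn, nr_pages) leaves nothing to misread ...            */
	printf("start %#lx, %lu pages\n", base_pfn, nr_pages);
	/* ... (start, end) makes the caller remember which "end" it is. */
	printf("end %#lx (exclusive) vs %#lx (inclusive)\n", end_excl, end_incl);
	return 0;
}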
David Hildenbrand March 30, 2021, 3:05 p.m. UTC | #5
On 30.03.21 17:00, Minchan Kim wrote:
> On Tue, Mar 30, 2021 at 09:58:37AM +0200, David Hildenbrand wrote:
>> On 29.03.21 22:12, Minchan Kim wrote:
>>> On Mon, Mar 29, 2021 at 07:44:31PM +0100, Matthew Wilcox wrote:
>>>> On Mon, Mar 29, 2021 at 11:25:53AM -0700, Minchan Kim wrote:
>>>>> size_t in cma_alloc is confusing since it makes people think
>>>>> it's a byte count, not a number of pages. Fix it.
>>>>
>>>> I think it has to be unsigned long.
>>>>
>>>> 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e
>>
>> Right.
>>
>> Fortunately, we don't have such large allocations yet via
>> CMA/alloc_contig_range
>>
>>>
>>> Thanks for the pointer. I wanted to keep the change as small as possible.
>>> That commit leads me to change cma_release, trace_cma_alloc,
>>> cma_clear_bitmap and free_contig_range as well. (Not sure
>>> whether there are more; will check.)
>>>
>>> Cc'ing david@redhat.com for the upcoming change to free_contig_range.
>>
>> While at it, we might want to convert free_contig_range() to eat
>> "unsigned long start, unsigned long end" like alloc_contig_range(), instead
>> of "unsigned long pfn, unsigned int nr_pages" like alloc_contig_pages() ...
> 
> Well, I'm personally tempted to change alloc_contig_range, not
> free_contig_range, because base_pfn with nr_pages is more
> straightforward than base_pfn and end_pfn, in that we don't
> need to specify whether end_pfn is inclusive or exclusive.
> 

That's right.

> When I look at the callers of [alloc|free]_contig_range, many of them
> already use an nr_pages-based approach rather than start_pfn and
> end_pfn. If your suggestion comes from it being a *range* API,

Right you are, teaching alloc_contig_range() to eat "nr_pages" might 
actually be even better and more consistent.

> I'd rather rename them to "alloc_contig_pages|free_contig_pages".

alloc_contig_pages is just a wrapper for alloc_contig_range(), so 
free_contig_range() is a better fit; OTOH, having both would also 
somehow make sense.

> 
> Since it's beyond the goal of this patch and might be controversial,
> I will not deal with it in this patch.

Sure, but feel free to send a patch to make that consistent. It's
already been bugging me (having to always remember whether to pass in
nr_pages or end).

Patch

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 217999c8a762..a873edc20ca2 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -44,7 +44,7 @@  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
 					struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
 			      bool no_warn);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 
diff --git a/mm/cma.c b/mm/cma.c
index 08c45157911a..24dc01e26d45 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -423,21 +423,21 @@  static inline void cma_debug_show_areas(struct cma *cma) { }
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align,
 		       bool no_warn)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
-	size_t i;
+	unsigned int i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
 
 	if (!cma || !cma->count || !cma->bitmap)
 		goto out;
 
-	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
+	pr_debug("%s(cma %p, count %u, align %d)\n", __func__, (void *)cma,
 		 count, align);
 
 	if (!count)
@@ -500,7 +500,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	}
 
 	if (ret && !no_warn) {
-		pr_err_ratelimited("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
+		pr_err_ratelimited("%s: %s: alloc failed, req-size: %u pages, ret: %d\n",
 				   __func__, cma->name, count, ret);
 		cma_debug_show_areas(cma);
 	}