[v3,2/5] mm/cma: add cma_pages_valid to determine if pages are in CMA

Message ID 20211001175210.45968-3-mike.kravetz@oracle.com
State New
Series hugetlb: add demote/split page functionality

Commit Message

Mike Kravetz Oct. 1, 2021, 5:52 p.m. UTC
Add a new interface, cma_pages_valid(), which indicates whether the
specified pages are part of a CMA region.  This interface will be used
by hugetlb code in a subsequent patch.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/cma.h |  1 +
 mm/cma.c            | 21 +++++++++++++++++----
 2 files changed, 18 insertions(+), 4 deletions(-)
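
For context, here is a minimal sketch of how the hugetlb side might
consume this interface.  The per-node hugetlb_cma[] array and the
hugetlb_cma_page() helper are assumptions for illustration only; the
actual user lands later in the series:

#ifdef CONFIG_CMA
/*
 * Sketch only: decide whether a gigantic page came from a CMA area,
 * so the free path can hand it back via cma_release().  hugetlb_cma[]
 * and this helper are assumed here, not part of this patch.
 */
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
			       1 << order);
}
#endif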

Comments

Oscar Salvador Oct. 5, 2021, 8:45 a.m. UTC | #1
On Fri, Oct 01, 2021 at 10:52:07AM -0700, Mike Kravetz wrote:
> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
> +		     unsigned long count)
> +{
> +	unsigned long pfn;
> +
> +	if (!cma || !pages)
> +		return false;
> +
> +	pfn = page_to_pfn(pages);
> +
> +	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> +		return false;
> +
> +	return true;
> +}
> +
>  /**
>   * cma_release() - release allocated pages
>   * @cma:   Contiguous memory region for which the allocation is performed.
> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
>  {
>  	unsigned long pfn;
>  
> -	if (!cma || !pages)
> +	if (!cma_pages_valid(cma, pages, count))
>  		return false;
>  
>  	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>  
>  	pfn = page_to_pfn(pages);
>  
> -	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> -		return false;
> -

After this patch, the timing of the debug statement changes: when the pfn
range check fails, we now back off before printing it.
You might want to point that out in the changelog in case someone wonders why.
David Hildenbrand Oct. 5, 2021, 8:48 a.m. UTC | #2
On 01.10.21 19:52, Mike Kravetz wrote:
> Add a new interface, cma_pages_valid(), which indicates whether the
> specified pages are part of a CMA region.  This interface will be used
> by hugetlb code in a subsequent patch.
> 
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> ---
>   include/linux/cma.h |  1 +
>   mm/cma.c            | 21 +++++++++++++++++----
>   2 files changed, 18 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index 53fd8c3cdbd0..bd801023504b 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>   					struct cma **res_cma);
>   extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
>   			      bool no_warn);
> +extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
>   extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
>   
>   extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
> diff --git a/mm/cma.c b/mm/cma.c
> index 995e15480937..960994b88c7f 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -524,6 +524,22 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
>   	return page;
>   }
>   
> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
> +		     unsigned long count)
> +{
> +	unsigned long pfn;
> +
> +	if (!cma || !pages)
> +		return false;
> +
> +	pfn = page_to_pfn(pages);
> +
> +	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> +		return false;
> +
> +	return true;
> +}
> +
>   /**
>    * cma_release() - release allocated pages
>    * @cma:   Contiguous memory region for which the allocation is performed.
> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
>   {
>   	unsigned long pfn;
>   
> -	if (!cma || !pages)
> +	if (!cma_pages_valid(cma, pages, count))
>   		return false;
>   
>   	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>   
>   	pfn = page_to_pfn(pages);
>   
> -	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> -		return false;
> -
>   	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
>   
>   	free_contig_range(pfn, count);
> 

Agreed that we might want to perform the pr_debug() earlier now, or add 
another pr_warn() before returning false.

Acked-by: David Hildenbrand <david@redhat.com>
Mike Kravetz Oct. 5, 2021, 5:06 p.m. UTC | #3
On 10/5/21 1:45 AM, Oscar Salvador wrote:
> On Fri, Oct 01, 2021 at 10:52:07AM -0700, Mike Kravetz wrote:
>> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
>> +		     unsigned long count)
>> +{
>> +	unsigned long pfn;
>> +
>> +	if (!cma || !pages)
>> +		return false;
>> +
>> +	pfn = page_to_pfn(pages);
>> +
>> +	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
>> +		return false;
>> +
>> +	return true;
>> +}
>> +
>>  /**
>>   * cma_release() - release allocated pages
>>   * @cma:   Contiguous memory region for which the allocation is performed.
>> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
>>  {
>>  	unsigned long pfn;
>>  
>> -	if (!cma || !pages)
>> +	if (!cma_pages_valid(cma, pages, count))
>>  		return false;
>>  
>>  	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>>  
>>  	pfn = page_to_pfn(pages);
>>  
>> -	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
>> -		return false;
>> -
> 
> After this patch, the timing of the debug statement changes: when the pfn
> range check fails, we now back off before printing it.
> You might want to point that out in the changelog in case someone wonders why.
> 

When making this change, I did not want to duplicate that debug
statement.  However, duplicated code only becomes an issue when
CONFIG_DEBUG is enabled, so the duplication should be acceptable.

I will make the debug statements function as before.
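
One way to keep the debug output behaving as before is to duplicate the
pr_debug() on the range-check failure path inside cma_pages_valid().  A
sketch, not necessarily the exact follow-up code:

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		/*
		 * Print here so callers like cma_release() still emit
		 * the debug line before backing off on an out-of-range
		 * pfn, as they did before this patch.
		 */
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}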

Patch

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 53fd8c3cdbd0..bd801023504b 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -46,6 +46,7 @@  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/mm/cma.c b/mm/cma.c
index 995e15480937..960994b88c7f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -524,6 +524,22 @@  struct page *cma_alloc(struct cma *cma, unsigned long count,
 	return page;
 }
 
+bool cma_pages_valid(struct cma *cma, const struct page *pages,
+		     unsigned long count)
+{
+	unsigned long pfn;
+
+	if (!cma || !pages)
+		return false;
+
+	pfn = page_to_pfn(pages);
+
+	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+		return false;
+
+	return true;
+}
+
 /**
  * cma_release() - release allocated pages
  * @cma:   Contiguous memory region for which the allocation is performed.
@@ -539,16 +555,13 @@  bool cma_release(struct cma *cma, const struct page *pages,
 {
 	unsigned long pfn;
 
-	if (!cma || !pages)
+	if (!cma_pages_valid(cma, pages, count))
 		return false;
 
 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
 	pfn = page_to_pfn(pages);
 
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
-
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
 	free_contig_range(pfn, count);