
[RFC,2/2] arm64/mm: Enable device memory allocation and free for vmemmap mapping

Message ID 1561697083-7329-3-git-send-email-anshuman.khandual@arm.com (mailing list archive)
State New, archived
Series arm64: Enable vmemmap from device memory

Commit Message

Anshuman Khandual June 28, 2019, 4:44 a.m. UTC
This enables the vmemmap_populate() and vmemmap_free() functions to
incorporate struct vmem_altmap based device memory allocation and free
requests. With this, device memory with a specific altmap configuration can
be hot plugged and hot removed as ZONE_DEVICE memory on arm64 platforms.
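
For context, struct vmem_altmap (include/linux/memremap.h) describes a
carve-out at the base of the hot-plugged device range from which the vmemmap
(the struct page array) is allocated instead of system RAM. A rough sketch of
how such a carve-out might be described (dev_start and nr_vmemmap_pfns are
illustrative placeholders, not part of this patch):

	struct vmem_altmap altmap = {
		.base_pfn = PHYS_PFN(dev_start),   /* first pfn of the device range    */
		.reserve  = 0,                     /* pfns to skip before the vmemmap  */
		.free     = nr_vmemmap_pfns,       /* pfns available for vmemmap pages */
	};

vmemmap_populate() then pulls PMD_SIZE blocks out of this carve-out via
altmap_alloc_block_buf(), and vmemmap_free() hands the backing pfns back
through vmem_altmap_free().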

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/mm/mmu.c | 57 ++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 37 insertions(+), 20 deletions(-)

Comments

Will Deacon July 31, 2019, 4:11 p.m. UTC | #1
On Fri, Jun 28, 2019 at 10:14:43AM +0530, Anshuman Khandual wrote:
> This enables the vmemmap_populate() and vmemmap_free() functions to
> incorporate struct vmem_altmap based device memory allocation and free
> requests. With this, device memory with a specific altmap configuration can
> be hot plugged and hot removed as ZONE_DEVICE memory on arm64 platforms.
> 
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will.deacon@arm.com>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> 
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  arch/arm64/mm/mmu.c | 57 ++++++++++++++++++++++++++++++++++-------------------
>  1 file changed, 37 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 39e18d1..8867bbd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -735,15 +735,26 @@ int kern_addr_valid(unsigned long addr)
>  }
>  
>  #ifdef CONFIG_MEMORY_HOTPLUG
> -static void free_hotplug_page_range(struct page *page, size_t size)
> +static void free_hotplug_page_range(struct page *page, size_t size,
> +				    struct vmem_altmap *altmap)
>  {
> -	WARN_ON(!page || PageReserved(page));
> -	free_pages((unsigned long)page_address(page), get_order(size));
> +	if (altmap) {
> +		/*
> +		 * vmemmap_populate() creates vmemmap mapping either at pte
> +		 * or pmd level. Unmapping request at any other level would
> +		 * be a problem.
> +		 */
> +		WARN_ON((size != PAGE_SIZE) && (size != PMD_SIZE));
> +		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
> +	} else {
> +		WARN_ON(!page || PageReserved(page));
> +		free_pages((unsigned long)page_address(page), get_order(size));
> +	}
>  }
>  
>  static void free_hotplug_pgtable_page(struct page *page)
>  {
> -	free_hotplug_page_range(page, PAGE_SIZE);
> +	free_hotplug_page_range(page, PAGE_SIZE, NULL);
>  }
>  
>  static void free_pte_table(pmd_t *pmdp, unsigned long addr)
> @@ -807,7 +818,8 @@ static void free_pud_table(pgd_t *pgdp, unsigned long addr)
>  }
>  
>  static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
> -				    unsigned long end, bool sparse_vmap)
> +				    unsigned long end, bool sparse_vmap,
> +				    struct vmem_altmap *altmap)

Do you still need the sparse_vmap parameter, or can you just pass a NULL
altmap pointer when sparse_vmap is false?

Will
Anshuman Khandual Aug. 1, 2019, 3:03 a.m. UTC | #2
On 07/31/2019 09:41 PM, Will Deacon wrote:
> On Fri, Jun 28, 2019 at 10:14:43AM +0530, Anshuman Khandual wrote:
>> This enables the vmemmap_populate() and vmemmap_free() functions to
>> incorporate struct vmem_altmap based device memory allocation and free
>> requests. With this, device memory with a specific altmap configuration can
>> be hot plugged and hot removed as ZONE_DEVICE memory on arm64 platforms.
>>
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will.deacon@arm.com>
>> Cc: Mark Rutland <mark.rutland@arm.com>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>>
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>  arch/arm64/mm/mmu.c | 57 ++++++++++++++++++++++++++++++++++-------------------
>>  1 file changed, 37 insertions(+), 20 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 39e18d1..8867bbd 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -735,15 +735,26 @@ int kern_addr_valid(unsigned long addr)
>>  }
>>  
>>  #ifdef CONFIG_MEMORY_HOTPLUG
>> -static void free_hotplug_page_range(struct page *page, size_t size)
>> +static void free_hotplug_page_range(struct page *page, size_t size,
>> +				    struct vmem_altmap *altmap)
>>  {
>> -	WARN_ON(!page || PageReserved(page));
>> -	free_pages((unsigned long)page_address(page), get_order(size));
>> +	if (altmap) {
>> +		/*
>> +		 * vmemmap_populate() creates vmemmap mapping either at pte
>> +		 * or pmd level. Unmapping request at any other level would
>> +		 * be a problem.
>> +		 */
>> +		WARN_ON((size != PAGE_SIZE) && (size != PMD_SIZE));
>> +		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
>> +	} else {
>> +		WARN_ON(!page || PageReserved(page));
>> +		free_pages((unsigned long)page_address(page), get_order(size));
>> +	}
>>  }
>>  
>>  static void free_hotplug_pgtable_page(struct page *page)
>>  {
>> -	free_hotplug_page_range(page, PAGE_SIZE);
>> +	free_hotplug_page_range(page, PAGE_SIZE, NULL);
>>  }
>>  
>>  static void free_pte_table(pmd_t *pmdp, unsigned long addr)
>> @@ -807,7 +818,8 @@ static void free_pud_table(pgd_t *pgdp, unsigned long addr)
>>  }
>>  
>>  static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
>> -				    unsigned long end, bool sparse_vmap)
>> +				    unsigned long end, bool sparse_vmap,
>> +				    struct vmem_altmap *altmap)
> 
> Do you still need the sparse_vmap parameter, or can you just pass a NULL
> altmap pointer when sparse_vmap is false?

Yes, we still require the sparse_vmap parameter because a vmemmap mapping is
not necessarily created only for a ZONE_DEVICE range with an altmap. A vmemmap
mapping can also exist with a NULL altmap (regular memory, or device memory
without an altmap), in which case the altmap pointer alone cannot be used to
differentiate between linear and vmemmap mappings.
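
To illustrate, a sketch of the possible combinations (the first and last calls
appear in the patch below; the middle one is the same vmemmap_free() path when
the caller happens to pass a NULL altmap):

	/* Linear map teardown: mapped pages are never freed, no altmap. */
	remove_pagetable(start, end, false, NULL);

	/* Vmemmap teardown for regular hotplugged memory: free the backing
	 * pages to the page allocator; altmap is NULL here as well. */
	unmap_hotplug_range(start, end, true, NULL);

	/* Vmemmap teardown for ZONE_DEVICE memory backed by an altmap: the
	 * backing pfns return to the device carve-out via vmem_altmap_free(). */
	unmap_hotplug_range(start, end, true, altmap);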

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 39e18d1..8867bbd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -735,15 +735,26 @@  int kern_addr_valid(unsigned long addr)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static void free_hotplug_page_range(struct page *page, size_t size)
+static void free_hotplug_page_range(struct page *page, size_t size,
+				    struct vmem_altmap *altmap)
 {
-	WARN_ON(!page || PageReserved(page));
-	free_pages((unsigned long)page_address(page), get_order(size));
+	if (altmap) {
+		/*
+		 * vmemmap_populate() creates vmemmap mapping either at pte
+		 * or pmd level. Unmapping request at any other level would
+		 * be a problem.
+		 */
+		WARN_ON((size != PAGE_SIZE) && (size != PMD_SIZE));
+		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
+	} else {
+		WARN_ON(!page || PageReserved(page));
+		free_pages((unsigned long)page_address(page), get_order(size));
+	}
 }
 
 static void free_hotplug_pgtable_page(struct page *page)
 {
-	free_hotplug_page_range(page, PAGE_SIZE);
+	free_hotplug_page_range(page, PAGE_SIZE, NULL);
 }
 
 static void free_pte_table(pmd_t *pmdp, unsigned long addr)
@@ -807,7 +818,8 @@  static void free_pud_table(pgd_t *pgdp, unsigned long addr)
 }
 
 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
-				    unsigned long end, bool sparse_vmap)
+				    unsigned long end, bool sparse_vmap,
+				    struct vmem_altmap *altmap)
 {
 	struct page *page;
 	pte_t *ptep, pte;
@@ -823,12 +835,13 @@  static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
 		pte_clear(&init_mm, addr, ptep);
 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 		if (sparse_vmap)
-			free_hotplug_page_range(page, PAGE_SIZE);
+			free_hotplug_page_range(page, PAGE_SIZE, altmap);
 	} while (addr += PAGE_SIZE, addr < end);
 }
 
 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
-				    unsigned long end, bool sparse_vmap)
+				    unsigned long end, bool sparse_vmap,
+				    struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	struct page *page;
@@ -847,16 +860,17 @@  static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
 			pmd_clear(pmdp);
 			flush_tlb_kernel_range(addr, next);
 			if (sparse_vmap)
-				free_hotplug_page_range(page, PMD_SIZE);
+				free_hotplug_page_range(page, PMD_SIZE, altmap);
 			continue;
 		}
 		WARN_ON(!pmd_table(pmd));
-		unmap_hotplug_pte_range(pmdp, addr, next, sparse_vmap);
+		unmap_hotplug_pte_range(pmdp, addr, next, sparse_vmap, altmap);
 	} while (addr = next, addr < end);
 }
 
 static void unmap_hotplug_pud_range(pgd_t *pgdp, unsigned long addr,
-				    unsigned long end, bool sparse_vmap)
+				    unsigned long end, bool sparse_vmap,
+				    struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	struct page *page;
@@ -875,16 +889,16 @@  static void unmap_hotplug_pud_range(pgd_t *pgdp, unsigned long addr,
 			pud_clear(pudp);
 			flush_tlb_kernel_range(addr, next);
 			if (sparse_vmap)
-				free_hotplug_page_range(page, PUD_SIZE);
+				free_hotplug_page_range(page, PUD_SIZE, altmap);
 			continue;
 		}
 		WARN_ON(!pud_table(pud));
-		unmap_hotplug_pmd_range(pudp, addr, next, sparse_vmap);
+		unmap_hotplug_pmd_range(pudp, addr, next, sparse_vmap, altmap);
 	} while (addr = next, addr < end);
 }
 
 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
-				bool sparse_vmap)
+				bool sparse_vmap, struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	pgd_t *pgdp, pgd;
@@ -897,7 +911,7 @@  static void unmap_hotplug_range(unsigned long addr, unsigned long end,
 			continue;
 
 		WARN_ON(!pgd_present(pgd));
-		unmap_hotplug_pud_range(pgdp, addr, next, sparse_vmap);
+		unmap_hotplug_pud_range(pgdp, addr, next, sparse_vmap, altmap);
 	} while (addr = next, addr < end);
 }
 
@@ -970,9 +984,9 @@  static void free_empty_tables(unsigned long addr, unsigned long end)
 }
 
 static void remove_pagetable(unsigned long start, unsigned long end,
-			     bool sparse_vmap)
+			     bool sparse_vmap, struct vmem_altmap *altmap)
 {
-	unmap_hotplug_range(start, end, sparse_vmap);
+	unmap_hotplug_range(start, end, sparse_vmap, altmap);
 	free_empty_tables(start, end);
 }
 #endif
@@ -982,7 +996,7 @@  static void remove_pagetable(unsigned long start, unsigned long end,
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
-	return vmemmap_populate_basepages(start, end, node, NULL);
+	return vmemmap_populate_basepages(start, end, node, altmap);
 }
 #else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
@@ -1009,7 +1023,10 @@  int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		if (pmd_none(READ_ONCE(*pmdp))) {
 			void *p = NULL;
 
-			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+			if (altmap)
+				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
+			else
+				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (!p)
 				return -ENOMEM;
 
@@ -1043,7 +1060,7 @@  void vmemmap_free(unsigned long start, unsigned long end,
 	 * given vmemmap range being hot-removed. Just unmap and free the
 	 * range instead.
 	 */
-	unmap_hotplug_range(start, end, true);
+	unmap_hotplug_range(start, end, true, altmap);
 #endif
 }
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
@@ -1336,7 +1353,7 @@  static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	unsigned long end = start + size;
 
 	WARN_ON(pgdir != init_mm.pgd);
-	remove_pagetable(start, end, false);
+	remove_pagetable(start, end, false, NULL);
 }
 
 int arch_add_memory(int nid, u64 start, u64 size,