
[v8,4/7] mm, devm_memremap_pages: Add MEMORY_DEVICE_PRIVATE support

Message ID 154275559036.76910.12434636179931292607.stgit@dwillia2-desk3.amr.corp.intel.com
State New, archived
Series mm: Merge hmm into devm_memremap_pages, mark GPL-only

Commit Message

Dan Williams Nov. 20, 2018, 11:13 p.m. UTC
In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.

Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
[jglisse: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
Acked-by: Christoph Hellwig <hch@lst.de>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 kernel/memremap.c |   53 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 12 deletions(-)
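
For illustration only, a minimal driver-side sketch of how a caller might hand
a device-private range to devm_memremap_pages() after this change. The helper
name and the resource/percpu_ref plumbing are hypothetical, and a real
MEMORY_DEVICE_PRIVATE user would additionally wire up the page-free/kill hooks
required elsewhere in this series; field names follow the 4.20-era
struct dev_pagemap.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

static void *example_map_private_range(struct device *dev,
		struct resource *res, struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->res = *res;	/* device memory, not addressable by the CPU */
	pgmap->ref = ref;	/* caller-managed page reference count */
	pgmap->type = MEMORY_DEVICE_PRIVATE;

	/*
	 * With this patch the MEMORY_DEVICE_PRIVATE type routes the range
	 * through add_pages(): struct pages are allocated and initialized,
	 * but no linear mapping is created for the un-addressable memory.
	 */
	return devm_memremap_pages(dev, pgmap);
}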

Comments

David Hildenbrand Nov. 23, 2018, 10:48 a.m. UTC | #1
On 21.11.18 00:13, Dan Williams wrote:
> In preparation for consolidating all ZONE_DEVICE enabling via
> devm_memremap_pages(), teach it how to handle the constraints of
> MEMORY_DEVICE_PRIVATE ranges.
> 
> Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
> [jglisse: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
> Acked-by: Christoph Hellwig <hch@lst.de>
> Reported-by: Logan Gunthorpe <logang@deltatee.com>
> Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  kernel/memremap.c |   53 +++++++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 41 insertions(+), 12 deletions(-)
> 
> diff --git a/kernel/memremap.c b/kernel/memremap.c
> index 5e45f0c327a5..3eef989ef035 100644
> --- a/kernel/memremap.c
> +++ b/kernel/memremap.c
> @@ -98,9 +98,15 @@ static void devm_memremap_pages_release(void *data)
>  		- align_start;
>  
>  	mem_hotplug_begin();
> -	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
> -			&pgmap->altmap : NULL);
> -	kasan_remove_zero_shadow(__va(align_start), align_size);
> +	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
> +		pfn = align_start >> PAGE_SHIFT;
> +		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
> +				align_size >> PAGE_SHIFT, NULL);
> +	} else {
> +		arch_remove_memory(align_start, align_size,
> +				pgmap->altmap_valid ? &pgmap->altmap : NULL);
> +		kasan_remove_zero_shadow(__va(align_start), align_size);
> +	}
>  	mem_hotplug_done();
>  
>  	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
> @@ -187,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
>  		goto err_pfn_remap;
>  
>  	mem_hotplug_begin();
> -	error = kasan_add_zero_shadow(__va(align_start), align_size);
> -	if (error) {
> -		mem_hotplug_done();
> -		goto err_kasan;
> +
> +	/*
> +	 * For device private memory we call add_pages() as we only need to
> +	 * allocate and initialize struct page for the device memory. More-
> +	 * over the device memory is un-accessible thus we do not want to
> +	 * create a linear mapping for the memory like arch_add_memory()
> +	 * would do.
> +	 *
> +	 * For all other device memory types, which are accessible by
> +	 * the CPU, we do want the linear mapping and thus use
> +	 * arch_add_memory().
> +	 */

I consider this comment really useful. :)

Short question: Right now, MEMORY_DEVICE_PRIVATE always indicates HMM,
correct? (I am just confused by the naming but I assume
MEMORY_DEVICE_PRIVATE is more generic than HMM)

Patch

diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5e45f0c327a5..3eef989ef035 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -98,9 +98,15 @@  static void devm_memremap_pages_release(void *data)
 		- align_start;
 
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-			&pgmap->altmap : NULL);
-	kasan_remove_zero_shadow(__va(align_start), align_size);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		pfn = align_start >> PAGE_SHIFT;
+		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+				align_size >> PAGE_SHIFT, NULL);
+	} else {
+		arch_remove_memory(align_start, align_size,
+				pgmap->altmap_valid ? &pgmap->altmap : NULL);
+		kasan_remove_zero_shadow(__va(align_start), align_size);
+	}
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -187,17 +193,40 @@  void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = kasan_add_zero_shadow(__va(align_start), align_size);
-	if (error) {
-		mem_hotplug_done();
-		goto err_kasan;
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory. More-
+	 * over the device memory is un-accessible thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, NULL, false);
+	} else {
+		error = kasan_add_zero_shadow(__va(align_start), align_size);
+		if (error) {
+			mem_hotplug_done();
+			goto err_kasan;
+		}
+
+		error = arch_add_memory(nid, align_start, align_size, altmap,
+				false);
+	}
+
+	if (!error) {
+		struct zone *zone;
+
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, altmap);
 	}
 
-	error = arch_add_memory(nid, align_start, align_size, altmap, false);
-	if (!error)
-		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-					align_start >> PAGE_SHIFT,
-					align_size >> PAGE_SHIFT, altmap);
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;