
[v3,22/25] mm/memremap_pages: Replace zone_device_page_init() with pgmap_request_folios()

Message ID 166579194621.2236710.8168919102434295671.stgit@dwillia2-xfh.jf.intel.com (mailing list archive)
State New
Series Fix the DAX-gup mistake

Commit Message

Dan Williams Oct. 14, 2022, 11:59 p.m. UTC
Switch to the common method, shared across all MEMORY_DEVICE_* types,
for requesting access to a ZONE_DEVICE page. The
MEMORY_DEVICE_{PRIVATE,COHERENT}-specific expectation that newly
requested pages are locked is moved to the callers.

Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/powerpc/kvm/book3s_hv_uvmem.c       |    3 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |    3 ++-
 drivers/gpu/drm/nouveau/nouveau_dmem.c   |    3 ++-
 include/linux/memremap.h                 |    1 -
 lib/test_hmm.c                           |    3 ++-
 mm/memremap.c                            |   13 +------------
 6 files changed, 9 insertions(+), 17 deletions(-)
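
For reference, the driver-side conversion this patch performs can be summarized by the following minimal sketch. The helper name my_devmem_alloc_page() is hypothetical; the calls to pfn_to_page(), pgmap_request_folios(), page_folio() and lock_page() mirror the hunks below, where the page lock that zone_device_page_init() used to take becomes the caller's responsibility:

#include <linux/memremap.h>
#include <linux/mm.h>

/*
 * Hypothetical driver helper illustrating the new calling convention.
 * Previously a driver called zone_device_page_init(page), which pinned
 * the pgmap, set the page refcount, and locked the page. With this
 * patch the driver requests the folio from its dev_pagemap and then
 * locks the page itself.
 */
static struct page *my_devmem_alloc_page(struct dev_pagemap *pgmap, unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);

        /* Take a reference on one pgmap-backed folio. */
        if (!pgmap_request_folios(pgmap, page_folio(page), 1))
                return NULL;

        /* Locking now happens in the caller, not in the core helper. */
        lock_page(page);
        return page;
}

Note that the in-tree callers converted below invoke pgmap_request_folios() unconditionally rather than checking its boolean return value; the error check above is only part of this illustrative sketch.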

Comments

Lyude Paul Oct. 17, 2022, 7:17 p.m. UTC | #1
For the nouveau bits:

Reviewed-by: Lyude Paul <lyude@redhat.com>


Patch

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2f11f9c3f2a..884ec112ad43 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -718,7 +718,8 @@  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 
 	dpage = pfn_to_page(uvmem_pfn);
 	dpage->zone_device_data = pvt;
-	zone_device_page_init(dpage);
+	pgmap_request_folios(dpage->pgmap, page_folio(dpage), 1);
+	lock_page(dpage);
 	return dpage;
 out_clear:
 	spin_lock(&kvmppc_uvmem_bitmap_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 97a684568ae0..8cf97060122b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,8 @@  svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
 	page = pfn_to_page(pfn);
 	svm_range_bo_ref(prange->svm_bo);
 	page->zone_device_data = prange->svm_bo;
-	zone_device_page_init(page);
+	pgmap_request_folios(page->pgmap, page_folio(page), 1);
+	lock_page(page);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 5fe209107246..1482533c7ca0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -324,7 +324,8 @@  nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
 			return NULL;
 	}
 
-	zone_device_page_init(page);
+	pgmap_request_folios(page->pgmap, page_folio(page), 1);
+	lock_page(page);
 	return page;
 }
 
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 98196b8d3172..3fb3809d71f3 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -187,7 +187,6 @@  static inline bool folio_is_device_coherent(const struct folio *folio)
 }
 
 #ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
 void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 67e6f83fe0f8..e4f7219ae3bb 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -632,7 +632,8 @@  static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
 			goto error;
 	}
 
-	zone_device_page_init(dpage);
+	pgmap_request_folios(dpage->pgmap, page_folio(dpage), 1);
+	lock_page(dpage);
 	dpage->zone_device_data = rpage;
 	return dpage;
 
diff --git a/mm/memremap.c b/mm/memremap.c
index 87a649ecdc54..c46e700f5245 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -518,18 +518,6 @@  void free_zone_device_page(struct page *page)
 		put_dev_pagemap(page->pgmap);
 }
 
-void zone_device_page_init(struct page *page)
-{
-	/*
-	 * Drivers shouldn't be allocating pages after calling
-	 * memunmap_pages().
-	 */
-	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
-	set_page_count(page, 1);
-	lock_page(page);
-}
-EXPORT_SYMBOL_GPL(zone_device_page_init);
-
 static bool folio_span_valid(struct dev_pagemap *pgmap, struct folio *folio,
 			     int nr_folios)
 {
@@ -586,6 +574,7 @@  bool pgmap_request_folios(struct dev_pagemap *pgmap, struct folio *folio,
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(pgmap_request_folios);
 
 void pgmap_release_folios(struct dev_pagemap *pgmap, struct folio *folio, int nr_folios)
 {