diff mbox series

mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap

Message ID 20191113134528.21187-1-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap | expand

Commit Message

Christoph Hellwig Nov. 13, 2019, 1:45 p.m. UTC
These two functions have never been used since they were added.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/hmm.h |  23 -------
 mm/hmm.c            | 147 --------------------------------------------
 2 files changed, 170 deletions(-)

Comments

Jerome Glisse Nov. 13, 2019, 6:49 p.m. UTC | #1
On Wed, Nov 13, 2019 at 02:45:28PM +0100, Christoph Hellwig wrote:
> These two functions have never been used since they were added.

The mlx5 conversion (which has been posted a few times now) uses them;
I don't know what Jason's plans are on that front.

> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  include/linux/hmm.h |  23 -------
>  mm/hmm.c            | 147 --------------------------------------------
>  2 files changed, 170 deletions(-)
> 
> diff --git a/include/linux/hmm.h b/include/linux/hmm.h
> index b4af51735232..922c8d015cdb 100644
> --- a/include/linux/hmm.h
> +++ b/include/linux/hmm.h
> @@ -230,34 +230,11 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
>   * Please see Documentation/vm/hmm.rst for how to use the range API.
>   */
>  long hmm_range_fault(struct hmm_range *range, unsigned int flags);
> -
> -long hmm_range_dma_map(struct hmm_range *range,
> -		       struct device *device,
> -		       dma_addr_t *daddrs,
> -		       unsigned int flags);
> -long hmm_range_dma_unmap(struct hmm_range *range,
> -			 struct device *device,
> -			 dma_addr_t *daddrs,
> -			 bool dirty);
>  #else
>  static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
>  {
>  	return -EOPNOTSUPP;
>  }
> -
> -static inline long hmm_range_dma_map(struct hmm_range *range,
> -				     struct device *device, dma_addr_t *daddrs,
> -				     unsigned int flags)
> -{
> -	return -EOPNOTSUPP;
> -}
> -
> -static inline long hmm_range_dma_unmap(struct hmm_range *range,
> -				       struct device *device,
> -				       dma_addr_t *daddrs, bool dirty)
> -{
> -	return -EOPNOTSUPP;
> -}
>  #endif
>  
>  /*
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 9e9d3f4ea17c..ab883eefe847 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -698,150 +698,3 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
>  	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
>  }
>  EXPORT_SYMBOL(hmm_range_fault);
> -
> -/**
> - * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
> - * @range:	range being faulted
> - * @device:	device to map page to
> - * @daddrs:	array of dma addresses for the mapped pages
> - * @flags:	HMM_FAULT_*
> - *
> - * Return: the number of pages mapped on success (including zero), or any
> - * status return from hmm_range_fault() otherwise.
> - */
> -long hmm_range_dma_map(struct hmm_range *range, struct device *device,
> -		dma_addr_t *daddrs, unsigned int flags)
> -{
> -	unsigned long i, npages, mapped;
> -	long ret;
> -
> -	ret = hmm_range_fault(range, flags);
> -	if (ret <= 0)
> -		return ret ? ret : -EBUSY;
> -
> -	npages = (range->end - range->start) >> PAGE_SHIFT;
> -	for (i = 0, mapped = 0; i < npages; ++i) {
> -		enum dma_data_direction dir = DMA_TO_DEVICE;
> -		struct page *page;
> -
> -		/*
> -		 * FIXME need to update DMA API to provide invalid DMA address
> -		 * value instead of a function to test dma address value. This
> -		 * would remove lot of dumb code duplicated accross many arch.
> -		 *
> -		 * For now setting it to 0 here is good enough as the pfns[]
> -		 * value is what is use to check what is valid and what isn't.
> -		 */
> -		daddrs[i] = 0;
> -
> -		page = hmm_device_entry_to_page(range, range->pfns[i]);
> -		if (page == NULL)
> -			continue;
> -
> -		/* Check if range is being invalidated */
> -		if (mmu_range_check_retry(range->notifier,
> -					  range->notifier_seq)) {
> -			ret = -EBUSY;
> -			goto unmap;
> -		}
> -
> -		/* If it is read and write than map bi-directional. */
> -		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
> -			dir = DMA_BIDIRECTIONAL;
> -
> -		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
> -		if (dma_mapping_error(device, daddrs[i])) {
> -			ret = -EFAULT;
> -			goto unmap;
> -		}
> -
> -		mapped++;
> -	}
> -
> -	return mapped;
> -
> -unmap:
> -	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
> -		enum dma_data_direction dir = DMA_TO_DEVICE;
> -		struct page *page;
> -
> -		page = hmm_device_entry_to_page(range, range->pfns[i]);
> -		if (page == NULL)
> -			continue;
> -
> -		if (dma_mapping_error(device, daddrs[i]))
> -			continue;
> -
> -		/* If it is read and write than map bi-directional. */
> -		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
> -			dir = DMA_BIDIRECTIONAL;
> -
> -		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
> -		mapped--;
> -	}
> -
> -	return ret;
> -}
> -EXPORT_SYMBOL(hmm_range_dma_map);
> -
> -/**
> - * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
> - * @range: range being unmapped
> - * @device: device against which dma map was done
> - * @daddrs: dma address of mapped pages
> - * @dirty: dirty page if it had the write flag set
> - * Return: number of page unmapped on success, -EINVAL otherwise
> - *
> - * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
> - * to the sync_cpu_device_pagetables() callback so that it is safe here to
> - * call set_page_dirty(). Caller must also take appropriate locks to avoid
> - * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
> - */
> -long hmm_range_dma_unmap(struct hmm_range *range,
> -			 struct device *device,
> -			 dma_addr_t *daddrs,
> -			 bool dirty)
> -{
> -	unsigned long i, npages;
> -	long cpages = 0;
> -
> -	/* Sanity check. */
> -	if (range->end <= range->start)
> -		return -EINVAL;
> -	if (!daddrs)
> -		return -EINVAL;
> -	if (!range->pfns)
> -		return -EINVAL;
> -
> -	npages = (range->end - range->start) >> PAGE_SHIFT;
> -	for (i = 0; i < npages; ++i) {
> -		enum dma_data_direction dir = DMA_TO_DEVICE;
> -		struct page *page;
> -
> -		page = hmm_device_entry_to_page(range, range->pfns[i]);
> -		if (page == NULL)
> -			continue;
> -
> -		/* If it is read and write than map bi-directional. */
> -		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
> -			dir = DMA_BIDIRECTIONAL;
> -
> -			/*
> -			 * See comments in function description on why it is
> -			 * safe here to call set_page_dirty()
> -			 */
> -			if (dirty)
> -				set_page_dirty(page);
> -		}
> -
> -		/* Unmap and clear pfns/dma address */
> -		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
> -		range->pfns[i] = range->values[HMM_PFN_NONE];
> -		/* FIXME see comments in hmm_vma_dma_map() */
> -		daddrs[i] = 0;
> -		cpages++;
> -	}
> -
> -	return cpages;
> -}
> -EXPORT_SYMBOL(hmm_range_dma_unmap);
> -- 
> 2.20.1
>
John Hubbard Nov. 15, 2019, 6:18 a.m. UTC | #2
On 11/13/19 5:45 AM, Christoph Hellwig wrote:
> These two functions have never been used since they were added.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  include/linux/hmm.h |  23 -------
>  mm/hmm.c            | 147 --------------------------------------------
>  2 files changed, 170 deletions(-)

Reviewed-by: John Hubbard <jhubbard@nvidia.com>

Also +CC Ralph just for another look. But I know we're not using these 
routines.


thanks,
Jason Gunthorpe Nov. 19, 2019, 6:49 p.m. UTC | #3
On Wed, Nov 13, 2019 at 01:49:43PM -0500, Jerome Glisse wrote:
> On Wed, Nov 13, 2019 at 02:45:28PM +0100, Christoph Hellwig wrote:
> > These two functions have never been used since they were added.
> 
> The mlx5 conversion (which has been posted a few times now) uses them;
> I don't know what Jason's plans are on that front.

IMHO if ODP is going to be the only user then we should just keep the
existing ODP code.

Let's drop them until someone can come up with a series to migrate
several drivers. It is small, so it would not be hard to bring them
back if needed.

So applied to hmm.git

Thanks,
Jason
diff mbox series

Patch

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b4af51735232..922c8d015cdb 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -230,34 +230,11 @@  static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-
-long hmm_range_dma_map(struct hmm_range *range,
-		       struct device *device,
-		       dma_addr_t *daddrs,
-		       unsigned int flags);
-long hmm_range_dma_unmap(struct hmm_range *range,
-			 struct device *device,
-			 dma_addr_t *daddrs,
-			 bool dirty);
 #else
 static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
 	return -EOPNOTSUPP;
 }
-
-static inline long hmm_range_dma_map(struct hmm_range *range,
-				     struct device *device, dma_addr_t *daddrs,
-				     unsigned int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline long hmm_range_dma_unmap(struct hmm_range *range,
-				       struct device *device,
-				       dma_addr_t *daddrs, bool dirty)
-{
-	return -EOPNOTSUPP;
-}
 #endif
 
 /*
diff --git a/mm/hmm.c b/mm/hmm.c
index 9e9d3f4ea17c..ab883eefe847 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -698,150 +698,3 @@  long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(hmm_range_fault);
-
-/**
- * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
- * @range:	range being faulted
- * @device:	device to map page to
- * @daddrs:	array of dma addresses for the mapped pages
- * @flags:	HMM_FAULT_*
- *
- * Return: the number of pages mapped on success (including zero), or any
- * status return from hmm_range_fault() otherwise.
- */
-long hmm_range_dma_map(struct hmm_range *range, struct device *device,
-		dma_addr_t *daddrs, unsigned int flags)
-{
-	unsigned long i, npages, mapped;
-	long ret;
-
-	ret = hmm_range_fault(range, flags);
-	if (ret <= 0)
-		return ret ? ret : -EBUSY;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0, mapped = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		/*
-		 * FIXME need to update DMA API to provide invalid DMA address
-		 * value instead of a function to test dma address value. This
-		 * would remove lot of dumb code duplicated accross many arch.
-		 *
-		 * For now setting it to 0 here is good enough as the pfns[]
-		 * value is what is use to check what is valid and what isn't.
-		 */
-		daddrs[i] = 0;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* Check if range is being invalidated */
-		if (mmu_range_check_retry(range->notifier,
-					  range->notifier_seq)) {
-			ret = -EBUSY;
-			goto unmap;
-		}
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
-		if (dma_mapping_error(device, daddrs[i])) {
-			ret = -EFAULT;
-			goto unmap;
-		}
-
-		mapped++;
-	}
-
-	return mapped;
-
-unmap:
-	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		if (dma_mapping_error(device, daddrs[i]))
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		mapped--;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(hmm_range_dma_map);
-
-/**
- * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
- * @range: range being unmapped
- * @device: device against which dma map was done
- * @daddrs: dma address of mapped pages
- * @dirty: dirty page if it had the write flag set
- * Return: number of page unmapped on success, -EINVAL otherwise
- *
- * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
- * to the sync_cpu_device_pagetables() callback so that it is safe here to
- * call set_page_dirty(). Caller must also take appropriate locks to avoid
- * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
- */
-long hmm_range_dma_unmap(struct hmm_range *range,
-			 struct device *device,
-			 dma_addr_t *daddrs,
-			 bool dirty)
-{
-	unsigned long i, npages;
-	long cpages = 0;
-
-	/* Sanity check. */
-	if (range->end <= range->start)
-		return -EINVAL;
-	if (!daddrs)
-		return -EINVAL;
-	if (!range->pfns)
-		return -EINVAL;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
-			dir = DMA_BIDIRECTIONAL;
-
-			/*
-			 * See comments in function description on why it is
-			 * safe here to call set_page_dirty()
-			 */
-			if (dirty)
-				set_page_dirty(page);
-		}
-
-		/* Unmap and clear pfns/dma address */
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		range->pfns[i] = range->values[HMM_PFN_NONE];
-		/* FIXME see comments in hmm_vma_dma_map() */
-		daddrs[i] = 0;
-		cpages++;
-	}
-
-	return cpages;
-}
-EXPORT_SYMBOL(hmm_range_dma_unmap);