[3/4] dma-buf: dma-buf: stop mapping sg_tables on attach

Message ID 20250211163109.12200-4-christian.koenig@amd.com (mailing list archive)
State New
Series: [1/4] dma-buf: fix incorrect dma-fence documentation

Commit Message

Christian König Feb. 11, 2025, 4:31 p.m. UTC
As a workaround to smoothly transition from static to dynamic DMA-buf
handling, we cached the sg_table on attach when dynamic handling mismatched
between exporter and importer.

Since Dmitry and Thomas cleaned that up and also documented the lock
handling, we can drop this workaround now.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 149 ++++++++++++++------------------------
 include/linux/dma-buf.h   |  14 ----
 2 files changed, 56 insertions(+), 107 deletions(-)

Comments

Simona Vetter Feb. 17, 2025, 4:24 p.m. UTC | #1
On Tue, Feb 11, 2025 at 05:31:08PM +0100, Christian König wrote:
> As a workaround to smoothly transit from static to dynamic DMA-buf
> handling we cached the sg_table on attach if dynamic handling mismatched
> between exporter and importer.
> 
> Since Dmitry and Thomas cleaned that up and also documented the lock
> handling we can drop this workaround now.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-buf.c | 149 ++++++++++++++------------------------
>  include/linux/dma-buf.h   |  14 ----
>  2 files changed, 56 insertions(+), 107 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 5baa83b85515..357b94a3dbaa 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -782,7 +782,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
>  
>  	/* To catch abuse of the underlying struct page by importers mix
>  	 * up the bits, but take care to preserve the low SG_ bits to
> -	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
> +	 * not corrupt the sgt. The mixing is undone on unmap
>  	 * before passing the sgt back to the exporter.
>  	 */
>  	for_each_sgtable_sg(sg_table, sg, i)
> @@ -790,29 +790,20 @@ static void mangle_sg_table(struct sg_table *sg_table)
>  #endif
>  
>  }
> -static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
> -				       enum dma_data_direction direction)
> -{
> -	struct sg_table *sg_table;
> -	signed long ret;
> -
> -	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> -	if (IS_ERR_OR_NULL(sg_table))
> -		return sg_table;
> -
> -	if (!dma_buf_attachment_is_dynamic(attach)) {
> -		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
> -					    DMA_RESV_USAGE_KERNEL, true,
> -					    MAX_SCHEDULE_TIMEOUT);
> -		if (ret < 0) {
> -			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> -							   direction);
> -			return ERR_PTR(ret);
> -		}
> -	}
>  
> -	mangle_sg_table(sg_table);
> -	return sg_table;
> +/**
> + * dma_buf_pin_on_map - check if a DMA-buf should be pinned when mapped
> + * @attach: the DMA-buf attachment to check

Generally we don't do kerneldoc for static helper functions; the function
name should be clear enough here. I think you can just delete this.

> + *
> + * Returns: True if a DMA-buf export provided pin/unpin callbacks and we can't
> + * use the importers move notify for some reason.
> + */
> +static bool
> +dma_buf_pin_on_map(struct dma_buf_attachment *attach)
> +{
> +	return attach->dmabuf->ops->pin &&
> +		(!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) ||
> +		 !attach->importer_ops);

I think moving the dma_buf_attachment_is_dynamic helper into this file,
right above this, as a static inline helper without kerneldoc would be good,
just as a piece of self-documentation of what this check means. It's a bit
opaque otherwise, and if we add more importer_ops later we might get this
wrong.
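
A rough sketch of that suggestion (illustrative only, reusing the helper
body this series removes from dma-buf.h):

static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

static bool
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
{
	return attach->dmabuf->ops->pin &&
		(!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) ||
		 !dma_buf_attachment_is_dynamic(attach));
}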

>  }
>  
>  /**
> @@ -935,48 +926,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>  	list_add(&attach->node, &dmabuf->attachments);
>  	dma_resv_unlock(dmabuf->resv);
>  
> -	/* When either the importer or the exporter can't handle dynamic
> -	 * mappings we cache the mapping here to avoid issues with the
> -	 * reservation object lock.
> -	 */
> -	if (dma_buf_attachment_is_dynamic(attach) !=
> -	    dma_buf_is_dynamic(dmabuf)) {
> -		struct sg_table *sgt;
> -
> -		dma_resv_lock(attach->dmabuf->resv, NULL);
> -		if (dma_buf_is_dynamic(attach->dmabuf)) {
> -			ret = dmabuf->ops->pin(attach);
> -			if (ret)
> -				goto err_unlock;
> -		}
> -
> -		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
> -		if (!sgt)
> -			sgt = ERR_PTR(-ENOMEM);
> -		if (IS_ERR(sgt)) {
> -			ret = PTR_ERR(sgt);
> -			goto err_unpin;
> -		}
> -		dma_resv_unlock(attach->dmabuf->resv);
> -		attach->sgt = sgt;
> -		attach->dir = DMA_BIDIRECTIONAL;
> -	}
> -
>  	return attach;
>  
>  err_attach:
>  	kfree(attach);
>  	return ERR_PTR(ret);
> -
> -err_unpin:
> -	if (dma_buf_is_dynamic(attach->dmabuf))
> -		dmabuf->ops->unpin(attach);
> -
> -err_unlock:
> -	dma_resv_unlock(attach->dmabuf->resv);
> -
> -	dma_buf_detach(dmabuf, attach);
> -	return ERR_PTR(ret);
>  }
>  EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
>  
> @@ -995,16 +949,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>  }
>  EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
>  
> -static void __unmap_dma_buf(struct dma_buf_attachment *attach,
> -			    struct sg_table *sg_table,
> -			    enum dma_data_direction direction)
> -{
> -	/* uses XOR, hence this unmangles */
> -	mangle_sg_table(sg_table);
> -
> -	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> -}
> -
>  /**
>   * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
>   * @dmabuf:	[in]	buffer to detach from.
> @@ -1022,11 +966,12 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>  	dma_resv_lock(dmabuf->resv, NULL);
>  
>  	if (attach->sgt) {
> +		mangle_sg_table(attach->sgt);
> +		attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt,
> +						   attach->dir);
>  
> -		__unmap_dma_buf(attach, attach->sgt, attach->dir);
> -
> -		if (dma_buf_is_dynamic(attach->dmabuf))
> -			dmabuf->ops->unpin(attach);
> +		if (dma_buf_pin_on_map(attach))
> +			dma_buf_unpin(attach);
>  	}
>  	list_del(&attach->node);
>  
> @@ -1058,7 +1003,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
>  	struct dma_buf *dmabuf = attach->dmabuf;
>  	int ret = 0;
>  
> -	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
> +	WARN_ON(!attach->importer_ops);
>  
>  	dma_resv_assert_held(dmabuf->resv);
>  
> @@ -1081,7 +1026,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
>  {
>  	struct dma_buf *dmabuf = attach->dmabuf;
>  
> -	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
> +	WARN_ON(!attach->importer_ops);
>  
>  	dma_resv_assert_held(dmabuf->resv);
>  
> @@ -1115,7 +1060,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>  					enum dma_data_direction direction)
>  {
>  	struct sg_table *sg_table;
> -	int r;
> +	signed long ret;
>  
>  	might_sleep();
>  
> @@ -1136,29 +1081,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>  		return attach->sgt;
>  	}
>  
> -	if (dma_buf_is_dynamic(attach->dmabuf)) {
> -		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
> -			r = attach->dmabuf->ops->pin(attach);
> -			if (r)
> -				return ERR_PTR(r);
> -		}
> +	if (dma_buf_pin_on_map(attach)) {
> +		ret = attach->dmabuf->ops->pin(attach);
> +		if (ret)

I do wonder whether we should have a WARN_ON(ret == -EBUSY) or similar, to
catch drivers that have moved the memory to an unsuitable place despite
attachments existing.
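
For illustration, one way such a check could look on top of this hunk
(hypothetical, not part of the patch):

	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		/*
		 * An exporter returning -EBUSY here would mean the buffer
		 * was moved somewhere unsuitable while attachments exist.
		 */
		WARN_ON(ret == -EBUSY);
		if (ret)
			return ERR_PTR(ret);
	}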

> +			return ERR_PTR(ret);
>  	}
>  
> -	sg_table = __map_dma_buf(attach, direction);

> +	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>  	if (!sg_table)
>  		sg_table = ERR_PTR(-ENOMEM);
> +	if (IS_ERR(sg_table))
> +		goto error_unpin;
>  
> -	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
> -	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> -		attach->dmabuf->ops->unpin(attach);
> +	/*
> +	 * When not providing ops the importer doesn't wait for fences either.
> +	 */
> +	if (!attach->importer_ops) {

Yeah, we definitely want to keep this static helper function to make this
check less opaque. Also, strictly speaking this is only needed for the
dma_buf_pin_on_map case and not for every importer, plus there shouldn't be
any harm in doing this after pinning but before calling map_dma_buf. But
maybe it's better to leave it as-is.
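
Purely as a sketch of that alternative (waiting only in the pin-on-map case,
after pinning but before map_dma_buf; not what this patch does):

	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		if (ret)
			return ERR_PTR(ret);

		/* Wait for kernel fences before mapping instead of after it. */
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unpin(attach);
			return ERR_PTR(ret);
		}
	}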

> +		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
> +					    DMA_RESV_USAGE_KERNEL, true,
> +					    MAX_SCHEDULE_TIMEOUT);
> +		if (ret < 0)
> +			goto error_unmap;
> +	}
> +	mangle_sg_table(sg_table);
>  
> -	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
> +	if (attach->dmabuf->ops->cache_sgt_mapping) {
>  		attach->sgt = sg_table;
>  		attach->dir = direction;
>  	}
>  
>  #ifdef CONFIG_DMA_API_DEBUG
> -	if (!IS_ERR(sg_table)) {
> +	{
>  		struct scatterlist *sg;
>  		u64 addr;
>  		int len;
> @@ -1175,6 +1128,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>  	}
>  #endif /* CONFIG_DMA_API_DEBUG */
>  	return sg_table;
> +
> +error_unmap:
> +	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +	sg_table = ERR_PTR(ret);
> +
> +error_unpin:
> +	if (dma_buf_pin_on_map(attach))
> +		attach->dmabuf->ops->unpin(attach);
> +
> +	return sg_table;
>  }
>  EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
>  
> @@ -1230,11 +1193,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>  	if (attach->sgt == sg_table)
>  		return;
>  
> -	__unmap_dma_buf(attach, sg_table, direction);
> +	mangle_sg_table(sg_table);
> +	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>  
> -	if (dma_buf_is_dynamic(attach->dmabuf) &&
> -	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> -		dma_buf_unpin(attach);
> +	if (dma_buf_pin_on_map(attach))
> +		attach->dmabuf->ops->unpin(attach);
>  }
>  EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
>  
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index 36216d28d8bd..c54ff2dda8cb 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -583,20 +583,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
>  	return !!dmabuf->ops->pin;
>  }
>  
> -/**
> - * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
> - * mappings
> - * @attach: the DMA-buf attachment to check
> - *
> - * Returns true if a DMA-buf importer wants to call the map/unmap functions with
> - * the dma_resv lock held.

Yeah, this kerneldoc is quite a bit outdated :-)

> - */
> -static inline bool
> -dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
> -{
> -	return !!attach->importer_ops;
> -}

With the nits addressed:

Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>

Cheers, Sima


> -
>  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>  					  struct device *dev);
>  struct dma_buf_attachment *
> -- 
> 2.34.1
>
Dmitry Osipenko Feb. 17, 2025, 5:13 p.m. UTC | #2
On 2/11/25 19:31, Christian König wrote:
> As a workaround to smoothly transit from static to dynamic DMA-buf
> handling we cached the sg_table on attach if dynamic handling mismatched
> between exporter and importer.
> 
> Since Dmitry and Thomas cleaned that up and also documented the lock
> handling we can drop this workaround now.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-buf.c | 149 ++++++++++++++------------------------
>  include/linux/dma-buf.h   |  14 ----
>  2 files changed, 56 insertions(+), 107 deletions(-)

Looks good, as long as IGT passes.

Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>