
[net-next,v3,2/7] dma: avoid redundant calls for sync operations

Message ID 20240214162201.4168778-3-aleksander.lobakin@intel.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series: dma: skip calling no-op sync ops when possible

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 14936 this patch: 14936
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 1 maintainers not CCed: james@equiv.tech
netdev/build_clang success Errors and warnings before: 3113 this patch: 3113
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 16173 this patch: 16173
netdev/checkpatch warning CHECK: Please use a blank line after function/struct/union/enum declarations
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-02-15--00-00 (tests: 1443)

Commit Message

Alexander Lobakin Feb. 14, 2024, 4:21 p.m. UTC
Quite often, at least on x86_64, devices do not need dma_sync
operations. Indeed, when dev_is_dma_coherent(dev) is true and
dev_use_swiotlb(dev) is false, iommu_dma_sync_single_for_cpu()
and friends do nothing.

However, indirectly calling them when CONFIG_RETPOLINE=y consumes about
10% of the cycles on a CPU receiving packets from softirq at a ~100 Gbit
rate. Even when CONFIG_RETPOLINE is not set, there is a cost of about 3%.

Add a dev->dma_skip_sync boolean, which is set during device
initialization depending on the setup: dev_is_dma_coherent() for
direct DMA, !(sync_single_for_device || sync_single_for_cpu), or the new
dma_map_ops flag, %DMA_F_CAN_SKIP_SYNC, advertised for non-NULL DMA ops.
Later, if/when SWIOTLB is used for the first time, the flag is cleared
in swiotlb_tbl_map_single().

On iavf, a UDP trafficgen test with XDP_DROP in skb mode shows a 3-5%
performance increase for direct DMA.

Suggested-by: Christoph Hellwig <hch@lst.de> # direct DMA shortcut
Co-developed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/linux/device.h      |  5 +++++
 include/linux/dma-map-ops.h | 21 ++++++++++++++++++++
 include/linux/dma-mapping.h |  6 +++++-
 drivers/base/dd.c           |  2 ++
 kernel/dma/mapping.c        | 39 ++++++++++++++++++++++++++++++++++++-
 kernel/dma/swiotlb.c        |  8 ++++++++
 6 files changed, 79 insertions(+), 2 deletions(-)
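
For context: earlier in this series, the dma_sync_*() helpers were split
into inline wrappers around __dma_sync_*() variants (visible in the
dma-mapping.h hunk below). A minimal sketch of how such a wrapper is
expected to consult the new flag; an illustration, not the exact code
from that patch:

/* Sketch: skip the indirect sync call entirely when it is a no-op. */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t addr, size_t size,
					   enum dma_data_direction dir)
{
	if (dma_skip_sync(dev))
		return;		/* coherent, non-SWIOTLB: nothing to do */

	__dma_sync_single_for_cpu(dev, addr, size, dir);
}

With CONFIG_RETPOLINE=y, this replaces a retpolined indirect call with a
single well-predicted branch on the hot path.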

Comments

Robin Murphy Feb. 14, 2024, 5:55 p.m. UTC | #1
On 2024-02-14 4:21 pm, Alexander Lobakin wrote:
> Quite often, at least on x86_64, devices do not need dma_sync
> operations. Indeed, when dev_is_dma_coherent(dev) is true and
> dev_use_swiotlb(dev) is false, iommu_dma_sync_single_for_cpu()
> and friends do nothing.
> 
> However, indirectly calling them when CONFIG_RETPOLINE=y consumes about
> 10% of the cycles on a CPU receiving packets from softirq at a ~100 Gbit
> rate. Even when CONFIG_RETPOLINE is not set, there is a cost of about 3%.
> 
> Add a dev->dma_skip_sync boolean, which is set during device
> initialization depending on the setup: dev_is_dma_coherent() for
> direct DMA, !(sync_single_for_device || sync_single_for_cpu), or the new
> dma_map_ops flag, %DMA_F_CAN_SKIP_SYNC, advertised for non-NULL DMA ops.
> Later, if/when SWIOTLB is used for the first time, the flag is cleared
> in swiotlb_tbl_map_single().
> 
> On iavf, a UDP trafficgen test with XDP_DROP in skb mode shows a 3-5%
> performance increase for direct DMA.
> 
> Suggested-by: Christoph Hellwig <hch@lst.de> # direct DMA shortcut
> Co-developed-by: Eric Dumazet <edumazet@google.com>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>   include/linux/device.h      |  5 +++++
>   include/linux/dma-map-ops.h | 21 ++++++++++++++++++++
>   include/linux/dma-mapping.h |  6 +++++-
>   drivers/base/dd.c           |  2 ++
>   kernel/dma/mapping.c        | 39 ++++++++++++++++++++++++++++++++++++-
>   kernel/dma/swiotlb.c        |  8 ++++++++
>   6 files changed, 79 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/device.h b/include/linux/device.h
> index 97c4b046c09d..f23e6a32bea0 100644
> --- a/include/linux/device.h
> +++ b/include/linux/device.h
> @@ -686,6 +686,8 @@ struct device_physical_location {
>    *		other devices probe successfully.
>    * @dma_coherent: this particular device is dma coherent, even if the
>    *		architecture supports non-coherent devices.
> + * @dma_skip_sync: DMA sync operations can be skipped for coherent non-SWIOTLB
> + *		buffers.
>    * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
>    *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
>    *		and optionall (if the coherent mask is large enough) also
> @@ -800,6 +802,9 @@ struct device {
>       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
>   	bool			dma_coherent:1;
>   #endif
> +#ifdef CONFIG_DMA_NEED_SYNC
> +	bool			dma_skip_sync:1;
> +#endif
>   #ifdef CONFIG_DMA_OPS_BYPASS
>   	bool			dma_ops_bypass : 1;
>   #endif
> diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
> index 4abc60f04209..327b73f653ad 100644
> --- a/include/linux/dma-map-ops.h
> +++ b/include/linux/dma-map-ops.h
> @@ -18,8 +18,11 @@ struct iommu_ops;
>    *
>    * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
>    * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
> + * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
> + * coherent and the buffer is not a SWIOTLB buffer.
>    */
>   #define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)
> +#define DMA_F_CAN_SKIP_SYNC		BIT(1)

Yuck, please be consistent - either match the style of the existing 
code, or change that to BIT(0) as well.

>   struct dma_map_ops {
>   	unsigned int flags;
> @@ -111,6 +114,24 @@ static inline void set_dma_ops(struct device *dev,
>   }
>   #endif /* CONFIG_DMA_OPS */
>   
> +#ifdef CONFIG_DMA_NEED_SYNC
> +void dma_setup_skip_sync(struct device *dev);
> +
> +static inline void dma_clear_skip_sync(struct device *dev)
> +{
> +	/* Clear it only once so that the function can be called on the hot path */
> +	if (unlikely(dev->dma_skip_sync))
> +		dev->dma_skip_sync = false;
> +}
> +#else /* !CONFIG_DMA_NEED_SYNC */
> +static inline void dma_setup_skip_sync(struct device *dev)
> +{
> +}
> +static inline void dma_clear_skip_sync(struct device *dev)
> +{
> +}
> +#endif /* !CONFIG_DMA_NEED_SYNC */
> +
>   #ifdef CONFIG_DMA_CMA
>   extern struct cma *dma_contiguous_default_area;
>   
> diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
> index 6c7640441214..d85ae541c267 100644
> --- a/include/linux/dma-mapping.h
> +++ b/include/linux/dma-mapping.h
> @@ -364,7 +364,11 @@ static inline void __dma_sync_single_range_for_device(struct device *dev,
>   
>   static inline bool dma_skip_sync(const struct device *dev)
>   {
> -	return !IS_ENABLED(CONFIG_DMA_NEED_SYNC);
> +#ifdef CONFIG_DMA_NEED_SYNC
> +	return dev->dma_skip_sync;
> +#else
> +	return true;
> +#endif
>   }
>   
>   static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
> diff --git a/drivers/base/dd.c b/drivers/base/dd.c
> index 85152537dbf1..67ad3e1d51f6 100644
> --- a/drivers/base/dd.c
> +++ b/drivers/base/dd.c
> @@ -642,6 +642,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
>   			goto pinctrl_bind_failed;
>   	}
>   
> +	dma_setup_skip_sync(dev);
> +
>   	ret = driver_sysfs_add(dev);
>   	if (ret) {
>   		pr_err("%s: driver_sysfs_add(%s) failed\n",
> diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
> index 85feaa0e008c..5f588e31ea89 100644
> --- a/kernel/dma/mapping.c
> +++ b/kernel/dma/mapping.c
> @@ -846,8 +846,14 @@ bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
>   	const struct dma_map_ops *ops = get_dma_ops(dev);
>   
>   	if (dma_map_direct(dev, ops))
> +		/*
> +		 * dma_skip_sync could've been set to false on the first SWIOTLB
> +		 * buffer mapping, but @dma_addr is not necessarily a SWIOTLB
> +		 * buffer. In that case, fall back to the more granular check.
> +		 */
>   		return dma_direct_need_sync(dev, dma_addr);
> -	return ops->sync_single_for_cpu || ops->sync_single_for_device;
> +
> +	return true;
>   }
>   EXPORT_SYMBOL_GPL(__dma_need_sync);
>   
> @@ -861,3 +867,34 @@ unsigned long dma_get_merge_boundary(struct device *dev)
>   	return ops->get_merge_boundary(dev);
>   }
>   EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
> +
> +#ifdef CONFIG_DMA_NEED_SYNC
> +void dma_setup_skip_sync(struct device *dev)
> +{
> +	const struct dma_map_ops *ops = get_dma_ops(dev);
> +
> +	if (dma_map_direct(dev, ops))

For DMA_OPS_BYPASS this will be making the decision based on the default 
dma_mask, but a driver could subsequently set a smaller mask for which 
the bypass condition will no longer be true.

Maybe instead of driver probe this setup should actually be tied in to 
dma_set_mask() anyway?
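
Roughly, as a sketch (assuming the current shape of dma_set_mask() in
kernel/dma/mapping.c; illustrative only):

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	/* re-evaluate: a smaller mask may defeat DMA_OPS_BYPASS */
	dma_setup_skip_sync(dev);

	return 0;
}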

> +		/*
> +		 * dma_skip_sync will be set to false on the first SWIOTLB buffer
> +		 * mapping, if any. During the device initialization, it's
> +		 * enough to check only for DMA coherence.
> +		 */
> +		dev->dma_skip_sync = dev_is_dma_coherent(dev);
> +	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu)

I guess this was the existing condition from dma_need_sync(), but now that
it's on a one-off slow path it might be nice to check the sync_sg_* ops as
well for completeness, or at least to comment that nobody should be
implementing those without also implementing the sync_single_* ops.

> +		/*
> +		 * Synchronization is not possible when none of the DMA sync ops
> +		 * is set. This check precedes the one below, as it disables
> +		 * the synchronization unconditionally.
> +		 */
> +		dev->dma_skip_sync = true;
> +	else if (ops->flags & DMA_F_CAN_SKIP_SYNC)

Personally I'd combine this into the dma-direct condition.

> +		/*
> +		 * Assume that when ``DMA_F_CAN_SKIP_SYNC`` is advertised,
> +		 * the conditions for synchronizing are the same as with
> +		 * the direct DMA.
> +		 */
> +		dev->dma_skip_sync = dev_is_dma_coherent(dev);
> +	else
> +		dev->dma_skip_sync = false;
> +}
> +#endif /* CONFIG_DMA_NEED_SYNC */
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index b079a9a8e087..0b737eab4d48 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -1323,6 +1323,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
>   		return (phys_addr_t)DMA_MAPPING_ERROR;
>   	}
>   
> +	/*
> +	 * If dma_skip_sync was set, clear it on the first SWIOTLB buffer
> +	 * mapping so that SWIOTLB buffers are always synced.
> +	 */
> +	dma_clear_skip_sync(dev);
> +
>   	/*
>   	 * Save away the mapping from the original address to the DMA address.
>   	 * This is needed when we sync the memory.  Then we sync the buffer if
> @@ -1640,6 +1646,8 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
>   	if (index == -1)
>   		return NULL;
>   
> +	dma_clear_skip_sync(dev);

We don't need this here, since this isn't a streaming API path.

Thanks,
Robin.

> +
>   	tlb_addr = slot_addr(pool->start, index);
>   
>   	return pfn_to_page(PFN_DOWN(tlb_addr));
Christoph Hellwig Feb. 15, 2024, 5:08 a.m. UTC | #2
On Wed, Feb 14, 2024 at 05:55:23PM +0000, Robin Murphy wrote:
>>   #define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)
>> +#define DMA_F_CAN_SKIP_SYNC		BIT(1)
>
> Yuck, please be consistent - either match the style of the existing code, 
> or change that to BIT(0) as well.

Just don't use BIT() ever.  It doesn't save any typing and creates a
totally pointless mental indirection.

> I guess this was the existing condition from dma_need_sync(), but now that
> it's on a one-off slow path it might be nice to check the sync_sg_* ops as
> well for completeness, or at least to comment that nobody should be
> implementing those without also implementing the sync_single_* ops.

Implementing only one and not the other doesn't make any sense.  Maybe
a debug check for that is ok, but things will break badly if they aren't
in sync anyway.
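
Something along these lines in the ops branch of dma_setup_skip_sync()
would do as a sketch (untested):

/* sync_single_* and sync_sg_* ops are expected to come in pairs */
WARN_ON_ONCE(!!ops->sync_single_for_cpu != !!ops->sync_sg_for_cpu ||
	     !!ops->sync_single_for_device != !!ops->sync_sg_for_device);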
Robin Murphy Feb. 15, 2024, 11:36 a.m. UTC | #3
On 15/02/2024 5:08 am, Christoph Hellwig wrote:
> On Wed, Feb 14, 2024 at 05:55:23PM +0000, Robin Murphy wrote:
>>>    #define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)
>>> +#define DMA_F_CAN_SKIP_SYNC		BIT(1)
>>
>> Yuck, please be consistent - either match the style of the existing code,
>> or change that to BIT(0) as well.
> 
> Just don't use BIT() ever.  It doesn't save any typing and creates a
> totally pointless mental indirection.
> 
>> I guess this was the existing condition from dma_need_sync(), but now that
>> it's on a one-off slow path it might be nice to check the sync_sg_* ops as
>> well for completeness, or at least to comment that nobody should be
>> implementing those without also implementing the sync_single_* ops.
> 
> Implementing only one and not the other doesn't make any sense.  Maybe
> a debug check for that is ok, but things will break badly if they aren't
> in sync anyway.

In principle we *could* have an implementation which used bouncing 
purely to merge coherent scatterlist segments, and thus didn't need to do
anything for single mappings. I agree that it wouldn't seem like a 
particularly realistic thing to do these days, but I don't believe the 
API rules it out, so it might be nice to enforce that assumption 
somewhere if we are actually relying on it (although I also concur that 
this may not necessarily be the ideal place to do that in general).

Thanks,
Robin.
Alexander Lobakin Feb. 19, 2024, 12:49 p.m. UTC | #4
From: Robin Murphy <robin.murphy@arm.com>
Date: Wed, 14 Feb 2024 17:55:23 +0000

> On 2024-02-14 4:21 pm, Alexander Lobakin wrote:

[...]

>> +        /*
>> +         * Synchronization is not possible when none of the DMA sync ops
>> +         * is set. This check precedes the one below, as it disables
>> +         * the synchronization unconditionally.
>> +         */
>> +        dev->dma_skip_sync = true;
>> +    else if (ops->flags & DMA_F_CAN_SKIP_SYNC)
> 
> Personally I'd combine this into the dma-direct condition.

Please read the code comment a couple lines above :D

> 
>> +        /*
>> +         * Assume that when ``DMA_F_CAN_SKIP_SYNC`` is advertised,
>> +         * the conditions for synchronizing are the same as with
>> +         * the direct DMA.
>> +         */
>> +        dev->dma_skip_sync = dev_is_dma_coherent(dev);
>> +    else
>> +        dev->dma_skip_sync = false;
>> +}
>> +#endif /* CONFIG_DMA_NEED_SYNC */

[...]

Thanks,
Olek
Robin Murphy Feb. 26, 2024, 3:45 p.m. UTC | #5
On 19/02/2024 12:49 pm, Alexander Lobakin wrote:
> From: Robin Murphy <robin.murphy@arm.com>
> Date: Wed, 14 Feb 2024 17:55:23 +0000
> 
>> On 2024-02-14 4:21 pm, Alexander Lobakin wrote:
> 
> [...]
> 
>>> +        /*
>>> +         * Synchronization is not possible when none of the DMA sync ops
>>> +         * is set. This check precedes the one below, as it disables
>>> +         * the synchronization unconditionally.
>>> +         */
>>> +        dev->dma_skip_sync = true;
>>> +    else if (ops->flags & DMA_F_CAN_SKIP_SYNC)
>>
>> Personally I'd combine this into the dma-direct condition.
> 
> Please read the code comment a couple lines above :D

And my point is that that logic is not actually useful, since it would 
be nonsensical for ops to set DMA_F_CAN_SKIP_SYNC if they don't even 
implement sync ops anyway.

If the intent of DMA_F_CAN_SKIP_SYNC is to mean "behaves like 
dma-direct", then "if (dma_map_direct(...) || ops->flags & 
DMA_F_CAN_SKIP_SYNC)" is an entirely logical and expected condition.

Thanks,
Robin.

Patch

diff --git a/include/linux/device.h b/include/linux/device.h
index 97c4b046c09d..f23e6a32bea0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -686,6 +686,8 @@  struct device_physical_location {
  *		other devices probe successfully.
  * @dma_coherent: this particular device is dma coherent, even if the
  *		architecture supports non-coherent devices.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent non-SWIOTLB
+ *		buffers.
  * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
  *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
  *		and optionall (if the coherent mask is large enough) also
@@ -800,6 +802,9 @@  struct device {
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 	bool			dma_coherent:1;
 #endif
+#ifdef CONFIG_DMA_NEED_SYNC
+	bool			dma_skip_sync:1;
+#endif
 #ifdef CONFIG_DMA_OPS_BYPASS
 	bool			dma_ops_bypass : 1;
 #endif
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 4abc60f04209..327b73f653ad 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -18,8 +18,11 @@  struct iommu_ops;
  *
  * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
  * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
+ * coherent and the buffer is not a SWIOTLB buffer.
  */
 #define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)
+#define DMA_F_CAN_SKIP_SYNC		BIT(1)
 
 struct dma_map_ops {
 	unsigned int flags;
@@ -111,6 +114,24 @@  static inline void set_dma_ops(struct device *dev,
 }
 #endif /* CONFIG_DMA_OPS */
 
+#ifdef CONFIG_DMA_NEED_SYNC
+void dma_setup_skip_sync(struct device *dev);
+
+static inline void dma_clear_skip_sync(struct device *dev)
+{
+	/* Clear it only once so that the function can be called on the hot path */
+	if (unlikely(dev->dma_skip_sync))
+		dev->dma_skip_sync = false;
+}
+#else /* !CONFIG_DMA_NEED_SYNC */
+static inline void dma_setup_skip_sync(struct device *dev)
+{
+}
+static inline void dma_clear_skip_sync(struct device *dev)
+{
+}
+#endif /* !CONFIG_DMA_NEED_SYNC */
+
 #ifdef CONFIG_DMA_CMA
 extern struct cma *dma_contiguous_default_area;
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6c7640441214..d85ae541c267 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -364,7 +364,11 @@  static inline void __dma_sync_single_range_for_device(struct device *dev,
 
 static inline bool dma_skip_sync(const struct device *dev)
 {
-	return !IS_ENABLED(CONFIG_DMA_NEED_SYNC);
+#ifdef CONFIG_DMA_NEED_SYNC
+	return dev->dma_skip_sync;
+#else
+	return true;
+#endif
 }
 
 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 85152537dbf1..67ad3e1d51f6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -642,6 +642,8 @@  static int really_probe(struct device *dev, struct device_driver *drv)
 			goto pinctrl_bind_failed;
 	}
 
+	dma_setup_skip_sync(dev);
+
 	ret = driver_sysfs_add(dev);
 	if (ret) {
 		pr_err("%s: driver_sysfs_add(%s) failed\n",
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 85feaa0e008c..5f588e31ea89 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -846,8 +846,14 @@  bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_map_direct(dev, ops))
+		/*
+		 * dma_skip_sync could've been set to false on the first SWIOTLB
+		 * buffer mapping, but @dma_addr is not necessarily a SWIOTLB
+		 * buffer. In that case, fall back to the more granular check.
+		 */
 		return dma_direct_need_sync(dev, dma_addr);
-	return ops->sync_single_for_cpu || ops->sync_single_for_device;
+
+	return true;
 }
 EXPORT_SYMBOL_GPL(__dma_need_sync);
 
@@ -861,3 +867,34 @@  unsigned long dma_get_merge_boundary(struct device *dev)
 	return ops->get_merge_boundary(dev);
 }
 EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
+
+#ifdef CONFIG_DMA_NEED_SYNC
+void dma_setup_skip_sync(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_map_direct(dev, ops))
+		/*
+		 * dma_skip_sync will be set to false on the first SWIOTLB buffer
+		 * mapping, if any. During the device initialization, it's
+		 * enough to check only for DMA coherence.
+		 */
+		dev->dma_skip_sync = dev_is_dma_coherent(dev);
+	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu)
+		/*
+		 * Synchronization is not possible when none of the DMA sync ops
+		 * is set. This check precedes the one below, as it disables
+		 * the synchronization unconditionally.
+		 */
+		dev->dma_skip_sync = true;
+	else if (ops->flags & DMA_F_CAN_SKIP_SYNC)
+		/*
+		 * Assume that when ``DMA_F_CAN_SKIP_SYNC`` is advertised,
+		 * the conditions for synchronizing are the same as with
+		 * the direct DMA.
+		 */
+		dev->dma_skip_sync = dev_is_dma_coherent(dev);
+	else
+		dev->dma_skip_sync = false;
+}
+#endif /* CONFIG_DMA_NEED_SYNC */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b079a9a8e087..0b737eab4d48 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1323,6 +1323,12 @@  phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		return (phys_addr_t)DMA_MAPPING_ERROR;
 	}
 
+	/*
+	 * If dma_skip_sync was set, clear it on the first SWIOTLB buffer
+	 * mapping so that SWIOTLB buffers are always synced.
+	 */
+	dma_clear_skip_sync(dev);
+
 	/*
 	 * Save away the mapping from the original address to the DMA address.
 	 * This is needed when we sync the memory.  Then we sync the buffer if
@@ -1640,6 +1646,8 @@  struct page *swiotlb_alloc(struct device *dev, size_t size)
 	if (index == -1)
 		return NULL;
 
+	dma_clear_skip_sync(dev);
+
 	tlb_addr = slot_addr(pool->start, index);
 
 	return pfn_to_page(PFN_DOWN(tlb_addr));