diff mbox series

[5.10.y-cip,35/48] cache: ax45mp_cache: Add non coherent support

Message ID 20240205124135.14779-36-prabhakar.mahadev-lad.rj@bp.renesas.com (mailing list archive)
State New
Headers show
Series Add support for Renesas RZ/Five RISC-V SoC | expand

Commit Message

Lad Prabhakar Feb. 5, 2024, 12:41 p.m. UTC
As support for non-coherent DMA is missing for the RISC-V
architecture in 5.10-cip, introduce a new patch adding non-coherent
DMA support on the RZ/Five SoC.

This enables the required config and the callbacks required to handle
the non-coherent DMA support for Renesas RZ/Five SoC.

Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
---
 drivers/cache/Kconfig        |  3 ++
 drivers/cache/ax45mp_cache.c | 54 ++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

Comments

Pavel Machek Feb. 5, 2024, 7:36 p.m. UTC | #1
Hi!

> As support for non-coherent DMA is missing in 5.10-cip for RISC-V
> architecture, introducing a new patch to support non-coherent DMA
> support on RZ/Five SoC.

> +void arch_sync_dma_for_cpu(phys_addr_t paddr,
> +			   size_t size, enum dma_data_direction dir)
> +{
> +	switch (dir) {
> +	case DMA_TO_DEVICE:
> +		break;
> +
> +	case DMA_FROM_DEVICE:
> +	case DMA_BIDIRECTIONAL:
> +		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */

I believe the comment will need fixing w.r.t. English.

Plus, some explanation. The risc-v CPU you are using is not doing
speculative execution, right?

> +		ax45mp_dma_cache_inv(paddr, size);

If it is, it could do prefetch here, and break the rules, no?

If it is not, do we need the invalidate?

> +		break;

Best regards,
								Pavel
diff mbox series

Patch

diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index 3370a5f0e77f..400cb09f6bd4 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -3,6 +3,9 @@  menu "Cache Drivers"
 
 config AX45MP_L2_CACHE
 	bool "Andes Technology AX45MP L2 Cache controller"
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_HAS_SETUP_DMA_OPS
 	help
 	  Support for the L2 cache controller on Andes Technology AX45MP platforms.
 
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 7984b90d04f2..f7db1ed3e973 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -132,6 +132,60 @@  static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
 	local_irq_restore(flags);
 }
 
+static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+	ax45mp_dma_cache_wback(paddr, size);
+	ax45mp_dma_cache_inv(paddr, size);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr,
+			      size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		ax45mp_dma_cache_wback(paddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		fallthrough;
+
+	case DMA_BIDIRECTIONAL:
+		/* Skip the invalidate here if it's done later */
+		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU))
+			ax45mp_dma_cache_wback(paddr, size);
+		else
+			ax45mp_dma_cache_wback_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr,
+			   size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+		ax45mp_dma_cache_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+			const struct iommu_ops *iommu, bool coherent)
+{
+	dev->dma_coherent = coherent;
+}
+
 static int ax45mp_get_l2_line_size(struct device_node *np)
 {
 	int ret;