Message ID | 20230211031821.976408-6-cristian.ciocaltea@collabora.com (mailing list archive) |
---|---|
State | Not Applicable |
Headers | show |
Series | Enable networking support for StarFive JH7100 SoC | expand |
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Guessing tree name failed - patch did not apply |
On 11/02/2023 03:18, Cristian Ciocaltea wrote: > From: Emil Renner Berthing <kernel@esmil.dk> > > This variant is used on the StarFive JH7100 SoC. > > Signed-off-by: Emil Renner Berthing <kernel@esmil.dk> > Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> > --- > arch/riscv/Kconfig | 6 ++++-- > arch/riscv/mm/dma-noncoherent.c | 37 +++++++++++++++++++++++++++++++-- > 2 files changed, 39 insertions(+), 4 deletions(-) > > diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig > index 9c687da7756d..05f6c77faf6f 100644 > --- a/arch/riscv/Kconfig > +++ b/arch/riscv/Kconfig > @@ -232,12 +232,14 @@ config LOCKDEP_SUPPORT > def_bool y > > config RISCV_DMA_NONCOHERENT > - bool > + bool "Support non-coherent DMA" > + default SOC_STARFIVE > select ARCH_HAS_DMA_PREP_COHERENT > + select ARCH_HAS_DMA_SET_UNCACHED > + select ARCH_HAS_DMA_CLEAR_UNCACHED > select ARCH_HAS_SYNC_DMA_FOR_DEVICE > select ARCH_HAS_SYNC_DMA_FOR_CPU > select ARCH_HAS_SETUP_DMA_OPS > - select DMA_DIRECT_REMAP > > config AS_HAS_INSN > def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero) > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c > index d919efab6eba..e07e53aea537 100644 > --- a/arch/riscv/mm/dma-noncoherent.c > +++ b/arch/riscv/mm/dma-noncoherent.c > @@ -9,14 +9,21 @@ > #include <linux/dma-map-ops.h> > #include <linux/mm.h> > #include <asm/cacheflush.h> > +#include <soc/sifive/sifive_ccache.h> > > static bool noncoherent_supported; > > void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, > enum dma_data_direction dir) > { > - void *vaddr = phys_to_virt(paddr); > + void *vaddr; > > + if (sifive_ccache_handle_noncoherent()) { > + sifive_ccache_flush_range(paddr, size); > + return; > + } > + > + vaddr = phys_to_virt(paddr); > switch (dir) { > case DMA_TO_DEVICE: > ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size); > @@ -35,8 +42,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, > void 
arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, > enum dma_data_direction dir) > { > - void *vaddr = phys_to_virt(paddr); > + void *vaddr; > + > + if (sifive_ccache_handle_noncoherent()) { > + sifive_ccache_flush_range(paddr, size); > + return; > + } ok, what happens if we have a system where the ccache and another level of cache also require maintenance operations? > > + vaddr = phys_to_virt(paddr); > switch (dir) { > case DMA_TO_DEVICE: > break; > @@ -49,10 +62,30 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, > size_t size, > } > } > > +void *arch_dma_set_uncached(void *addr, size_t size) > +{ > + if (sifive_ccache_handle_noncoherent()) > + return sifive_ccache_set_uncached(addr, size); > + > + return addr; > +} > + > +void arch_dma_clear_uncached(void *addr, size_t size) > +{ > + if (sifive_ccache_handle_noncoherent()) > + sifive_ccache_clear_uncached(addr, size); > +} > + > void arch_dma_prep_coherent(struct page *page, size_t size) > { > void *flush_addr = page_address(page); > > + if (sifive_ccache_handle_noncoherent()) { > + memset(flush_addr, 0, size); > + sifive_ccache_flush_range(__pa(flush_addr), size); > + return; > + } > + > ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size); > } >
On 2/13/23 10:30, Ben Dooks wrote: > On 11/02/2023 03:18, Cristian Ciocaltea wrote: >> From: Emil Renner Berthing <kernel@esmil.dk> >> >> This variant is used on the StarFive JH7100 SoC. >> >> Signed-off-by: Emil Renner Berthing <kernel@esmil.dk> >> Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> >> --- >> arch/riscv/Kconfig | 6 ++++-- >> arch/riscv/mm/dma-noncoherent.c | 37 +++++++++++++++++++++++++++++++-- >> 2 files changed, 39 insertions(+), 4 deletions(-) >> >> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig >> index 9c687da7756d..05f6c77faf6f 100644 >> --- a/arch/riscv/Kconfig >> +++ b/arch/riscv/Kconfig >> @@ -232,12 +232,14 @@ config LOCKDEP_SUPPORT >> def_bool y >> config RISCV_DMA_NONCOHERENT >> - bool >> + bool "Support non-coherent DMA" >> + default SOC_STARFIVE >> select ARCH_HAS_DMA_PREP_COHERENT >> + select ARCH_HAS_DMA_SET_UNCACHED >> + select ARCH_HAS_DMA_CLEAR_UNCACHED >> select ARCH_HAS_SYNC_DMA_FOR_DEVICE >> select ARCH_HAS_SYNC_DMA_FOR_CPU >> select ARCH_HAS_SETUP_DMA_OPS >> - select DMA_DIRECT_REMAP >> config AS_HAS_INSN >> def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) >> t0$(comma) t0$(comma) zero) >> diff --git a/arch/riscv/mm/dma-noncoherent.c >> b/arch/riscv/mm/dma-noncoherent.c >> index d919efab6eba..e07e53aea537 100644 >> --- a/arch/riscv/mm/dma-noncoherent.c >> +++ b/arch/riscv/mm/dma-noncoherent.c >> @@ -9,14 +9,21 @@ >> #include <linux/dma-map-ops.h> >> #include <linux/mm.h> >> #include <asm/cacheflush.h> >> +#include <soc/sifive/sifive_ccache.h> >> static bool noncoherent_supported; >> void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, >> enum dma_data_direction dir) >> { >> - void *vaddr = phys_to_virt(paddr); >> + void *vaddr; >> + if (sifive_ccache_handle_noncoherent()) { >> + sifive_ccache_flush_range(paddr, size); >> + return; >> + } >> + >> + vaddr = phys_to_virt(paddr); >> switch (dir) { >> case DMA_TO_DEVICE: >> ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size); >> @@ 
-35,8 +42,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, >> size_t size, >> void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, >> enum dma_data_direction dir) >> { >> - void *vaddr = phys_to_virt(paddr); >> + void *vaddr; >> + >> + if (sifive_ccache_handle_noncoherent()) { >> + sifive_ccache_flush_range(paddr, size); >> + return; >> + } > > ok, what happens if we have an system where the ccache and another level > of cache also requires maintenance operations? According to [1], the handling of non-coherent DMA on RISC-V is currently being worked on, so I will respin the series as soon as the proper support arrives. [1] https://lore.kernel.org/lkml/Y+d36nz0xdfXmDI1@spud/ >> + vaddr = phys_to_virt(paddr); >> switch (dir) { >> case DMA_TO_DEVICE: >> break; >> @@ -49,10 +62,30 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, >> size_t size, >> } >> } >> +void *arch_dma_set_uncached(void *addr, size_t size) >> +{ >> + if (sifive_ccache_handle_noncoherent()) >> + return sifive_ccache_set_uncached(addr, size); >> + >> + return addr; >> +} >> + >> +void arch_dma_clear_uncached(void *addr, size_t size) >> +{ >> + if (sifive_ccache_handle_noncoherent()) >> + sifive_ccache_clear_uncached(addr, size); >> +} >> + >> void arch_dma_prep_coherent(struct page *page, size_t size) >> { >> void *flush_addr = page_address(page); >> + if (sifive_ccache_handle_noncoherent()) { >> + memset(flush_addr, 0, size); >> + sifive_ccache_flush_range(__pa(flush_addr), size); >> + return; >> + } >> + >> ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size); >> } >
On Tue, Feb 14, 2023 at 08:06:49PM +0200, Cristian Ciocaltea wrote: > On 2/13/23 10:30, Ben Dooks wrote: > > On 11/02/2023 03:18, Cristian Ciocaltea wrote: > > > From: Emil Renner Berthing <kernel@esmil.dk> > > > diff --git a/arch/riscv/mm/dma-noncoherent.c > > > b/arch/riscv/mm/dma-noncoherent.c > > > index d919efab6eba..e07e53aea537 100644 > > > --- a/arch/riscv/mm/dma-noncoherent.c > > > +++ b/arch/riscv/mm/dma-noncoherent.c > > > @@ -9,14 +9,21 @@ > > > #include <linux/dma-map-ops.h> > > > #include <linux/mm.h> > > > #include <asm/cacheflush.h> > > > +#include <soc/sifive/sifive_ccache.h> > > > static bool noncoherent_supported; > > > void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, > > > enum dma_data_direction dir) > > > { > > > - void *vaddr = phys_to_virt(paddr); > > > + void *vaddr; > > > + if (sifive_ccache_handle_noncoherent()) { > > > + sifive_ccache_flush_range(paddr, size); > > > + return; > > > + } > > > + > > > + vaddr = phys_to_virt(paddr); > > > switch (dir) { > > > case DMA_TO_DEVICE: > > > ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size); > > > @@ -35,8 +42,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, > > > size_t size, > > > void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, > > > enum dma_data_direction dir) > > > { > > > - void *vaddr = phys_to_virt(paddr); > > > + void *vaddr; > > > + > > > + if (sifive_ccache_handle_noncoherent()) { > > > + sifive_ccache_flush_range(paddr, size); > > > + return; > > > + } > > > > ok, what happens if we have an system where the ccache and another level > > of cache also requires maintenance operations? TBH, I'd hope that a system with that complexity is also not trying to manage the cache in this manner! > According to [1], the handling of non-coherent DMA on RISC-V is currently > being worked on, so I will respin the series as soon as the proper support > arrives. But yeah, once that stuff lands we can carry out these operations only for the platforms that need/"need" it. 
Cheers, Conor.
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 9c687da7756d..05f6c77faf6f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -232,12 +232,14 @@ config LOCKDEP_SUPPORT def_bool y config RISCV_DMA_NONCOHERENT - bool + bool "Support non-coherent DMA" + default SOC_STARFIVE select ARCH_HAS_DMA_PREP_COHERENT + select ARCH_HAS_DMA_SET_UNCACHED + select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SETUP_DMA_OPS - select DMA_DIRECT_REMAP config AS_HAS_INSN def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero) diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index d919efab6eba..e07e53aea537 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -9,14 +9,21 @@ #include <linux/dma-map-ops.h> #include <linux/mm.h> #include <asm/cacheflush.h> +#include <soc/sifive/sifive_ccache.h> static bool noncoherent_supported; void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - void *vaddr = phys_to_virt(paddr); + void *vaddr; + if (sifive_ccache_handle_noncoherent()) { + sifive_ccache_flush_range(paddr, size); + return; + } + + vaddr = phys_to_virt(paddr); switch (dir) { case DMA_TO_DEVICE: ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size); @@ -35,8 +42,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - void *vaddr = phys_to_virt(paddr); + void *vaddr; + + if (sifive_ccache_handle_noncoherent()) { + sifive_ccache_flush_range(paddr, size); + return; + } + vaddr = phys_to_virt(paddr); switch (dir) { case DMA_TO_DEVICE: break; @@ -49,10 +62,30 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, } } +void *arch_dma_set_uncached(void *addr, size_t size) +{ + if (sifive_ccache_handle_noncoherent()) + return sifive_ccache_set_uncached(addr, size); + + 
return addr; +} + +void arch_dma_clear_uncached(void *addr, size_t size) +{ + if (sifive_ccache_handle_noncoherent()) + sifive_ccache_clear_uncached(addr, size); +} + void arch_dma_prep_coherent(struct page *page, size_t size) { void *flush_addr = page_address(page); + if (sifive_ccache_handle_noncoherent()) { + memset(flush_addr, 0, size); + sifive_ccache_flush_range(__pa(flush_addr), size); + return; + } + ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size); }