
[RFC,v3,2/2] soc: renesas: Add L2 cache management for RZ/Five SoC

Message ID 20221019220242.4746-3-prabhakar.mahadev-lad.rj@bp.renesas.com (mailing list archive)
State Superseded
Delegated to: Geert Uytterhoeven
Series AX45MP: Add support to non-coherent DMA

Commit Message

Lad, Prabhakar Oct. 19, 2022, 10:02 p.m. UTC
From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>

On the AX45MP core, cache coherency is a specification option, so it may
not be supported, in which case DMA will fail. As a workaround, we first
allocate a global DMA coherent pool from which DMA allocations are taken;
the pool is marked as non-cacheable and bufferable using a PMA region, as
specified in the device tree. Synchronization callbacks are implemented to
maintain coherency when doing DMA transactions.

The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
block that allows dynamic adjustment of memory attributes at run time. It
contains a configurable number of PMA entries, implemented as CSR
registers, to control the attributes of memory locations of interest.

Below are the memory attributes supported:
* Device, Non-bufferable
* Device, bufferable
* Memory, Non-cacheable, Non-bufferable
* Memory, Non-cacheable, Bufferable
* Memory, Write-back, No-allocate
* Memory, Write-back, Read-allocate
* Memory, Write-back, Write-allocate
* Memory, Write-back, Read and Write-allocate

This patch adds support for configuring the memory attributes of the
memory regions passed from the L2 cache node and exposes the cache
management ops.
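
For illustration, a minimal sketch of how each region described by the
"andestech,pma-regions" property ends up being programmed. This mirrors
ax45mp_configure_pma_regions()/ax45mp_sbi_set_pma() in the driver below;
error handling is trimmed and the interpretation of the SBI return value
is left to the caller:

  /*
   * Sketch only: each property entry is <start size flags>, three 64-bit
   * values, handed to the Andes vendor SBI extension.
   */
  static long pma_setup_one(struct device_node *np, unsigned int entry_id)
  {
          u64 start, size, flags;
          struct sbiret ret;

          of_property_read_u64_index(np, "andestech,pma-regions",
                                     entry_id * 3, &start);
          of_property_read_u64_index(np, "andestech,pma-regions",
                                     entry_id * 3 + 1, &size);
          of_property_read_u64_index(np, "andestech,pma-regions",
                                     entry_id * 3 + 2, &flags);

          ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_SET_PMA,
                          start, start + size, size, entry_id, flags, 0);

          return ret.value;
  }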

More info about PMA (section 10.3):
http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf

This feature is based on the work posted [0] by Vincent Chen
<vincentc@andestech.com> for the Andes AndeStar RISC-V CPU.

[0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/

Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
---
 arch/riscv/include/asm/cacheflush.h    |   8 +
 arch/riscv/include/asm/errata_list.h   |   2 +
 arch/riscv/mm/dma-noncoherent.c        |  20 ++
 drivers/soc/renesas/Kconfig            |   5 +
 drivers/soc/renesas/Makefile           |   4 +
 drivers/soc/renesas/rzf/Kconfig        |   6 +
 drivers/soc/renesas/rzf/Makefile       |   3 +
 drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
 drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
 9 files changed, 508 insertions(+)
 create mode 100644 drivers/soc/renesas/rzf/Kconfig
 create mode 100644 drivers/soc/renesas/rzf/Makefile
 create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
 create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h

Comments

Rob Herring (Arm) Oct. 21, 2022, 2:05 a.m. UTC | #1
On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> 
> On the AX45MP core, cache coherency is a specification option so it may
> not be supported. In this case DMA will fail. As a workaround, firstly we
> allocate a global dma coherent pool from which DMA allocations are taken
> and marked as non-cacheable + bufferable using the PMA region as specified
> in the device tree. Synchronization callbacks are implemented to
> synchronize when doing DMA transactions.
> 
> The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> block that allows dynamic adjustment of memory attributes in the runtime.
> It contains a configurable amount of PMA entries implemented as CSR
> registers to control the attributes of memory locations in interest.
> 
> Below are the memory attributes supported:
> * Device, Non-bufferable
> * Device, bufferable
> * Memory, Non-cacheable, Non-bufferable
> * Memory, Non-cacheable, Bufferable
> * Memory, Write-back, No-allocate
> * Memory, Write-back, Read-allocate
> * Memory, Write-back, Write-allocate
> * Memory, Write-back, Read and Write-allocate
> 
> This patch adds support to configure the memory attributes of the memory
> regions as passed from the l2 cache node and exposes the cache management
> ops.
> 
> More info about PMA (section 10.3):
> http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> 
> This feature is based on the work posted [0] by Vincent Chen
> <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> 
> [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> 
> Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> ---
>  arch/riscv/include/asm/cacheflush.h    |   8 +
>  arch/riscv/include/asm/errata_list.h   |   2 +
>  arch/riscv/mm/dma-noncoherent.c        |  20 ++
>  drivers/soc/renesas/Kconfig            |   5 +
>  drivers/soc/renesas/Makefile           |   4 +
>  drivers/soc/renesas/rzf/Kconfig        |   6 +
>  drivers/soc/renesas/rzf/Makefile       |   3 +
>  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++

How many cache drivers do we have around now? I've seen a few bindings 
go by. I'm guessing it is time to stop putting the drivers in the
drivers/soc/ dumping ground.
 
>  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
>  9 files changed, 508 insertions(+)
>  create mode 100644 drivers/soc/renesas/rzf/Kconfig
>  create mode 100644 drivers/soc/renesas/rzf/Makefile
>  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
>  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> 
> diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> index 8a5c246b0a21..40aa790be9a3 100644
> --- a/arch/riscv/include/asm/cacheflush.h
> +++ b/arch/riscv/include/asm/cacheflush.h
> @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
>  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
>  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
>  
> +#ifdef CONFIG_AX45MP_L2_CACHE
> +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> +
> +#define ALT_CMO_OP(_op, _start, _size, _cachesize)	\
> +		   _op(_start, _size)
> +#endif
> +
>  #include <asm-generic/cacheflush.h>
>  
>  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> index 19a771085781..d9cbf60c3b65 100644
> --- a/arch/riscv/include/asm/errata_list.h
> +++ b/arch/riscv/include/asm/errata_list.h
> @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(						\
>  #define ALT_THEAD_PMA(_val)
>  #endif
>  
> +#ifdef CONFIG_ERRATA_THEAD_CMO
>  /*
>   * dcache.ipa rs1 (invalidate, physical address)
>   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(						\
>  	: "a0")
>  
>  #endif /* __ASSEMBLY__ */
> +#endif
>  
>  #endif
> diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> index b0add983530a..5270acca6766 100644
> --- a/arch/riscv/mm/dma-noncoherent.c
> +++ b/arch/riscv/mm/dma-noncoherent.c
> @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
>  
>  	switch (dir) {
>  	case DMA_TO_DEVICE:
> +#ifdef CONFIG_ERRATA_THEAD_CMO
>  		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> +#elif CONFIG_AX45MP_L2_CACHE
> +		ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> +#endif

How do you support more than one platform in a build?

Rob
Lad, Prabhakar Oct. 21, 2022, 10:05 p.m. UTC | #2
Hi Rob,

Thank you for the review.

On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
>
> On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> >
> > On the AX45MP core, cache coherency is a specification option so it may
> > not be supported. In this case DMA will fail. As a workaround, firstly we
> > allocate a global dma coherent pool from which DMA allocations are taken
> > and marked as non-cacheable + bufferable using the PMA region as specified
> > in the device tree. Synchronization callbacks are implemented to
> > synchronize when doing DMA transactions.
> >
> > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > block that allows dynamic adjustment of memory attributes in the runtime.
> > It contains a configurable amount of PMA entries implemented as CSR
> > registers to control the attributes of memory locations in interest.
> >
> > Below are the memory attributes supported:
> > * Device, Non-bufferable
> > * Device, bufferable
> > * Memory, Non-cacheable, Non-bufferable
> > * Memory, Non-cacheable, Bufferable
> > * Memory, Write-back, No-allocate
> > * Memory, Write-back, Read-allocate
> > * Memory, Write-back, Write-allocate
> > * Memory, Write-back, Read and Write-allocate
> >
> > This patch adds support to configure the memory attributes of the memory
> > regions as passed from the l2 cache node and exposes the cache management
> > ops.
> >
> > More info about PMA (section 10.3):
> > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> >
> > This feature is based on the work posted [0] by Vincent Chen
> > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> >
> > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> >
> > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > ---
> >  arch/riscv/include/asm/cacheflush.h    |   8 +
> >  arch/riscv/include/asm/errata_list.h   |   2 +
> >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> >  drivers/soc/renesas/Kconfig            |   5 +
> >  drivers/soc/renesas/Makefile           |   4 +
> >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> >  drivers/soc/renesas/rzf/Makefile       |   3 +
> >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
>
> How many cache drivers do we have around now? I've seen a few bindings
> go by. I'm guessing it is time to stop putting the drivers in the
> drivers/soc/ dumping ground.
>
The main reason this driver is not in arch/riscv is that it uses vendor
specific extensions. For that reason it was agreed during LPC that vendor
specific extensions should be maintained by the SoC vendors, and that this
driver can go into the drivers/soc/renesas folder instead.

> >  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
> >  9 files changed, 508 insertions(+)
> >  create mode 100644 drivers/soc/renesas/rzf/Kconfig
> >  create mode 100644 drivers/soc/renesas/rzf/Makefile
> >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
> >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> >
> > diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> > index 8a5c246b0a21..40aa790be9a3 100644
> > --- a/arch/riscv/include/asm/cacheflush.h
> > +++ b/arch/riscv/include/asm/cacheflush.h
> > @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
> >  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
> >  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
> >
> > +#ifdef CONFIG_AX45MP_L2_CACHE
> > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> > +
> > +#define ALT_CMO_OP(_op, _start, _size, _cachesize)   \
> > +                _op(_start, _size)
> > +#endif
> > +
> >  #include <asm-generic/cacheflush.h>
> >
> >  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> > diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> > index 19a771085781..d9cbf60c3b65 100644
> > --- a/arch/riscv/include/asm/errata_list.h
> > +++ b/arch/riscv/include/asm/errata_list.h
> > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                           \
> >  #define ALT_THEAD_PMA(_val)
> >  #endif
> >
> > +#ifdef CONFIG_ERRATA_THEAD_CMO
> >  /*
> >   * dcache.ipa rs1 (invalidate, physical address)
> >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                               \
> >       : "a0")
> >
> >  #endif /* __ASSEMBLY__ */
> > +#endif
> >
> >  #endif
> > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > index b0add983530a..5270acca6766 100644
> > --- a/arch/riscv/mm/dma-noncoherent.c
> > +++ b/arch/riscv/mm/dma-noncoherent.c
> > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> >
> >       switch (dir) {
> >       case DMA_TO_DEVICE:
> > +#ifdef CONFIG_ERRATA_THEAD_CMO
> >               ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > +#elif CONFIG_AX45MP_L2_CACHE
> > +             ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> > +#endif
>
> How do you support more than one platform in a build?
>
Yes, that's one concern I have mentioned in the cover letter too (at the
moment it's just a single platform). Suggestions welcome!

Cheers,
Prabhakar
Conor Dooley Oct. 21, 2022, 10:32 p.m. UTC | #3
On Fri, Oct 21, 2022 at 11:05:40PM +0100, Lad, Prabhakar wrote:
> Hi Rob,
> 
> Thank you for the review.
> 
> On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
> >
> > On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > >
> > > On the AX45MP core, cache coherency is a specification option so it may
> > > not be supported. In this case DMA will fail. As a workaround, firstly we
> > > allocate a global dma coherent pool from which DMA allocations are taken
> > > and marked as non-cacheable + bufferable using the PMA region as specified
> > > in the device tree. Synchronization callbacks are implemented to
> > > synchronize when doing DMA transactions.
> > >
> > > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > > block that allows dynamic adjustment of memory attributes in the runtime.
> > > It contains a configurable amount of PMA entries implemented as CSR
> > > registers to control the attributes of memory locations in interest.
> > >
> > > Below are the memory attributes supported:
> > > * Device, Non-bufferable
> > > * Device, bufferable
> > > * Memory, Non-cacheable, Non-bufferable
> > > * Memory, Non-cacheable, Bufferable
> > > * Memory, Write-back, No-allocate
> > > * Memory, Write-back, Read-allocate
> > > * Memory, Write-back, Write-allocate
> > > * Memory, Write-back, Read and Write-allocate
> > >
> > > This patch adds support to configure the memory attributes of the memory
> > > regions as passed from the l2 cache node and exposes the cache management
> > > ops.
> > >
> > > More info about PMA (section 10.3):
> > > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> > >
> > > This feature is based on the work posted [0] by Vincent Chen
> > > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> > >
> > > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> > >
> > > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > ---
> > >  arch/riscv/include/asm/cacheflush.h    |   8 +
> > >  arch/riscv/include/asm/errata_list.h   |   2 +
> > >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> > >  drivers/soc/renesas/Kconfig            |   5 +
> > >  drivers/soc/renesas/Makefile           |   4 +
> > >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> > >  drivers/soc/renesas/rzf/Makefile       |   3 +
> > >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
> >
> > How many cache drivers do we have around now? I've seen a few bindings
> > go by. I'm guessing it is time to stop putting the drivers in the
> > drivers/soc/ dumping ground.
> >
> The main reason this driver is not in arch/riscv is that it has vendor
> specific extensions. Due to this reason it was agreed during the LPC
> that vendor specific extension should be maintained by SoC vendors and
> was agreed that this can go into drivers/soc/renesas folder instead.

Does "not in drivers/soc" mean they need to go into arch/riscv?
The outcome of the chat at the LPC BoF was more that the cache drivers
themselves should not be routed via the arch maintainers, no?

> 
> > >  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
> > >  9 files changed, 508 insertions(+)
> > >  create mode 100644 drivers/soc/renesas/rzf/Kconfig
> > >  create mode 100644 drivers/soc/renesas/rzf/Makefile
> > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
> > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> > >
> > > diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> > > index 8a5c246b0a21..40aa790be9a3 100644
> > > --- a/arch/riscv/include/asm/cacheflush.h
> > > +++ b/arch/riscv/include/asm/cacheflush.h
> > > @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
> > >  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
> > >  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
> > >
> > > +#ifdef CONFIG_AX45MP_L2_CACHE
> > > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> > > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> > > +
> > > +#define ALT_CMO_OP(_op, _start, _size, _cachesize)   \
> > > +                _op(_start, _size)
> > > +#endif
> > > +
> > >  #include <asm-generic/cacheflush.h>
> > >
> > >  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> > > diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> > > index 19a771085781..d9cbf60c3b65 100644
> > > --- a/arch/riscv/include/asm/errata_list.h
> > > +++ b/arch/riscv/include/asm/errata_list.h
> > > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                           \
> > >  #define ALT_THEAD_PMA(_val)
> > >  #endif
> > >
> > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > >  /*
> > >   * dcache.ipa rs1 (invalidate, physical address)
> > >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                               \
> > >       : "a0")
> > >
> > >  #endif /* __ASSEMBLY__ */
> > > +#endif
> > >
> > >  #endif
> > > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > > index b0add983530a..5270acca6766 100644
> > > --- a/arch/riscv/mm/dma-noncoherent.c
> > > +++ b/arch/riscv/mm/dma-noncoherent.c
> > > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> > >
> > >       switch (dir) {
> > >       case DMA_TO_DEVICE:
> > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > >               ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > > +#elif CONFIG_AX45MP_L2_CACHE
> > > +             ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> > > +#endif
> >
> > How do you support more than one platform in a build?
> >
> Yes, that's one concern which I have mentioned in the cover letter too
> (At that moment it's just a single platform). Suggestions welcome!

I think I said it on one of the earlier versions, but it needs to be
implemented w/ runtime patching via alternatives, just like the T-Head
stuff patches in its functions.
Lad, Prabhakar Oct. 24, 2022, 11:55 a.m. UTC | #4
Hi Conor,

On Fri, Oct 21, 2022 at 11:32 PM Conor Dooley <conor@kernel.org> wrote:
>
> On Fri, Oct 21, 2022 at 11:05:40PM +0100, Lad, Prabhakar wrote:
> > Hi Rob,
> >
> > Thank you for the review.
> >
> > On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
> > >
> > > On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > > > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > >
> > > > On the AX45MP core, cache coherency is a specification option so it may
> > > > not be supported. In this case DMA will fail. As a workaround, firstly we
> > > > allocate a global dma coherent pool from which DMA allocations are taken
> > > > and marked as non-cacheable + bufferable using the PMA region as specified
> > > > in the device tree. Synchronization callbacks are implemented to
> > > > synchronize when doing DMA transactions.
> > > >
> > > > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > > > block that allows dynamic adjustment of memory attributes in the runtime.
> > > > It contains a configurable amount of PMA entries implemented as CSR
> > > > registers to control the attributes of memory locations in interest.
> > > >
> > > > Below are the memory attributes supported:
> > > > * Device, Non-bufferable
> > > > * Device, bufferable
> > > > * Memory, Non-cacheable, Non-bufferable
> > > > * Memory, Non-cacheable, Bufferable
> > > > * Memory, Write-back, No-allocate
> > > > * Memory, Write-back, Read-allocate
> > > > * Memory, Write-back, Write-allocate
> > > > * Memory, Write-back, Read and Write-allocate
> > > >
> > > > This patch adds support to configure the memory attributes of the memory
> > > > regions as passed from the l2 cache node and exposes the cache management
> > > > ops.
> > > >
> > > > More info about PMA (section 10.3):
> > > > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> > > >
> > > > This feature is based on the work posted [0] by Vincent Chen
> > > > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> > > >
> > > > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> > > >
> > > > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > ---
> > > >  arch/riscv/include/asm/cacheflush.h    |   8 +
> > > >  arch/riscv/include/asm/errata_list.h   |   2 +
> > > >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> > > >  drivers/soc/renesas/Kconfig            |   5 +
> > > >  drivers/soc/renesas/Makefile           |   4 +
> > > >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> > > >  drivers/soc/renesas/rzf/Makefile       |   3 +
> > > >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
> > >
> > > How many cache drivers do we have around now? I've seen a few bindings
> > > go by. I'm guessing it is time to stop putting the drivers in the
> > > drivers/soc/ dumping ground.
> > >
> > The main reason this driver is not in arch/riscv is that it has vendor
> > specific extensions. Due to this reason it was agreed during the LPC
> > that vendor specific extension should be maintained by SoC vendors and
> > was agreed that this can go into drivers/soc/renesas folder instead.
>
> Does not in drivers/soc mean they need to go into arch/riscv?
I was under the impression Rob wanted them in arch/riscv, sorry for the confusion.

> The outcome of the chat at the LPC BoF was more that the cache drivers
> themselves should not be be routed via the arch maintainers, no?
>
Indeed.

> >
> > > >  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
> > > >  9 files changed, 508 insertions(+)
> > > >  create mode 100644 drivers/soc/renesas/rzf/Kconfig
> > > >  create mode 100644 drivers/soc/renesas/rzf/Makefile
> > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
> > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> > > >
> > > > diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> > > > index 8a5c246b0a21..40aa790be9a3 100644
> > > > --- a/arch/riscv/include/asm/cacheflush.h
> > > > +++ b/arch/riscv/include/asm/cacheflush.h
> > > > @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
> > > >  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
> > > >  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
> > > >
> > > > +#ifdef CONFIG_AX45MP_L2_CACHE
> > > > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> > > > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> > > > +
> > > > +#define ALT_CMO_OP(_op, _start, _size, _cachesize)   \
> > > > +                _op(_start, _size)
> > > > +#endif
> > > > +
> > > >  #include <asm-generic/cacheflush.h>
> > > >
> > > >  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> > > > diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> > > > index 19a771085781..d9cbf60c3b65 100644
> > > > --- a/arch/riscv/include/asm/errata_list.h
> > > > +++ b/arch/riscv/include/asm/errata_list.h
> > > > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                           \
> > > >  #define ALT_THEAD_PMA(_val)
> > > >  #endif
> > > >
> > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > >  /*
> > > >   * dcache.ipa rs1 (invalidate, physical address)
> > > >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > > > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                               \
> > > >       : "a0")
> > > >
> > > >  #endif /* __ASSEMBLY__ */
> > > > +#endif
> > > >
> > > >  #endif
> > > > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > > > index b0add983530a..5270acca6766 100644
> > > > --- a/arch/riscv/mm/dma-noncoherent.c
> > > > +++ b/arch/riscv/mm/dma-noncoherent.c
> > > > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> > > >
> > > >       switch (dir) {
> > > >       case DMA_TO_DEVICE:
> > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > >               ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > > > +#elif CONFIG_AX45MP_L2_CACHE
> > > > +             ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> > > > +#endif
> > >
> > > How do you support more than one platform in a build?
> > >
> > Yes, that's one concern which I have mentioned in the cover letter too
> > (At that moment it's just a single platform). Suggestions welcome!
>
> I think I said it on one of the earlier version, but it needs to be
> implemented w/ runtime patching via alternatives just like the thead
> stuff patches in their functions.
>
I'm a bit stumped by the alternatives() usage.

Currently I am just replacing the ALT_CMO_OP() macro when
CONFIG_AX45MP_L2_CACHE is enabled. For the AX45MP we currently have two
exported functions, ax45mp_cpu_dma_inval_range()/ax45mp_cpu_dma_wb_range().
If I switch to using the ALTERNATIVE() macro, will I have to use assembly
versions of the two functions mentioned above?

Cheers,
Prabhakar
Heiko Stübner Oct. 24, 2022, 12:04 p.m. UTC | #5
Hi Prabhakar,

Am Montag, 24. Oktober 2022, 13:55:00 CEST schrieb Lad, Prabhakar:
> Hi Conor,
> 
> On Fri, Oct 21, 2022 at 11:32 PM Conor Dooley <conor@kernel.org> wrote:
> >
> > On Fri, Oct 21, 2022 at 11:05:40PM +0100, Lad, Prabhakar wrote:
> > > Hi Rob,
> > >
> > > Thank you for the review.
> > >
> > > On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
> > > >
> > > > On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > > > > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > >
> > > > > On the AX45MP core, cache coherency is a specification option so it may
> > > > > not be supported. In this case DMA will fail. As a workaround, firstly we
> > > > > allocate a global dma coherent pool from which DMA allocations are taken
> > > > > and marked as non-cacheable + bufferable using the PMA region as specified
> > > > > in the device tree. Synchronization callbacks are implemented to
> > > > > synchronize when doing DMA transactions.
> > > > >
> > > > > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > > > > block that allows dynamic adjustment of memory attributes in the runtime.
> > > > > It contains a configurable amount of PMA entries implemented as CSR
> > > > > registers to control the attributes of memory locations in interest.
> > > > >
> > > > > Below are the memory attributes supported:
> > > > > * Device, Non-bufferable
> > > > > * Device, bufferable
> > > > > * Memory, Non-cacheable, Non-bufferable
> > > > > * Memory, Non-cacheable, Bufferable
> > > > > * Memory, Write-back, No-allocate
> > > > > * Memory, Write-back, Read-allocate
> > > > > * Memory, Write-back, Write-allocate
> > > > > * Memory, Write-back, Read and Write-allocate
> > > > >
> > > > > This patch adds support to configure the memory attributes of the memory
> > > > > regions as passed from the l2 cache node and exposes the cache management
> > > > > ops.
> > > > >
> > > > > More info about PMA (section 10.3):
> > > > > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> > > > >
> > > > > This feature is based on the work posted [0] by Vincent Chen
> > > > > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> > > > >
> > > > > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> > > > >
> > > > > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > > ---
> > > > >  arch/riscv/include/asm/cacheflush.h    |   8 +
> > > > >  arch/riscv/include/asm/errata_list.h   |   2 +
> > > > >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> > > > >  drivers/soc/renesas/Kconfig            |   5 +
> > > > >  drivers/soc/renesas/Makefile           |   4 +
> > > > >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> > > > >  drivers/soc/renesas/rzf/Makefile       |   3 +
> > > > >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
> > > >
> > > > How many cache drivers do we have around now? I've seen a few bindings
> > > > go by. I'm guessing it is time to stop putting the drivers in the
> > > > drivers/soc/ dumping ground.
> > > >
> > > The main reason this driver is not in arch/riscv is that it has vendor
> > > specific extensions. Due to this reason it was agreed during the LPC
> > > that vendor specific extension should be maintained by SoC vendors and
> > > was agreed that this can go into drivers/soc/renesas folder instead.
> >
> > Does not in drivers/soc mean they need to go into arch/riscv?
> I was under the impression Rob wanted them arch/riscv, sorry for the confusion.
> 
> > The outcome of the chat at the LPC BoF was more that the cache drivers
> > themselves should not be be routed via the arch maintainers, no?
> >
> Indeed.
> 
> > >
> > > > >  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
> > > > >  9 files changed, 508 insertions(+)
> > > > >  create mode 100644 drivers/soc/renesas/rzf/Kconfig
> > > > >  create mode 100644 drivers/soc/renesas/rzf/Makefile
> > > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
> > > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> > > > >
> > > > > diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> > > > > index 8a5c246b0a21..40aa790be9a3 100644
> > > > > --- a/arch/riscv/include/asm/cacheflush.h
> > > > > +++ b/arch/riscv/include/asm/cacheflush.h
> > > > > @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
> > > > >  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
> > > > >  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
> > > > >
> > > > > +#ifdef CONFIG_AX45MP_L2_CACHE
> > > > > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> > > > > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> > > > > +
> > > > > +#define ALT_CMO_OP(_op, _start, _size, _cachesize)   \
> > > > > +                _op(_start, _size)
> > > > > +#endif
> > > > > +
> > > > >  #include <asm-generic/cacheflush.h>
> > > > >
> > > > >  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> > > > > diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> > > > > index 19a771085781..d9cbf60c3b65 100644
> > > > > --- a/arch/riscv/include/asm/errata_list.h
> > > > > +++ b/arch/riscv/include/asm/errata_list.h
> > > > > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                           \
> > > > >  #define ALT_THEAD_PMA(_val)
> > > > >  #endif
> > > > >
> > > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > > >  /*
> > > > >   * dcache.ipa rs1 (invalidate, physical address)
> > > > >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > > > > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                               \
> > > > >       : "a0")
> > > > >
> > > > >  #endif /* __ASSEMBLY__ */
> > > > > +#endif
> > > > >
> > > > >  #endif
> > > > > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > > > > index b0add983530a..5270acca6766 100644
> > > > > --- a/arch/riscv/mm/dma-noncoherent.c
> > > > > +++ b/arch/riscv/mm/dma-noncoherent.c
> > > > > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> > > > >
> > > > >       switch (dir) {
> > > > >       case DMA_TO_DEVICE:
> > > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > > >               ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > > > > +#elif CONFIG_AX45MP_L2_CACHE
> > > > > +             ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> > > > > +#endif
> > > >
> > > > How do you support more than one platform in a build?
> > > >
> > > Yes, that's one concern which I have mentioned in the cover letter too
> > > (At that moment it's just a single platform). Suggestions welcome!
> >
> > I think I said it on one of the earlier version, but it needs to be
> > implemented w/ runtime patching via alternatives just like the thead
> > stuff patches in their functions.
> >
> I'm a bit stumped with alternatives() usage.
> 
> Currently I am just replacing the ALT_CMO_OP() macro if
> CONFIG_AX45MP_L2_CACHE is enabled. For AX45MP currently we have two
> exported functions ax45mp_cpu_dma_inval_range/ax45mp_cpu_dma_wb_range.
> If I switch to
> ALTERNATIVE() macro usage then I'll have to use the assembly version
> of the above two mentioned functions?

The overarching goal should always be the unified-kernel-image.
So hardware-specific compile-time #ifdefs are normally a no-no :-) .

So yes, it most likely should be assembly-based, and you'll "just" need
to introduce an ALTERNATIVE_3 macro, similar to what ALTERNAITVE_2 does.

That is actually the really nice part of alternatives, that you can have as
many variants as you like.
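
Roughly (untested, and just to show the shape; the existing ALTERNATIVE_2
form below is paraphrased from arch/riscv/include/asm/alternative-macros.h,
and ALTERNATIVE_3 is the hypothetical new addition):

  /* today: up to two runtime-patched variants of a code sequence */
  ALTERNATIVE_2(old_content,
                new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1,
                new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)

  /* hypothetical third slot, e.g. for the Andes CMO sequence */
  ALTERNATIVE_3(old_content,
                new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1,
                new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2,
                new_content_3, vendor_id_3, errata_id_3, CONFIG_k_3)

ALT_CMO_OP() would then pick the right variant at boot, based on the
vendor ID, instead of selecting one implementation with an #ifdef at
build time.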

Heiko
Geert Uytterhoeven Oct. 24, 2022, 2:22 p.m. UTC | #6
Hi Prabhakar,

(fixed Palmer's address)

On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> On the AX45MP core, cache coherency is a specification option so it may
> not be supported. In this case DMA will fail. As a workaround, firstly we
> allocate a global dma coherent pool from which DMA allocations are taken
> and marked as non-cacheable + bufferable using the PMA region as specified
> in the device tree. Synchronization callbacks are implemented to
> synchronize when doing DMA transactions.
>
> The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> block that allows dynamic adjustment of memory attributes in the runtime.
> It contains a configurable amount of PMA entries implemented as CSR
> registers to control the attributes of memory locations in interest.
>
> Below are the memory attributes supported:
> * Device, Non-bufferable
> * Device, bufferable
> * Memory, Non-cacheable, Non-bufferable
> * Memory, Non-cacheable, Bufferable
> * Memory, Write-back, No-allocate
> * Memory, Write-back, Read-allocate
> * Memory, Write-back, Write-allocate
> * Memory, Write-back, Read and Write-allocate
>
> This patch adds support to configure the memory attributes of the memory
> regions as passed from the l2 cache node and exposes the cache management
> ops.
>
> More info about PMA (section 10.3):
> http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
>
> This feature is based on the work posted [0] by Vincent Chen
> <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
>
> [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
>
> Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>

Thanks for your patch!

> --- a/drivers/soc/renesas/Kconfig
> +++ b/drivers/soc/renesas/Kconfig
> @@ -340,9 +340,14 @@ if RISCV
>  config ARCH_R9A07G043
>         bool "RISC-V Platform support for RZ/Five"
>         select ARCH_RZG2L
> +       select AX45MP_L2_CACHE
> +       select DMA_GLOBAL_POOL
> +       select RISCV_DMA_NONCOHERENT
>         help
>           This enables support for the Renesas RZ/Five SoC.
>
> +source "drivers/soc/renesas/rzf/Kconfig"

s/rzf/rzfive/? (or "rz5"? "rzv"?)

> +
>  endif # RISCV
>
>  config RST_RCAR
> diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
> index 535868c9c7e4..a20cc7ad5b12 100644
> --- a/drivers/soc/renesas/Makefile
> +++ b/drivers/soc/renesas/Makefile
> @@ -31,6 +31,10 @@ ifdef CONFIG_SMP
>  obj-$(CONFIG_ARCH_R9A06G032)   += r9a06g032-smp.o
>  endif
>
> +ifdef CONFIG_RISCV
> +obj-y += rzf/
> +endif

obj-$(CONFIG_RISCV)

> --- /dev/null
> +++ b/drivers/soc/renesas/rzf/Kconfig
> @@ -0,0 +1,6 @@
> +# SPDX-License-Identifier: GPL-2.0
> +
> +config AX45MP_L2_CACHE
> +       bool "AX45MP L2 Cache controller"

Andes Technology ...

> +       help
> +         Support for the L2 cache controller on AX45MP platforms.

... Andes Technology ...

> --- /dev/null
> +++ b/drivers/soc/renesas/rzf/ax45mp_cache.c
> @@ -0,0 +1,431 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * PMA setup and non-coherent cache functions for AX45MP
> + *
> + * Copyright (C) 2022 Renesas Electronics Corp.
> + */
> +
> +#include <linux/cacheflush.h>
> +#include <linux/cacheinfo.h>
> +#include <linux/of_address.h>
> +#include <linux/of_platform.h>
> +
> +#include <asm/sbi.h>
> +
> +#include "ax45mp_sbi.h"
> +
> +/* L2 cache registers */
> +#define AX45MP_L2C_REG_CTL_OFFSET              0x8
> +#define AX45MP_L2C_IPREPETCH_OFF               3
> +#define AX45MP_L2C_DPREPETCH_OFF               5
> +#define AX45MP_L2C_IPREPETCH_MSK               (3 << AX45MP_L2C_IPREPETCH_OFF)
> +#define AX45MP_L2C_DPREPETCH_MSK               (3 << AX45MP_L2C_DPREPETCH_OFF)

#define AX45MP_L2C_IPREPETCH    GENMASK(4, 3)
etc., and then you can use the FIELD_PREP() macros.
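
I.e. something like this (untested sketch; needs <linux/bitfield.h>):

    #define AX45MP_L2C_IPREPETCH	GENMASK(4, 3)
    #define AX45MP_L2C_DPREPETCH	GENMASK(6, 5)

    cache_ctl &= ~AX45MP_L2C_IPREPETCH;
    cache_ctl |= FIELD_PREP(AX45MP_L2C_IPREPETCH, prefetch);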

> +#define AX45MP_L2C_TRAMOCTL_OFF                        8
> +#define AX45MP_L2C_TRAMICTL_OFF                        10
> +#define AX45MP_L2C_TRAMOCTL_MSK                        (3 << AX45MP_L2C_TRAMOCTL_OFF)
> +#define AX45MP_L2C_TRAMICTL_MSK                        BIT(AX45MP_L2C_TRAMICTL_OFF)
> +#define AX45MP_L2C_DRAMOCTL_OFF                        11
> +#define AX45MP_L2C_DRAMICTL_OFF                        13
> +#define AX45MP_L2C_DRAMOCTL_MSK                        (3 << AX45MP_L2C_DRAMOCTL_OFF)
> +#define AX45MP_L2C_DRAMICTL_MSK                        BIT(AX45MP_L2C_DRAMICTL_OFF)

> +
> +#define AX45MP_MAX_CACHE_LINE_SIZE             256
> +
> +#define AX45MP_MAX_PMA_REGIONS                 16
> +
> +struct ax45mp_priv {
> +       void __iomem *l2c_base;
> +       unsigned int ax45mp_cache_line_size;
> +       bool l2cache_enabled;
> +       bool ucctl_ok;
> +};
> +
> +static struct ax45mp_priv *ax45mp_priv;
> +static DEFINE_STATIC_KEY_FALSE(ax45mp_l2c_configured);
> +
> +/* PMA setup */
> +static long ax45mp_sbi_set_pma(unsigned long start,
> +                              unsigned long size,
> +                              unsigned long flags,
> +                              unsigned int entry_id)
> +{
> +       struct sbiret ret;
> +
> +       ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_SET_PMA,
> +                       start, start + size, size, entry_id,
> +                       flags, 0);

Fits on two lines.

> +
> +       return ret.value;
> +}
> +
> +static int ax45mp_configure_pma_regions(struct device_node *np)
> +{
> +       const char *propname = "andestech,pma-regions";
> +       u64 start, size, flags;
> +       unsigned int entry_id;
> +       unsigned int i;
> +       int count;
> +       int ret;
> +
> +       count = of_property_count_elems_of_size(np, propname,
> +                                               sizeof(u32) * 6);

Fits on a single line.

> +static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
> +{
> +       return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_STATUS_OFFSET));

Why the cast to "(void *)"?

> +}
> +
> +static inline uint32_t ax45mp_cpu_l2c_ctl_status(void)
> +{
> +       return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET));

Likewise.

> +}

> +static void ax45mp_cpu_dcache_wb_range(unsigned long start,
> +                                      unsigned long end,
> +                                      int line_size)
> +{
> +       void __iomem *base = ax45mp_priv->l2c_base;
> +       unsigned long pa;
> +       int mhartid = 0;
> +#ifdef CONFIG_SMP
> +       mhartid = smp_processor_id();
> +#endif
> +
> +       while (end > start) {
> +               if (ax45mp_priv->ucctl_ok) {
> +                       csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
> +                       csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_WB);
> +               }
> +
> +               if (ax45mp_priv->l2cache_enabled) {
> +                       pa = virt_to_phys((void *)start);

Looks like start and end should be "void *" instead of " unsigned long",
as they are virtual addresses. See also below...

> +                       writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
> +                       writel(AX45MP_CCTL_L2_PA_WB,
> +                              (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));

Why the casts to "(void *)"?

> +                       while ((ax45mp_cpu_l2c_get_cctl_status() &
> +                               AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
> +                               AX45MP_CCTL_L2_STATUS_IDLE)
> +                               ;
> +               }
> +
> +               start += line_size;
> +       }
> +}
> +
> +static void ax45mp_cpu_dcache_inval_range(unsigned long start,
> +                                         unsigned long end,
> +                                         int line_size)
> +{
> +       void __iomem *base = ax45mp_priv->l2c_base;
> +       unsigned long pa;
> +       int mhartid = 0;
> +#ifdef CONFIG_SMP
> +       mhartid = smp_processor_id();
> +#endif
> +
> +       while (end > start) {
> +               if (ax45mp_priv->ucctl_ok) {
> +                       csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
> +                       csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_INVAL);
> +               }
> +
> +               if (ax45mp_priv->l2cache_enabled) {
> +                       pa = virt_to_phys((void *)start);

Looks like start and end should be "void *" instead of " unsigned long",
as they are virtual addresses. See also below...

> +                       writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
> +                       writel(AX45MP_CCTL_L2_PA_INVAL,
> +                              (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));
> +                       while ((ax45mp_cpu_l2c_get_cctl_status() &
> +                               AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
> +                               AX45MP_CCTL_L2_STATUS_IDLE)
> +                               ;
> +               }
> +
> +               start += line_size;
> +       }
> +}
> +
> +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t size)
> +{
> +       char cache_buf[2][AX45MP_MAX_CACHE_LINE_SIZE] = { 0 };

AX45MP_MAX_CACHE_LINE_SIZE = 256, so 512 bytes of data on the stack,
auto-initialized by memset().

Please remove the { 0 }, ...

> +       unsigned long start = (unsigned long)vaddr;
> +       unsigned long end = start + size;
> +       unsigned long old_start = start;
> +       unsigned long old_end = end;
> +       unsigned long line_size;
> +       unsigned long flags;
> +
> +       if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
> +               return;
> +
> +       if (unlikely(start == end))
> +               return;
> +
> +       line_size = ax45mp_priv->ax45mp_cache_line_size;

... and call memset() here, so the buffer is not initialized when unused.
Perhaps use two buffers, so you can easily memset() only the part that is
used?

> +
> +       start = start & (~(line_size - 1));
> +       end = ((end + line_size - 1) & (~(line_size - 1)));

These are the only calculations that need to use "unsigned long"
instead of "void *", but you can use PTR_ALIGN_DOWN() and PTR_ALIGN()
to avoid explicit casts.
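
E.g. (untested sketch, keeping everything as pointers):

    void *start = PTR_ALIGN_DOWN(vaddr, line_size);
    void *end = PTR_ALIGN(vaddr + size, line_size);
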

> +
> +       local_irq_save(flags);
> +       if (unlikely(start != old_start))
> +               memcpy(&cache_buf[0][0], (void *)start, line_size);
> +
> +       if (unlikely(end != old_end))
> +               memcpy(&cache_buf[1][0], (void *)(old_end & (~(line_size - 1))), line_size);

PTR_ALIGN_DOWN()

> +
> +       ax45mp_cpu_dcache_inval_range(start, end, line_size);
> +
> +       if (unlikely(start != old_start))
> +               memcpy((void *)start, &cache_buf[0][0], (old_start & (line_size - 1)));
> +
> +       if (unlikely(end != old_end))
> +               memcpy((void *)(old_end + 1),
> +                      &cache_buf[1][(old_end & (line_size - 1)) + 1],
> +                      end - old_end - 1);
> +
> +       local_irq_restore(flags);
> +}
> +EXPORT_SYMBOL(ax45mp_cpu_dma_inval_range);
> +
> +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t size)
> +{
> +       unsigned long start = (unsigned long)vaddr;
> +       unsigned long end = start + size;
> +       unsigned long line_size;
> +       unsigned long flags;
> +
> +       if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
> +               return;
> +
> +       line_size = ax45mp_priv->ax45mp_cache_line_size;
> +       local_irq_save(flags);
> +       start = start & (~(line_size - 1));

PTR_ALIGN_DOWN() etc...

> +       ax45mp_cpu_dcache_wb_range(start, end, line_size);
> +       local_irq_restore(flags);
> +}
> +EXPORT_SYMBOL(ax45mp_cpu_dma_wb_range);
> +
> +static int ax45mp_configure_l2_cache(struct device_node *np)
> +{
> +       u8 ram_ctl[2];
> +       u32 cache_ctl;
> +       u32 prefetch;
> +       int ret;
> +
> +       cache_ctl = ax45mp_cpu_l2c_ctl_status();
> +
> +       /* Instruction and data fetch prefetch depth */
> +       ret = of_property_read_u32(np, "andestech,inst-prefetch", &prefetch);
> +       if (!ret) {
> +               cache_ctl &= ~AX45MP_L2C_IPREPETCH_MSK;
> +               cache_ctl |= (prefetch << AX45MP_L2C_IPREPETCH_OFF);

FIELD_PREP(), also below

> +       }
> +
> +       ret = of_property_read_u32(np, "andestech,data-prefetch", &prefetch);
> +       if (!ret) {
> +               cache_ctl &= ~AX45MP_L2C_DPREPETCH_MSK;
> +               cache_ctl |= (prefetch << AX45MP_L2C_DPREPETCH_OFF);

prefetch / 2

> +       }
> +
> +       /* tag RAM and data RAM setup and output cycle */
> +       ret = of_property_read_u8_array(np, "andestech,tag-ram-ctl", ram_ctl, 2);
> +       if (!ret) {
> +               cache_ctl &= ~(AX45MP_L2C_TRAMOCTL_MSK | AX45MP_L2C_TRAMICTL_MSK);
> +               cache_ctl |= ram_ctl[0] << AX45MP_L2C_TRAMOCTL_OFF;
> +               cache_ctl |= ram_ctl[1] << AX45MP_L2C_TRAMICTL_OFF;
> +       }
> +
> +       ret = of_property_read_u8_array(np, "andestech,data-ram-ctl", ram_ctl, 2);
> +       if (!ret) {
> +               cache_ctl &= ~(AX45MP_L2C_DRAMOCTL_MSK | AX45MP_L2C_DRAMICTL_MSK);
> +               cache_ctl |= ram_ctl[0] << AX45MP_L2C_DRAMOCTL_OFF;
> +               cache_ctl |= ram_ctl[1] << AX45MP_L2C_DRAMICTL_OFF;
> +       }
> +
> +       writel(cache_ctl, ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET);
> +
> +       ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv->ax45mp_cache_line_size);

According to the bindings, this must be 64?

> +       if (ret) {
> +               pr_err("Failed to get cache-line-size defaulting to 64 bytes\n");
> +               ax45mp_priv->ax45mp_cache_line_size = SZ_64;
> +       }
> +
> +       ax45mp_priv->ucctl_ok = ax45mp_cpu_cache_controlable();
> +       ax45mp_priv->l2cache_enabled = ax45mp_cpu_l2c_ctl_status() & AX45MP_L2_CACHE_CTL_CEN_MASK;
> +
> +       return 0;
> +}

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds
Lad, Prabhakar Oct. 25, 2022, 11:07 p.m. UTC | #7
Hi Geert,

Thank you for the review.

On Mon, Oct 24, 2022 at 3:22 PM Geert Uytterhoeven <geert@linux-m68k.org> wrote:
>
> Hi Prabhakar,
>
> (fixed Palmer's address)
>
> On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> >
> > On the AX45MP core, cache coherency is a specification option so it may
> > not be supported. In this case DMA will fail. As a workaround, firstly we
> > allocate a global dma coherent pool from which DMA allocations are taken
> > and marked as non-cacheable + bufferable using the PMA region as specified
> > in the device tree. Synchronization callbacks are implemented to
> > synchronize when doing DMA transactions.
> >
> > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > block that allows dynamic adjustment of memory attributes in the runtime.
> > It contains a configurable amount of PMA entries implemented as CSR
> > registers to control the attributes of memory locations in interest.
> >
> > Below are the memory attributes supported:
> > * Device, Non-bufferable
> > * Device, bufferable
> > * Memory, Non-cacheable, Non-bufferable
> > * Memory, Non-cacheable, Bufferable
> > * Memory, Write-back, No-allocate
> > * Memory, Write-back, Read-allocate
> > * Memory, Write-back, Write-allocate
> > * Memory, Write-back, Read and Write-allocate
> >
> > This patch adds support to configure the memory attributes of the memory
> > regions as passed from the l2 cache node and exposes the cache management
> > ops.
> >
> > More info about PMA (section 10.3):
> > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> >
> > This feature is based on the work posted [0] by Vincent Chen
> > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> >
> > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> >
> > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> Thanks for your patch!
>
> > --- a/drivers/soc/renesas/Kconfig
> > +++ b/drivers/soc/renesas/Kconfig
> > @@ -340,9 +340,14 @@ if RISCV
> >  config ARCH_R9A07G043
> >         bool "RISC-V Platform support for RZ/Five"
> >         select ARCH_RZG2L
> > +       select AX45MP_L2_CACHE
> > +       select DMA_GLOBAL_POOL
> > +       select RISCV_DMA_NONCOHERENT
> >         help
> >           This enables support for the Renesas RZ/Five SoC.
> >
> > +source "drivers/soc/renesas/rzf/Kconfig"
>
> s/rzf/rzfive/? (or "rz5"? "rzv"?)
>
OK, I'll rename it to rzfive instead.

> > +
> >  endif # RISCV
> >
> >  config RST_RCAR
> > diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
> > index 535868c9c7e4..a20cc7ad5b12 100644
> > --- a/drivers/soc/renesas/Makefile
> > +++ b/drivers/soc/renesas/Makefile
> > @@ -31,6 +31,10 @@ ifdef CONFIG_SMP
> >  obj-$(CONFIG_ARCH_R9A06G032)   += r9a06g032-smp.o
> >  endif
> >
> > +ifdef CONFIG_RISCV
> > +obj-y += rzf/
> > +endif
>
> obj-$(CONFIG_RISCV)
>
Agreed.

> > --- /dev/null
> > +++ b/drivers/soc/renesas/rzf/Kconfig
> > @@ -0,0 +1,6 @@
> > +# SPDX-License-Identifier: GPL-2.0
> > +
> > +config AX45MP_L2_CACHE
> > +       bool "AX45MP L2 Cache controller"
>
> Andes Technology ...
>
OK, "Andes Technology AX45MP L2 Cache controller"

> > +       help
> > +         Support for the L2 cache controller on AX45MP platforms.
>
> ... Andes Technology ...
>
OK.

> > --- /dev/null
> > +++ b/drivers/soc/renesas/rzf/ax45mp_cache.c
> > @@ -0,0 +1,431 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * PMA setup and non-coherent cache functions for AX45MP
> > + *
> > + * Copyright (C) 2022 Renesas Electronics Corp.
> > + */
> > +
> > +#include <linux/cacheflush.h>
> > +#include <linux/cacheinfo.h>
> > +#include <linux/of_address.h>
> > +#include <linux/of_platform.h>
> > +
> > +#include <asm/sbi.h>
> > +
> > +#include "ax45mp_sbi.h"
> > +
> > +/* L2 cache registers */
> > +#define AX45MP_L2C_REG_CTL_OFFSET              0x8
> > +#define AX45MP_L2C_IPREPETCH_OFF               3
> > +#define AX45MP_L2C_DPREPETCH_OFF               5
> > +#define AX45MP_L2C_IPREPETCH_MSK               (3 << AX45MP_L2C_IPREPETCH_OFF)
> > +#define AX45MP_L2C_DPREPETCH_MSK               (3 << AX45MP_L2C_DPREPETCH_OFF)
>
> #define AX45MP_L2C_IPREPETCH    GENMASK(4, 3)
> etc., and then you can use the FIELD_PREP() macros.
>
Agreed; now that we have decided to drop the setup of the L2C controls,
the above macros can be dropped.

> > +#define AX45MP_L2C_TRAMOCTL_OFF                        8
> > +#define AX45MP_L2C_TRAMICTL_OFF                        10
> > +#define AX45MP_L2C_TRAMOCTL_MSK                        (3 << AX45MP_L2C_TRAMOCTL_OFF)
> > +#define AX45MP_L2C_TRAMICTL_MSK                        BIT(AX45MP_L2C_TRAMICTL_OFF)
> > +#define AX45MP_L2C_DRAMOCTL_OFF                        11
> > +#define AX45MP_L2C_DRAMICTL_OFF                        13
> > +#define AX45MP_L2C_DRAMOCTL_MSK                        (3 << AX45MP_L2C_DRAMOCTL_OFF)
> > +#define AX45MP_L2C_DRAMICTL_MSK                        BIT(AX45MP_L2C_DRAMICTL_OFF)
>
> > +
> > +#define AX45MP_MAX_CACHE_LINE_SIZE             256
> > +
> > +#define AX45MP_MAX_PMA_REGIONS                 16
> > +
> > +struct ax45mp_priv {
> > +       void __iomem *l2c_base;
> > +       unsigned int ax45mp_cache_line_size;
> > +       bool l2cache_enabled;
> > +       bool ucctl_ok;
> > +};
> > +
> > +static struct ax45mp_priv *ax45mp_priv;
> > +static DEFINE_STATIC_KEY_FALSE(ax45mp_l2c_configured);
> > +
> > +/* PMA setup */
> > +static long ax45mp_sbi_set_pma(unsigned long start,
> > +                              unsigned long size,
> > +                              unsigned long flags,
> > +                              unsigned int entry_id)
> > +{
> > +       struct sbiret ret;
> > +
> > +       ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_SET_PMA,
> > +                       start, start + size, size, entry_id,
> > +                       flags, 0);
>
> Fits on two lines.
>
OK.

On a second look the "start + size" arg can be dropped as this can be
calculated in OpenSBI, so I'll fix it in OpenSBI and here too.

> > +
> > +       return ret.value;
> > +}
> > +
> > +static int ax45mp_configure_pma_regions(struct device_node *np)
> > +{
> > +       const char *propname = "andestech,pma-regions";
> > +       u64 start, size, flags;
> > +       unsigned int entry_id;
> > +       unsigned int i;
> > +       int count;
> > +       int ret;
> > +
> > +       count = of_property_count_elems_of_size(np, propname,
> > +                                               sizeof(u32) * 6);
>
> Fits on a single line.
>
OK.

> > +static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
> > +{
> > +       return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_STATUS_OFFSET));
>
> Why the cast to "(void *)"?
>
Can be dropped.

> > +}
> > +
> > +static inline uint32_t ax45mp_cpu_l2c_ctl_status(void)
> > +{
> > +       return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET));
>
> Likewise.
>
Ditto.

> > +}
>
> > +static void ax45mp_cpu_dcache_wb_range(unsigned long start,
> > +                                      unsigned long end,
> > +                                      int line_size)
> > +{
> > +       void __iomem *base = ax45mp_priv->l2c_base;
> > +       unsigned long pa;
> > +       int mhartid = 0;
> > +#ifdef CONFIG_SMP
> > +       mhartid = smp_processor_id();
> > +#endif
> > +
> > +       while (end > start) {
> > +               if (ax45mp_priv->ucctl_ok) {
> > +                       csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
> > +                       csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_WB);
> > +               }
> > +
> > +               if (ax45mp_priv->l2cache_enabled) {
> > +                       pa = virt_to_phys((void *)start);
>
> Looks like start and end should be "void *" instead of " unsigned long",
> as they are virtual addresses. See also below...
>
OK.

> > +                       writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
> > +                       writel(AX45MP_CCTL_L2_PA_WB,
> > +                              (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));
>
> Why the casts to "(void *)"?
>
Can be dropped.

> > +                       while ((ax45mp_cpu_l2c_get_cctl_status() &
> > +                               AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
> > +                               AX45MP_CCTL_L2_STATUS_IDLE)
> > +                               ;
> > +               }
> > +
> > +               start += line_size;
> > +       }
> > +}
> > +
> > +static void ax45mp_cpu_dcache_inval_range(unsigned long start,
> > +                                         unsigned long end,
> > +                                         int line_size)
> > +{
> > +       void __iomem *base = ax45mp_priv->l2c_base;
> > +       unsigned long pa;
> > +       int mhartid = 0;
> > +#ifdef CONFIG_SMP
> > +       mhartid = smp_processor_id();
> > +#endif
> > +
> > +       while (end > start) {
> > +               if (ax45mp_priv->ucctl_ok) {
> > +                       csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
> > +                       csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_INVAL);
> > +               }
> > +
> > +               if (ax45mp_priv->l2cache_enabled) {
> > +                       pa = virt_to_phys((void *)start);
>
> Looks like start and end should be "void *" instead of " unsigned long",
> as they are virtual addresses. See also below...
>
OK.

> > +                       writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
> > +                       writel(AX45MP_CCTL_L2_PA_INVAL,
> > +                              (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));
> > +                       while ((ax45mp_cpu_l2c_get_cctl_status() &
> > +                               AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
> > +                               AX45MP_CCTL_L2_STATUS_IDLE)
> > +                               ;
> > +               }
> > +
> > +               start += line_size;
> > +       }
> > +}
> > +
> > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t size)
> > +{
> > +       char cache_buf[2][AX45MP_MAX_CACHE_LINE_SIZE] = { 0 };
>
> AX45MP_MAX_CACHE_LINE_SIZE = 256, so 512 bytes of data on the stack,
> auto-initialized by memset().
>
> Please remove the { 0 }, ...
>
Ok will do.

> > +       unsigned long start = (unsigned long)vaddr;
> > +       unsigned long end = start + size;
> > +       unsigned long old_start = start;
> > +       unsigned long old_end = end;
> > +       unsigned long line_size;
> > +       unsigned long flags;
> > +
> > +       if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
> > +               return;
> > +
> > +       if (unlikely(start == end))
> > +               return;
> > +
> > +       line_size = ax45mp_priv->ax45mp_cache_line_size;
>
> ... and call memset() here, so the buffer is not initialized when unused.
> Perhaps use two buffers, so you can easily memset() only the part that is
> used?
>
OK.

> > +
> > +       start = start & (~(line_size - 1));
> > +       end = ((end + line_size - 1) & (~(line_size - 1)));
>
> These are the only calculations that need to use "unsigned long"
> instead of "void *", but you can use PTR_ALIGN_DOWN() and PTR_ALIGN()
> to avoid explicit casts.
>
Good point.
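Something along these lines then (untested sketch):

	unsigned long line_size = ax45mp_priv->ax45mp_cache_line_size;
	/* round the range out to whole cache lines, keeping void * throughout */
	void *start = PTR_ALIGN_DOWN(vaddr, line_size);
	void *end = PTR_ALIGN(vaddr + size, line_size);

and the edge-line save/restore can then compare start/end directly against
vaddr and vaddr + size instead of carrying old_start/old_end around.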

> > +
> > +       local_irq_save(flags);
> > +       if (unlikely(start != old_start))
> > +               memcpy(&cache_buf[0][0], (void *)start, line_size);
> > +
> > +       if (unlikely(end != old_end))
> > +               memcpy(&cache_buf[1][0], (void *)(old_end & (~(line_size - 1))), line_size);
>
> PTR_ALIGN_DOWN()
>
OK.

> > +
> > +       ax45mp_cpu_dcache_inval_range(start, end, line_size);
> > +
> > +       if (unlikely(start != old_start))
> > +               memcpy((void *)start, &cache_buf[0][0], (old_start & (line_size - 1)));
> > +
> > +       if (unlikely(end != old_end))
> > +               memcpy((void *)(old_end + 1),
> > +                      &cache_buf[1][(old_end & (line_size - 1)) + 1],
> > +                      end - old_end - 1);
> > +
> > +       local_irq_restore(flags);
> > +}
> > +EXPORT_SYMBOL(ax45mp_cpu_dma_inval_range);
> > +
> > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t size)
> > +{
> > +       unsigned long start = (unsigned long)vaddr;
> > +       unsigned long end = start + size;
> > +       unsigned long line_size;
> > +       unsigned long flags;
> > +
> > +       if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
> > +               return;
> > +
> > +       line_size = ax45mp_priv->ax45mp_cache_line_size;
> > +       local_irq_save(flags);
> > +       start = start & (~(line_size - 1));
>
> PTR_ALIGN_DOWN() etc...
>
OK.

> > +       ax45mp_cpu_dcache_wb_range(start, end, line_size);
> > +       local_irq_restore(flags);
> > +}
> > +EXPORT_SYMBOL(ax45mp_cpu_dma_wb_range);
> > +
> > +static int ax45mp_configure_l2_cache(struct device_node *np)
> > +{
> > +       u8 ram_ctl[2];
> > +       u32 cache_ctl;
> > +       u32 prefetch;
> > +       int ret;
> > +
> > +       cache_ctl = ax45mp_cpu_l2c_ctl_status();
> > +
> > +       /* Instruction and data fetch prefetch depth */
> > +       ret = of_property_read_u32(np, "andestech,inst-prefetch", &prefetch);
> > +       if (!ret) {
> > +               cache_ctl &= ~AX45MP_L2C_IPREPETCH_MSK;
> > +               cache_ctl |= (prefetch << AX45MP_L2C_IPREPETCH_OFF);
>
> FIELD_PREP(), also below
>
This entire function will be dropped now...

> > +       }
> > +
> > +       ret = of_property_read_u32(np, "andestech,data-prefetch", &prefetch);
> > +       if (!ret) {
> > +               cache_ctl &= ~AX45MP_L2C_DPREPETCH_MSK;
> > +               cache_ctl |= (prefetch << AX45MP_L2C_DPREPETCH_OFF);
>
> prefect / 2
>
> > +       }
> > +
> > +       /* tag RAM and data RAM setup and output cycle */
> > +       ret = of_property_read_u8_array(np, "andestech,tag-ram-ctl", ram_ctl, 2);
> > +       if (!ret) {
> > +               cache_ctl &= ~(AX45MP_L2C_TRAMOCTL_MSK | AX45MP_L2C_TRAMICTL_MSK);
> > +               cache_ctl |= ram_ctl[0] << AX45MP_L2C_TRAMOCTL_OFF;
> > +               cache_ctl |= ram_ctl[1] << AX45MP_L2C_TRAMICTL_OFF;
> > +       }
> > +
> > +       ret = of_property_read_u8_array(np, "andestech,data-ram-ctl", ram_ctl, 2);
> > +       if (!ret) {
> > +               cache_ctl &= ~(AX45MP_L2C_DRAMOCTL_MSK | AX45MP_L2C_DRAMICTL_MSK);
> > +               cache_ctl |= ram_ctl[0] << AX45MP_L2C_DRAMOCTL_OFF;
> > +               cache_ctl |= ram_ctl[1] << AX45MP_L2C_DRAMICTL_OFF;
> > +       }
> > +
> > +       writel(cache_ctl, ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET);
> > +
> > +       ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv->ax45mp_cache_line_size);
>
> According to the bindings, this must be 64?
>
Agreed, I'll add a check to make sure it's always 64.
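Something like this (sketch):

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv->ax45mp_cache_line_size);
	/* the DT binding fixes cache-line-size to 64 bytes, so reject anything else */
	if (ret || ax45mp_priv->ax45mp_cache_line_size != SZ_64) {
		pr_err("Expected cache-line-size of 64 bytes\n");
		return -EINVAL;
	}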

Cheers,
Prabhakar
Lad, Prabhakar Oct. 25, 2022, 11:21 p.m. UTC | #8
Hi Heiko,

On Mon, Oct 24, 2022 at 1:04 PM Heiko Stübner <heiko@sntech.de> wrote:
>
> Hi Prabhakar,
>
> Am Montag, 24. Oktober 2022, 13:55:00 CEST schrieb Lad, Prabhakar:
> > Hi Conor,
> >
> > On Fri, Oct 21, 2022 at 11:32 PM Conor Dooley <conor@kernel.org> wrote:
> > >
> > > On Fri, Oct 21, 2022 at 11:05:40PM +0100, Lad, Prabhakar wrote:
> > > > Hi Rob,
> > > >
> > > > Thank you for the review.
> > > >
> > > > On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
> > > > >
> > > > > On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > > > > > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > > >
> > > > > > On the AX45MP core, cache coherency is a specification option so it may
> > > > > > not be supported. In this case DMA will fail. As a workaround, firstly we
> > > > > > allocate a global dma coherent pool from which DMA allocations are taken
> > > > > > and marked as non-cacheable + bufferable using the PMA region as specified
> > > > > > in the device tree. Synchronization callbacks are implemented to
> > > > > > synchronize when doing DMA transactions.
> > > > > >
> > > > > > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > > > > > block that allows dynamic adjustment of memory attributes in the runtime.
> > > > > > It contains a configurable amount of PMA entries implemented as CSR
> > > > > > registers to control the attributes of memory locations in interest.
> > > > > >
> > > > > > Below are the memory attributes supported:
> > > > > > * Device, Non-bufferable
> > > > > > * Device, bufferable
> > > > > > * Memory, Non-cacheable, Non-bufferable
> > > > > > * Memory, Non-cacheable, Bufferable
> > > > > > * Memory, Write-back, No-allocate
> > > > > > * Memory, Write-back, Read-allocate
> > > > > > * Memory, Write-back, Write-allocate
> > > > > > * Memory, Write-back, Read and Write-allocate
> > > > > >
> > > > > > This patch adds support to configure the memory attributes of the memory
> > > > > > regions as passed from the l2 cache node and exposes the cache management
> > > > > > ops.
> > > > > >
> > > > > > More info about PMA (section 10.3):
> > > > > > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> > > > > >
> > > > > > This feature is based on the work posted [0] by Vincent Chen
> > > > > > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> > > > > >
> > > > > > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> > > > > >
> > > > > > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > > > ---
> > > > > >  arch/riscv/include/asm/cacheflush.h    |   8 +
> > > > > >  arch/riscv/include/asm/errata_list.h   |   2 +
> > > > > >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> > > > > >  drivers/soc/renesas/Kconfig            |   5 +
> > > > > >  drivers/soc/renesas/Makefile           |   4 +
> > > > > >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> > > > > >  drivers/soc/renesas/rzf/Makefile       |   3 +
> > > > > >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
> > > > >
> > > > > How many cache drivers do we have around now? I've seen a few bindings
> > > > > go by. I'm guessing it is time to stop putting the drivers in the
> > > > > drivers/soc/ dumping ground.
> > > > >
> > > > The main reason this driver is not in arch/riscv is that it has vendor
> > > > specific extensions. For this reason it was agreed during the LPC
> > > > that vendor specific extensions should be maintained by SoC vendors, and
> > > > that this can go into the drivers/soc/renesas folder instead.
> > >
> > > Does "not in drivers/soc" mean they need to go into arch/riscv?
> > I was under the impression Rob wanted them in arch/riscv, sorry for the confusion.
> >
> > > The outcome of the chat at the LPC BoF was more that the cache drivers
> > > themselves should not be be routed via the arch maintainers, no?
> > >
> > Indeed.
> >
> > > >
> > > > > >  drivers/soc/renesas/rzf/ax45mp_sbi.h   |  29 ++
> > > > > >  9 files changed, 508 insertions(+)
> > > > > >  create mode 100644 drivers/soc/renesas/rzf/Kconfig
> > > > > >  create mode 100644 drivers/soc/renesas/rzf/Makefile
> > > > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_cache.c
> > > > > >  create mode 100644 drivers/soc/renesas/rzf/ax45mp_sbi.h
> > > > > >
> > > > > > diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> > > > > > index 8a5c246b0a21..40aa790be9a3 100644
> > > > > > --- a/arch/riscv/include/asm/cacheflush.h
> > > > > > +++ b/arch/riscv/include/asm/cacheflush.h
> > > > > > @@ -65,6 +65,14 @@ static inline void riscv_noncoherent_supported(void) {}
> > > > > >  #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
> > > > > >  #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
> > > > > >
> > > > > > +#ifdef CONFIG_AX45MP_L2_CACHE
> > > > > > +void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
> > > > > > +void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
> > > > > > +
> > > > > > +#define ALT_CMO_OP(_op, _start, _size, _cachesize)   \
> > > > > > +                _op(_start, _size)
> > > > > > +#endif
> > > > > > +
> > > > > >  #include <asm-generic/cacheflush.h>
> > > > > >
> > > > > >  #endif /* _ASM_RISCV_CACHEFLUSH_H */
> > > > > > diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
> > > > > > index 19a771085781..d9cbf60c3b65 100644
> > > > > > --- a/arch/riscv/include/asm/errata_list.h
> > > > > > +++ b/arch/riscv/include/asm/errata_list.h
> > > > > > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                           \
> > > > > >  #define ALT_THEAD_PMA(_val)
> > > > > >  #endif
> > > > > >
> > > > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > > > >  /*
> > > > > >   * dcache.ipa rs1 (invalidate, physical address)
> > > > > >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > > > > > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                               \
> > > > > >       : "a0")
> > > > > >
> > > > > >  #endif /* __ASSEMBLY__ */
> > > > > > +#endif
> > > > > >
> > > > > >  #endif
> > > > > > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > > > > > index b0add983530a..5270acca6766 100644
> > > > > > --- a/arch/riscv/mm/dma-noncoherent.c
> > > > > > +++ b/arch/riscv/mm/dma-noncoherent.c
> > > > > > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> > > > > >
> > > > > >       switch (dir) {
> > > > > >       case DMA_TO_DEVICE:
> > > > > > +#ifdef CONFIG_ERRATA_THEAD_CMO
> > > > > >               ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > > > > > +#elif CONFIG_AX45MP_L2_CACHE
> > > > > > +             ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> > > > > > +#endif
> > > > >
> > > > > How do you support more than one platform in a build?
> > > > >
> > > > Yes, that's one concern, which I have mentioned in the cover letter too
> > > > (at the moment it's just a single platform). Suggestions welcome!
> > >
> > > I think I said it on one of the earlier versions, but it needs to be
> > > implemented w/ runtime patching via alternatives just like the thead
> > > stuff patches in their functions.
> > >
> > I'm a bit stumped with alternatives() usage.
> >
> > Currently I am just replacing the ALT_CMO_OP() macro if
> > CONFIG_AX45MP_L2_CACHE is enabled. For AX45MP we currently have two
> > exported functions, ax45mp_cpu_dma_inval_range()/ax45mp_cpu_dma_wb_range().
> > If I switch to the ALTERNATIVE() macro, will I have to use assembly
> > versions of the two functions mentioned above?
>
> The overarching goal should always be the unified-kernel-image.
> So hardware-specific compile-time #ifdefs are normally a no-no :-).
>
> So yes, it most likely should be assembly-based, and you'll "just" need
> to introduce an ALTERNATIVE_3 macro, similar to what ALTERNATIVE_2 does.
>
> That is actually the really nice part of alternatives, that you can have as
> many variants as you like.
>
Thank you for the pointer. I'm still going through the ALTERNATIVE()
macro implementation; do you think a "call <c_function>" would be an
acceptable approach (I haven't implemented or tested it yet)? Or is
my understanding completely off?

Cheers,
Prabhakar
Geert Uytterhoeven Nov. 1, 2022, 12:42 p.m. UTC | #9
Hi Prabhakar,

On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> On the AX45MP core, cache coherency is a specification option so it may
> not be supported. In this case DMA will fail. As a workaround, firstly we
> allocate a global dma coherent pool from which DMA allocations are taken
> and marked as non-cacheable + bufferable using the PMA region as specified
> in the device tree. Synchronization callbacks are implemented to
> synchronize when doing DMA transactions.
>
> The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> block that allows dynamic adjustment of memory attributes in the runtime.
> It contains a configurable amount of PMA entries implemented as CSR
> registers to control the attributes of memory locations in interest.
>
> Below are the memory attributes supported:
> * Device, Non-bufferable
> * Device, bufferable
> * Memory, Non-cacheable, Non-bufferable
> * Memory, Non-cacheable, Bufferable
> * Memory, Write-back, No-allocate
> * Memory, Write-back, Read-allocate
> * Memory, Write-back, Write-allocate
> * Memory, Write-back, Read and Write-allocate
>
> This patch adds support to configure the memory attributes of the memory
> regions as passed from the l2 cache node and exposes the cache management
> ops.
>
> More info about PMA (section 10.3):
> http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
>
> This feature is based on the work posted [0] by Vincent Chen
> <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
>
> [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
>
> Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>

Thanks for your patch!

> --- a/arch/riscv/mm/dma-noncoherent.c
> +++ b/arch/riscv/mm/dma-noncoherent.c
> @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
>
>         switch (dir) {
>         case DMA_TO_DEVICE:
> +#ifdef CONFIG_ERRATA_THEAD_CMO
>                 ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> +#elif CONFIG_AX45MP_L2_CACHE

"#elif defined(CONFIG_AX45MP_L2_CACHE)" (everywhere)

Else it may fail with:

    error: "CONFIG_AX45MP_L2_CACHE" is not defined, evaluates to 0
[-Werror=undef]


> +               ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
> +#endif

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds
Geert Uytterhoeven Nov. 1, 2022, 1:38 p.m. UTC | #10
Hi Prabhakar,

On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> On the AX45MP core, cache coherency is a specification option so it may
> not be supported. In this case DMA will fail. As a workaround, firstly we
> allocate a global dma coherent pool from which DMA allocations are taken
> and marked as non-cacheable + bufferable using the PMA region as specified
> in the device tree. Synchronization callbacks are implemented to
> synchronize when doing DMA transactions.
>
> The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> block that allows dynamic adjustment of memory attributes in the runtime.
> It contains a configurable amount of PMA entries implemented as CSR
> registers to control the attributes of memory locations in interest.
>
> Below are the memory attributes supported:
> * Device, Non-bufferable
> * Device, bufferable
> * Memory, Non-cacheable, Non-bufferable
> * Memory, Non-cacheable, Bufferable
> * Memory, Write-back, No-allocate
> * Memory, Write-back, Read-allocate
> * Memory, Write-back, Write-allocate
> * Memory, Write-back, Read and Write-allocate
>
> This patch adds support to configure the memory attributes of the memory
> regions as passed from the l2 cache node and exposes the cache management
> ops.
>
> More info about PMA (section 10.3):
> http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
>
> This feature is based on the work posted [0] by Vincent Chen
> <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
>
> [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
>
> Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>

Thanks for your patch!

> --- a/arch/riscv/include/asm/errata_list.h
> +++ b/arch/riscv/include/asm/errata_list.h
> @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                             \
>  #define ALT_THEAD_PMA(_val)
>  #endif
>
> +#ifdef CONFIG_ERRATA_THEAD_CMO
>  /*
>   * dcache.ipa rs1 (invalidate, physical address)
>   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                         \
>         : "a0")
>
>  #endif /* __ASSEMBLY__ */
> +#endif

FTR, the new #endif should be above the old #endif.

I noticed because after rebasing on top of commit 65e9fb081877a18c
("drivers/perf: riscv_pmu_sbi: add support for PMU variant on T-Head
C9xx cores") in riscv/for-next, the build failed because the new
ALT_SBI_PMU_OVERFLOW() definition ended up inside both #endifs,
instead of between.

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds
Lad, Prabhakar Nov. 2, 2022, 12:59 a.m. UTC | #11
Hi Geert,

On Tue, Nov 1, 2022 at 1:38 PM Geert Uytterhoeven <geert@linux-m68k.org> wrote:
>
> Hi Prabhakar,
>
> On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> >
> > On the AX45MP core, cache coherency is a specification option so it may
> > not be supported. In this case DMA will fail. As a workaround, firstly we
> > allocate a global dma coherent pool from which DMA allocations are taken
> > and marked as non-cacheable + bufferable using the PMA region as specified
> > in the device tree. Synchronization callbacks are implemented to
> > synchronize when doing DMA transactions.
> >
> > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > block that allows dynamic adjustment of memory attributes in the runtime.
> > It contains a configurable amount of PMA entries implemented as CSR
> > registers to control the attributes of memory locations in interest.
> >
> > Below are the memory attributes supported:
> > * Device, Non-bufferable
> > * Device, bufferable
> > * Memory, Non-cacheable, Non-bufferable
> > * Memory, Non-cacheable, Bufferable
> > * Memory, Write-back, No-allocate
> > * Memory, Write-back, Read-allocate
> > * Memory, Write-back, Write-allocate
> > * Memory, Write-back, Read and Write-allocate
> >
> > This patch adds support to configure the memory attributes of the memory
> > regions as passed from the l2 cache node and exposes the cache management
> > ops.
> >
> > More info about PMA (section 10.3):
> > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> >
> > This feature is based on the work posted [0] by Vincent Chen
> > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> >
> > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> >
> > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> Thanks for your patch!
>
> > --- a/arch/riscv/include/asm/errata_list.h
> > +++ b/arch/riscv/include/asm/errata_list.h
> > @@ -89,6 +89,7 @@ asm volatile(ALTERNATIVE(                                             \
> >  #define ALT_THEAD_PMA(_val)
> >  #endif
> >
> > +#ifdef CONFIG_ERRATA_THEAD_CMO
> >  /*
> >   * dcache.ipa rs1 (invalidate, physical address)
> >   * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
> > @@ -143,5 +144,6 @@ asm volatile(ALTERNATIVE_2(                                         \
> >         : "a0")
> >
> >  #endif /* __ASSEMBLY__ */
> > +#endif
>
> FTR, the new #endif should be above the old #endif.
>
> I noticed because after rebasing on top of commit 65e9fb081877a18c
> ("drivers/perf: riscv_pmu_sbi: add support for PMU variant on T-Head
> C9xx cores") in riscv/for-next, the build failed because the new
> ALT_SBI_PMU_OVERFLOW() definition ended up inside both #endifs,
> instead of between.
>
Thanks for pointing this out.
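The corrected placement would then be:

	: "a0")

#endif /* CONFIG_ERRATA_THEAD_CMO */
#endif /* __ASSEMBLY__ */

#endif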

Cheers,
Prabhakar
Lad, Prabhakar Nov. 2, 2022, 1:02 a.m. UTC | #12
Hi Geert,

On Tue, Nov 1, 2022 at 12:43 PM Geert Uytterhoeven <geert@linux-m68k.org> wrote:
>
> Hi Prabhakar,
>
> On Thu, Oct 20, 2022 at 12:02 AM Prabhakar <prabhakar.csengg@gmail.com> wrote:
> > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> >
> > On the AX45MP core, cache coherency is a specification option so it may
> > not be supported. In this case DMA will fail. As a workaround, firstly we
> > allocate a global dma coherent pool from which DMA allocations are taken
> > and marked as non-cacheable + bufferable using the PMA region as specified
> > in the device tree. Synchronization callbacks are implemented to
> > synchronize when doing DMA transactions.
> >
> > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > block that allows dynamic adjustment of memory attributes in the runtime.
> > It contains a configurable amount of PMA entries implemented as CSR
> > registers to control the attributes of memory locations in interest.
> >
> > Below are the memory attributes supported:
> > * Device, Non-bufferable
> > * Device, bufferable
> > * Memory, Non-cacheable, Non-bufferable
> > * Memory, Non-cacheable, Bufferable
> > * Memory, Write-back, No-allocate
> > * Memory, Write-back, Read-allocate
> > * Memory, Write-back, Write-allocate
> > * Memory, Write-back, Read and Write-allocate
> >
> > This patch adds support to configure the memory attributes of the memory
> > regions as passed from the l2 cache node and exposes the cache management
> > ops.
> >
> > More info about PMA (section 10.3):
> > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> >
> > This feature is based on the work posted [0] by Vincent Chen
> > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> >
> > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> >
> > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
>
> Thanks for your patch!
>
> > --- a/arch/riscv/mm/dma-noncoherent.c
> > +++ b/arch/riscv/mm/dma-noncoherent.c
> > @@ -24,13 +24,25 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
> >
> >         switch (dir) {
> >         case DMA_TO_DEVICE:
> > +#ifdef CONFIG_ERRATA_THEAD_CMO
> >                 ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > +#elif CONFIG_AX45MP_L2_CACHE
>
> "#elif defined(CONFIG_AX45MP_L2_CACHE)" (everywhere)
>
> Else it may fail with:
>
>     error: "CONFIG_AX45MP_L2_CACHE" is not defined, evaluates to 0
> [-Werror=undef]
>
Agreed, thanks for pointing this out. That said, I plan to get rid of
these checks in the next version (once I've got my head around the
ALTERNATIVE() macro).
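For the record, the fixed-up form until those checks go away would be:

#ifdef CONFIG_ERRATA_THEAD_CMO
		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
#elif defined(CONFIG_AX45MP_L2_CACHE)
		ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
#endif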

Cheers,
Prabhakar
Rob Herring (Arm) Nov. 3, 2022, 3:20 a.m. UTC | #13
On Fri, Oct 21, 2022 at 11:32:01PM +0100, Conor Dooley wrote:
> On Fri, Oct 21, 2022 at 11:05:40PM +0100, Lad, Prabhakar wrote:
> > Hi Rob,
> > 
> > Thank you for the review.
> > 
> > On Fri, Oct 21, 2022 at 3:05 AM Rob Herring <robh@kernel.org> wrote:
> > >
> > > On Wed, Oct 19, 2022 at 11:02:42PM +0100, Prabhakar wrote:
> > > > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > >
> > > > On the AX45MP core, cache coherency is a specification option so it may
> > > > not be supported. In this case DMA will fail. As a workaround, firstly we
> > > > allocate a global dma coherent pool from which DMA allocations are taken
> > > > and marked as non-cacheable + bufferable using the PMA region as specified
> > > > in the device tree. Synchronization callbacks are implemented to
> > > > synchronize when doing DMA transactions.
> > > >
> > > > The Andes AX45MP core has a Programmable Physical Memory Attributes (PMA)
> > > > block that allows dynamic adjustment of memory attributes in the runtime.
> > > > It contains a configurable amount of PMA entries implemented as CSR
> > > > registers to control the attributes of memory locations in interest.
> > > >
> > > > Below are the memory attributes supported:
> > > > * Device, Non-bufferable
> > > > * Device, bufferable
> > > > * Memory, Non-cacheable, Non-bufferable
> > > > * Memory, Non-cacheable, Bufferable
> > > > * Memory, Write-back, No-allocate
> > > > * Memory, Write-back, Read-allocate
> > > > * Memory, Write-back, Write-allocate
> > > > * Memory, Write-back, Read and Write-allocate
> > > >
> > > > This patch adds support to configure the memory attributes of the memory
> > > > regions as passed from the l2 cache node and exposes the cache management
> > > > ops.
> > > >
> > > > More info about PMA (section 10.3):
> > > > http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
> > > >
> > > > This feature is based on the work posted [0] by Vincent Chen
> > > > <vincentc@andestech.com> for the Andes AndeStart RISC-V CPU.
> > > >
> > > > [0] https://lore.kernel.org/lkml/1540982130-28248-1-git-send-email-vincentc@andestech.com/
> > > >
> > > > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> > > > ---
> > > >  arch/riscv/include/asm/cacheflush.h    |   8 +
> > > >  arch/riscv/include/asm/errata_list.h   |   2 +
> > > >  arch/riscv/mm/dma-noncoherent.c        |  20 ++
> > > >  drivers/soc/renesas/Kconfig            |   5 +
> > > >  drivers/soc/renesas/Makefile           |   4 +
> > > >  drivers/soc/renesas/rzf/Kconfig        |   6 +
> > > >  drivers/soc/renesas/rzf/Makefile       |   3 +
> > > >  drivers/soc/renesas/rzf/ax45mp_cache.c | 431 +++++++++++++++++++++++++
> > >
> > > How many cache drivers do we have around now? I've seen a few bindings
> > > go by. I'm guessing it is time to stop putting the drivers in the
> > > drivers/soc/ dumping ground.
> > >
> > The main reason this driver is not in arch/riscv is that it has vendor
> > specific extensions. For this reason it was agreed during the LPC
> > that vendor specific extensions should be maintained by SoC vendors, and
> > that this can go into the drivers/soc/renesas folder instead.
> 
> Does "not in drivers/soc" mean they need to go into arch/riscv?
> The outcome of the chat at the LPC BoF was more that the cache drivers
> themselves should not be routed via the arch maintainers, no?

drivers/cache/ or something like that is what I'm suggesting we start
with. The first thing is probably making an inventory of how many we
already have.

Rob
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8a5c246b0a21..40aa790be9a3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -65,6 +65,14 @@  static inline void riscv_noncoherent_supported(void) {}
 #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
 #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
 
+#ifdef CONFIG_AX45MP_L2_CACHE
+void ax45mp_cpu_dma_inval_range(void *vaddr, size_t end);
+void ax45mp_cpu_dma_wb_range(void *vaddr, size_t end);
+
+#define ALT_CMO_OP(_op, _start, _size, _cachesize)	\
+		   _op(_start, _size)
+#endif
+
 #include <asm-generic/cacheflush.h>
 
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 19a771085781..d9cbf60c3b65 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -89,6 +89,7 @@  asm volatile(ALTERNATIVE(						\
 #define ALT_THEAD_PMA(_val)
 #endif
 
+#ifdef CONFIG_ERRATA_THEAD_CMO
 /*
  * dcache.ipa rs1 (invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
@@ -143,5 +144,6 @@  asm volatile(ALTERNATIVE_2(						\
 	: "a0")
 
 #endif /* __ASSEMBLY__ */
+#endif
 
 #endif
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index b0add983530a..5270acca6766 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -24,13 +24,25 @@  void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 
 	switch (dir) {
 	case DMA_TO_DEVICE:
+#ifdef CONFIG_ERRATA_THEAD_CMO
 		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+#elif CONFIG_AX45MP_L2_CACHE
+		ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
+#endif
 		break;
 	case DMA_FROM_DEVICE:
+#ifdef CONFIG_ERRATA_THEAD_CMO
 		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+#elif CONFIG_AX45MP_L2_CACHE
+		ALT_CMO_OP(ax45mp_cpu_dma_inval_range, vaddr, size, 0x0);
+#endif
 		break;
 	case DMA_BIDIRECTIONAL:
+#ifdef CONFIG_ERRATA_THEAD_CMO
 		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+#elif CONFIG_AX45MP_L2_CACHE
+		ALT_CMO_OP(ax45mp_cpu_dma_wb_range, vaddr, size, 0x0);
+#endif
 		break;
 	default:
 		break;
@@ -47,7 +59,11 @@  void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 		break;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
+#ifdef CONFIG_ERRATA_THEAD_CMO
 		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+#elif CONFIG_AX45MP_L2_CACHE
+		ALT_CMO_OP(ax45mp_cpu_dma_inval_range, vaddr, size, 0x0);
+#endif
 		break;
 	default:
 		break;
@@ -56,14 +72,17 @@  void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
 {
+#ifdef CONFIG_ERRATA_THEAD_CMO
 	void *flush_addr = page_address(page);
 
 	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
+#endif
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent)
 {
+#ifdef CONFIG_ERRATA_THEAD_CMO
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
 		   "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
@@ -75,6 +94,7 @@  void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		   dev_driver_string(dev), dev_name(dev));
 
 	dev->dma_coherent = coherent;
+#endif
 }
 
 #ifdef CONFIG_RISCV_ISA_ZICBOM
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 660498252ec5..ba2981eaeb34 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -340,9 +340,14 @@  if RISCV
 config ARCH_R9A07G043
 	bool "RISC-V Platform support for RZ/Five"
 	select ARCH_RZG2L
+	select AX45MP_L2_CACHE
+	select DMA_GLOBAL_POOL
+	select RISCV_DMA_NONCOHERENT
 	help
 	  This enables support for the Renesas RZ/Five SoC.
 
+source "drivers/soc/renesas/rzf/Kconfig"
+
 endif # RISCV
 
 config RST_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 535868c9c7e4..a20cc7ad5b12 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -31,6 +31,10 @@  ifdef CONFIG_SMP
 obj-$(CONFIG_ARCH_R9A06G032)	+= r9a06g032-smp.o
 endif
 
+ifdef CONFIG_RISCV
+obj-y += rzf/
+endif
+
 # Family
 obj-$(CONFIG_RST_RCAR)		+= rcar-rst.o
 obj-$(CONFIG_SYSC_RCAR)		+= rcar-sysc.o
diff --git a/drivers/soc/renesas/rzf/Kconfig b/drivers/soc/renesas/rzf/Kconfig
new file mode 100644
index 000000000000..1e8198da3ba7
--- /dev/null
+++ b/drivers/soc/renesas/rzf/Kconfig
@@ -0,0 +1,6 @@ 
+# SPDX-License-Identifier: GPL-2.0
+
+config AX45MP_L2_CACHE
+	bool "AX45MP L2 Cache controller"
+	help
+	  Support for the L2 cache controller on AX45MP platforms.
diff --git a/drivers/soc/renesas/rzf/Makefile b/drivers/soc/renesas/rzf/Makefile
new file mode 100644
index 000000000000..2012e7fb978d
--- /dev/null
+++ b/drivers/soc/renesas/rzf/Makefile
@@ -0,0 +1,3 @@ 
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
diff --git a/drivers/soc/renesas/rzf/ax45mp_cache.c b/drivers/soc/renesas/rzf/ax45mp_cache.c
new file mode 100644
index 000000000000..2a1b82fc68d1
--- /dev/null
+++ b/drivers/soc/renesas/rzf/ax45mp_cache.c
@@ -0,0 +1,431 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PMA setup and non-coherent cache functions for AX45MP
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/cacheinfo.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <asm/sbi.h>
+
+#include "ax45mp_sbi.h"
+
+/* L2 cache registers */
+#define AX45MP_L2C_REG_CTL_OFFSET		0x8
+#define AX45MP_L2C_IPREPETCH_OFF		3
+#define AX45MP_L2C_DPREPETCH_OFF		5
+#define AX45MP_L2C_IPREPETCH_MSK		(3 << AX45MP_L2C_IPREPETCH_OFF)
+#define AX45MP_L2C_DPREPETCH_MSK		(3 << AX45MP_L2C_DPREPETCH_OFF)
+#define AX45MP_L2C_TRAMOCTL_OFF			8
+#define AX45MP_L2C_TRAMICTL_OFF			10
+#define AX45MP_L2C_TRAMOCTL_MSK			(3 << AX45MP_L2C_TRAMOCTL_OFF)
+#define AX45MP_L2C_TRAMICTL_MSK			BIT(AX45MP_L2C_TRAMICTL_OFF)
+#define AX45MP_L2C_DRAMOCTL_OFF			11
+#define AX45MP_L2C_DRAMICTL_OFF			13
+#define AX45MP_L2C_DRAMOCTL_MSK			(3 << AX45MP_L2C_DRAMOCTL_OFF)
+#define AX45MP_L2C_DRAMICTL_MSK			BIT(AX45MP_L2C_DRAMICTL_OFF)
+
+#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
+#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
+#define AX45MP_L2C_REG_STATUS_OFFSET		0x80
+
+/* D-cache operation */
+#define AX45MP_CCTL_L1D_VA_INVAL		0
+#define AX45MP_CCTL_L1D_VA_WB			1
+
+/* L2 cache */
+#define AX45MP_L2_CACHE_CTL_CEN_MASK		1
+
+/* L2 CCTL status */
+#define AX45MP_CCTL_L2_STATUS_IDLE		0
+
+/* L2 CCTL status cores mask */
+#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf
+
+/* L2 cache operation */
+#define AX45MP_CCTL_L2_PA_INVAL			0x8
+#define AX45MP_CCTL_L2_PA_WB			0x9
+
+#define AX45MP_L2C_HPM_PER_CORE_OFFSET		0x8
+#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
+#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4
+
+#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
+	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
+	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
+	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
+
+#define AX45MP_MICM_CFG_ISZ_OFFSET		6
+#define AX45MP_MICM_CFG_ISZ_MASK		(0x7  << AX45MP_MICM_CFG_ISZ_OFFSET)
+
+#define AX45MP_MDCM_CFG_DSZ_OFFSET		6
+#define AX45MP_MDCM_CFG_DSZ_MASK		(0x7  << AX45MP_MDCM_CFG_DSZ_OFFSET)
+
+#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
+#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c
+
+#define AX45MP_MCACHE_CTL_CCTL_SUEN_OFFSET	8
+#define AX45MP_MMSC_CFG_CCTLCSR_OFFSET		16
+#define AX45MP_MISA_20_OFFSET			20
+
+#define AX45MP_MCACHE_CTL_CCTL_SUEN_MASK	(0x1 << AX45MP_MCACHE_CTL_CCTL_SUEN_OFFSET)
+#define AX45MP_MMSC_CFG_CCTLCSR_MASK		(0x1 << AX45MP_MMSC_CFG_CCTLCSR_OFFSET)
+#define AX45MP_MISA_20_MASK			(0x1 << AX45MP_MISA_20_OFFSET)
+
+#define AX45MP_MAX_CACHE_LINE_SIZE		256
+
+#define AX45MP_MAX_PMA_REGIONS			16
+
+struct ax45mp_priv {
+	void __iomem *l2c_base;
+	unsigned int ax45mp_cache_line_size;
+	bool l2cache_enabled;
+	bool ucctl_ok;
+};
+
+static struct ax45mp_priv *ax45mp_priv;
+static DEFINE_STATIC_KEY_FALSE(ax45mp_l2c_configured);
+
+/* PMA setup */
+static long ax45mp_sbi_set_pma(unsigned long start,
+			       unsigned long size,
+			       unsigned long flags,
+			       unsigned int entry_id)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_SET_PMA,
+			start, start + size, size, entry_id,
+			flags, 0);
+
+	return ret.value;
+}
+
+static int ax45mp_configure_pma_regions(struct device_node *np)
+{
+	const char *propname = "andestech,pma-regions";
+	u64 start, size, flags;
+	unsigned int entry_id;
+	unsigned int i;
+	int count;
+	int ret;
+
+	count = of_property_count_elems_of_size(np, propname,
+						sizeof(u32) * 6);
+	if (count <= 0)
+		return 0;
+
+	if (count > AX45MP_MAX_PMA_REGIONS)
+		return -EINVAL;
+
+	for (i = 0, entry_id = 0 ; entry_id < count ; i += 3, entry_id++) {
+		of_property_read_u64_index(np, propname, i, &start);
+		of_property_read_u64_index(np, propname, i + 1, &size);
+		of_property_read_u64_index(np, propname, i + 2, &flags);
+		ret = ax45mp_sbi_set_pma(start, size, flags, entry_id);
+		if (!ret)
+			pr_err("Failed to setup PMA region 0x%llx - 0x%llx",
+			       start, start + size);
+	}
+
+	return 0;
+}
+
+/* L2 Cache operations */
+static uint32_t ax45mp_cpu_get_mcache_ctl_status(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_GET_MCACHE_CTL_STATUS,
+			0, 0, 0, 0, 0, 0);
+	return ret.value;
+}
+
+static uint32_t ax45mp_cpu_get_micm_cfg_status(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_GET_MICM_CTL_STATUS,
+			0, 0, 0, 0, 0, 0);
+	return ret.value;
+}
+
+static uint32_t ax45mp_cpu_get_mdcm_cfg_status(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_GET_MDCM_CTL_STATUS,
+			0, 0, 0, 0, 0, 0);
+	return ret.value;
+}
+
+static uint32_t ax45mp_cpu_get_mmsc_cfg_status(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_GET_MMSC_CTL_STATUS,
+			0, 0, 0, 0, 0, 0);
+	return ret.value;
+}
+
+static uint32_t ax45mp_cpu_get_misa_cfg_status(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_ANDES, AX45MP_SBI_EXT_GET_MISA_CTL_STATUS,
+			0, 0, 0, 0, 0, 0);
+	return ret.value;
+}
+
+static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
+{
+	return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_STATUS_OFFSET));
+}
+
+static inline uint32_t ax45mp_cpu_l2c_ctl_status(void)
+{
+	return readl((void *)(ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET));
+}
+
+static bool ax45mp_cpu_cache_controlable(void)
+{
+	return (((ax45mp_cpu_get_micm_cfg_status() & AX45MP_MICM_CFG_ISZ_MASK) ||
+		 (ax45mp_cpu_get_mdcm_cfg_status() & AX45MP_MDCM_CFG_DSZ_MASK)) &&
+		(ax45mp_cpu_get_misa_cfg_status() & AX45MP_MISA_20_MASK) &&
+		(ax45mp_cpu_get_mmsc_cfg_status() & AX45MP_MMSC_CFG_CCTLCSR_MASK) &&
+		(ax45mp_cpu_get_mcache_ctl_status() & AX45MP_MCACHE_CTL_CCTL_SUEN_MASK));
+}
+
+static void ax45mp_cpu_dcache_wb_range(unsigned long start,
+				       unsigned long end,
+				       int line_size)
+{
+	void __iomem *base = ax45mp_priv->l2c_base;
+	unsigned long pa;
+	int mhartid = 0;
+#ifdef CONFIG_SMP
+	mhartid = smp_processor_id();
+#endif
+
+	while (end > start) {
+		if (ax45mp_priv->ucctl_ok) {
+			csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
+			csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_WB);
+		}
+
+		if (ax45mp_priv->l2cache_enabled) {
+			pa = virt_to_phys((void *)start);
+			writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
+			writel(AX45MP_CCTL_L2_PA_WB,
+			       (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));
+			while ((ax45mp_cpu_l2c_get_cctl_status() &
+				AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
+				AX45MP_CCTL_L2_STATUS_IDLE)
+				;
+		}
+
+		start += line_size;
+	}
+}
+
+static void ax45mp_cpu_dcache_inval_range(unsigned long start,
+					  unsigned long end,
+					  int line_size)
+{
+	void __iomem *base = ax45mp_priv->l2c_base;
+	unsigned long pa;
+	int mhartid = 0;
+#ifdef CONFIG_SMP
+	mhartid = smp_processor_id();
+#endif
+
+	while (end > start) {
+		if (ax45mp_priv->ucctl_ok) {
+			csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
+			csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, AX45MP_CCTL_L1D_VA_INVAL);
+		}
+
+		if (ax45mp_priv->l2cache_enabled) {
+			pa = virt_to_phys((void *)start);
+			writel(pa, (void *)(base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid)));
+			writel(AX45MP_CCTL_L2_PA_INVAL,
+			       (void *)(base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid)));
+			while ((ax45mp_cpu_l2c_get_cctl_status() &
+				AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
+				AX45MP_CCTL_L2_STATUS_IDLE)
+				;
+		}
+
+		start += line_size;
+	}
+}
+
+void ax45mp_cpu_dma_inval_range(void *vaddr, size_t size)
+{
+	char cache_buf[2][AX45MP_MAX_CACHE_LINE_SIZE] = { 0 };
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+	unsigned long old_start = start;
+	unsigned long old_end = end;
+	unsigned long line_size;
+	unsigned long flags;
+
+	if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
+		return;
+
+	if (unlikely(start == end))
+		return;
+
+	line_size = ax45mp_priv->ax45mp_cache_line_size;
+
+	start = start & (~(line_size - 1));
+	end = ((end + line_size - 1) & (~(line_size - 1)));
+
+	local_irq_save(flags);
+	if (unlikely(start != old_start))
+		memcpy(&cache_buf[0][0], (void *)start, line_size);
+
+	if (unlikely(end != old_end))
+		memcpy(&cache_buf[1][0], (void *)(old_end & (~(line_size - 1))), line_size);
+
+	ax45mp_cpu_dcache_inval_range(start, end, line_size);
+
+	if (unlikely(start != old_start))
+		memcpy((void *)start, &cache_buf[0][0], (old_start & (line_size - 1)));
+
+	if (unlikely(end != old_end))
+		memcpy((void *)(old_end + 1),
+		       &cache_buf[1][(old_end & (line_size - 1)) + 1],
+		       end - old_end - 1);
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(ax45mp_cpu_dma_inval_range);
+
+void ax45mp_cpu_dma_wb_range(void *vaddr, size_t size)
+{
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+	unsigned long line_size;
+	unsigned long flags;
+
+	if (static_branch_unlikely(&ax45mp_l2c_configured) && !ax45mp_priv)
+		return;
+
+	line_size = ax45mp_priv->ax45mp_cache_line_size;
+	local_irq_save(flags);
+	start = start & (~(line_size - 1));
+	ax45mp_cpu_dcache_wb_range(start, end, line_size);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(ax45mp_cpu_dma_wb_range);
+
+static int ax45mp_configure_l2_cache(struct device_node *np)
+{
+	u8 ram_ctl[2];
+	u32 cache_ctl;
+	u32 prefetch;
+	int ret;
+
+	cache_ctl = ax45mp_cpu_l2c_ctl_status();
+
+	/* Instruction and data fetch prefetch depth */
+	ret = of_property_read_u32(np, "andestech,inst-prefetch", &prefetch);
+	if (!ret) {
+		cache_ctl &= ~AX45MP_L2C_IPREPETCH_MSK;
+		cache_ctl |= (prefetch << AX45MP_L2C_IPREPETCH_OFF);
+	}
+
+	ret = of_property_read_u32(np, "andestech,data-prefetch", &prefetch);
+	if (!ret) {
+		cache_ctl &= ~AX45MP_L2C_DPREPETCH_MSK;
+		cache_ctl |= (prefetch << AX45MP_L2C_DPREPETCH_OFF);
+	}
+
+	/* tag RAM and data RAM setup and output cycle */
+	ret = of_property_read_u8_array(np, "andestech,tag-ram-ctl", ram_ctl, 2);
+	if (!ret) {
+		cache_ctl &= ~(AX45MP_L2C_TRAMOCTL_MSK | AX45MP_L2C_TRAMICTL_MSK);
+		cache_ctl |= ram_ctl[0] << AX45MP_L2C_TRAMOCTL_OFF;
+		cache_ctl |= ram_ctl[1] << AX45MP_L2C_TRAMICTL_OFF;
+	}
+
+	ret = of_property_read_u8_array(np, "andestech,data-ram-ctl", ram_ctl, 2);
+	if (!ret) {
+		cache_ctl &= ~(AX45MP_L2C_DRAMOCTL_MSK | AX45MP_L2C_DRAMICTL_MSK);
+		cache_ctl |= ram_ctl[0] << AX45MP_L2C_DRAMOCTL_OFF;
+		cache_ctl |= ram_ctl[1] << AX45MP_L2C_DRAMICTL_OFF;
+	}
+
+	writel(cache_ctl, ax45mp_priv->l2c_base + AX45MP_L2C_REG_CTL_OFFSET);
+
+	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv->ax45mp_cache_line_size);
+	if (ret) {
+		pr_err("Failed to get cache-line-size defaulting to 64 bytes\n");
+		ax45mp_priv->ax45mp_cache_line_size = SZ_64;
+	}
+
+	ax45mp_priv->ucctl_ok = ax45mp_cpu_cache_controlable();
+	ax45mp_priv->l2cache_enabled = ax45mp_cpu_l2c_ctl_status() & AX45MP_L2_CACHE_CTL_CEN_MASK;
+
+	return 0;
+}
+
+static const struct of_device_id ax45mp_cache_ids[] = {
+	{ .compatible = "andestech,ax45mp-cache" },
+	{ /* sentinel */ }
+};
+
+static int ax45mp_l2c_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ax45mp_priv = devm_kzalloc(&pdev->dev, sizeof(*ax45mp_priv), GFP_KERNEL);
+	if (!ax45mp_priv)
+		return -ENOMEM;
+
+	ax45mp_priv->l2c_base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+	if (!ax45mp_priv->l2c_base) {
+		ret = -ENOMEM;
+		goto l2c_err;
+	}
+
+	ret = ax45mp_configure_l2_cache(np);
+	if (ret)
+		goto l2c_err;
+
+	ret = ax45mp_configure_pma_regions(np);
+	if (ret)
+		goto l2c_err;
+
+	static_branch_disable(&ax45mp_l2c_configured);
+
+	return 0;
+
+l2c_err:
+	devm_kfree(&pdev->dev, ax45mp_priv);
+	ax45mp_priv = NULL;
+	return ret;
+}
+
+static struct platform_driver ax45mp_l2c_driver = {
+	.driver = {
+		.name = "ax45mp-l2c",
+		.of_match_table = ax45mp_cache_ids,
+	},
+	.probe = ax45mp_l2c_probe,
+};
+
+static int __init ax45mp_cache_init(void)
+{
+	static_branch_enable(&ax45mp_l2c_configured);
+	return platform_driver_register(&ax45mp_l2c_driver);
+}
+arch_initcall(ax45mp_cache_init);
diff --git a/drivers/soc/renesas/rzf/ax45mp_sbi.h b/drivers/soc/renesas/rzf/ax45mp_sbi.h
new file mode 100644
index 000000000000..1604874954d0
--- /dev/null
+++ b/drivers/soc/renesas/rzf/ax45mp_sbi.h
@@ -0,0 +1,29 @@ 
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __AX45MP_SBI_H
+#define __AX45MP_SBI_H
+
+#define SBI_EXT_ANDES		0x0900031E
+
+enum ax45mp_sbi_ext_fid {
+	AX45MP_SBI_EXT_GET_MCACHE_CTL_STATUS = 0,
+	AX45MP_SBI_EXT_GET_MMISC_CTL_STATUS,
+	AX45MP_SBI_EXT_SET_MCACHE_CTL,
+	AX45MP_SBI_EXT_SET_MMISC_CTL,
+	AX45MP_SBI_EXT_ICACHE_OP,
+	AX45MP_SBI_EXT_DCACHE_OP,
+	AX45MP_SBI_EXT_L1CACHE_I_PREFETCH,
+	AX45MP_SBI_EXT_L1CACHE_D_PREFETCH,
+	AX45MP_SBI_EXT_NON_BLOCKING_LOAD_STORE,
+	AX45MP_SBI_EXT_WRITE_AROUND,
+	AX45MP_SBI_EXT_SET_PMA,
+	AX45MP_SBI_EXT_FREE_PMA,
+	AX45MP_SBI_EXT_PROBE_PMA,
+	AX45MP_SBI_EXT_DCACHE_WBINVAL_ALL,
+	AX45MP_SBI_EXT_GET_MICM_CTL_STATUS,
+	AX45MP_SBI_EXT_GET_MDCM_CTL_STATUS,
+	AX45MP_SBI_EXT_GET_MMSC_CTL_STATUS,
+	AX45MP_SBI_EXT_GET_MISA_CTL_STATUS,
+};
+
+#endif