
[07/15] riscv: implement remote sfence.i using IPIs

Message ID 20191017173743.5430-8-hch@lst.de (mailing list archive)
State New, archived
Series [01/15] riscv: cleanup <asm/bug.h>

Commit Message

Christoph Hellwig Oct. 17, 2019, 5:37 p.m. UTC
The RISC-V ISA only supports flushing the instruction cache for the
local CPU core.  Currently we always offload remote instruction cache
flushing to the SBI, which then issues an IPI under the hood.  But with
M-mode we do not have an SBI, so we have to do it ourselves.  If we
have native clint support and can thus IPI directly from the kernel,
send the IPIs to the other harts using the existing kernel helpers
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/riscv/include/asm/sbi.h |  3 +++
 arch/riscv/mm/cacheflush.c   | 24 ++++++++++++++++++------
 2 files changed, 21 insertions(+), 6 deletions(-)
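
For context, the "local CPU core" limitation comes from the fence.i
instruction, which only synchronizes the instruction stream of the hart
that executes it.  local_flush_icache_all() is a thin wrapper around
that instruction; a minimal sketch, consistent with the definition in
arch/riscv/include/asm/cacheflush.h:

	static inline void local_flush_icache_all(void)
	{
		/* order instruction fetches after earlier stores on this hart */
		asm volatile ("fence.i" : : : "memory");
	}

Because fence.i has no remote form, every other hart has to execute it
itself, either through the SBI firmware call or, as this patch adds,
through a kernel-issued IPI.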

Comments

Anup Patel Oct. 18, 2019, 2:55 a.m. UTC | #1
On Thu, Oct 17, 2019 at 11:08 PM Christoph Hellwig <hch@lst.de> wrote:
>
> The RISC-V ISA only supports flushing the instruction cache for the
> local CPU core.  Currently we always offload remote instruction cache
> flushing to the SBI, which then issues an IPI under the hood.  But with
> M-mode we do not have an SBI, so we have to do it ourselves.  If we
> have native clint support and can thus IPI directly from the kernel,
> send the IPIs to the other harts using the existing kernel helpers
> instead.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/riscv/include/asm/sbi.h |  3 +++
>  arch/riscv/mm/cacheflush.c   | 24 ++++++++++++++++++------
>  2 files changed, 21 insertions(+), 6 deletions(-)
>
> diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
> index b167af3e7470..0cb74eccc73f 100644
> --- a/arch/riscv/include/asm/sbi.h
> +++ b/arch/riscv/include/asm/sbi.h
> @@ -94,5 +94,8 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
>  {
>         SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
>  }
> +#else /* CONFIG_RISCV_SBI */
> +/* stub for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
> +void sbi_remote_fence_i(const unsigned long *hart_mask);
>  #endif /* CONFIG_RISCV_SBI */
>  #endif /* _ASM_RISCV_SBI_H */
> diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
> index 3f15938dec89..794c9ab256eb 100644
> --- a/arch/riscv/mm/cacheflush.c
> +++ b/arch/riscv/mm/cacheflush.c
> @@ -10,9 +10,17 @@
>
>  #include <asm/sbi.h>
>
> +static void ipi_remote_fence_i(void *info)
> +{
> +       local_flush_icache_all();
> +}
> +
>  void flush_icache_all(void)
>  {
> -       sbi_remote_fence_i(NULL);
> +       if (IS_ENABLED(CONFIG_RISCV_SBI))
> +               sbi_remote_fence_i(NULL);
> +       else
> +               on_each_cpu(ipi_remote_fence_i, NULL, 1);
>  }
>
>  /*
> @@ -28,7 +36,7 @@ void flush_icache_all(void)
>  void flush_icache_mm(struct mm_struct *mm, bool local)
>  {
>         unsigned int cpu;
> -       cpumask_t others, hmask, *mask;
> +       cpumask_t others, *mask;
>
>         preempt_disable();
>
> @@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
>          */
>         cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
>         local |= cpumask_empty(&others);
> -       if (mm != current->active_mm || !local) {
> -               riscv_cpuid_to_hartid_mask(&others, &hmask);
> -               sbi_remote_fence_i(hmask.bits);
> -       } else {
> +       if (mm == current->active_mm && local) {
>                 /*
>                  * It's assumed that at least one strongly ordered operation is
>                  * performed on this hart between setting a hart's cpumask bit
> @@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
>                  * with flush_icache_deferred().
>                  */
>                 smp_mb();
> +       } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
> +               cpumask_t hartid_mask;
> +
> +               riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
> +               sbi_remote_fence_i(cpumask_bits(&hartid_mask));
> +       } else {
> +               on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
>         }
>
>         preempt_enable();
> --
> 2.20.1

LGTM.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup
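
The stub declaration added to <asm/sbi.h> relies on the standard
IS_ENABLED() idiom: with CONFIG_RISCV_SBI=n the compiler constant-folds
the branch away, so the call to the undefined sbi_remote_fence_i() is
eliminated before the linker ever sees it, while the declaration keeps
the call site type-checked.  A self-contained sketch of the idiom (the
names here are illustrative, not taken from the patch):

	#include <linux/kconfig.h>

	/* defined only when CONFIG_FOO=y; otherwise just a declaration */
	void foo_remote_op(void);

	static void foo_local_op(void)
	{
		/* local fallback */
	}

	void foo_op(void)
	{
		if (IS_ENABLED(CONFIG_FOO))
			foo_remote_op();	/* folded away when CONFIG_FOO=n */
		else
			foo_local_op();
	}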

Patch

diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b167af3e7470..0cb74eccc73f 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -94,5 +94,8 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
 {
 	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
+#else /* CONFIG_RISCV_SBI */
+/* stub for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_remote_fence_i(const unsigned long *hart_mask);
 #endif /* CONFIG_RISCV_SBI */
 #endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3f15938dec89..794c9ab256eb 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,9 +10,17 @@
 
 #include <asm/sbi.h>
 
+static void ipi_remote_fence_i(void *info)
+{
+	local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-	sbi_remote_fence_i(NULL);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;
 
 	preempt_disable();
 
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
+	if (mm == current->active_mm && local) {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
 	}
 
 	preempt_enable();
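
Note the asymmetry between the two remote paths in the final hunk: the
SBI interface expects a mask indexed by hartid, while on_each_cpu_mask()
takes the kernel's logical CPU mask directly, so the cpuid-to-hartid
conversion is only needed on the SBI path.  A sketch of that helper,
consistent with arch/riscv/kernel/smp.c at the time:

	void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
					struct cpumask *out)
	{
		int cpu;

		cpumask_clear(out);
		for_each_cpu(cpu, in)
			cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
	}

Passing 1 as the last argument to on_each_cpu()/on_each_cpu_mask() makes
the caller wait until ipi_remote_fence_i() has finished on all targeted
CPUs, preserving the synchronous behaviour of the SBI-only code it
replaces.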