
[6/6] riscv: move the TLB flush logic out of line

Message ID 20190821145837.3686-7-hch@lst.de (mailing list archive)
State New, archived
Series [1/6] riscv: refactor the IPI code

Commit Message

Christoph Hellwig Aug. 21, 2019, 2:58 p.m. UTC
The TLB flush logic is going to become more complex.  Start moving
it out of line.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
 arch/riscv/mm/Makefile            |  3 +++
 arch/riscv/mm/tlbflush.c          | 35 +++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 30 deletions(-)
 create mode 100644 arch/riscv/mm/tlbflush.c
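
For readers less familiar with the idiom, "out of line" here means the flush
helpers stop being macros and header-defined inline functions and become
ordinary functions declared in the header, with a single definition in a .c
file.  A minimal, generic C sketch of the two styles (illustrative only; the
flush_widget identifiers are made up and are not part of the kernel):

	/* inline style: the body lives in the header, so every translation
	 * unit that includes it carries its own copy */
	static inline void flush_widget_inline(unsigned long addr)
	{
		(void)addr;		/* placeholder body */
	}

	/* out-of-line style: the header only declares the function... */
	void flush_widget(unsigned long addr);

	/* ...and the single definition lives in one .c file, where it can
	 * grow more complex without touching every caller */
	void flush_widget(unsigned long addr)
	{
		(void)addr;		/* placeholder body */
	}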

Comments

Atish Patra Aug. 24, 2019, 12:03 a.m. UTC | #1
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> The TLB flush logic is going to become more complex.  Start moving
> it out of line.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
>  arch/riscv/mm/Makefile            |  3 +++
>  arch/riscv/mm/tlbflush.c          | 35 +++++++++++++++++++++++++++++
>  3 files changed, 45 insertions(+), 30 deletions(-)
>  create mode 100644 arch/riscv/mm/tlbflush.c
> 
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index df31fe2ed09c..075a784c66c5 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
>  }
>  
> -#ifndef CONFIG_SMP
> -
> +#ifdef CONFIG_SMP
> +void flush_tlb_all(void);
> +void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
> +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> +		unsigned long end);
> +#else /* CONFIG_SMP */
>  #define flush_tlb_all() local_flush_tlb_all()
>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
>  
> @@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
>  }
>  
>  #define flush_tlb_mm(mm) flush_tlb_all()
> -
> -#else /* CONFIG_SMP */
> -
> -#include <asm/sbi.h>
> -
> -static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
> -				     unsigned long size)
> -{
> -	struct cpumask hmask;
> -
> -	riscv_cpuid_to_hartid_mask(cmask, &hmask);
> -	sbi_remote_sfence_vma(hmask.bits, start, size);
> -}
> -
> -#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
> -
> -#define flush_tlb_range(vma, start, end) \
> -	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
> -
> -static inline void flush_tlb_page(struct vm_area_struct *vma,
> -				  unsigned long addr)
> -{
> -	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
> -}
> -
> -#define flush_tlb_mm(mm)				\
> -	remote_sfence_vma(mm_cpumask(mm), 0, -1)
> -
>  #endif /* CONFIG_SMP */
>  
>  /* Flush a range of kernel pages */
> diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> index 74055e1d6f21..9d9a17335686 100644
> --- a/arch/riscv/mm/Makefile
> +++ b/arch/riscv/mm/Makefile
> @@ -13,4 +13,7 @@ obj-y += cacheflush.o
>  obj-y += context.o
>  obj-y += sifive_l2_cache.o
>  
> +ifeq ($(CONFIG_MMU),y)
> +obj-$(CONFIG_SMP) += tlbflush.o
> +endif
>  obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> new file mode 100644
> index 000000000000..df93b26f1b9d
> --- /dev/null
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/mm.h>
> +#include <linux/smp.h>
> +#include <asm/sbi.h>
> +
> +void flush_tlb_all(void)
> +{
> +	sbi_remote_sfence_vma(NULL, 0, -1);
> +}
> +
> +static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
> +		unsigned long size)
> +{
> +	struct cpumask hmask;
> +
> +	riscv_cpuid_to_hartid_mask(cmask, &hmask);
> +	sbi_remote_sfence_vma(hmask.bits, start, size);
> +}
> +
> +void flush_tlb_mm(struct mm_struct *mm)
> +{
> +	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
> +}
> +
> +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> +	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
> +}
> +
> +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> +		unsigned long end)
> +{
> +	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
> +}

Reviewed-by: Atish Patra <atish.patra@wdc.com>

Paul Walmsley Sept. 5, 2019, 8:58 a.m. UTC | #2
On Wed, 21 Aug 2019, Christoph Hellwig wrote:

> The TLB flush logic is going to become more complex.  Start moving
> it out of line.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Thanks.  "checkpatch.pl --strict" reported several whitespace issues with 
this patch.  I fixed those up here and queued the following for v5.4-rc1.


- Paul

From: Christoph Hellwig <hch@lst.de>
Date: Wed, 21 Aug 2019 23:58:37 +0900
Subject: [PATCH] riscv: move the TLB flush logic out of line

The TLB flush logic is going to become more complex.  Start moving
it out of line.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
[paul.walmsley@sifive.com: fixed checkpatch whitespace warnings]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
---
 arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
 arch/riscv/mm/Makefile            |  3 +++
 arch/riscv/mm/tlbflush.c          | 35 +++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 30 deletions(-)
 create mode 100644 arch/riscv/mm/tlbflush.c

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index df31fe2ed09c..37ae4e367ad2 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
 	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
 
-#ifndef CONFIG_SMP
-
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end);
+#else /* CONFIG_SMP */
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
 
@@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-				     unsigned long size)
-{
-	struct cpumask hmask;
-
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-
-#define flush_tlb_range(vma, start, end) \
-	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm)				\
-	remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
 #endif /* CONFIG_SMP */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..9d9a17335686 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,4 +13,7 @@ obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
 
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..24cd33d2c48f
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+	sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+				  unsigned long size)
+{
+	struct cpumask hmask;
+
+	riscv_cpuid_to_hartid_mask(cmask, &hmask);
+	sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}

Patch

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index df31fe2ed09c..075a784c66c5 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -25,8 +25,13 @@  static inline void local_flush_tlb_page(unsigned long addr)
 	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
 
-#ifndef CONFIG_SMP
-
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end);
+#else /* CONFIG_SMP */
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
 
@@ -37,34 +42,6 @@  static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-				     unsigned long size)
-{
-	struct cpumask hmask;
-
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-
-#define flush_tlb_range(vma, start, end) \
-	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm)				\
-	remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
 #endif /* CONFIG_SMP */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..9d9a17335686 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,4 +13,7 @@  obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
 
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..df93b26f1b9d
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,35 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+	sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+		unsigned long size)
+{
+	struct cpumask hmask;
+
+	riscv_cpuid_to_hartid_mask(cmask, &hmask);
+	sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
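
Taken together, the three SMP entry points all funnel into the same SBI
remote fence: flush_tlb_page() covers a single PAGE_SIZE window,
flush_tlb_range() passes end - start as the size, and flush_tlb_mm() uses
start 0 with size -1 as a "flush everything for this mm" request.  A short
usage sketch (illustrative only; assumes a valid vma in kernel context):

	#include <linux/mm.h>
	#include <asm/tlbflush.h>

	static void example_flushes(struct vm_area_struct *vma, unsigned long addr)
	{
		/* one page: fence over [addr, addr + PAGE_SIZE) on the mm's harts */
		flush_tlb_page(vma, addr);

		/* an arbitrary range: the size passed down is end - start */
		flush_tlb_range(vma, addr, addr + 4 * PAGE_SIZE);

		/* the whole address space: start 0, size -1 */
		flush_tlb_mm(vma->vm_mm);
	}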