
[1/2] riscv: pass the mm_struct to __sbi_tlb_flush_range

Message ID: 20210606152050.636038-2-hch@lst.de (mailing list archive)
State: New
Series: [1/2] riscv: pass the mm_struct to __sbi_tlb_flush_range

Commit Message

Christoph Hellwig June 6, 2021, 3:20 p.m. UTC
Move the mm_cpumask() call from the callers into __sbi_tlb_flush_range() to
reduce a bit of duplicate code and prepare for future changes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/riscv/mm/tlbflush.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
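
For readers skimming the archive, a condensed before/after sketch of the caller-side change (editorial illustration only, abbreviated from the diff below; the hart-mask conversion inside the helper is unchanged and therefore not shown):

/* Before: each caller looked up the cpumask itself. */
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE);
}

/* After: the helper takes the mm_struct and derives the cpumask once. */
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
{
	struct cpumask *cmask = mm_cpumask(mm);
	/* ... existing hart-mask conversion and SBI remote fence calls ... */
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}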

Comments

Guo Ren June 6, 2021, 4:43 p.m. UTC | #1
Rebased with "THP support for RISC-V" & "Add DMA_COHERENT v2" on linux-5.13-rc4.

Tested-by: Guo Ren <guoren@kernel.org>

On Sun, Jun 6, 2021 at 11:21 PM Christoph Hellwig <hch@lst.de> wrote:
>
> Move the mm_cpumask() call from the callers into __sbi_tlb_flush_range() to
> reduce a bit of duplicate code and prepare for future changes.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/riscv/mm/tlbflush.c | 15 ++++++---------
>  1 file changed, 6 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index fea45af91f53..b458949fa8df 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -10,13 +10,10 @@ void flush_tlb_all(void)
>         sbi_remote_sfence_vma(NULL, 0, -1);
>  }
>
> -/*
> - * This function must not be called with cmask being null.
> - * Kernel may panic if cmask is NULL.
> - */
> -static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
> +static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>                                   unsigned long size, unsigned long stride)
>  {
> +       struct cpumask *cmask = mm_cpumask(mm);
>         struct cpumask hmask;
>         unsigned int cpuid;
>
> @@ -41,23 +38,23 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
>
>  void flush_tlb_mm(struct mm_struct *mm)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE);
> +       __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
>  }
>
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE);
> +       __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
>  }
>
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                      unsigned long end)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE);
> +       __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
>  }
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                         unsigned long end)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE);
> +       __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
>  }
>  #endif
> --
> 2.30.2
>


--
Best Regards
 Guo Ren

ML: https://lore.kernel.org/linux-csky/

Patch

diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index fea45af91f53..b458949fa8df 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -10,13 +10,10 @@  void flush_tlb_all(void)
 	sbi_remote_sfence_vma(NULL, 0, -1);
 }
 
-/*
- * This function must not be called with cmask being null.
- * Kernel may panic if cmask is NULL.
- */
-static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
+	struct cpumask *cmask = mm_cpumask(mm);
 	struct cpumask hmask;
 	unsigned int cpuid;
 
@@ -41,23 +38,23 @@  static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE);
+	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE);
+	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE);
+	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			unsigned long end)
 {
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE);
+	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
 }
 #endif