
[1/3] riscv: Fixup _PAGE_GLOBAL in _PAGE_KERNEL

Message ID 1621839068-31738-1-git-send-email-guoren@kernel.org (mailing list archive)
State: New, archived
Series: [1/3] riscv: Fixup _PAGE_GLOBAL in _PAGE_KERNEL

Commit Message

Guo Ren May 24, 2021, 6:51 a.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

Kernel virtual address translation should not depend on the ASID, or it
will cause more TLB misses and TLB refills: the current ASID in satp
belongs to the current process, while the target kernel VA's TLB entry
may still be tagged with a previous process's ASID.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Anup Patel <anup.patel@wdc.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
---
 arch/riscv/include/asm/pgtable.h | 1 +
 1 file changed, 1 insertion(+)
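
For background, a minimal sketch of the miss scenario the commit message
describes (an illustrative pseudo-model only, not kernel code): a kernel-VA
TLB entry tagged with a previous process's ASID can no longer hit once
satp.ASID has changed, unless the entry is marked global.

  #include <stdbool.h>

  /* Hypothetical model of a TLB entry; field names are illustrative. */
  struct tlb_entry {
          unsigned long vpn;      /* virtual page number */
          unsigned long asid;     /* ASID the entry was filled under */
          bool global;            /* G bit: matches regardless of ASID */
  };

  /* A lookup hits only if the entry is global or the ASID matches. */
  static bool tlb_hit(const struct tlb_entry *e, unsigned long vpn,
                      unsigned long cur_asid)
  {
          return e->vpn == vpn && (e->global || e->asid == cur_asid);
  }

Without _PAGE_GLOBAL, a kernel mapping filled while process A was running
misses after switching to process B, forcing a needless refill of the same
translation.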

Comments

Anup Patel May 24, 2021, 8:13 a.m. UTC | #1
On Mon, May 24, 2021 at 12:22 PM <guoren@kernel.org> wrote:
>
> From: Guo Ren <guoren@linux.alibaba.com>
>
> Use static_branch_unlikely(&use_asid_allocator) to keep the original
> TLB flush behaviour, so there is no effect on existing machines. Here
> are the optimized functions:
>  - flush_tlb_mm
>  - flush_tlb_page
>  - flush_tlb_range
>
> All of the above are based on the new implementation functions below:
>  - __sbi_tlb_flush_range_asid
>  - local_flush_tlb_range_asid
>
> Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> Cc: Anup Patel <anup.patel@wdc.com>
> Cc: Palmer Dabbelt <palmerdabbelt@google.com>
> ---
>  arch/riscv/include/asm/mmu_context.h |  2 ++
>  arch/riscv/include/asm/tlbflush.h    | 22 ++++++++++++++++++++
>  arch/riscv/mm/context.c              |  2 +-
>  arch/riscv/mm/tlbflush.c             | 40 +++++++++++++++++++++++++++++++++---
>  4 files changed, 62 insertions(+), 4 deletions(-)
>
> diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
> index b065941..7030837 100644
> --- a/arch/riscv/include/asm/mmu_context.h
> +++ b/arch/riscv/include/asm/mmu_context.h
> @@ -33,6 +33,8 @@ static inline int init_new_context(struct task_struct *tsk,
>         return 0;
>  }
>
> +DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
> +
>  #include <asm-generic/mmu_context.h>
>
>  #endif /* _ASM_RISCV_MMU_CONTEXT_H */
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index c84218a..9390319 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -8,6 +8,7 @@
>  #define _ASM_RISCV_TLBFLUSH_H
>
>  #include <linux/mm_types.h>
> +#include <asm/page.h>
>  #include <asm/smp.h>
>  #include <asm/errata_list.h>
>
> @@ -22,9 +23,30 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  {
>         ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
>  }
> +
> +static inline void local_flush_tlb_range_asid(unsigned long start, unsigned long size,
> +                                             unsigned long asid)
> +{
> +       unsigned long page_add = PAGE_DOWN(start);
> +       unsigned long page_end = PAGE_UP(start + size);

Your PATCH2 is not correct, because PAGE_UP(x) should in fact
return 0 when x == 0.

In fact, if both "start" and "size" are zero then both page_add and
page_end should be zero so that no "sfence.vma" is executed.

If you want at least one TLB entry to be invalidated when size == 0,
you can simply set "size = 1", which will force one TLB invalidation.
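
For illustration, a rough sketch of round-down/round-up helpers of this
shape (assuming 4 KiB pages; these definitions are illustrative, not
necessarily the exact kernel macros):

  #define MY_PAGE_SIZE    4096UL
  #define MY_PAGE_DOWN(x) ((x) & ~(MY_PAGE_SIZE - 1))                      /* round down */
  #define MY_PAGE_UP(x)   (((x) + MY_PAGE_SIZE - 1) & ~(MY_PAGE_SIZE - 1)) /* round up */

  /*
   * With start == 0 and size == 0 in the patch above:
   *   page_add = MY_PAGE_DOWN(0) = 0;
   *   page_end = MY_PAGE_UP(0)   = 0;
   * so the while loop never runs and no sfence.vma is executed.
   * Forcing at least one invalidation would then be:
   */
  static inline unsigned long tlb_flush_size(unsigned long size)
  {
          return size ? size : 1; /* size = 1 forces one TLB invalidation */
  }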

Please drop your PATCH2 and the rest of the things look good to me.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup

> +
> +       if (size == -1) {
> +               __asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
> +               return;
> +       }
> +
> +       while(page_add < page_end) {
> +               __asm__ __volatile__ ("sfence.vma %0, %1"
> +                               :
> +                               : "r" (page_add), "r" (asid)
> +                               : "memory");
> +               page_add += PAGE_SIZE;
> +       }
> +}
>  #else /* CONFIG_MMU */
>  #define local_flush_tlb_all()                  do { } while (0)
>  #define local_flush_tlb_page(addr)             do { } while (0)
> +#define local_flush_tlb_range_asid(addr)       do { } while (0)
>  #endif /* CONFIG_MMU */
>
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 68aa312..45c1b04 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -18,7 +18,7 @@
>
>  #ifdef CONFIG_MMU
>
> -static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
> +DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>
>  static unsigned long asid_bits;
>  static unsigned long num_asids;
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 720b443..69588dc 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -4,6 +4,7 @@
>  #include <linux/smp.h>
>  #include <linux/sched.h>
>  #include <asm/sbi.h>
> +#include <asm/mmu_context.h>
>
>  void flush_tlb_all(void)
>  {
> @@ -39,18 +40,51 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
>         put_cpu();
>  }
>
> +static void __sbi_tlb_flush_range_asid(struct cpumask *cmask, unsigned long start,
> +                                      unsigned long size, unsigned long asid)
> +{
> +       struct cpumask hmask;
> +       unsigned int cpuid;
> +
> +       if (cpumask_empty(cmask))
> +               return;
> +
> +       cpuid = get_cpu();
> +
> +       if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
> +               local_flush_tlb_range_asid(start, size, asid);
> +       } else {
> +               riscv_cpuid_to_hartid_mask(cmask, &hmask);
> +               sbi_remote_sfence_vma_asid(cpumask_bits(&hmask), start, size, asid);
> +       }
> +
> +       put_cpu();
> +}
> +
>  void flush_tlb_mm(struct mm_struct *mm)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
> +       if (static_branch_unlikely(&use_asid_allocator))
> +               __sbi_tlb_flush_range_asid(mm_cpumask(mm), 0, -1,
> +                                          atomic_long_read(&mm->context.id));
> +       else
> +               __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
>  }
>
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
> +       if (static_branch_unlikely(&use_asid_allocator))
> +               __sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE,
> +                                          atomic_long_read(&vma->vm_mm->context.id));
> +       else
> +               __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
>  }
>
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                      unsigned long end)
>  {
> -       __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
> +       if (static_branch_unlikely(&use_asid_allocator))
> +               __sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), start, end - start,
> +                                          atomic_long_read(&vma->vm_mm->context.id));
> +       else
> +               __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
>  }
> --
> 2.7.4
>
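
As a side note on the inline asm in local_flush_tlb_range_asid() above: it
relies on the two-operand form of sfence.vma. A sketch of the four forms
defined by the RISC-V privileged spec (the helper names here are
hypothetical, not kernel API):

  static inline void sfence_vma_all(void)
  {
          /* rs1 = x0, rs2 = x0: flush all translations */
          __asm__ __volatile__ ("sfence.vma" : : : "memory");
  }

  static inline void sfence_vma_addr(unsigned long addr)
  {
          /* rs1 = addr, rs2 = x0: entries mapping addr, for any ASID */
          __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
  }

  static inline void sfence_vma_asid(unsigned long asid)
  {
          /* rs1 = x0, rs2 = asid: all entries for asid, except global ones */
          __asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
  }

  static inline void sfence_vma_addr_asid(unsigned long addr, unsigned long asid)
  {
          /* rs1 = addr, rs2 = asid: entries mapping addr for asid, except global */
          __asm__ __volatile__ ("sfence.vma %0, %1"
                                : : "r" (addr), "r" (asid) : "memory");
  }

This is why the size == -1 path can flush a whole address space with one
instruction, while the general path loops page by page.
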
Guo Ren May 24, 2021, 8:22 a.m. UTC | #2
Hi Anup,

On Mon, May 24, 2021 at 4:03 PM Anup Patel <anup@brainfault.org> wrote:
>
> On Mon, May 24, 2021 at 12:22 PM <guoren@kernel.org> wrote:
> >
> > From: Guo Ren <guoren@linux.alibaba.com>
> >
> > Kernel virtual address translation should not depend on the ASID, or
> > it will cause more TLB misses and TLB refills: the current ASID in
> > satp belongs to the current process, while the target kernel VA's TLB
> > entry may still be tagged with a previous process's ASID.
> >
> > Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> > Cc: Anup Patel <anup.patel@wdc.com>
> > Cc: Palmer Dabbelt <palmerdabbelt@google.com>
>
> First of all, thanks for doing this series; I had similar changes in
> mind as a follow-up to the ASID allocator.
>
> I went through all three patches and, at least, I don't see any obvious
> issue, but I think we should try testing it more on a few existing
> platforms.
We've tested it on the Allwinner D1 (C906) and a quad-core C910 SMP
system for a long time; we just hope it won't affect the u540.

(In fact, the C910 has used the ASID allocator for more than two years
in our own kernel tree. I remember we talked about it at Plumbers 2019.)

>
> Reviewed-by: Anup Patel <anup@brainfault.org>
>
> Regards,
> Anup
>
> > ---
> >  arch/riscv/include/asm/pgtable.h | 1 +
> >  1 file changed, 1 insertion(+)
> >
> > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > index 78f2323..017da15 100644
> > --- a/arch/riscv/include/asm/pgtable.h
> > +++ b/arch/riscv/include/asm/pgtable.h
> > @@ -135,6 +135,7 @@
> >                                 | _PAGE_PRESENT \
> >                                 | _PAGE_ACCESSED \
> >                                 | _PAGE_DIRTY \
> > +                               | _PAGE_GLOBAL \
> >                                 | _PAGE_CACHE)
> >
> >  #define PAGE_KERNEL            __pgprot(_PAGE_KERNEL)
> > --
> > 2.7.4
> >
Guo Ren May 24, 2021, 12:01 p.m. UTC | #3
On Mon, May 24, 2021 at 6:42 PM Anup Patel <anup@brainfault.org> wrote:
>
> On Mon, May 24, 2021 at 12:22 PM <guoren@kernel.org> wrote:
> >
> > From: Guo Ren <guoren@linux.alibaba.com>
> >
> > Kernel virtual address translation should not depend on the ASID, or
> > it will cause more TLB misses and TLB refills: the current ASID in
> > satp belongs to the current process, while the target kernel VA's TLB
> > entry may still be tagged with a previous process's ASID.
> >
> > Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> > Cc: Anup Patel <anup.patel@wdc.com>
> > Cc: Palmer Dabbelt <palmerdabbelt@google.com>
> > ---
> >  arch/riscv/include/asm/pgtable.h | 1 +
> >  1 file changed, 1 insertion(+)
> >
> > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > index 78f2323..017da15 100644
> > --- a/arch/riscv/include/asm/pgtable.h
> > +++ b/arch/riscv/include/asm/pgtable.h
> > @@ -135,6 +135,7 @@
> >                                 | _PAGE_PRESENT \
> >                                 | _PAGE_ACCESSED \
> >                                 | _PAGE_DIRTY \
> > +                               | _PAGE_GLOBAL \
> >                                 | _PAGE_CACHE)
>
> It seems this patch is not based on the upstream kernel. The
> _PAGE_CACHE seems to be from your other patch series.
>
> Please rebase these patches on the latest upstream kernel without
> dependency on any other patch series.
Yes, it is based on the DMA_COHERENT series. I'll rebase it in PATCH V2,
thanks.

>
> Regards,
> Anup
>
> >
> >  #define PAGE_KERNEL            __pgprot(_PAGE_KERNEL)
> > --
> > 2.7.4
> >

Patch

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 78f2323..017da15 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -135,6 +135,7 @@ 
 				| _PAGE_PRESENT \
 				| _PAGE_ACCESSED \
 				| _PAGE_DIRTY \
+				| _PAGE_GLOBAL \
 				| _PAGE_CACHE)
 
 #define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
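
For reference, the PTE flag bits involved follow the low bits of the
RISC-V page-table-entry layout from the privileged spec; a sketch of the
values (_PAGE_CACHE is from the separate DMA_COHERENT series and is not
an upstream flag):

  #define _PAGE_PRESENT   (1 << 0)        /* V: entry is valid */
  #define _PAGE_READ      (1 << 1)        /* R */
  #define _PAGE_WRITE     (1 << 2)        /* W */
  #define _PAGE_EXEC      (1 << 3)        /* X */
  #define _PAGE_USER      (1 << 4)        /* U: user-mode accessible */
  #define _PAGE_GLOBAL    (1 << 5)        /* G: valid for all ASIDs */
  #define _PAGE_ACCESSED  (1 << 6)        /* A */
  #define _PAGE_DIRTY     (1 << 7)        /* D */

Setting G on kernel mappings tells the hardware the translation does not
depend on the current satp.ASID, so a kernel-VA TLB entry filled under
one process remains usable after a context switch.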