
riscv: mm: Fix incorrect ASID argument when flushing TLB

Message ID 20230310103144.396214-1-dylan@andestech.com (mailing list archive)
State Superseded
Headers show
Series riscv: mm: Fix incorrect ASID argument when flushing TLB

Checks

Context Check Description
conchuod/cover_letter success Single patches do not need cover letters
conchuod/tree_selection success Guessed tree name to be for-next
conchuod/fixes_present success Fixes tag not required for -next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 1 and now 1
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc success Errors and warnings before: 0 this patch: 0
conchuod/build_rv64_clang_allmodconfig success Errors and warnings before: 2275 this patch: 2275
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig success Errors and warnings before: 17609 this patch: 17609
conchuod/alphanumeric_selects success Out of order selects before the patch: 728 and now 728
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 3 this patch: 3
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch success total: 0 errors, 0 warnings, 0 checks, 25 lines checked
conchuod/source_inline success Was 0 now: 0
conchuod/build_rv64_nommu_k210_defconfig success Build OK
conchuod/verify_fixes success Fixes tag looks correct
conchuod/build_rv64_nommu_virt_defconfig success Build OK

Commit Message

Dylan Jhong March 10, 2023, 10:31 a.m. UTC
Currently, we pass the CONTEXTID instead of the ASID to the TLB flush
function. We should pass only the ASID field to avoid touching the
reserved bit field.

Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
Signed-off-by: Dylan Jhong <dylan@andestech.com>
---
 arch/riscv/include/asm/tlbflush.h | 2 ++
 arch/riscv/mm/context.c           | 3 ++-
 arch/riscv/mm/tlbflush.c          | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
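
For reference, the value stored in mm->context.id packs the allocator's
generation/version in the upper bits and the hardware ASID in the low
asid_bits bits, so only the low bits are meaningful to the TLB flush.
Below is a minimal stand-alone sketch of the masking this patch
introduces; the ASID width and the values are made up for illustration
and are not read from hardware:

#include <stdio.h>

/* Illustrative width only; the kernel probes the real width from satp. */
#define EXAMPLE_ASID_BITS 9UL

static const unsigned long asid_mask = (1UL << EXAMPLE_ASID_BITS) - 1;

int main(void)
{
	/* A context id as the allocator might store it: version in the
	 * high bits, ASID in the low bits.
	 */
	unsigned long version = 3UL << EXAMPLE_ASID_BITS;
	unsigned long context_id = version | 0x42;

	/* Before the patch, the whole context id reached the flush path. */
	printf("context.id        = 0x%lx\n", context_id);

	/* After the patch, only the ASID field is passed on. */
	printf("context.id & mask = 0x%lx\n", context_id & asid_mask);

	return 0;
}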

Comments

Guo Ren March 12, 2023, 12:40 p.m. UTC | #1
On Fri, Mar 10, 2023 at 6:32 PM Dylan Jhong <dylan@andestech.com> wrote:
>
> Currently, we pass the CONTEXTID instead of the ASID to the TLB flush
> function. We should pass only the ASID field to avoid touching the
> reserved bit field.
>
> Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
> Signed-off-by: Dylan Jhong <dylan@andestech.com>
> ---
>  arch/riscv/include/asm/tlbflush.h | 2 ++
>  arch/riscv/mm/context.c           | 3 ++-
>  arch/riscv/mm/tlbflush.c          | 2 +-
>  3 files changed, 5 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 907b9efd39a8..597d6d8aec28 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -12,6 +12,8 @@
>  #include <asm/errata_list.h>
>
>  #ifdef CONFIG_MMU
> +extern unsigned long asid_mask;
> +
>  static inline void local_flush_tlb_all(void)
>  {
>         __asm__ __volatile__ ("sfence.vma" : : : "memory");
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 80ce9caba8d2..a6b76b33e377 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -22,7 +22,8 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>
>  static unsigned long asid_bits;
>  static unsigned long num_asids;
> -static unsigned long asid_mask;
> +unsigned long asid_mask;
> +EXPORT_SYMBOL(asid_mask);
Why EXPORT_SYMBOL? (No module uses it in your patch.)

>
>  static atomic_long_t current_version;
>
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index ce7dfc81bb3f..ba4c27187c95 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -27,7 +27,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>         /* check if the tlbflush needs to be sent to other CPUs */
>         broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
>         if (static_branch_unlikely(&use_asid_allocator)) {
> -               unsigned long asid = atomic_long_read(&mm->context.id);
> +               unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
>
>                 /*
>                  * TLB will be immediately flushed on harts concurrently
> --
> 2.34.1
>
Dylan Jhong March 13, 2023, 3:32 a.m. UTC | #2
On Sun, Mar 12, 2023 at 08:40:59PM +0800, Guo Ren wrote:
> On Fri, Mar 10, 2023 at 6:32 PM Dylan Jhong <dylan@andestech.com> wrote:
> >
> > Currently, we pass the CONTEXTID instead of the ASID to the TLB flush
> > function. We should pass only the ASID field to avoid touching the
> > reserved bit field.
> >
> > Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
> > Signed-off-by: Dylan Jhong <dylan@andestech.com>
> > ---
> >  arch/riscv/include/asm/tlbflush.h | 2 ++
> >  arch/riscv/mm/context.c           | 3 ++-
> >  arch/riscv/mm/tlbflush.c          | 2 +-
> >  3 files changed, 5 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> > index 907b9efd39a8..597d6d8aec28 100644
> > --- a/arch/riscv/include/asm/tlbflush.h
> > +++ b/arch/riscv/include/asm/tlbflush.h
> > @@ -12,6 +12,8 @@
> >  #include <asm/errata_list.h>
> >
> >  #ifdef CONFIG_MMU
> > +extern unsigned long asid_mask;
> > +
> >  static inline void local_flush_tlb_all(void)
> >  {
> >         __asm__ __volatile__ ("sfence.vma" : : : "memory");
> > diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> > index 80ce9caba8d2..a6b76b33e377 100644
> > --- a/arch/riscv/mm/context.c
> > +++ b/arch/riscv/mm/context.c
> > @@ -22,7 +22,8 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
> >
> >  static unsigned long asid_bits;
> >  static unsigned long num_asids;
> > -static unsigned long asid_mask;
> > +unsigned long asid_mask;
> > +EXPORT_SYMBOL(asid_mask);
> Why EXPORT_SYMBOL? (No module uses it in your patch.)
>
OK. I'll remove EXPORT_SYMBOL in v2.
Thanks.

> >
> >  static atomic_long_t current_version;
> >
> > diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> > index ce7dfc81bb3f..ba4c27187c95 100644
> > --- a/arch/riscv/mm/tlbflush.c
> > +++ b/arch/riscv/mm/tlbflush.c
> > @@ -27,7 +27,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
> >         /* check if the tlbflush needs to be sent to other CPUs */
> >         broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
> >         if (static_branch_unlikely(&use_asid_allocator)) {
> > -               unsigned long asid = atomic_long_read(&mm->context.id);
> > +               unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
> >
> >                 /*
> >                  * TLB will be immediately flushed on harts concurrently
> > --
> > 2.34.1
> >
> 
> 
> -- 
> Best Regards
>  Guo Ren
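
Since the export is being dropped for v2, the context.c change would
presumably reduce to making the variable non-static. This is only a
guess at the follow-up based on the exchange above, not the posted v2:

/* arch/riscv/mm/context.c: non-static so tlbflush.c can apply the
 * mask, but with no EXPORT_SYMBOL() since no loadable module uses it.
 */
unsigned long asid_mask;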

Patch

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 907b9efd39a8..597d6d8aec28 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -12,6 +12,8 @@ 
 #include <asm/errata_list.h>
 
 #ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d2..a6b76b33e377 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,8 @@  DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
+EXPORT_SYMBOL(asid_mask);
 
 static atomic_long_t current_version;
 
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ce7dfc81bb3f..ba4c27187c95 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -27,7 +27,7 @@  static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	/* check if the tlbflush needs to be sent to other CPUs */
 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = atomic_long_read(&mm->context.id);
+		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		/*
 		 * TLB will be immediately flushed on harts concurrently