Message ID | 20230226150137.1919750-2-geomatsi@gmail.com (mailing list archive)
---|---
State | Accepted
Commit | e921050022f1f12d5029d1487a7dfc46cde15523
Series | riscv: asid: switch to alternative way to fix stale TLB entries
Context | Check | Description |
---|---|---|
conchuod/cover_letter | success | Series has a cover letter |
conchuod/tree_selection | success | Guessed tree name to be fixes |
conchuod/fixes_present | success | Fixes tag present in non-next series |
conchuod/maintainers_pattern | success | MAINTAINERS pattern errors before the patch: 13 and now 13 |
conchuod/verify_signedoff | success | Signed-off-by tag matches author and committer |
conchuod/kdoc | success | Errors and warnings before: 0 this patch: 0 |
conchuod/build_rv64_clang_allmodconfig | success | Errors and warnings before: 2468 this patch: 2468 |
conchuod/module_param | success | Was 0 now: 0 |
conchuod/build_rv64_gcc_allmodconfig | success | Errors and warnings before: 17314 this patch: 17314 |
conchuod/alphanumeric_selects | success | Out of order selects before the patch: 730 and now 730 |
conchuod/build_rv32_defconfig | success | Build OK |
conchuod/dtb_warn_rv64 | success | Errors and warnings before: 2 this patch: 2 |
conchuod/header_inline | success | No static functions without inline keyword in header files |
conchuod/checkpatch | warning | CHECK: Alignment should match open parenthesis; WARNING: Possible repeated word: 'harts'
conchuod/source_inline | fail | Was 0 now: 2 |
conchuod/build_rv64_nommu_k210_defconfig | success | Build OK |
conchuod/verify_fixes | success | Fixes tag looks correct |
conchuod/build_rv64_nommu_virt_defconfig | success | Build OK |
Thx

Reviewed-by: Guo Ren <guoren@kernel.org>

On Sun, Feb 26, 2023 at 11:02 PM Sergey Matyukevich <geomatsi@gmail.com> wrote:
>
> From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
>
> This reverts the remaining bits of commit 4bd1d80efb5a ("riscv: mm:
> notify remote harts about mmu cache updates").
>
> According to bug reports, the suggested approach to fix stale TLB entries
> is not sufficient. It needs to be replaced by a more robust solution.
>
> Fixes: 4bd1d80efb5a ("riscv: mm: notify remote harts about mmu cache updates")
> Reported-by: Zong Li <zong.li@sifive.com>
> Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
> Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
> Cc: stable@vger.kernel.org
>
> ---
>  arch/riscv/include/asm/mmu.h      |  2 --
>  arch/riscv/include/asm/tlbflush.h | 18 ------------------
>  arch/riscv/mm/context.c           | 10 ----------
>  arch/riscv/mm/tlbflush.c          | 28 +++++++++++++++++-----------
>  4 files changed, 17 insertions(+), 41 deletions(-)
>
> diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
> index 5ff1f19fd45c..0099dc116168 100644
> --- a/arch/riscv/include/asm/mmu.h
> +++ b/arch/riscv/include/asm/mmu.h
> @@ -19,8 +19,6 @@ typedef struct {
>  #ifdef CONFIG_SMP
>          /* A local icache flush is needed before user execution can resume. */
>          cpumask_t icache_stale_mask;
> -        /* A local tlb flush is needed before user execution can resume. */
> -        cpumask_t tlb_stale_mask;
>  #endif
>  } mm_context_t;
>
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 907b9efd39a8..801019381dea 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  {
>          ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
>  }
> -
> -static inline void local_flush_tlb_all_asid(unsigned long asid)
> -{
> -        __asm__ __volatile__ ("sfence.vma x0, %0"
> -                        :
> -                        : "r" (asid)
> -                        : "memory");
> -}
> -
> -static inline void local_flush_tlb_page_asid(unsigned long addr,
> -                unsigned long asid)
> -{
> -        __asm__ __volatile__ ("sfence.vma %0, %1"
> -                        :
> -                        : "r" (addr), "r" (asid)
> -                        : "memory");
> -}
> -
>  #else /* CONFIG_MMU */
>  #define local_flush_tlb_all()                   do { } while (0)
>  #define local_flush_tlb_page(addr)              do { } while (0)
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 80ce9caba8d2..7acbfbd14557 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
>
>          if (need_flush_tlb)
>                  local_flush_tlb_all();
> -#ifdef CONFIG_SMP
> -        else {
> -                cpumask_t *mask = &mm->context.tlb_stale_mask;
> -
> -                if (cpumask_test_cpu(cpu, mask)) {
> -                        cpumask_clear_cpu(cpu, mask);
> -                        local_flush_tlb_all_asid(cntx & asid_mask);
> -                }
> -        }
> -#endif
>  }
>
>  static void set_mm_noasid(struct mm_struct *mm)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index ce7dfc81bb3f..37ed760d007c 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -5,7 +5,23 @@
>  #include <linux/sched.h>
>  #include <asm/sbi.h>
>  #include <asm/mmu_context.h>
> -#include <asm/tlbflush.h>
> +
> +static inline void local_flush_tlb_all_asid(unsigned long asid)
> +{
> +        __asm__ __volatile__ ("sfence.vma x0, %0"
> +                        :
> +                        : "r" (asid)
> +                        : "memory");
> +}
> +
> +static inline void local_flush_tlb_page_asid(unsigned long addr,
> +                unsigned long asid)
> +{
> +        __asm__ __volatile__ ("sfence.vma %0, %1"
> +                        :
> +                        : "r" (addr), "r" (asid)
> +                        : "memory");
> +}
>
>  void flush_tlb_all(void)
>  {
> @@ -15,7 +31,6 @@ void flush_tlb_all(void)
>  static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>                                    unsigned long size, unsigned long stride)
>  {
> -        struct cpumask *pmask = &mm->context.tlb_stale_mask;
>          struct cpumask *cmask = mm_cpumask(mm);
>          unsigned int cpuid;
>          bool broadcast;
> @@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>          if (static_branch_unlikely(&use_asid_allocator)) {
>                  unsigned long asid = atomic_long_read(&mm->context.id);
>
> -                /*
> -                 * TLB will be immediately flushed on harts concurrently
> -                 * executing this MM context. TLB flush on other harts
> -                 * is deferred until this MM context migrates there.
> -                 */
> -                cpumask_setall(pmask);
> -                cpumask_clear_cpu(cpuid, pmask);
> -                cpumask_andnot(pmask, pmask, cmask);
> -
>                  if (broadcast) {
>                          sbi_remote_sfence_vma_asid(cmask, start, size, asid);
>                  } else if (size <= stride) {
> --
> 2.39.2
>
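The quoted hunk ends right at `} else if (size <= stride) {`, so the local-flush arms are not visible above. As a reading aid only, here is a sketch of how the ASID branch of `__sbi_tlb_flush_range()` reads once this revert is applied; the two local-flush arms are reconstructed from the helpers the patch moves into tlbflush.c and may not match the tree character for character.

```c
/*
 * Sketch (not part of the patch): the ASID path of
 * __sbi_tlb_flush_range() after the revert. With the stale-mask
 * bookkeeping gone, the code simply picks between an SBI broadcast
 * fence and a local ASID-scoped sfence.vma.
 */
if (static_branch_unlikely(&use_asid_allocator)) {
        unsigned long asid = atomic_long_read(&mm->context.id);

        if (broadcast) {
                /* Other harts run this mm: fence them via SBI. */
                sbi_remote_sfence_vma_asid(cmask, start, size, asid);
        } else if (size <= stride) {
                /* Range covers a single page: fence just that address. */
                local_flush_tlb_page_asid(start, asid);
        } else {
                /* Larger range: flush all entries tagged with this ASID. */
                local_flush_tlb_all_asid(asid);
        }
}
```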
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 5ff1f19fd45c..0099dc116168 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
         /* A local icache flush is needed before user execution can resume. */
         cpumask_t icache_stale_mask;
-        /* A local tlb flush is needed before user execution can resume. */
-        cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 907b9efd39a8..801019381dea 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
         ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma x0, %0"
-                        :
-                        : "r" (asid)
-                        : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-                unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma %0, %1"
-                        :
-                        : "r" (addr), "r" (asid)
-                        : "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                   do { } while (0)
 #define local_flush_tlb_page(addr)              do { } while (0)
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d2..7acbfbd14557 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)

         if (need_flush_tlb)
                 local_flush_tlb_all();
-#ifdef CONFIG_SMP
-        else {
-                cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-                if (cpumask_test_cpu(cpu, mask)) {
-                        cpumask_clear_cpu(cpu, mask);
-                        local_flush_tlb_all_asid(cntx & asid_mask);
-                }
-        }
-#endif
 }

 static void set_mm_noasid(struct mm_struct *mm)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ce7dfc81bb3f..37ed760d007c 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma x0, %0"
+                        :
+                        : "r" (asid)
+                        : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+                unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma %0, %1"
+                        :
+                        : "r" (addr), "r" (asid)
+                        : "memory");
+}

 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long size, unsigned long stride)
 {
-        struct cpumask *pmask = &mm->context.tlb_stale_mask;
         struct cpumask *cmask = mm_cpumask(mm);
         unsigned int cpuid;
         bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
         if (static_branch_unlikely(&use_asid_allocator)) {
                 unsigned long asid = atomic_long_read(&mm->context.id);

-                /*
-                 * TLB will be immediately flushed on harts concurrently
-                 * executing this MM context. TLB flush on other harts
-                 * is deferred until this MM context migrates there.
-                 */
-                cpumask_setall(pmask);
-                cpumask_clear_cpu(cpuid, pmask);
-                cpumask_andnot(pmask, pmask, cmask);
-
                 if (broadcast) {
                         sbi_remote_sfence_vma_asid(cmask, start, size, asid);
                 } else if (size <= stride) {
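For completeness, the `broadcast` flag consulted in the hunk above is computed earlier in `__sbi_tlb_flush_range()` from `mm_cpumask()`. The lines below are a sketch of that preamble as it stood in kernels of this vintage; they are reconstructed for illustration and are not part of this patch.

```c
/*
 * Sketch (not from this patch): how __sbi_tlb_flush_range() decides
 * whether a remote fence is needed at all. cmask tracks the harts on
 * which this mm's context may be live.
 */
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;

if (cpumask_empty(cmask))
        return;         /* no hart ever ran this mm: nothing to flush */

cpuid = get_cpu();      /* stay on this hart while fencing */
/* A broadcast is needed iff some *other* hart is in the mask. */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
```

When `broadcast` is false the fence stays local, which is why the ASID helpers this patch moves into tlbflush.c are all the non-SMP-visible path needs.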