[v5,06/13] riscv: mm: Combine the SMP and UP TLB flush code

Message ID 20240229232211.161961-7-samuel.holland@sifive.com (mailing list archive)
State Superseded
Series riscv: ASID-related and UP-related TLB flush enhancements

Checks

Context Check Description
conchuod/vmtest-fixes-PR fail merge-conflict
conchuod/vmtest-for-next-PR fail PR summary
conchuod/patch-6-test-1 fail .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-6-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-6-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-6-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-6-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-6-test-6 success .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-6-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-6-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-6-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-6-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-6-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-6-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Samuel Holland Feb. 29, 2024, 11:21 p.m. UTC
In SMP configurations, all TLB flushing narrower than flush_tlb_all()
goes through __flush_tlb_range(). Do the same in UP configurations.

This allows UP configurations to take advantage of recent improvements
to the code in tlbflush.c, such as support for huge pages and flushing
multiple-page ranges.
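
A hedged, self-contained sketch of the idea (illustrative only, not the
actual kernel code; the names OTHER_CPUS_MAY_EXIST, local_flush_range and
broadcast_flush_range are made up for this sketch): the flush routine keeps
the broadcast branch unconditionally, and in a UP build the "other CPUs may
exist" condition is a compile-time constant, so the compiler drops the
remote-fence path and only the local flush remains.

/* Sketch of the constant-folding pattern; only the pattern matters. */
#ifdef CONFIG_SMP
#define OTHER_CPUS_MAY_EXIST	1
#else
#define OTHER_CPUS_MAY_EXIST	0	/* constant 0: broadcast branch is dead code */
#endif

static void local_flush_range(unsigned long start, unsigned long size)
{
	/* stand-in for the local sfence.vma-based flush loop */
}

static void broadcast_flush_range(unsigned long start, unsigned long size)
{
	/* stand-in for the IPI/SBI remote-fence path */
}

void example_flush_tlb_range(unsigned long start, unsigned long size)
{
	if (OTHER_CPUS_MAY_EXIST)
		broadcast_flush_range(start, size);	/* eliminated when !CONFIG_SMP */
	else
		local_flush_range(start, size);
}

With this pattern the UP object code contains no reference to the broadcast
helper, which matches the v4 note below that both clang and gcc optimize out
the broadcast path in the merged __flush_tlb_range() and flush_tlb_all().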

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---

(no changes since v4)

Changes in v4:
 - Merge the two copies of __flush_tlb_range() and rely on the compiler
   to optimize out the broadcast path (both clang and gcc do this)
 - Merge the two copies of flush_tlb_all() and rely on constant folding

Changes in v2:
 - Move the SMP/UP merge earlier in the series to avoid build issues
 - Make a copy of __flush_tlb_range() instead of adding ifdefs inside
 - local_flush_tlb_all() is the only function used on !MMU (smpboot.c)

 arch/riscv/Kconfig                |  2 +-
 arch/riscv/include/asm/tlbflush.h | 30 +++---------------------------
 arch/riscv/mm/Makefile            |  5 +----
 3 files changed, 5 insertions(+), 32 deletions(-)

Comments

yunhui cui March 1, 2024, 2:12 a.m. UTC | #1
Hi Samuel,

On Fri, Mar 1, 2024 at 7:22 AM Samuel Holland <samuel.holland@sifive.com> wrote:
>
> In SMP configurations, all TLB flushing narrower than flush_tlb_all()
> goes through __flush_tlb_range(). Do the same in UP configurations.
>
> This allows UP configurations to take advantage of recent improvements
> to the code in tlbflush.c, such as support for huge pages and flushing
> multiple-page ranges.
>
> [...]

git am failed to apply this patch. Was it based on the top commit of linux-next?

Thanks,
Yunhui
Samuel Holland March 1, 2024, 2:34 a.m. UTC | #2
Hi Yunhui,

On 2024-02-29 8:12 PM, yunhui cui wrote:
> On Fri, Mar 1, 2024 at 7:22 AM Samuel Holland <samuel.holland@sifive.com> wrote:
>>
>> [...]
>
> git am failed to apply this patch. Was it based on the top commit of linux-next?

This series is based on the for-next branch of riscv.git, which is where it
would be applied. There is a conflict with commit d9807d60c145 ("riscv: mm:
execute local TLB flush after populating vmemmap") in the riscv.git fixes
branch, which added a uniprocessor-specific local_flush_tlb_kernel_range()
definition. The appropriate merge conflict resolution is to remove that new
macro, i.e. take the version of the file from this patch series.
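
For illustration, the conflicting definition is roughly the following
(approximate form; check d9807d60c145 for the exact line). Resolving the
conflict means deleting it, since this series removes the whole UP-only
fallback block and the UP build now uses the common code in tlbflush.c:

/* UP-only macro added by d9807d60c145 (approximate form) --
 * drop it when merging with this series: */
#define local_flush_tlb_kernel_range(start, end) local_flush_tlb_all()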

Regards,
Samuel

Patch

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 0bfcfec67ed5..de9b6f2279ff 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -60,7 +60,7 @@  config RISCV
 	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USES_CFI_TRAPS if CFI_CLANG
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 928f096dca21..4f86424b1ba5 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -27,12 +27,7 @@  static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all()			do { } while (0)
-#define local_flush_tlb_page(addr)		do { } while (0)
-#endif /* CONFIG_MMU */
 
-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -54,27 +49,8 @@  void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       unsigned long uaddr);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
-
-#else /* CONFIG_SMP && CONFIG_MMU */
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 2c869f8026a8..cbe4d775ef56 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,14 +13,11 @@  endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o tlbflush.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += pmem.o
 
-ifeq ($(CONFIG_MMU),y)
-obj-$(CONFIG_SMP) += tlbflush.o
-endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
 obj-$(CONFIG_KASAN)   += kasan_init.o