[v4,1/3] mm/rmap: Recognize read-only TLB entries during batched TLB flush

Message ID 20231109045908.54996-2-byungchul@sk.com (mailing list archive)
State New
Series Reduce TLB flushes under some specific conditions

Commit Message

Byungchul Park Nov. 9, 2023, 4:59 a.m. UTC
Functionally, no change. This is a preparation for the migrc mechanism,
which requires recognizing read-only TLB entries and making use of them
to batch TLB flushes more aggressively.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 arch/x86/include/asm/tlbflush.h |  3 +++
 arch/x86/mm/tlb.c               | 11 +++++++++++
 include/linux/sched.h           |  1 +
 mm/internal.h                   |  4 ++++
 mm/rmap.c                       | 30 +++++++++++++++++++++++++++++-
 5 files changed, 48 insertions(+), 1 deletion(-)
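
In short, the patch splits the per-task pending-flush state in two:
writable or dirty PTEs keep going to the existing tlb_ubc batch, while
clean read-only PTEs are collected in a new tlb_ubc_ro batch that
fold_ubc_ro() merges back into tlb_ubc before any real flush. A minimal
sketch of the routing decision, condensed from
set_tlb_ubc_flush_pending() in the diff below (pick_batch is a
hypothetical name, not part of the patch):

	static struct tlbflush_unmap_batch *pick_batch(pte_t pteval)
	{
		/*
		 * A writable or dirty entry may still shield unwritten
		 * data, so it stays on the normal flush schedule; a clean
		 * read-only entry can safely be deferred further.
		 */
		if (pte_write(pteval) || pte_dirty(pteval))
			return &current->tlb_ubc;
		return &current->tlb_ubc_ro;
	}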

Comments

kernel test robot Nov. 9, 2023, 8:26 p.m. UTC | #1
Hi Byungchul,

kernel test robot noticed the following build errors:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on tip/x86/core tip/x86/mm linus/master v6.6 next-20231109]
[cannot apply to akpm-mm/mm-everything]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Byungchul-Park/mm-rmap-Recognize-read-only-TLB-entries-during-batched-TLB-flush/20231109-163706
base:   tip/sched/core
patch link:    https://lore.kernel.org/r/20231109045908.54996-2-byungchul%40sk.com
patch subject: [v4 1/3] mm/rmap: Recognize read-only TLB entries during batched TLB flush
config: arm64-randconfig-002-20231109 (https://download.01.org/0day-ci/archive/20231110/202311100429.nc4jJoNu-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231110/202311100429.nc4jJoNu-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202311100429.nc4jJoNu-lkp@intel.com/

All errors (new ones prefixed by >>):

   mm/rmap.c: In function 'fold_ubc_ro':
>> mm/rmap.c:620:9: error: implicit declaration of function 'arch_tlbbatch_fold'; did you mean 'arch_tlbbatch_flush'? [-Werror=implicit-function-declaration]
     620 |         arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_ro->arch);
         |         ^~~~~~~~~~~~~~~~~~
         |         arch_tlbbatch_flush
>> mm/rmap.c:626:9: error: implicit declaration of function 'arch_tlbbatch_clear'; did you mean 'arch_tlbbatch_flush'? [-Werror=implicit-function-declaration]
     626 |         arch_tlbbatch_clear(&tlb_ubc_ro->arch);
         |         ^~~~~~~~~~~~~~~~~~~
         |         arch_tlbbatch_flush
   cc1: some warnings being treated as errors
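
These errors are about arch coverage, not logic: the patch declares and
defines arch_tlbbatch_fold() and arch_tlbbatch_clear() only for x86,
while arm64 also selects CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH, so
this arm64 randconfig compiles the new calls in mm/rmap.c with no
declaration in scope. Hypothetical arm64 counterparts (not part of this
patch) could plausibly be no-ops, since arm64's struct
arch_tlbflush_unmap_batch carries no per-CPU mask; the hardware
broadcasts TLB invalidations. Inline stubs along these lines in
arch/arm64/include/asm/tlbflush.h would satisfy the references:

	static inline void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
	{
		/* Nothing to reset: the batch holds no state on arm64. */
	}

	static inline void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
					      struct arch_tlbflush_unmap_batch *bsrc)
	{
		/* Nothing to merge: pending invalidations are already broadcast. */
	}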


vim +620 mm/rmap.c

   608	
   609	void fold_ubc_ro(void)
   610	{
   611		struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
   612		struct tlbflush_unmap_batch *tlb_ubc_ro = &current->tlb_ubc_ro;
   613	
   614		if (!tlb_ubc_ro->flush_required)
   615			return;
   616	
   617		/*
   618		 * Fold tlb_ubc_ro's data to tlb_ubc.
   619		 */
 > 620		arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_ro->arch);
   621		tlb_ubc->flush_required = true;
   622	
   623		/*
   624		 * Reset tlb_ubc_ro's data.
   625		 */
 > 626		arch_tlbbatch_clear(&tlb_ubc_ro->arch);
   627		tlb_ubc_ro->flush_required = false;
   628	}
   629
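
Note the caller pattern wired up in try_to_unmap_flush() in the diff
below: fold first, then test whether a flush is required, so deferred
read-only entries are never lost when a full flush is requested.
Condensed from the patch:

	fold_ubc_ro();		/* tlb_ubc |= tlb_ubc_ro, tlb_ubc_ro reset */
	if (!current->tlb_ubc.flush_required)
		return;
	arch_tlbbatch_flush(&current->tlb_ubc.arch);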
Patch

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 25726893c6f4..5c618a8821de 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -292,6 +292,9 @@  static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 }
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+			       struct arch_tlbflush_unmap_batch *bsrc);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
 					unsigned long newflags,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 453ea95b667d..d3c89a3d91eb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1274,6 +1274,17 @@  void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	put_cpu();
 }
 
+void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
+{
+	cpumask_clear(&batch->cpumask);
+}
+
+void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+			struct arch_tlbflush_unmap_batch *bsrc)
+{
+	cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+}
+
 /*
  * Blindly accessing user memory from NMI context can be dangerous
  * if we're in the middle of switching the current user task or
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..8a31527d9ed8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1324,6 +1324,7 @@  struct task_struct {
 #endif
 
 	struct tlbflush_unmap_batch	tlb_ubc;
+	struct tlbflush_unmap_batch	tlb_ubc_ro;
 
 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;
diff --git a/mm/internal.h b/mm/internal.h
index 30cf724ddbce..9764b240e259 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -861,6 +861,7 @@  extern struct workqueue_struct *mm_percpu_wq;
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
 void flush_tlb_batched_pending(struct mm_struct *mm);
+void fold_ubc_ro(void);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -871,6 +872,9 @@  static inline void try_to_unmap_flush_dirty(void)
 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
 {
 }
+static inline void fold_ubc_ro(void)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/rmap.c b/mm/rmap.c
index 9f795b93cf40..c787ae94b4c6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -605,6 +605,28 @@  struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+void fold_ubc_ro(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	struct tlbflush_unmap_batch *tlb_ubc_ro = &current->tlb_ubc_ro;
+
+	if (!tlb_ubc_ro->flush_required)
+		return;
+
+	/*
+	 * Fold tlb_ubc_ro's data to tlb_ubc.
+	 */
+	arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_ro->arch);
+	tlb_ubc->flush_required = true;
+
+	/*
+	 * Reset tlb_ubc_ro's data.
+	 */
+	arch_tlbbatch_clear(&tlb_ubc_ro->arch);
+	tlb_ubc_ro->flush_required = false;
+}
+
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -615,6 +637,7 @@  void try_to_unmap_flush(void)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
+	fold_ubc_ro();
 	if (!tlb_ubc->flush_required)
 		return;
 
@@ -645,13 +668,18 @@  void try_to_unmap_flush_dirty(void)
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
 				      unsigned long uaddr)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	struct tlbflush_unmap_batch *tlb_ubc;
 	int batch;
 	bool writable = pte_dirty(pteval);
 
 	if (!pte_accessible(mm, pteval))
 		return;
 
+	if (pte_write(pteval) || writable)
+		tlb_ubc = &current->tlb_ubc;
+	else
+		tlb_ubc = &current->tlb_ubc_ro;
+
 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
 	tlb_ubc->flush_required = true;