[RFC,v2,3/5] mm/mmu_notifier: introduce a new callback .numa_protect

Message ID 20230810090008.26122-1-yan.y.zhao@intel.com (mailing list archive)
State New, archived
Series: Reduce NUMA balance caused TLB-shootdowns in a VM

Commit Message

Yan Zhao Aug. 10, 2023, 9 a.m. UTC
The new .numa_protect callback is invoked once PROT_NONE has definitely
been set on a PTE or a huge PMD for NUMA migration purposes.
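
For context, a minimal sketch of the producer side (not part of this
patch; the actual call site lands later in this series). The hookup
below is an assumption modeled on change_prot_numa() in mm/mempolicy.c:
notify subscribers only after change_protection() reports that entries
were actually updated.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>

/*
 * Sketch of a change_prot_numa()-style caller (assumed hookup, not
 * part of this patch): notify only after PROT_NONE was really set.
 */
static long sketch_change_prot_numa(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	/*
	 * MM_CP_PROT_NUMA asks change_protection() to turn eligible
	 * PTEs/huge PMDs into PROT_NONE.
	 */
	nr_updated = change_protection(&tlb, vma, start, end,
				       MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		/* PROT_NONE is now set for sure: report exactly this range */
		mmu_notifier_numa_protect(vma->vm_mm, start, end);
	tlb_finish_mmu(&tlb);

	return nr_updated;
}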

With this callback, an mmu notifier subscriber (e.g. KVM) can unmap
only the NUMA-migration-protected pages in its handler, rather than
unmapping a wider range that also contains pages which are obviously
not NUMA-migratable.
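
To illustrate the consumer side, here is a hypothetical KVM-style
handler (a sketch, not part of this patch). container_of() on the
embedded struct mmu_notifier follows existing virt/kvm/kvm_main.c
convention; the kvm_unmap_hva_range() call is a stand-in for whatever
range-unmap plumbing the subscriber actually uses.

/*
 * Hypothetical subscriber-side handler (illustration only): unmap
 * just the range that NUMA balancing made PROT_NONE, instead of a
 * wider invalidation.
 */
static void kvm_mmu_notifier_numa_protect(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	/*
	 * Stand-in for the subscriber's real range-unmap helper; the
	 * point is that [start, end) covers only pages that were
	 * actually NUMA-protected.
	 */
	kvm_unmap_hva_range(kvm, start, end);
}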

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 include/linux/mmu_notifier.h | 15 +++++++++++++++
 mm/mmu_notifier.c            | 18 ++++++++++++++++++
 2 files changed, 33 insertions(+)

Patch

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a6dc829a4bce..a173db83b071 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -132,6 +132,10 @@ struct mmu_notifier_ops {
 			   unsigned long address,
 			   pte_t pte);
 
+	void (*numa_protect)(struct mmu_notifier *subscription,
+			     struct mm_struct *mm,
+			     unsigned long start,
+			     unsigned long end);
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_lock and/or the
@@ -395,6 +399,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
 				     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
 				      unsigned long address, pte_t pte);
+extern void __mmu_notifier_numa_protect(struct mm_struct *mm,
+					unsigned long start,
+					unsigned long end);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
 				  bool only_end);
@@ -448,6 +455,14 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 		__mmu_notifier_change_pte(mm, address, pte);
 }
 
+static inline void mmu_notifier_numa_protect(struct mm_struct *mm,
+					     unsigned long start,
+					     unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_numa_protect(mm, start, end);
+}
+
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 50c0dde1354f..fc96fbd46e1d 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -382,6 +382,24 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 	return young;
 }
 
+void __mmu_notifier_numa_protect(struct mm_struct *mm,
+				 unsigned long start,
+				 unsigned long end)
+{
+	struct mmu_notifier *subscription;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist,
+				 srcu_read_lock_held(&srcu)) {
+		if (subscription->ops->numa_protect)
+			subscription->ops->numa_protect(subscription, mm, start,
+							end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+
 int __mmu_notifier_clear_young(struct mm_struct *mm,
 			       unsigned long start,
 			       unsigned long end)
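
For completeness, a hypothetical registration sketch: wiring a
.numa_protect handler into a subscriber's mmu_notifier_ops reuses the
existing registration path unchanged. mmu_notifier_register() is the
current API; the ops instance and handler name here are illustrative.

/* Illustrative ops table; the other callbacks are elided */
static const struct mmu_notifier_ops example_notifier_ops = {
	.numa_protect = kvm_mmu_notifier_numa_protect, /* sketch above */
};

/* Attach the subscription to a process address space (existing API) */
static int example_subscribe(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	subscription->ops = &example_notifier_ops;
	return mmu_notifier_register(subscription, mm);
}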