@@ -132,6 +132,15 @@ struct mmu_notifier_ops {
unsigned long address,
pte_t pte);
+	/*
+	 * numa_protect() is called after PROT_NONE has been installed on a
+	 * PTE or a huge PMD in [start, end) for NUMA-balancing migration,
+	 * so subscribers (e.g. KVM) can unmap just the protected range
+	 * instead of a wider one.
+	 */
+ void (*numa_protect)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_lock and/or the
@@ -395,6 +399,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
+extern void __mmu_notifier_numa_protect(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
bool only_end);
@@ -448,6 +455,18 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
+/*
+ * Notify subscribers that PROT_NONE was set on [start, end) for NUMA
+ * migration; cheap no-op when mm has no notifiers registered.
+ */
+static inline void mmu_notifier_numa_protect(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_numa_protect(mm, start, end);
+}
+
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
@@ -382,6 +382,29 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
return young;
}
+/*
+ * Invoke every subscriber's ->numa_protect() callback for [start, end).
+ * Iterates the subscription list under SRCU, the same pattern used by
+ * __mmu_notifier_clear_flush_young(); the inline wrapper has already
+ * checked mm_has_notifiers().
+ */
+void __mmu_notifier_numa_protect(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_notifier *subscription;
+ int id;
+
+ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(subscription,
+ &mm->notifier_subscriptions->list, hlist,
+ srcu_read_lock_held(&srcu)) {
+ if (subscription->ops->numa_protect)
+ subscription->ops->numa_protect(subscription, mm, start,
+ end);
+ }
+ srcu_read_unlock(&srcu, id);
+}
+
int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
This .numa_protect callback is invoked when PROT_NONE is known to have been set on a PTE or a huge PMD for NUMA-migration purposes. With this callback, subscribers of the mmu notifier (e.g. KVM) can unmap only the NUMA-migration-protected pages in the handler, rather than unmapping a wider range containing pages that are obviously not NUMA-migratable. Signed-off-by: Yan Zhao <yan.y.zhao@intel.com> --- include/linux/mmu_notifier.h | 15 +++++++++++++++ mm/mmu_notifier.c | 18 ++++++++++++++++++ 2 files changed, 33 insertions(+)