diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -60,6 +60,7 @@ enum mmu_notifier_event {
 };
 
 #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
+#define MMU_NOTIFIER_RANGE_NUMA (1 << 1)
 
 struct mmu_notifier_ops {
 	/*
diff --git a/mm/mprotect.c b/mm/mprotect.c
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -381,7 +381,9 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 		/* invoke the mmu notifier if the pmd is populated */
 		if (!range.start) {
 			mmu_notifier_range_init(&range,
-				MMU_NOTIFY_PROTECTION_VMA, 0,
+				MMU_NOTIFY_PROTECTION_VMA,
+				cp_flags & MM_CP_PROT_NUMA ?
+					MMU_NOTIFIER_RANGE_NUMA : 0,
 				vma->vm_mm, addr, end);
 			mmu_notifier_invalidate_range_start(&range);
 		}
Introduce a new mmu notifier flag, MMU_NOTIFIER_RANGE_NUMA, to indicate that
a MMU_NOTIFY_PROTECTION_VMA notification is issued specifically for NUMA
balancing. Subscribers of the mmu notifier, such as KVM, can then use this
more precise information to optimize how they handle the invalidation.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 include/linux/mmu_notifier.h | 1 +
 mm/mprotect.c                | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)
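
As an illustration only (not part of the patch), here is a sketch of how an
mmu notifier subscriber might consume the new flag in its
invalidate_range_start() callback. The demo_* names are made up, and the
pr_debug() stands in for whatever optimization a real subscriber such as KVM
would actually apply:

#include <linux/printk.h>
#include <linux/mmu_notifier.h>

static int demo_invalidate_range_start(struct mmu_notifier *subscription,
				       const struct mmu_notifier_range *range)
{
	/*
	 * With this patch, a protection change driven by NUMA balancing
	 * can be told apart from other MMU_NOTIFY_PROTECTION_VMA
	 * notifications.
	 */
	bool numa = range->event == MMU_NOTIFY_PROTECTION_VMA &&
		    (range->flags & MMU_NOTIFIER_RANGE_NUMA);

	/*
	 * A real subscriber could use "numa" to decide how aggressively to
	 * invalidate its secondary mappings; this sketch only logs which
	 * case was seen.
	 */
	pr_debug("invalidate [%lx, %lx): %s\n", range->start, range->end,
		 numa ? "NUMA balancing protection" : "other");

	return 0;
}

static const struct mmu_notifier_ops demo_mmu_notifier_ops = {
	.invalidate_range_start = demo_invalidate_range_start,
};

A subscriber would hook such ops up with mmu_notifier_register() against the
mm it mirrors, as usual.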