@@ -258,6 +258,9 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
unsigned long attributes;
+#ifdef CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
+ bool *failed;
+#endif
};
struct kvm_gfn_range {
@@ -271,7 +274,11 @@ struct kvm_gfn_range {
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+#ifdef CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
+bool kvm_fast_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_fast_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif
+#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
enum {
OUTSIDE_GUEST_MODE,
@@ -489,6 +489,28 @@ TRACE_EVENT(kvm_test_age_hva,
TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
+TRACE_EVENT(kvm_fast_test_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end, bool clear),
+ TP_ARGS(start, end, clear),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ __field( bool, clear )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->clear = clear;
+ ),
+
+ TP_printk("mmu notifier fast test age: hva: %#016lx -- %#016lx "
+ "clear: %d",
+ __entry->start, __entry->end,
+ __entry->clear)
+);
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
@@ -100,6 +100,10 @@ config KVM_GENERIC_MMU_NOTIFIER
config KVM_MMU_NOTIFIER_YOUNG_LOCKLESS
bool
+config HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
+ select KVM_GENERIC_MMU_NOTIFIER
+ bool
+
config KVM_GENERIC_MEMORY_ATTRIBUTES
depends on KVM_GENERIC_MMU_NOTIFIER
bool
@@ -699,7 +699,8 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- gfn_handler_t handler)
+ gfn_handler_t handler,
+ bool *failed)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
@@ -711,6 +712,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
.may_block = false,
.lockless =
IS_ENABLED(CONFIG_KVM_MMU_NOTIFIER_YOUNG_LOCKLESS),
+ .arg.failed = failed,
};
return __kvm_handle_hva_range(kvm, &range).ret;
@@ -901,7 +903,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
- return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+ return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn, NULL);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -911,9 +913,32 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
trace_kvm_test_age_hva(address);
return kvm_handle_hva_range_no_flush(mn, address, address + 1,
- kvm_test_age_gfn);
+ kvm_test_age_gfn, NULL);
}
+#ifdef CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
+static int kvm_mmu_notifier_test_clear_young_fast_only(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ bool clear)
+{
+ gfn_handler_t handler;
+ bool failed = false, young;
+
+ trace_kvm_fast_test_age_hva(start, end, clear);
+
+ handler = clear ? kvm_fast_age_gfn : kvm_fast_test_age_gfn;
+
+ young = kvm_handle_hva_range_no_flush(mn, start, end, handler, &failed);
+
+ if (failed)
+ return MMU_NOTIFIER_FAST_FAILED;
+
+ return young ? MMU_NOTIFIER_FAST_YOUNG : 0;
+}
+#endif
+
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -926,12 +951,16 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
- .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
- .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
- .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
- .clear_young = kvm_mmu_notifier_clear_young,
- .test_young = kvm_mmu_notifier_test_young,
- .release = kvm_mmu_notifier_release,
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+ .clear_young = kvm_mmu_notifier_clear_young,
+ .test_young = kvm_mmu_notifier_test_young,
+#ifdef CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
+ .test_clear_young_fast_only =
+ kvm_mmu_notifier_test_clear_young_fast_only,
+#endif
+ .release = kvm_mmu_notifier_release,
};
static int kvm_init_mmu_notifier(struct kvm *kvm)
Provide the basics for allowing architectures to implement
mmu_notifier_test_clear_young_fast_only().

Add CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER that architectures will
set if they implement the fast-only notifier.

kvm_fast_age_gfn and kvm_fast_test_age_gfn both need to support
returning a tri-state result:

 1. fast && young,
 2. fast && !young,
 3. !fast

This could be done by making gfn_handler_t return int, but that would
mean a lot of churn. Instead, include a new kvm_mmu_notifier_arg
'bool *failed' for kvm_fast_{test_,}age_gfn to optionally use.

Signed-off-by: James Houghton <jthoughton@google.com>
---
 include/linux/kvm_host.h   |  7 ++++++
 include/trace/events/kvm.h | 22 ++++++++++++++++++
 virt/kvm/Kconfig           |  4 ++++
 virt/kvm/kvm_main.c        | 47 ++++++++++++++++++++++++++++++--------
 4 files changed, 71 insertions(+), 9 deletions(-)
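
For illustration only (not part of this patch): an architecture that
selects CONFIG_HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER could report the
"!fast" case through range->arg.failed roughly as sketched below.
arch_supports_lockless_aging() and arch_test_clear_young_lockless()
are hypothetical placeholders for the arch-specific page table walk;
only the kvm_fast_test_age_gfn() signature and the 'failed' argument
come from this patch.

/*
 * Hypothetical arch-side sketch. The fast-only notifier always passes a
 * non-NULL 'failed' pointer via kvm_handle_hva_range_no_flush().
 */
bool kvm_fast_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/* Cannot age this range without taking the MMU lock: report !fast. */
	if (!arch_supports_lockless_aging(kvm, range)) {
		*range->arg.failed = true;
		return false;
	}

	/* Lockless walk; returns fast && young or fast && !young. */
	return arch_test_clear_young_lockless(kvm, range, /*clear=*/false);
}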