@@ -64,6 +64,7 @@ bool kvmi_descriptor_event(struct kvm_vcpu *vcpu, u8 descriptor, bool write);
bool kvmi_msr_event(struct kvm_vcpu *vcpu, struct msr_data *msr);
bool kvmi_monitor_msrw_intercept(struct kvm_vcpu *vcpu, u32 msr, bool enable);
bool kvmi_msrw_intercept_originator(struct kvm_vcpu *vcpu);
+bool kvmi_update_ad_flags(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_INTROSPECTION */
@@ -88,6 +89,7 @@ static inline bool kvmi_monitor_msrw_intercept(struct kvm_vcpu *vcpu, u32 msr,
bool enable) { return false; }
static inline bool kvmi_msrw_intercept_originator(struct kvm_vcpu *vcpu)
{ return false; }
+static inline bool kvmi_update_ad_flags(struct kvm_vcpu *vcpu) { return false; }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -1378,3 +1378,45 @@ gpa_t kvmi_arch_cmd_translate_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
return kvm_mmu_gva_to_gpa_system(vcpu, gva, 0, NULL);
}
+
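+/*
+ * Walk the guest page tables in software so the A/D bits get updated
+ * without removing the write protection of the tracked page.
+ */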
+bool kvmi_update_ad_flags(struct kvm_vcpu *vcpu)
+{
+ struct kvm_introspection *kvmi;
+ bool ret = false;
+ gva_t gva;
+ gpa_t gpa;
+
+ kvmi = kvmi_get(vcpu->kvm);
+ if (!kvmi)
+ return false;
+
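+ /* The guest linear address that caused the fault; ~0 if not provided. */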
+ gva = kvm_x86_ops.fault_gla(vcpu);
+ if (gva == ~0ull) {
+ kvmi_warn_once(kvmi, "%s: cannot perform translation\n",
+ __func__);
+ goto out;
+ }
+
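+ /*
+  * Translate with a write access first so both the accessed and the
+  * dirty bits are set; fall back to a read translation (accessed bit only).
+  */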
+ gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, PFERR_WRITE_MASK, NULL);
+ if (gpa == UNMAPPED_GVA) {
+ struct x86_exception exception = { };
+
+ gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, 0, &exception);
+ }
+
+ ret = (gpa != UNMAPPED_GVA);
+
+out:
+ kvmi_put(vcpu->kvm);
+
+ return ret;
+}
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>
+#include <linux/kvmi_host.h>
#include <asm/page.h>
#include <asm/memtype.h>
@@ -5549,8 +5550,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
*/
if (vcpu->arch.mmu->direct_map &&
(error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
- return 1;
+ gfn_t gfn = gpa_to_gfn(cr2_or_gpa);
+
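+ /*
+  * Don't remove the write protection of a gfn tracked by the
+  * introspection tool; update the guest A/D bits in software instead.
+  */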
+ if (kvmi_tracked_gfn(vcpu, gfn)) {
+ if (kvmi_update_ad_flags(vcpu))
+ return 1;
+ } else {
+ kvm_mmu_unprotect_page(vcpu->kvm, gfn);
+ return 1;
+ }
}
/*
@@ -96,6 +96,7 @@ bool kvmi_enter_guest(struct kvm_vcpu *vcpu);
bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu);
void kvmi_singlestep_done(struct kvm_vcpu *vcpu);
void kvmi_singlestep_failed(struct kvm_vcpu *vcpu);
+bool kvmi_tracked_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
#else
@@ -116,6 +117,8 @@ static inline bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu)
{ return false; }
static inline void kvmi_singlestep_done(struct kvm_vcpu *vcpu) { }
static inline void kvmi_singlestep_failed(struct kvm_vcpu *vcpu) { }
+static inline bool kvmi_tracked_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+ { return false; }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -1381,3 +1381,30 @@ void kvmi_singlestep_failed(struct kvm_vcpu *vcpu)
kvmi_handle_singlestep_exit(vcpu, false);
}
EXPORT_SYMBOL(kvmi_singlestep_failed);
+
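+/* A gfn is tracked if the introspection tool has restricted its access. */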
+static bool __kvmi_tracked_gfn(struct kvm_introspection *kvmi, gfn_t gfn)
+{
+ u8 ignored_access;
+
+ if (kvmi_get_gfn_access(kvmi, gfn, &ignored_access))
+ return false;
+
+ return true;
+}
+
+bool kvmi_tracked_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct kvm_introspection *kvmi;
+ bool ret;
+
+ kvmi = kvmi_get(vcpu->kvm);
+ if (!kvmi)
+ return false;
+
+ ret = __kvmi_tracked_gfn(kvmi, gfn);
+
+ kvmi_put(vcpu->kvm);
+
+ return ret;
+}