[RFC,01/19] kvm: x86: mmu: Add kvm_mmu_get_spte() and kvm_mmu_set_spte()

Message ID 20170616134348.17725-2-alazar@bitdefender.com (mailing list archive)
State New, archived

Commit Message

Adalbert Lazăr June 16, 2017, 1:43 p.m. UTC
From: Mihai Dontu <mdontu@bitdefender.com>

These are helpers used by the introspection subsystem to query and
adjust the SPTE rwx flags. At present, the code assumes we're dealing
with 4-level hardware shadow page tables.

Signed-off-by: Mihai Dontu <mdontu@bitdefender.com>
---
 arch/x86/kvm/mmu.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu.h |  3 +++
 2 files changed, 78 insertions(+)
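
For context, a minimal sketch of how an introspection-side caller could
consume the read helper; kvmi_get_page_access() is a hypothetical name
used only for illustration and is not part of this patch. It assumes the
PT_*_MASK definitions from arch/x86/kvm/mmu.h and the error convention
of kvm_mmu_get_spte() below, which returns -ENOENT widened to u64.

/*
 * Hypothetical caller, for illustration only: decode the value returned
 * by kvm_mmu_get_spte() into separate r/w/x flags, using the same bit
 * masks the helper filters on.
 */
static int kvmi_get_page_access(struct kvm_vcpu *vcpu, gpa_t gpa,
				bool *r, bool *w, bool *x)
{
	u64 access = kvm_mmu_get_spte(vcpu->kvm, vcpu, gpa);

	/* On failure the helper returns -ENOENT widened to u64. */
	if (access == (u64)-ENOENT)
		return -ENOENT;

	*r = !!(access & PT_PRESENT_MASK);
	*w = !!(access & PT_WRITABLE_MASK);
	*x = !!(access & PT_USER_MASK);
	return 0;
}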

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cb8225969255..12e4c33ff879 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5350,3 +5350,78 @@  void kvm_mmu_module_exit(void)
 	unregister_shrinker(&mmu_shrinker);
 	mmu_audit_disable();
 }
+
+u64 kvm_mmu_get_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	u64 spte = -1;
+	unsigned int c = 0;
+	const u64 mask = PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+	struct kvm_shadow_walk_iterator iterator;
+
+	spin_lock(&kvm->mmu_lock);
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		goto error;
+	for_each_shadow_entry(vcpu, gpa & PAGE_MASK, iterator) {
+		u64 __spte = *iterator.sptep;
+
+		if (!(__spte & mask))
+			break;
+		else if (++c == PT64_ROOT_LEVEL) {
+			spte = __spte;
+			break;
+		}
+	}
+	if (spte == (u64) -1)
+		goto error;
+	spin_unlock(&kvm->mmu_lock);
+	return spte & mask;
+error:
+	spin_unlock(&kvm->mmu_lock);
+	return -ENOENT;
+}
+
+int kvm_mmu_set_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, gpa_t gpa,
+		     unsigned int r, unsigned int w, unsigned int x)
+{
+	int flush = 0;
+	u64 *pspte[4] = { };
+	u64 spte;
+	u64 old_spte;
+	unsigned int c = 0;
+	const u64 mask = PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+	struct kvm_shadow_walk_iterator iterator;
+
+	spin_lock(&kvm->mmu_lock);
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		goto error;
+	for_each_shadow_entry(vcpu, gpa & PAGE_MASK, iterator) {
+		u64 __spte = *iterator.sptep;
+
+		if (!(__spte & mask))
+			break;
+		pspte[c++] = iterator.sptep;
+	}
+	if (c < PT64_ROOT_LEVEL || !pspte[c - 1])
+		goto error;
+	c--;
+	old_spte = *pspte[c];
+	spte = old_spte & ~mask;
+	if (r)
+		spte |= PT_PRESENT_MASK;
+	if (w)
+		spte |= PT_WRITABLE_MASK;
+	if (x)
+		spte |= PT_USER_MASK;
+	if (old_spte != spte)
+		flush |= mmu_spte_update(pspte[c], spte);
+	while (c-- > 0) {
+		spte = *pspte[c];
+		if ((spte & mask) != mask)
+			flush |= mmu_spte_update(pspte[c], spte | mask);
+	}
+	spin_unlock(&kvm->mmu_lock);
+	return flush;
+error:
+	spin_unlock(&kvm->mmu_lock);
+	return -ENOENT;
+}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 330bf3a811fb..82246fdc0479 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -204,4 +204,7 @@  void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn);
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+u64 kvm_mmu_get_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, gpa_t gpa);
+int kvm_mmu_set_spte(struct kvm *kvm, struct kvm_vcpu *vcpu, gpa_t gpa,
+		     unsigned int r, unsigned int w, unsigned int x);
 #endif
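
For the write side, a similarly hedged sketch of a possible caller:
kvm_mmu_set_spte() returns a positive value when mmu_spte_update()
reports that a TLB flush is needed, and -ENOENT when the walk does not
reach a last-level SPTE, so a caller would be expected to propagate the
error or flush. kvmi_set_page_access() is a hypothetical wrapper, not
part of this series.

/*
 * Hypothetical wrapper, for illustration only: apply the requested rwx
 * bits to the SPTE backing @gpa and flush remote TLBs if the update
 * asks for it.
 */
static int kvmi_set_page_access(struct kvm_vcpu *vcpu, gpa_t gpa,
				unsigned int r, unsigned int w,
				unsigned int x)
{
	int ret = kvm_mmu_set_spte(vcpu->kvm, vcpu, gpa, r, w, x);

	if (ret < 0)
		return ret;
	/* A positive return means mmu_spte_update() requested a flush. */
	if (ret > 0)
		kvm_flush_remote_tlbs(vcpu->kvm);
	return 0;
}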