
[RFC,07/27] KVM: x86: Switch to host address space when may access sensitive data

Message ID 1557758315-12667-8-git-send-email-alexandre.chartre@oracle.com
Series KVM Address Space Isolation

Commit Message

Alexandre Chartre May 13, 2019, 2:38 p.m. UTC
From: Liran Alon <liran.alon@oracle.com>

Before this patch, we exited from the KVM isolated address space to the
host address space as soon as we exited the guest.

Change the code such that most KVM #VMExit handlers run in the KVM
isolated address space and switch back to the host address space
only before accessing sensitive data. Sensitive data is defined
as either host data or data belonging to other VMs.

Currently, we switch from kvm_mm to host_mm in the following scenarios
(see the sketch after this list):
1) When handling guest page-faults,
   as this accesses SPTs which contain host PFNs.
2) On schedule-out of a vCPU thread.
3) On writes to guest virtual memory
   (kvm_write_guest_virt_system() can pull in tons of pages).
4) On return to userspace (e.g. QEMU).
5) In the prologue of IRQ handlers.
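
For illustration only (not part of the patch), a minimal sketch of how a
#VMExit handler is expected to use the new helper before touching
host-owned state; the handler name and the data access are hypothetical,
only kvm_may_access_sensitive_data() comes from this patch:

  /* Hypothetical #VMExit handler running in the KVM isolated address space. */
  static int example_vmexit_handler(struct kvm_vcpu *vcpu)
  {
          /*
           * About to walk structures holding host PFNs (host data):
           * request an L1D flush and switch back from kvm_mm to host_mm.
           */
          kvm_may_access_sensitive_data(vcpu);

          /* ... access host or other-VM data here ... */

          return 1;
  }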

Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c |    7 ++++++-
 arch/x86/kvm/isolation.h |    3 +++
 arch/x86/kvm/mmu.c       |    3 ++-
 arch/x86/kvm/x86.c       |   12 +++++-------
 4 files changed, 16 insertions(+), 9 deletions(-)

Patch

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index 22ff9c2..eeb60c4 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -5,7 +5,6 @@ 
  * KVM Address Space Isolation
  */
 
-#include <linux/kvm_host.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/printk.h>
@@ -133,6 +132,12 @@  void kvm_isolation_uninit(void)
 	pr_info("KVM: x86: End of isolated address space\n");
 }
 
+void kvm_may_access_sensitive_data(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.l1tf_flush_l1d = true;
+	kvm_isolation_exit();
+}
+
 void kvm_isolation_enter(void)
 {
 	if (address_space_isolation) {
diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
index 595f62c..1290d32 100644
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -2,9 +2,12 @@ 
 #ifndef ARCH_X86_KVM_ISOLATION_H
 #define ARCH_X86_KVM_ISOLATION_H
 
+#include <linux/kvm_host.h>
+
 extern int kvm_isolation_init(void);
 extern void kvm_isolation_uninit(void);
 extern void kvm_isolation_enter(void);
 extern void kvm_isolation_exit(void);
+extern void kvm_may_access_sensitive_data(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7b45..a2b38de 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -23,6 +23,7 @@ 
 #include "x86.h"
 #include "kvm_cache_regs.h"
 #include "cpuid.h"
+#include "isolation.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -4059,7 +4060,7 @@  int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 {
 	int r = 1;
 
-	vcpu->arch.l1tf_flush_l1d = true;
+	kvm_may_access_sensitive_data(vcpu);
 	switch (vcpu->arch.apf.host_apf_reason) {
 	default:
 		trace_kvm_page_fault(fault_address, error_code);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 85700e0..1db72c3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3307,6 +3307,8 @@  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
 	 */
 	set_debugreg(0, 6);
+
+	kvm_may_access_sensitive_data(vcpu);
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -5220,7 +5222,7 @@  int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 				unsigned int bytes, struct x86_exception *exception)
 {
 	/* kvm_write_guest_virt_system can pull in tons of pages. */
-	vcpu->arch.l1tf_flush_l1d = true;
+	kvm_may_access_sensitive_data(vcpu);
 
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
@@ -7948,12 +7950,6 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
-	/*
-	 * TODO: Move this to where we architectually need to access
-	 * host (or other VM) sensitive data
-	 */
-	kvm_isolation_exit();
-
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
@@ -8086,6 +8082,8 @@  static int vcpu_run(struct kvm_vcpu *vcpu)
 
 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
+	kvm_may_access_sensitive_data(vcpu);
+
 	return r;
 }