[RFC,14/18] KVM: x86: Use kvm_set_segment() directly when leaving SMM

Message ID 20190328175557.14408-15-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: clear HF_SMM_MASK before loading state

Commit Message

Sean Christopherson March 28, 2019, 5:55 p.m. UTC
Now that the RSM helpers live in x86.c with direct access to the vCPU,
build a kvm_segment and call kvm_set_segment() directly when loading
segment state from the SMM save area, instead of constructing an
intermediate desc_struct and bouncing through the emulator's
->set_segment() callback.  This drops the emul_to_vcpu() conversions in
the segment helpers and the separate base3 handling in the 64-bit path.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
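Note (editorial aside, not part of the commit): the packed attribute
word that rsm_set_seg() below unpacks follows the standard x86 descriptor
attribute layout, with the access-rights byte at bits 15:8 and the
AVL/L/D-B/G flags at bits 23:20.  A minimal standalone sketch of that
unpacking (plain userspace C; the names unpack_attrs and demo_seg are
hypothetical demo code, not KVM symbols):

/* Standalone demo: unpack the packed segment-attribute word from the
 * SMM save area, mirroring the shifts in rsm_set_seg().
 * Build with:  cc -o seg_attrs seg_attrs.c
 */
#include <stdint.h>
#include <stdio.h>

struct demo_seg {
	unsigned type, s, dpl, present, avl, l, db, g;
};

static struct demo_seg unpack_attrs(uint32_t flags)
{
	struct demo_seg seg = {
		.type    = (flags >>  8) & 15, /* segment type        */
		.s       = (flags >> 12) & 1,  /* code/data vs system */
		.dpl     = (flags >> 13) & 3,  /* privilege level     */
		.present = (flags >> 15) & 1,  /* P flag              */
		.avl     = (flags >> 20) & 1,  /* available to SW     */
		.l       = (flags >> 21) & 1,  /* 64-bit code segment */
		.db      = (flags >> 22) & 1,  /* default op size     */
		.g       = (flags >> 23) & 1,  /* 4K granularity      */
	};
	return seg;
}

int main(void)
{
	/* 0xc09b << 8: a flat, present, 32-bit, 4K-granular code segment. */
	struct demo_seg seg = unpack_attrs(0xc09b00);

	printf("type=%x s=%u dpl=%u p=%u avl=%u l=%u db=%u g=%u\n",
	       seg.type, seg.s, seg.dpl, seg.present,
	       seg.avl, seg.l, seg.db, seg.g);
	return 0;
}
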
 arch/x86/kvm/x86.c | 131 ++++++++++++++++++++++++---------------------
 1 file changed, 70 insertions(+), 61 deletions(-)
Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa769bb36f3b..55687273d428 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7564,22 +7564,38 @@  static void enter_smm(struct kvm_vcpu *vcpu)
 	 __val;								  \
 	})
 
-static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
+static void rsm_set_seg(struct kvm_vcpu *vcpu, int seg, u16 sel, u64 base,
+			u32 limit, u32 flags)
 {
-	desc->g    = (flags >> 23) & 1;
-	desc->d    = (flags >> 22) & 1;
-	desc->l    = (flags >> 21) & 1;
-	desc->avl  = (flags >> 20) & 1;
-	desc->p    = (flags >> 15) & 1;
-	desc->dpl  = (flags >> 13) & 3;
-	desc->s    = (flags >> 12) & 1;
-	desc->type = (flags >>  8) & 15;
+	struct kvm_segment var;
+
+	var.selector	= sel;
+	var.base	= base;
+	var.limit	= limit;
+	var.g		= (flags >> 23) & 1;
+	var.db		= (flags >> 22) & 1;
+	var.l		= (flags >> 21) & 1;
+	var.avl		= (flags >> 20) & 1;
+	var.present	= (flags >> 15) & 1;
+	var.dpl		= (flags >> 13) & 3;
+	var.s		= (flags >> 12) & 1;
+	var.type	= (flags >>  8) & 15;
+	var.unusable	= !var.present;
+	var.padding	= 0;
+	if (var.g)
+		var.limit = (var.limit << 12) | 0xfff;
+
+	kvm_set_segment(vcpu, &var, seg);
+}
+
+static void rsm_set_seg_64(struct kvm_vcpu *vcpu, int seg, u16 sel,
+			   u64 base_lo, u64 base_hi, u32 limit, u32 flags)
+{
+	rsm_set_seg(vcpu, seg, sel, base_lo | (base_hi << 32), limit, flags);
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct kvm_vcpu *vcpu, u64 smbase, int n)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	struct desc_struct desc;
 	int offset;
 	u16 selector;
 
@@ -7590,30 +7606,27 @@  static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	else
 		offset = 0x7f2c + (n - 3) * 12;
 
-	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
-	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
+	rsm_set_seg(vcpu, n, selector,
+		    GET_SMSTATE(u32, smbase, offset + 8),
+		    GET_SMSTATE(u32, smbase, offset + 4),
+		    GET_SMSTATE(u32, smbase, offset));
+
 	return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_64(struct kvm_vcpu *vcpu, u64 smbase, int n)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	struct desc_struct desc;
 	int offset;
-	u16 selector;
-	u32 base3;
 
 	offset = 0x7e00 + n * 16;
 
-	selector =                GET_SMSTATE(u16, smbase, offset);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
+	rsm_set_seg_64(vcpu, n,
+		       GET_SMSTATE(u16, smbase, offset),
+		       GET_SMSTATE(u32, smbase, offset + 8),
+		       GET_SMSTATE(u32, smbase, offset + 12),
+		       GET_SMSTATE(u32, smbase, offset + 4),
+		       GET_SMSTATE(u16, smbase, offset + 2) << 8);
 
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
 
@@ -7672,9 +7685,7 @@  static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	struct desc_struct desc;
 	struct desc_ptr dt;
-	u16 selector;
 	u32 val, cr0, cr3, cr4;
 	int i;
 
@@ -7691,17 +7702,17 @@  static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	val = GET_SMSTATE(u32, smbase, 0x7fc8);
 	__kvm_set_dr(vcpu, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
-	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
-	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
-	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
-	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
+	rsm_set_seg(vcpu, VCPU_SREG_TR,
+		    GET_SMSTATE(u32, smbase, 0x7fc4),
+		    GET_SMSTATE(u32, smbase, 0x7f64),
+		    GET_SMSTATE(u32, smbase, 0x7f60),
+		    GET_SMSTATE(u32, smbase, 0x7f5c));
 
-	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
-	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
-	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
-	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
-	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
+	rsm_set_seg(vcpu, VCPU_SREG_LDTR,
+		    GET_SMSTATE(u32, smbase, 0x7fc0),
+		    GET_SMSTATE(u32, smbase, 0x7f80),
+		    GET_SMSTATE(u32, smbase, 0x7f7c),
+		    GET_SMSTATE(u32, smbase, 0x7f78));
 
 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
@@ -7712,7 +7723,7 @@  static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	kvm_x86_ops->set_idt(vcpu, &dt);
 
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_32(ctxt, smbase, i);
+		int r = rsm_load_seg_32(vcpu, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
@@ -7727,11 +7738,8 @@  static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	struct desc_struct desc;
 	struct desc_ptr dt;
 	u64 val, cr0, cr3, cr4;
-	u32 base3;
-	u16 selector;
 	int i, r;
 
 	for (i = 0; i < 16; i++)
@@ -7752,23 +7760,23 @@  static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
 	emulator_set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
-	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
-	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
-	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
-	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
+	rsm_set_seg_64(vcpu, VCPU_SREG_TR,
+		       GET_SMSTATE(u32, smbase, 0x7e90),
+		       GET_SMSTATE(u32, smbase, 0x7e98),
+		       GET_SMSTATE(u32, smbase, 0x7e9c),
+		       GET_SMSTATE(u32, smbase, 0x7e94),
+		       GET_SMSTATE(u32, smbase, 0x7e92) << 8);
 
 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
 	kvm_x86_ops->set_idt(vcpu, &dt);
 
-	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
-	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
-	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
-	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
+	rsm_set_seg_64(vcpu, VCPU_SREG_LDTR,
+		       GET_SMSTATE(u32, smbase, 0x7e70),
+		       GET_SMSTATE(u32, smbase, 0x7e78),
+		       GET_SMSTATE(u32, smbase, 0x7e7c),
+		       GET_SMSTATE(u32, smbase, 0x7e74),
+		       GET_SMSTATE(u32, smbase, 0x7e72) << 8);
 
 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
@@ -7779,7 +7787,7 @@  static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 		return r;
 
 	for (i = 0; i < 6; i++) {
-		r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(vcpu, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
@@ -7801,7 +7809,7 @@  static int leave_smm(struct kvm_vcpu *vcpu)
 	 */
 	cr4 = kvm_read_cr4(vcpu);
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-		struct desc_struct cs_desc;
+		struct kvm_segment cs_seg;
 
 		/* Zero CR4.PCIDE before CR0.PG.  */
 		if (cr4 & X86_CR4_PCIDE) {
@@ -7810,10 +7818,11 @@  static int leave_smm(struct kvm_vcpu *vcpu)
 		}
 
 		/* A 32-bit code segment is required to clear EFER.LMA.  */
-		memset(&cs_desc, 0, sizeof(cs_desc));
-		cs_desc.type = 0xb;
-		cs_desc.s = cs_desc.g = cs_desc.p = 1;
-		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
+		memset(&cs_seg, 0, sizeof(cs_seg));
+		cs_seg.type = 0xb;
+		cs_seg.s = cs_seg.g = cs_seg.present = 1;
+		cs_seg.limit = 0xffffffff;
+		kvm_set_segment(vcpu, &cs_seg, VCPU_SREG_CS);
 	}
 
 	/* For the 64-bit case, this will clear EFER.LMA.  */
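
Editorial aside on the limit handling in rsm_set_seg() above:
kvm_segment.limit is byte-granular and the save area holds a full 32-bit
limit, whereas the old path squeezed that value through desc_struct's
20-bit limit field before emulator_set_segment() re-expanded it for G=1
segments.  The new `(var.limit << 12) | 0xfff` looks like it might
double-expand, but a u32 left shift by 12 discards exactly the top bits
that set_desc_limit() used to mask off, so the two paths compute
identical results.  A standalone check (hypothetical demo code, not part
of the patch):

/* Standalone demo: the G=1 expansion in rsm_set_seg() matches the old
 * set_desc_limit() + emulator_set_segment() round trip, because a u32
 * shift by 12 drops the same top bits the 20-bit field masked off.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t limit;

	for (limit = 0; limit <= 0xffffffffull; limit += 0x10001) {
		uint32_t l = (uint32_t)limit;
		uint32_t new_path = (l << 12) | 0xfff;             /* rsm_set_seg() */
		uint32_t old_path = ((l & 0xfffff) << 12) | 0xfff; /* via desc_struct */

		assert(new_path == old_path);
	}
	printf("G=1 limit expansion is behavior-preserving\n");
	return 0;
}

In particular, a flat 0xffffffff limit round-trips unchanged through
both paths, which is the case that matters on the SMM entry/exit path.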