x86 emulator: Add 'push/pop sreg' instructions

Message ID: 1250655104-437-1-git-send-email-m.gamal005@gmail.com
State: New, archived

Commit Message

Mohammed Gamal Aug. 19, 2009, 4:11 a.m. UTC
Signed-off-by: Mohammed Gamal <m.gamal005@gmail.com>
---
 arch/x86/kvm/emulate.c |   99 +++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 93 insertions(+), 6 deletions(-)

Comments

Avi Kivity Aug. 19, 2009, 8:32 a.m. UTC | #1
On 08/19/2009 07:11 AM, Mohammed Gamal wrote:
>
> +static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
> +{
> +	struct decode_cache *c = &ctxt->decode;
> +	struct kvm_segment segment;
> +
> +	if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
> +						   seg != VCPU_SREG_GS)) {
> +		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
> +		return;
> +	}
>    

It's better to check at the callsite, in case the opcode is ever reused 
for a new instruction.  Or even better, add a new decode flag No64 so we 
can do this during the decode stage.
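
For concreteness, a minimal sketch of the No64 idea, assuming a spare
bit in the opcode_table flags (the bit value and the exact hook point
in x86_decode_insn() are illustrative, not part of this patch):

	#define No64	(1<<15)	/* hypothetical flag: opcode #UDs in long mode */

	/* the opcode_table entries for 0x06/0x07 would then read: */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,

	/* and x86_decode_insn() could reject them up front, once c->d is known: */
	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return -1;
	}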

> +
> +static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
> +			     struct x86_emulate_ops *ops, int seg)
> +{
> +	struct decode_cache *c = &ctxt->decode;
> +	struct kvm_segment segment;
> +	int rc;
> +
> +	if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
> +						    seg != VCPU_SREG_GS)) {
> +		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
> +		return -1;
> +	}
> +
> +	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
> +	rc = emulate_pop(ctxt, ops, &segment.selector, c->op_bytes);
> +	if (rc != 0)
> +		return rc;
> +
> +	rc = kvm_load_segment_descriptor(ctxt->vcpu, segment.selector, 1, seg);
> +	return rc;
> +}
>    

Why do the ->get_segment() at all?  pop into a temporary variable, and 
call kvm_load_segment_descriptor() with that.
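
Something like this minimal sketch, assuming the selector is popped
into a plain local (error handling mirrors the surrounding emulate.c
code; the u16 cast is illustrative):

	static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops, int seg)
	{
		struct decode_cache *c = &ctxt->decode;
		unsigned long selector;
		int rc;

		/* pop straight into a temporary; no ->get_segment() needed */
		rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
		if (rc != 0)
			return rc;

		return kvm_load_segment_descriptor(ctxt->vcpu,
						   (u16)selector, 1, seg);
	}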
Mohammed Gamal Aug. 19, 2009, 12:52 p.m. UTC | #2
On Wed, Aug 19, 2009 at 11:32 AM, Avi Kivity <avi@redhat.com> wrote:
> On 08/19/2009 07:11 AM, Mohammed Gamal wrote:
>>
>> +static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
>> +{
>> +       struct decode_cache *c = &ctxt->decode;
>> +       struct kvm_segment segment;
>> +
>> +       if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
>> +                                                  seg != VCPU_SREG_GS)) {
>> +               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
>> +               return;
>> +       }
>>
>
> It's better to check at the callsite, in case the opcode is ever reused for
> a new instruction.  Or even better, add a new decode flag No64 so we can do
> this during the decode stage.

Good idea, but I believe it'd be better to introduce it in a separate
patch so that we can update all instructions incompatible with long
mode in one go. I'll move the checks to the call site for the time
being.
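
As a sketch, the interim call-site check might look like this in the
x86_emulate_insn() switch, repeated for each push/pop sreg case (the
error path here is schematic):

	case 0x06:		/* push es */
		if (ctxt->mode == X86EMUL_MODE_PROT64) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulate_push_sreg(ctxt, VCPU_SREG_ES);
		break;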

>
>> +
>> +static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
>> +                            struct x86_emulate_ops *ops, int seg)
>> +{
>> +       struct decode_cache *c = &ctxt->decode;
>> +       struct kvm_segment segment;
>> +       int rc;
>> +
>> +       if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
>> +                                                   seg != VCPU_SREG_GS)) {
>> +               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
>> +               return -1;
>> +       }
>> +
>> +       kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
>> +       rc = emulate_pop(ctxt, ops, &segment.selector, c->op_bytes);
>> +       if (rc != 0)
>> +               return rc;
>> +
>> +       rc = kvm_load_segment_descriptor(ctxt->vcpu, segment.selector, 1, seg);
>> +       return rc;
>> +}
>>
>
> Why do the ->get_segment() at all?  pop into a temporary variable, and call
> kvm_load_segment_descriptor() with that.
Avi Kivity Aug. 19, 2009, 1:03 p.m. UTC | #3
On 08/19/2009 03:52 PM, Mohammed Gamal wrote:
>
>>> +static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
>>> +{
>>> +       struct decode_cache *c = &ctxt->decode;
>>> +       struct kvm_segment segment;
>>> +
>>> +       if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
>>> +                                                  seg != VCPU_SREG_GS)) {
>>> +               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
>>> +               return;
>>> +       }
>>>
>>>        
>> It's better to check at the callsite, in case the opcode is ever reused for
>> a new instruction.  Or even better, add a new decode flag No64 so we can do
>> this during the decode stage.
>>      
> Good idea, but I believe it'd be better to introduce it in a separate
> patch so that we can update all instructions incompatible with long
> mode in one go. I'll move the checks to the call site for the time
> being.
>    

Sure.  Or do the No64 patch first.

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1be5cd6..3d9fb44 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -92,19 +92,22 @@  static u32 opcode_table[256] = {
 	/* 0x00 - 0x07 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack, ImplicitOps | Stack,
 	/* 0x08 - 0x0F */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	0, 0, 0, 0,
+	0, 0, ImplicitOps | Stack, 0,
 	/* 0x10 - 0x17 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack, ImplicitOps | Stack,
 	/* 0x18 - 0x1F */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack, ImplicitOps | Stack,
 	/* 0x20 - 0x27 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
@@ -244,11 +247,13 @@  static u32 twobyte_table[256] = {
 	/* 0x90 - 0x9F */
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xA0 - 0xA7 */
-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp,
+	ImplicitOps | Stack, ImplicitOps | Stack,
+	0, DstMem | SrcReg | ModRM | BitOp,
 	DstMem | SrcReg | Src2ImmByte | ModRM,
 	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
 	/* 0xA8 - 0xAF */
-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp,
+	ImplicitOps | Stack, ImplicitOps | Stack,
+	0, DstMem | SrcReg | ModRM | BitOp,
 	DstMem | SrcReg | Src2ImmByte | ModRM,
 	DstMem | SrcReg | Src2CL | ModRM,
 	ModRM, 0,
@@ -1186,6 +1191,45 @@  static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
+static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
+{
+	struct decode_cache *c = &ctxt->decode;
+	struct kvm_segment segment;
+
+	if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
+						   seg != VCPU_SREG_GS)) {
+		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		return;
+	}
+
+	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
+
+	c->src.val = segment.selector;
+	emulate_push(ctxt);
+}
+
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops, int seg)
+{
+	struct decode_cache *c = &ctxt->decode;
+	struct kvm_segment segment;
+	int rc;
+
+	if (ctxt->mode == X86EMUL_MODE_PROT64 && (seg != VCPU_SREG_FS &&
+						    seg != VCPU_SREG_GS)) {
+		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		return -1;
+	}
+
+	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
+	rc = emulate_pop(ctxt, ops, &segment.selector, c->op_bytes);
+	if (rc != 0)
+		return rc;
+
+	rc = kvm_load_segment_descriptor(ctxt->vcpu, segment.selector, 1, seg);
+	return rc;
+}
+
 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
 				struct x86_emulate_ops *ops)
 {
@@ -1707,18 +1751,45 @@  special_insn:
 	      add:		/* add */
 		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x06:		/* push es */
+		emulate_push_sreg(ctxt, VCPU_SREG_ES);
+		break;
+	case 0x07:		/* pop es */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x08 ... 0x0d:
 	      or:		/* or */
 		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x0e:		/* push cs */
+		emulate_push_sreg(ctxt, VCPU_SREG_CS);
+		break;
 	case 0x10 ... 0x15:
 	      adc:		/* adc */
 		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x16:		/* push ss */
+		emulate_push_sreg(ctxt, VCPU_SREG_SS);
+		break;
+	case 0x17:		/* pop ss */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x18 ... 0x1d:
 	      sbb:		/* sbb */
 		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x1e:		/* push ds */
+		emulate_push_sreg(ctxt, VCPU_SREG_DS);
+		break;
+	case 0x1f:		/* pop ds */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x20 ... 0x25:
 	      and:		/* and */
 		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
@@ -2297,6 +2368,14 @@  twobyte_insn:
 			jmp_rel(c, c->src.val);
 		c->dst.type = OP_NONE;
 		break;
+	case 0xa0:	/* push fs */
+		emulate_push_sreg(ctxt, VCPU_SREG_FS);
+		break;
+	case 0xa1:	/* pop fs */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0xa3:
 	      bt:		/* bt */
 		c->dst.type = OP_NONE;
@@ -2308,6 +2387,14 @@  twobyte_insn:
 	case 0xa5: /* shld cl, r, r/m */
 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
 		break;
+	case 0xa8:	/* push gs */
+		emulate_push_sreg(ctxt, VCPU_SREG_GS);
+		break;
+	case 0xa9:	/* pop gs */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0xab:
 	      bts:		/* bts */
 		/* only subword offset */