diff mbox

[PATCHv4,4/5] KVM: emulator: make linearize() callable from outside of emulator

Message ID 1342683653-32114-5-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Gleb Natapov July 19, 2012, 7:40 a.m. UTC
The function will be used outside of the emulator.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/x86/include/asm/kvm_emulate.h |   16 +++++
 arch/x86/kvm/emulate.c             |  114 ++++++++++++++++++++----------------
 2 files changed, 79 insertions(+), 51 deletions(-)

Comments

Avi Kivity July 19, 2012, 10:32 a.m. UTC | #1
On 07/19/2012 10:40 AM, Gleb Natapov wrote:
> The function will be used outside of the emulator.
> 
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> ---
>  arch/x86/include/asm/kvm_emulate.h |   16 +++++
>  arch/x86/kvm/emulate.c             |  114 ++++++++++++++++++++----------------
>  2 files changed, 79 insertions(+), 51 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> index 7c276ca..9ce651b 100644
> --- a/arch/x86/include/asm/kvm_emulate.h
> +++ b/arch/x86/include/asm/kvm_emulate.h
> @@ -374,6 +374,21 @@ enum x86_intercept {
>  	nr_x86_intercepts
>  };
>  
> +struct x86_linearize_params
> +{
> +	enum x86emul_mode mode;
> +	ulong ea;
> +	unsigned size;
> +	unsigned seg;
> +	struct desc_struct desc;
> +	u16 sel;
> +	bool usable;
> +	bool write;
> +	bool fetch;
> +	u8 ad_bytes;
> +	unsigned cpl;
> +};
> +
> @@ -470,14 +470,6 @@ static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
>  	ctxt->seg_override = seg;
>  }
>  
> -static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
> -{
> -	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
> -		return 0;
> -
> -	return ctxt->ops->get_cached_segment_base(ctxt, seg);
> -}
> -

> +int x86_linearize(struct x86_linearize_params *p, ulong *linear)
>  {
> -	struct desc_struct desc;
> -	bool usable;
>  	ulong la;
>  	u32 lim;
> -	u16 sel;
>  	unsigned cpl, rpl;
>  
> -	la = seg_base(ctxt, addr.seg) + addr.ea;
> -	switch (ctxt->mode) {
> +	la = get_desc_base(&p->desc) + p->ea;

This makes 64-bit mode slower, since before the patch it avoided reading
the segment base for non-fs/gs segments, and only read the segment base
for fs/gs.  After the patch we always execute 4 VMREADs (and decode the
results).
Gleb Natapov July 19, 2012, 10:51 a.m. UTC | #2
On Thu, Jul 19, 2012 at 01:32:59PM +0300, Avi Kivity wrote:
> On 07/19/2012 10:40 AM, Gleb Natapov wrote:
> > The function will be used outside of the emulator.
> > 
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> >  arch/x86/include/asm/kvm_emulate.h |   16 +++++
> >  arch/x86/kvm/emulate.c             |  114 ++++++++++++++++++++----------------
> >  2 files changed, 79 insertions(+), 51 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> > index 7c276ca..9ce651b 100644
> > --- a/arch/x86/include/asm/kvm_emulate.h
> > +++ b/arch/x86/include/asm/kvm_emulate.h
> > @@ -374,6 +374,21 @@ enum x86_intercept {
> >  	nr_x86_intercepts
> >  };
> >  
> > +struct x86_linearize_params
> > +{
> > +	enum x86emul_mode mode;
> > +	ulong ea;
> > +	unsigned size;
> > +	unsigned seg;
> > +	struct desc_struct desc;
> > +	u16 sel;
> > +	bool usable;
> > +	bool write;
> > +	bool fetch;
> > +	u8 ad_bytes;
> > +	unsigned cpl;
> > +};
> > +
> > @@ -470,14 +470,6 @@ static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
> >  	ctxt->seg_override = seg;
> >  }
> >  
> > -static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
> > -{
> > -	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
> > -		return 0;
> > -
> > -	return ctxt->ops->get_cached_segment_base(ctxt, seg);
> > -}
> > -
> 
> > +int x86_linearize(struct x86_linearize_params *p, ulong *linear)
> >  {
> > -	struct desc_struct desc;
> > -	bool usable;
> >  	ulong la;
> >  	u32 lim;
> > -	u16 sel;
> >  	unsigned cpl, rpl;
> >  
> > -	la = seg_base(ctxt, addr.seg) + addr.ea;
> > -	switch (ctxt->mode) {
> > +	la = get_desc_base(&p->desc) + p->ea;
> 
> This makes 64-bit mode slower, since before the patch it avoided reading
> the segment base for non-fs/gs segments, and only read the segment base
> for fs/gs.  After the patch we always execute 4 VMREADs (and decode the
> results).
> 
That's easy to fix by making the caller prepare a fake desc if the mode is 64-bit
and the segment is non-fs/gs. The question is if this is even measurable?

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity July 19, 2012, 12:52 p.m. UTC | #3
On 07/19/2012 01:51 PM, Gleb Natapov wrote:

>> > +int x86_linearize(struct x86_linearize_params *p, ulong *linear)
>> >  {
>> > -	struct desc_struct desc;
>> > -	bool usable;
>> >  	ulong la;
>> >  	u32 lim;
>> > -	u16 sel;
>> >  	unsigned cpl, rpl;
>> >  
>> > -	la = seg_base(ctxt, addr.seg) + addr.ea;
>> > -	switch (ctxt->mode) {
>> > +	la = get_desc_base(&p->desc) + p->ea;
>> 
>> This makes 64-bit mode slower, since before the patch it avoided reading
>> the segment base for non-fs/gs segments, and only read the segment base
>> for fs/gs.  After the patch we always execute 4 VMREADs (and decode the
>> results).
>> 
> That's easy to fix by making the caller prepare a fake desc if the mode is 64-bit
> and the segment is non-fs/gs. The question is if this is even measurable?

I'm sure it will be measurable, esp. on older processors.  Why not
measure it?
Gleb Natapov July 19, 2012, 12:54 p.m. UTC | #4
On Thu, Jul 19, 2012 at 03:52:15PM +0300, Avi Kivity wrote:
> On 07/19/2012 01:51 PM, Gleb Natapov wrote:
> 
> >> > +int x86_linearize(struct x86_linearize_params *p, ulong *linear)
> >> >  {
> >> > -	struct desc_struct desc;
> >> > -	bool usable;
> >> >  	ulong la;
> >> >  	u32 lim;
> >> > -	u16 sel;
> >> >  	unsigned cpl, rpl;
> >> >  
> >> > -	la = seg_base(ctxt, addr.seg) + addr.ea;
> >> > -	switch (ctxt->mode) {
> >> > +	la = get_desc_base(&p->desc) + p->ea;
> >> 
> >> This makes 64-bit mode slower, since before the patch it avoided reading
> >> the segment base for non-fs/gs segments, and only read the segment base
> >> for fs/gs.  After the patch we always execute 4 VMREADs (and decode the
> >> results).
> >> 
> > That's easy to fix by making the caller prepare a fake desc if the mode is 64-bit
> > and the segment is non-fs/gs. The question is if this is even measurable?
> 
> I'm sure it will be measurable, esp. on older processors.  Why not
> measure it?
> 
> 
It is easier to just fix it :) Will do and resend if you agree with the
general approach.

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c276ca..9ce651b 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -374,6 +374,21 @@  enum x86_intercept {
 	nr_x86_intercepts
 };
 
+struct x86_linearize_params
+{
+	enum x86emul_mode mode;
+	ulong ea;
+	unsigned size;
+	unsigned seg;
+	struct desc_struct desc;
+	u16 sel;
+	bool usable;
+	bool write;
+	bool fetch;
+	u8 ad_bytes;
+	unsigned cpl;
+};
+
 /* Host execution mode. */
 #if defined(CONFIG_X86_32)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
@@ -392,4 +407,5 @@  int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 			 u16 tss_selector, int idt_index, int reason,
 			 bool has_error_code, u32 error_code);
 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
+int x86_linearize(struct x86_linearize_params *p, ulong *linear);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e317588..79368d2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -470,14 +470,6 @@  static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
 	ctxt->seg_override = seg;
 }
 
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
-{
-	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
-		return 0;
-
-	return ctxt->ops->get_cached_segment_base(ctxt, seg);
-}
-
 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
 {
 	if (!ctxt->has_seg_override)
@@ -505,11 +497,6 @@  static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
-{
-	return emulate_exception(ctxt, SS_VECTOR, err, true);
-}
-
 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
@@ -573,79 +560,104 @@  static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 		return true;
 }
 
-static int __linearize(struct x86_emulate_ctxt *ctxt,
-		     struct segmented_address addr,
-		     unsigned size, bool write, bool fetch,
-		     ulong *linear)
+int x86_linearize(struct x86_linearize_params *p, ulong *linear)
 {
-	struct desc_struct desc;
-	bool usable;
 	ulong la;
 	u32 lim;
-	u16 sel;
 	unsigned cpl, rpl;
 
-	la = seg_base(ctxt, addr.seg) + addr.ea;
-	switch (ctxt->mode) {
+	la = get_desc_base(&p->desc) + p->ea;
+	switch (p->mode) {
 	case X86EMUL_MODE_REAL:
 		break;
 	case X86EMUL_MODE_PROT64:
-		if (((signed long)la << 16) >> 16 != la)
-			return emulate_gp(ctxt, 0);
+		if (((signed long)la << 16) >> 16 != la) {
+			*linear = 0;
+			return GP_VECTOR;
+		}
 		break;
 	default:
-		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
-						addr.seg);
-		if (!usable)
+		if (!p->usable)
 			goto bad;
 		/* code segment or read-only data segment */
-		if (((desc.type & 8) || !(desc.type & 2)) && write)
+		if (((p->desc.type & 8) || !(p->desc.type & 2)) && p->write)
 			goto bad;
 		/* unreadable code segment */
-		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+		if (!p->fetch && (p->desc.type & 8) && !(p->desc.type & 2))
 			goto bad;
-		lim = desc_limit_scaled(&desc);
-		if ((desc.type & 8) || !(desc.type & 4)) {
+		lim = desc_limit_scaled(&p->desc);
+		if ((p->desc.type & 8) || !(p->desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (p->ea > lim || (u32)(p->ea + p->size - 1) > lim)
 				goto bad;
 		} else {
 			/* exapand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (p->ea <= lim || (u32)(p->ea + p->size - 1) <= lim)
 				goto bad;
-			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			lim = p->desc.d ? 0xffffffff : 0xffff;
+			if (p->ea > lim || (u32)(p->ea + p->size - 1) > lim)
 				goto bad;
 		}
-		cpl = ctxt->ops->cpl(ctxt);
-		rpl = sel & 3;
-		cpl = max(cpl, rpl);
-		if (!(desc.type & 8)) {
+		rpl = p->sel & 3;
+		cpl = max(p->cpl, rpl);
+		if (!(p->desc.type & 8)) {
 			/* data segment */
-			if (cpl > desc.dpl)
+			if (cpl > p->desc.dpl)
 				goto bad;
-		} else if ((desc.type & 8) && !(desc.type & 4)) {
+		} else if ((p->desc.type & 8) && !(p->desc.type & 4)) {
 			/* nonconforming code segment */
-			if (cpl != desc.dpl)
+			if (cpl != p->desc.dpl)
 				goto bad;
-		} else if ((desc.type & 8) && (desc.type & 4)) {
+		} else if ((p->desc.type & 8) && (p->desc.type & 4)) {
 			/* conforming code segment */
-			if (cpl < desc.dpl)
+			if (cpl < p->desc.dpl)
 				goto bad;
 		}
 		break;
 	}
-	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
+	if (p->fetch ? p->mode != X86EMUL_MODE_PROT64 : p->ad_bytes != 8)
 		la &= (u32)-1;
-	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
-		return emulate_gp(ctxt, 0);
 	*linear = la;
-	return X86EMUL_CONTINUE;
+	return -1;
 bad:
-	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, addr.seg);
+	*linear = (ulong)p->seg;
+	if (p->seg == VCPU_SREG_SS)
+		return SS_VECTOR;
 	else
-		return emulate_gp(ctxt, addr.seg);
+		return GP_VECTOR;
+}
+
+static int __linearize(struct x86_emulate_ctxt *ctxt,
+		     struct segmented_address addr,
+		     unsigned size, bool write, bool fetch,
+		     ulong *linear)
+{
+	int err;
+
+	struct x86_linearize_params param = {
+		.mode = ctxt->mode,
+		.ea = addr.ea,
+		.size = size,
+		.seg = addr.seg,
+		.write = write,
+		.fetch = fetch,
+		.ad_bytes = ctxt->ad_bytes,
+		.cpl = ctxt->ops->cpl(ctxt)
+	};
+
+	param.usable = ctxt->ops->get_segment(ctxt, &param.sel, &param.desc,
+			NULL, addr.seg);
+
+
+	err = x86_linearize(&param, linear);
+
+	if (err >= 0)
+		return emulate_exception(ctxt, err, (int)*linear, true);
+
+	if (insn_aligned(ctxt, size) && ((*linear & (size - 1)) != 0))
+		return emulate_gp(ctxt, 0);
+
+	return X86EMUL_CONTINUE;
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,