diff mbox

[v2] KVM: nVMX: Set segment information of L1 when L2 exits

Message ID 1373542285-5799-1-git-send-email-yzt356@gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Arthur Chunqi Li July 11, 2013, 11:31 a.m. UTC
When L2 exits to L1, segment information of L1 is not set correctly.
According to Intel SDM 27.5.2 (Loading Host Segment and Descriptor
Table Registers), the segment base/limit/access rights of L1 should be
set to the values specified there when L2 exits to L1. This patch fixes
this.

Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
---
 arch/x86/kvm/vmx.c |   43 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 5 deletions(-)

Comments

Gleb Natapov July 11, 2013, 12:25 p.m. UTC | #1
On Thu, Jul 11, 2013 at 07:31:25PM +0800, Arthur Chunqi Li wrote:
> When L2 exits to L1, segment infomations of L1 are not set correctly.
> According to Intel SDM 27.5.2(Loading Host Segment and Descriptor
> Table Registers), segment base/limit/access right of L1 should be
> set to some designed value when L2 exits to L1. This patch fixes
> this.
> 
> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
> ---
>  arch/x86/kvm/vmx.c |   43 ++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 38 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index a7e1855..84acb16 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -7941,6 +7941,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>  				   struct vmcs12 *vmcs12)
>  {
> +	u32 cs_ar_bytes, ds_ar_bytes, tr_ar_bytes;
Empty line please.

>  	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
>  		vcpu->arch.efer = vmcs12->host_ia32_efer;
>  	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
> @@ -8005,11 +8006,43 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>  	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
>  	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
>  
> -	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
> -		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
> -	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
> -		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
> -			vmcs12->host_ia32_perf_global_ctrl);
> +	/* Set L1 segment info according to Intel SDM
> +	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
> +	vmcs_writel(GUEST_CS_BASE, 0x0);
> +	vmcs_writel(GUEST_SS_BASE, 0x0);
> +	vmcs_writel(GUEST_DS_BASE, 0x0);
> +	vmcs_writel(GUEST_ES_BASE, 0x0);
> +	vmcs_write32(GUEST_CS_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_SS_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_DS_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_ES_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_FS_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_GS_LIMIT, 0xFFFFFFFF);
> +	vmcs_write32(GUEST_TR_LIMIT, 0x67);
> +
> +	cs_ar_bytes = 0;
> +	cs_ar_bytes |= AR_TYPE_CODE_MASK | AR_TYPE_READABLE_MASK
> +		| AR_TYPE_ACCESSES_MASK;
> +	cs_ar_bytes |= AR_S_MASK | AR_P_MASK | AR_G_MASK;
Why split it on several lines like that?

> +	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
> +		cs_ar_bytes |= AR_L_MASK;
> +	else
> +		cs_ar_bytes |= AR_DB_MASK;
> +	vmcs_write32(GUEST_CS_AR_BYTES, cs_ar_bytes);
> +	ds_ar_bytes = 0;
> +	ds_ar_bytes |= AR_TYPE_READABLE_MASK | AR_TYPE_ACCESSES_MASK;
> +	ds_ar_bytes |= AR_S_MASK | AR_P_MASK | AR_DB_MASK | AR_G_MASK;
> +	vmcs_write32(GUEST_ES_AR_BYTES, ds_ar_bytes);
> +	vmcs_write32(GUEST_SS_AR_BYTES, ds_ar_bytes);
> +	vmcs_write32(GUEST_DS_AR_BYTES, ds_ar_bytes);
> +	vmcs_write32(GUEST_FS_AR_BYTES, ds_ar_bytes);
> +	vmcs_write32(GUEST_GS_AR_BYTES, ds_ar_bytes);
> +	tr_ar_bytes = 0;
> +	tr_ar_bytes |= AR_TYPE_CODE_MASK | AR_TYPE_READABLE_MASK
> +		| AR_TYPE_ACCESSES_MASK;
> +	tr_ar_bytes |= AR_P_MASK;
> +	vmcs_write32(GUEST_TR_AR_BYTES, tr_ar_bytes);
> +	vmcs_writel(GUEST_RFLAGS, 0x2);
This does not look nice. Let's use vmx_set_segment() to set segments.

>  
>  	kvm_set_dr(vcpu, 7, 0x400);
>  	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
> -- 
> 1.7.9.5

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a7e1855..84acb16 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7941,6 +7941,7 @@  static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 				   struct vmcs12 *vmcs12)
 {
+	u32 cs_ar_bytes, ds_ar_bytes, tr_ar_bytes;
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->host_ia32_efer;
 	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
@@ -8005,11 +8006,43 @@  static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
 	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
 
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
-		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
-		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
-			vmcs12->host_ia32_perf_global_ctrl);
+	/* Set L1 segment info according to Intel SDM
+	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
+	vmcs_writel(GUEST_CS_BASE, 0x0);
+	vmcs_writel(GUEST_SS_BASE, 0x0);
+	vmcs_writel(GUEST_DS_BASE, 0x0);
+	vmcs_writel(GUEST_ES_BASE, 0x0);
+	vmcs_write32(GUEST_CS_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_SS_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_DS_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_ES_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_FS_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_GS_LIMIT, 0xFFFFFFFF);
+	vmcs_write32(GUEST_TR_LIMIT, 0x67);
+
+	cs_ar_bytes = 0;
+	cs_ar_bytes |= AR_TYPE_CODE_MASK | AR_TYPE_READABLE_MASK
+		| AR_TYPE_ACCESSES_MASK;
+	cs_ar_bytes |= AR_S_MASK | AR_P_MASK | AR_G_MASK;
+	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+		cs_ar_bytes |= AR_L_MASK;
+	else
+		cs_ar_bytes |= AR_DB_MASK;
+	vmcs_write32(GUEST_CS_AR_BYTES, cs_ar_bytes);
+	ds_ar_bytes = 0;
+	ds_ar_bytes |= AR_TYPE_READABLE_MASK | AR_TYPE_ACCESSES_MASK;
+	ds_ar_bytes |= AR_S_MASK | AR_P_MASK | AR_DB_MASK | AR_G_MASK;
+	vmcs_write32(GUEST_ES_AR_BYTES, ds_ar_bytes);
+	vmcs_write32(GUEST_SS_AR_BYTES, ds_ar_bytes);
+	vmcs_write32(GUEST_DS_AR_BYTES, ds_ar_bytes);
+	vmcs_write32(GUEST_FS_AR_BYTES, ds_ar_bytes);
+	vmcs_write32(GUEST_GS_AR_BYTES, ds_ar_bytes);
+	tr_ar_bytes = 0;
+	tr_ar_bytes |= AR_TYPE_CODE_MASK | AR_TYPE_READABLE_MASK
+		| AR_TYPE_ACCESSES_MASK;
+	tr_ar_bytes |= AR_P_MASK;
+	vmcs_write32(GUEST_TR_AR_BYTES, tr_ar_bytes);
+	vmcs_writel(GUEST_RFLAGS, 0x2);
 
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);