diff mbox

[2/3] KVM: nVMX: additional checks on vmxon region

Message ID 1398661204-4822-3-git-send-email-bsd@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Bandan Das April 28, 2014, 5 a.m. UTC
Currently, the vmxon region isn't used in the nested case.
However, according to the spec, the vmxon instruction performs
additional sanity checks on this region and the associated
pointer. Modify emulated vmxon to better adhere to the spec
requirements.

Signed-off-by: Bandan Das <bsd@redhat.com>
---
 arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

Comments

Jan Kiszka April 28, 2014, 11:30 a.m. UTC | #1
On 2014-04-28 07:00, Bandan Das wrote:
> Currently, the vmxon region isn't used in the nested case.
> However, according to the spec, the vmxon instruction performs
> additional sanity checks on this region and the associated
> pointer. Modify emulated vmxon to better adhere to the spec
> requirements
> 
> Signed-off-by: Bandan Das <bsd@redhat.com>
> ---
>  arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 53 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c18fe9a4..d5342c7 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -354,6 +354,7 @@ struct vmcs02_list {
>  struct nested_vmx {
>  	/* Has the level1 guest done vmxon? */
>  	bool vmxon;
> +	gpa_t vmxon_ptr;
>  
>  	/* The guest-physical address of the current VMCS L1 keeps for L2 */
>  	gpa_t current_vmptr;
> @@ -5840,9 +5841,19 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	struct kvm_segment cs;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	struct vmcs *shadow_vmcs;
> +	gva_t gva;
> +	gpa_t vmptr;
> +	struct x86_exception e;
> +	struct page *page;
> +
>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
>  
> +	/* Guest physical Address Width */
> +	struct kvm_cpuid_entry2 *best =
> +		kvm_find_cpuid_entry(vcpu, 0x80000008, 0);

We have cpuid_maxphyaddr().

> +
> +
>  	/* The Intel VMX Instruction Reference lists a bunch of bits that
>  	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
>  	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
> @@ -5865,6 +5876,46 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  		kvm_inject_gp(vcpu, 0);
>  		return 1;
>  	}
> +
> +	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
> +				vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
> +		return 1;
> +
> +	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
> +				sizeof(vmptr), &e)) {
> +		kvm_inject_page_fault(vcpu, &e);
> +		return 1;
> +	}
> +
> +	/* Don't bailout if best is NULL */
> +	WARN_ON(!best);
> +
> +	/*
> +	 * SDM 3: 24.11.5
> +	 * VMXON pointer must be 4KB aligned
> +	 * VMXON pointer must not set any bits beyond processor's
> +	 * physical address width
> +	 * The first 4 bytes of VMXON region contain the supported
> +	 * VMCS revision identifier
> +	 *
> +	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
> +	 * which replaces physical address width with 32
> +	 */
> +	if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (best &&
> +					      (vmptr >> (best->eax & 0xff)))) {
> +		nested_vmx_failInvalid(vcpu);
> +		skip_emulated_instruction(vcpu);
> +		return 1;
> +	}
> +
> +	page = nested_get_page(vcpu, vmptr);
> +	if ((page == NULL) || ((*(u32 *)kmap(page) != VMCS12_REVISION))) {

Style: you don't need braces around the comparisons.

> +		nested_vmx_failInvalid(vcpu);
> +		kunmap(page);
> +		skip_emulated_instruction(vcpu);
> +		return 1;
> +	}
> +
>  	if (vmx->nested.vmxon) {
>  		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
>  		skip_emulated_instruction(vcpu);
> @@ -5896,9 +5947,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
>  
>  	vmx->nested.vmxon = true;
> +	vmx->nested.vmxon_ptr = vmptr;
>  
>  	skip_emulated_instruction(vcpu);
>  	nested_vmx_succeed(vcpu);
> +	kunmap(page);

This late unmapping leaks the page in other error cases.

Jan

>  	return 1;
>  }
>  
>
Bandan Das April 29, 2014, 4:54 p.m. UTC | #2
Jan Kiszka <jan.kiszka@siemens.com> writes:

> On 2014-04-28 07:00, Bandan Das wrote:
>> Currently, the vmxon region isn't used in the nested case.
>> However, according to the spec, the vmxon instruction performs
>> additional sanity checks on this region and the associated
>> pointer. Modify emulated vmxon to better adhere to the spec
>> requirements
>> 
>> Signed-off-by: Bandan Das <bsd@redhat.com>
>> ---
>>  arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 53 insertions(+)
>> 
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c18fe9a4..d5342c7 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -354,6 +354,7 @@ struct vmcs02_list {
>>  struct nested_vmx {
>>  	/* Has the level1 guest done vmxon? */
>>  	bool vmxon;
>> +	gpa_t vmxon_ptr;
>>  
>>  	/* The guest-physical address of the current VMCS L1 keeps for L2 */
>>  	gpa_t current_vmptr;
>> @@ -5840,9 +5841,19 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>  	struct kvm_segment cs;
>>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>>  	struct vmcs *shadow_vmcs;
>> +	gva_t gva;
>> +	gpa_t vmptr;
>> +	struct x86_exception e;
>> +	struct page *page;
>> +
>>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
>>  
>> +	/* Guest physical Address Width */
>> +	struct kvm_cpuid_entry2 *best =
>> +		kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
>
> We have cpuid_maxphyaddr().
>
>> +
>> +
>>  	/* The Intel VMX Instruction Reference lists a bunch of bits that
>>  	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
>>  	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
>> @@ -5865,6 +5876,46 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>  		kvm_inject_gp(vcpu, 0);
>>  		return 1;
>>  	}
>> +
>> +	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
>> +				vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
>> +		return 1;
>> +
>> +	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
>> +				sizeof(vmptr), &e)) {
>> +		kvm_inject_page_fault(vcpu, &e);
>> +		return 1;
>> +	}
>> +
>> +	/* Don't bailout if best is NULL */
>> +	WARN_ON(!best);
>> +
>> +	/*
>> +	 * SDM 3: 24.11.5
>> +	 * VMXON pointer must be 4KB aligned
>> +	 * VMXON pointer must not set any bits beyond processor's
>> +	 * physical address width
>> +	 * The first 4 bytes of VMXON region contain the supported
>> +	 * VMCS revision identifier
>> +	 *
>> +	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
>> +	 * which replaces physical address width with 32
>> +	 */
>> +	if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (best &&
>> +					      (vmptr >> (best->eax & 0xff)))) {
>> +		nested_vmx_failInvalid(vcpu);
>> +		skip_emulated_instruction(vcpu);
>> +		return 1;
>> +	}
>> +
>> +	page = nested_get_page(vcpu, vmptr);
>> +	if ((page == NULL) || ((*(u32 *)kmap(page) != VMCS12_REVISION))) {
>
> Style: you don't need braces around the comparisons.
>
>> +		nested_vmx_failInvalid(vcpu);
>> +		kunmap(page);
>> +		skip_emulated_instruction(vcpu);
>> +		return 1;
>> +	}
>> +
>>  	if (vmx->nested.vmxon) {
>>  		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
>>  		skip_emulated_instruction(vcpu);
>> @@ -5896,9 +5947,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>  	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
>>  
>>  	vmx->nested.vmxon = true;
>> +	vmx->nested.vmxon_ptr = vmptr;
>>  
>>  	skip_emulated_instruction(vcpu);
>>  	nested_vmx_succeed(vcpu);
>> +	kunmap(page);
>
> This late unmapping leaks the page in other error cases.

Oops, sorry!

> Jan
>
>>  	return 1;
>>  }
>>  
>> 
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c18fe9a4..d5342c7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -354,6 +354,7 @@  struct vmcs02_list {
 struct nested_vmx {
 	/* Has the level1 guest done vmxon? */
 	bool vmxon;
+	gpa_t vmxon_ptr;
 
 	/* The guest-physical address of the current VMCS L1 keeps for L2 */
 	gpa_t current_vmptr;
@@ -5840,9 +5841,19 @@  static int handle_vmon(struct kvm_vcpu *vcpu)
 	struct kvm_segment cs;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs *shadow_vmcs;
+	gva_t gva;
+	gpa_t vmptr;
+	struct x86_exception e;
+	struct page *page;
+
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
+	/* Guest physical Address Width */
+	struct kvm_cpuid_entry2 *best =
+		kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+
+
 	/* The Intel VMX Instruction Reference lists a bunch of bits that
 	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
 	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
@@ -5865,6 +5876,46 @@  static int handle_vmon(struct kvm_vcpu *vcpu)
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
+
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+				vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+		return 1;
+
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+				sizeof(vmptr), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+
+	/* Don't bailout if best is NULL */
+	WARN_ON(!best);
+
+	/*
+	 * SDM 3: 24.11.5
+	 * VMXON pointer must be 4KB aligned
+	 * VMXON pointer must not set any bits beyond processor's
+	 * physical address width
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (best &&
+					      (vmptr >> (best->eax & 0xff)))) {
+		nested_vmx_failInvalid(vcpu);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if ((page == NULL) || ((*(u32 *)kmap(page) != VMCS12_REVISION))) {
+		nested_vmx_failInvalid(vcpu);
+		kunmap(page);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 		skip_emulated_instruction(vcpu);
@@ -5896,9 +5947,11 @@  static int handle_vmon(struct kvm_vcpu *vcpu)
 	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
 	vmx->nested.vmxon = true;
+	vmx->nested.vmxon_ptr = vmptr;
 
 	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
+	kunmap(page);
 	return 1;
 }