
[v3,1/2] KVM: nVMX: enhance allocate/free_vpid to handle shadow vpid

Message ID BLU436-SMTP143573E1F6485E5653198E9805B0@phx.gbl (mailing list archive)
State New, archived

Commit Message

Wanpeng Li Sept. 16, 2015, 3:51 a.m. UTC
Enhance allocate/free_vpid to handle shadow vpid.

Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
---
 arch/x86/kvm/vmx.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

Comments

Jan Kiszka Sept. 16, 2015, 6:42 a.m. UTC | #1
On 2015-09-16 05:51, Wanpeng Li wrote:
> Enhance allocate/free_vpid to handle shadow vpid.
> 
> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> ---
>  arch/x86/kvm/vmx.c | 24 +++++++++++-------------
>  1 file changed, 11 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 9ff6a3f..4956081 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -4155,29 +4155,27 @@ static int alloc_identity_pagetable(struct kvm *kvm)
>  	return r;
>  }
>  
> -static void allocate_vpid(struct vcpu_vmx *vmx)
> +static int allocate_vpid(void)
>  {
> -	int vpid;
> +	int vpid = 0;

Initialization is now pointless with the current code.

>  
> -	vmx->vpid = 0;
>  	if (!enable_vpid)
> -		return;
> +		return 0;
>  	spin_lock(&vmx_vpid_lock);
>  	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
> -	if (vpid < VMX_NR_VPIDS) {
> -		vmx->vpid = vpid;
> +	if (vpid < VMX_NR_VPIDS)
>  		__set_bit(vpid, vmx_vpid_bitmap);
> -	}
>  	spin_unlock(&vmx_vpid_lock);
> +	return vpid;

You should return 0 also if vpid == VMX_NR_VPIDS.

>  }
>  
> -static void free_vpid(struct vcpu_vmx *vmx)
> +static void free_vpid(int vpid)
>  {
>  	if (!enable_vpid)

You could already test for vpid == 0 here...

>  		return;
>  	spin_lock(&vmx_vpid_lock);
> -	if (vmx->vpid != 0)
> -		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
> +	if (vpid != 0)

...then you could skip this.

> +		__clear_bit(vpid, vmx_vpid_bitmap);
>  	spin_unlock(&vmx_vpid_lock);
>  }
>  
> @@ -8482,7 +8480,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
>  
>  	if (enable_pml)
>  		vmx_disable_pml(vmx);
> -	free_vpid(vmx);
> +	free_vpid(vmx->vpid);
>  	leave_guest_mode(vcpu);
>  	vmx_load_vmcs01(vcpu);
>  	free_nested(vmx);
> @@ -8501,7 +8499,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>  	if (!vmx)
>  		return ERR_PTR(-ENOMEM);
>  
> -	allocate_vpid(vmx);
> +	vmx->vpid = allocate_vpid();
>  
>  	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
>  	if (err)
> @@ -8577,7 +8575,7 @@ free_msrs:
>  uninit_vcpu:
>  	kvm_vcpu_uninit(&vmx->vcpu);
>  free_vcpu:
> -	free_vpid(vmx);
> +	free_vpid(vmx->vpid);
>  	kmem_cache_free(kvm_vcpu_cache, vmx);
>  	return ERR_PTR(err);
>  }
> 

Yes, this is what I had in mind.

Jan
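
For reference, here is an untested sketch of how the two helpers might look with both of Jan's suggestions applied (returning 0 when the bitmap is exhausted, and bailing out of free_vpid early for vpid 0). It is only an illustration derived from the hunks above, not a posted revision:

static int allocate_vpid(void)
{
	int vpid;

	if (!enable_vpid)
		return 0;

	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS)
		__set_bit(vpid, vmx_vpid_bitmap);
	else
		vpid = 0;	/* bitmap exhausted, fall back to VPID 0 */
	spin_unlock(&vmx_vpid_lock);

	return vpid;
}

static void free_vpid(int vpid)
{
	/* VPID 0 is never handed out from the bitmap, so nothing to free */
	if (!enable_vpid || vpid == 0)
		return;

	spin_lock(&vmx_vpid_lock);
	__clear_bit(vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}
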
Wanpeng Li Sept. 16, 2015, 6:51 a.m. UTC | #2
On 9/16/15 2:42 PM, Jan Kiszka wrote:
> On 2015-09-16 05:51, Wanpeng Li wrote:
>> Enhance allocate/free_vpid to handle shadow vpid.
>>
>> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
>> ---
>>   arch/x86/kvm/vmx.c | 24 +++++++++++-------------
>>   1 file changed, 11 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 9ff6a3f..4956081 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -4155,29 +4155,27 @@ static int alloc_identity_pagetable(struct kvm *kvm)
>>   	return r;
>>   }
>>   
>> -static void allocate_vpid(struct vcpu_vmx *vmx)
>> +static int allocate_vpid(void)
>>   {
>> -	int vpid;
>> +	int vpid = 0;
> Initialization is now pointless with the current code.
>
>>   
>> -	vmx->vpid = 0;
>>   	if (!enable_vpid)
>> -		return;
>> +		return 0;
>>   	spin_lock(&vmx_vpid_lock);
>>   	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
>> -	if (vpid < VMX_NR_VPIDS) {
>> -		vmx->vpid = vpid;
>> +	if (vpid < VMX_NR_VPIDS)
>>   		__set_bit(vpid, vmx_vpid_bitmap);
>> -	}
>>   	spin_unlock(&vmx_vpid_lock);
>> +	return vpid;
> You should return 0 also if vpid == VMX_NR_VPIDS.

Agreed.

>
>>   }
>>   
>> -static void free_vpid(struct vcpu_vmx *vmx)
>> +static void free_vpid(int vpid)
>>   {
>>   	if (!enable_vpid)
> You could already test for vpid == 0 here...
>
>>   		return;
>>   	spin_lock(&vmx_vpid_lock);
>> -	if (vmx->vpid != 0)
>> -		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
>> +	if (vpid != 0)
> ...then you could skip this.

Agreed.

>
>> +		__clear_bit(vpid, vmx_vpid_bitmap);
>>   	spin_unlock(&vmx_vpid_lock);
>>   }
>>   
>> @@ -8482,7 +8480,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
>>   
>>   	if (enable_pml)
>>   		vmx_disable_pml(vmx);
>> -	free_vpid(vmx);
>> +	free_vpid(vmx->vpid);
>>   	leave_guest_mode(vcpu);
>>   	vmx_load_vmcs01(vcpu);
>>   	free_nested(vmx);
>> @@ -8501,7 +8499,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>>   	if (!vmx)
>>   		return ERR_PTR(-ENOMEM);
>>   
>> -	allocate_vpid(vmx);
>> +	vmx->vpid = allocate_vpid();
>>   
>>   	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
>>   	if (err)
>> @@ -8577,7 +8575,7 @@ free_msrs:
>>   uninit_vcpu:
>>   	kvm_vcpu_uninit(&vmx->vcpu);
>>   free_vcpu:
>> -	free_vpid(vmx);
>> +	free_vpid(vmx->vpid);
>>   	kmem_cache_free(kvm_vcpu_cache, vmx);
>>   	return ERR_PTR(err);
>>   }
>>
> Yes, this is what I had in mind.

Thanks for your review. :-)

Regards,
Wanpeng Li

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9ff6a3f..4956081 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4155,29 +4155,27 @@  static int alloc_identity_pagetable(struct kvm *kvm)
 	return r;
 }
 
-static void allocate_vpid(struct vcpu_vmx *vmx)
+static int allocate_vpid(void)
 {
-	int vpid;
+	int vpid = 0;
 
-	vmx->vpid = 0;
 	if (!enable_vpid)
-		return;
+		return 0;
 	spin_lock(&vmx_vpid_lock);
 	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
-	if (vpid < VMX_NR_VPIDS) {
-		vmx->vpid = vpid;
+	if (vpid < VMX_NR_VPIDS)
 		__set_bit(vpid, vmx_vpid_bitmap);
-	}
 	spin_unlock(&vmx_vpid_lock);
+	return vpid;
 }
 
-static void free_vpid(struct vcpu_vmx *vmx)
+static void free_vpid(int vpid)
 {
 	if (!enable_vpid)
 		return;
 	spin_lock(&vmx_vpid_lock);
-	if (vmx->vpid != 0)
-		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
+	if (vpid != 0)
+		__clear_bit(vpid, vmx_vpid_bitmap);
 	spin_unlock(&vmx_vpid_lock);
 }
 
@@ -8482,7 +8480,7 @@  static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 
 	if (enable_pml)
 		vmx_disable_pml(vmx);
-	free_vpid(vmx);
+	free_vpid(vmx->vpid);
 	leave_guest_mode(vcpu);
 	vmx_load_vmcs01(vcpu);
 	free_nested(vmx);
@@ -8501,7 +8499,7 @@  static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
 
-	allocate_vpid(vmx);
+	vmx->vpid = allocate_vpid();
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
@@ -8577,7 +8575,7 @@  free_msrs:
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
-	free_vpid(vmx);
+	free_vpid(vmx->vpid);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 	return ERR_PTR(err);
 }
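
The reason for returning the VPID from the allocator instead of writing vmx->vpid inside it is that the follow-up patch (2/2) can reuse the same helpers for the shadow VPID of nested guests. A hypothetical caller-side sketch follows; the nested.vpid02 field name is an assumption for illustration and is not part of this patch:

	/* hypothetical create path: a second VPID for the nested (shadow) context */
	vmx->vpid = allocate_vpid();
	vmx->nested.vpid02 = allocate_vpid();

	/* hypothetical teardown path: release both in reverse order */
	free_vpid(vmx->nested.vpid02);
	free_vpid(vmx->vpid);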