
KVM: x86: Drop unused return code from VCPU reset callback

Message ID 513EF358.3050909@siemens.com (mailing list archive)
State New, archived

Commit Message

Jan Kiszka March 12, 2013, 9:20 a.m. UTC
Neither vmx nor svm nor the common part may generate an error on
kvm_vcpu_reset. So drop the return code.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Will use this for refactoring INIT/SIPI handling.

 arch/x86/include/asm/kvm_host.h |    2 +-
 arch/x86/kvm/svm.c              |    4 +---
 arch/x86/kvm/vmx.c              |    7 +------
 arch/x86/kvm/x86.c              |   15 ++++++---------
 4 files changed, 9 insertions(+), 19 deletions(-)
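
For illustration, the caller-side effect of the change is shown in the minimal, standalone C sketch below. The struct and function names (vcpu_ops, backend_reset, and so on) are hypothetical and only mirror the shape of the kernel code, not the kernel types themselves: once the reset callback is declared void, the always-zero return value and the dead error checks around it disappear from the callers.

#include <stdio.h>

struct vcpu;

/* Table of backend callbacks, analogous in shape to kvm_x86_ops.  Before the
 * patch the reset hook returned int; since no backend can fail, it is now
 * typed void. */
struct vcpu_ops {
	void (*vcpu_reset)(struct vcpu *vcpu);
};

struct vcpu {
	const struct vcpu_ops *ops;
	int nmi_pending;
};

/* Stand-in for a backend reset (think svm_vcpu_reset/vmx_vcpu_reset): it can
 * only succeed, so it reports nothing. */
static void backend_reset(struct vcpu *vcpu)
{
	(void)vcpu;
	printf("backend reset done\n");
}

static const struct vcpu_ops example_ops = {
	.vcpu_reset = backend_reset,
};

/* Mirrors the post-patch kvm_vcpu_reset(): common state is cleared, then the
 * backend hook is invoked, with no return code to propagate. */
static void vcpu_reset(struct vcpu *vcpu)
{
	vcpu->nmi_pending = 0;
	vcpu->ops->vcpu_reset(vcpu);
}

int main(void)
{
	struct vcpu v = { .ops = &example_ops };

	vcpu_reset(&v);	/* previously: r = vcpu_reset(&v); if (r) ... */
	return 0;
}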

Comments

Paolo Bonzini March 12, 2013, 9:35 a.m. UTC | #1
Il 12/03/2013 10:20, Jan Kiszka ha scritto:
> Neither vmx nor svm nor the common part may generate an error on
> kvm_vcpu_reset. So drop the return code.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> Will use this for refactoring INIT/SIPI handling.
> 
>  arch/x86/include/asm/kvm_host.h |    2 +-
>  arch/x86/kvm/svm.c              |    4 +---
>  arch/x86/kvm/vmx.c              |    7 +------
>  arch/x86/kvm/x86.c              |   15 ++++++---------
>  4 files changed, 9 insertions(+), 19 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 635a74d..348d859 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -643,7 +643,7 @@ struct kvm_x86_ops {
>  	/* Create, but do not attach this VCPU */
>  	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
>  	void (*vcpu_free)(struct kvm_vcpu *vcpu);
> -	int (*vcpu_reset)(struct kvm_vcpu *vcpu);
> +	void (*vcpu_reset)(struct kvm_vcpu *vcpu);
>  
>  	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
>  	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index e1b1ce2..907e428 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1191,7 +1191,7 @@ static void init_vmcb(struct vcpu_svm *svm)
>  	enable_gif(svm);
>  }
>  
> -static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
> +static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	u32 dummy;
> @@ -1207,8 +1207,6 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
>  
>  	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
>  	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
> -
> -	return 0;
>  }
>  
>  static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 260da9a..f17cd2a 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -4100,11 +4100,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	return 0;
>  }
>  
> -static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
> +static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	u64 msr;
> -	int ret;
>  
>  	vmx->rmode.vm86_active = 0;
>  
> @@ -4195,10 +4194,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
>  	update_exception_bitmap(&vmx->vcpu);
>  
>  	vpid_sync_context(vmx);
> -
> -	ret = 0;
> -
> -	return ret;
>  }
>  
>  /*
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 35b4912..b891ac3 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -162,7 +162,7 @@ u64 __read_mostly host_xcr0;
>  
>  static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
>  
> -static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
> +static void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
>  
>  static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
>  {
> @@ -5851,9 +5851,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
>  		pr_debug("vcpu %d received sipi with vector # %x\n",
>  			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
>  		kvm_lapic_reset(vcpu);
> -		r = kvm_vcpu_reset(vcpu);
> -		if (r)
> -			return r;
> +		kvm_vcpu_reset(vcpu);
>  		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
>  	}
>  
> @@ -6479,9 +6477,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
>  	r = vcpu_load(vcpu);
>  	if (r)
>  		return r;
> -	r = kvm_vcpu_reset(vcpu);
> -	if (r == 0)
> -		r = kvm_mmu_setup(vcpu);
> +	kvm_vcpu_reset(vcpu);
> +	r = kvm_mmu_setup(vcpu);
>  	vcpu_put(vcpu);
>  
>  	return r;
> @@ -6518,7 +6515,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
>  	kvm_x86_ops->vcpu_free(vcpu);
>  }
>  
> -static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
> +static void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
>  {
>  	atomic_set(&vcpu->arch.nmi_queued, 0);
>  	vcpu->arch.nmi_pending = 0;
> @@ -6545,7 +6542,7 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
>  	vcpu->arch.regs_avail = ~0;
>  	vcpu->arch.regs_dirty = ~0;
>  
> -	return kvm_x86_ops->vcpu_reset(vcpu);
> +	kvm_x86_ops->vcpu_reset(vcpu);
>  }
>  
>  int kvm_arch_hardware_enable(void *garbage)
> 

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Gleb Natapov March 12, 2013, 11:26 a.m. UTC | #2
On Tue, Mar 12, 2013 at 10:20:24AM +0100, Jan Kiszka wrote:
> Neither vmx nor svm nor the common part may generate an error on
> kvm_vcpu_reset. So drop the return code.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Applied, thanks.

--
			Gleb.

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d..348d859 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -643,7 +643,7 @@ struct kvm_x86_ops {
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
-	int (*vcpu_reset)(struct kvm_vcpu *vcpu);
+	void (*vcpu_reset)(struct kvm_vcpu *vcpu);
 
 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1b1ce2..907e428 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1191,7 +1191,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	enable_gif(svm);
 }
 
-static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
+static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 dummy;
@@ -1207,8 +1207,6 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
-
-	return 0;
 }
 
 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260da9a..f17cd2a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4100,11 +4100,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	return 0;
 }
 
-static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 msr;
-	int ret;
 
 	vmx->rmode.vm86_active = 0;
 
@@ -4195,10 +4194,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	update_exception_bitmap(&vmx->vcpu);
 
 	vpid_sync_context(vmx);
-
-	ret = 0;
-
-	return ret;
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 35b4912..b891ac3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -162,7 +162,7 @@ u64 __read_mostly host_xcr0;
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+static void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
@@ -5851,9 +5851,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		pr_debug("vcpu %d received sipi with vector # %x\n",
 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
 		kvm_lapic_reset(vcpu);
-		r = kvm_vcpu_reset(vcpu);
-		if (r)
-			return r;
+		kvm_vcpu_reset(vcpu);
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
@@ -6479,9 +6477,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	r = vcpu_load(vcpu);
 	if (r)
 		return r;
-	r = kvm_vcpu_reset(vcpu);
-	if (r == 0)
-		r = kvm_mmu_setup(vcpu);
+	kvm_vcpu_reset(vcpu);
+	r = kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
 
 	return r;
@@ -6518,7 +6515,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = 0;
@@ -6545,7 +6542,7 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	return kvm_x86_ops->vcpu_reset(vcpu);
+	kvm_x86_ops->vcpu_reset(vcpu);
 }
 
 int kvm_arch_hardware_enable(void *garbage)