
[RFC,3/5] KVM: x86: add msr_exits_supported to kvm_x86_ops

Message ID 1439923615-10600-4-git-send-email-peterhornyack@google.com (mailing list archive)
State New, archived

Commit Message

Peter Hornyack Aug. 18, 2015, 6:46 p.m. UTC
msr_exits_supported will be checked when userspace attempts to enable
the KVM_CAP_UNHANDLED_MSR_EXITS capability for the VM. This is needed
because later in this patchset MSR exit support is implemented for vmx
but not for svm.

Signed-off-by: Peter Hornyack <peterhornyack@google.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm.c              | 6 ++++++
 arch/x86/kvm/vmx.c              | 6 ++++++
 3 files changed, 13 insertions(+)
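
For context, here is a minimal sketch (not part of this patch) of how the
vm-level enable-cap path in arch/x86/kvm/x86.c could gate the new capability
on this hook. The KVM_CAP_UNHANDLED_MSR_EXITS case and the
kvm->arch.unhandled_msr_exits flag are assumptions about how later patches in
this series might wire things up; only the msr_exits_supported() hook itself
is added here.

/*
 * Sketch only, not part of this patch: how kvm_vm_ioctl_enable_cap() in
 * arch/x86/kvm/x86.c might consult the new hook. The cap handling and the
 * kvm->arch.unhandled_msr_exits flag are assumptions about later patches.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	switch (cap->cap) {
	case KVM_CAP_UNHANDLED_MSR_EXITS:
		r = -EINVAL;
		/* svm's hook returns false, so the cap cannot be enabled there. */
		if (!kvm_x86_ops->msr_exits_supported())
			break;
		/* Hypothetical VM-wide flag checked when an MSR access is unhandled. */
		kvm->arch.unhandled_msr_exits = true;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

Gating the capability at enable time, rather than at exit time, would let
userspace detect missing vendor-module support up front via -EINVAL.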

Comments

Bandan Das Aug. 24, 2015, 11:15 p.m. UTC | #1
Peter Hornyack <peterhornyack@google.com> writes:

> msr_exits_supported will be checked when userspace attempts to enable
> the KVM_CAP_UNHANDLED_MSR_EXITS capability for the VM. This is needed
> because later in this patchset MSR exit support is implemented for vmx
> but not for svm.

Is svm future work? :) Are there any such svm MSRs?


[...]

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c12e845f59e6..a6e145b1e271 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -854,6 +854,7 @@ struct kvm_x86_ops {
 	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 	bool (*mpx_supported)(void);
 	bool (*xsaves_supported)(void);
+	bool (*msr_exits_supported)(void);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 74d825716f4f..bcbb56f49b9f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4249,6 +4249,11 @@ static bool svm_xsaves_supported(void)
 	return false;
 }
 
+static bool svm_msr_exits_supported(void)
+{
+	return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
 	return true;
@@ -4540,6 +4545,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.invpcid_supported = svm_invpcid_supported,
 	.mpx_supported = svm_mpx_supported,
 	.xsaves_supported = svm_xsaves_supported,
+	.msr_exits_supported = svm_msr_exits_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index acc38e27d221..27fec385d79d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8161,6 +8161,11 @@ static bool vmx_xsaves_supported(void)
 		SECONDARY_EXEC_XSAVES;
 }
 
+static bool vmx_msr_exits_supported(void)
+{
+	return false;
+}
+
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -10413,6 +10418,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.handle_external_intr = vmx_handle_external_intr,
 	.mpx_supported = vmx_mpx_supported,
 	.xsaves_supported = vmx_xsaves_supported,
+	.msr_exits_supported = vmx_msr_exits_supported,
 
 	.check_nested_events = vmx_check_nested_events,