
[04/26] KVM: x86: Add a kvm_x86_ops hook to query virtualized MSR support

Message ID 20200129234640.8147-5-sean.j.christopherson@intel.com (mailing list archive)
State New, archived

Commit Message

Sean Christopherson Jan. 29, 2020, 11:46 p.m. UTC
Add a hook, ->has_virtualized_msr(), to allow moving vendor specific
checks into SVM/VMX and ultimately facilitate the removal of the
piecemeal ->*_supported() hooks.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm.c              | 6 ++++++
 arch/x86/kvm/vmx/vmx.c          | 6 ++++++
 arch/x86/kvm/x86.c              | 2 ++
 4 files changed, 15 insertions(+)
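To illustrate the end goal (hypothetical, not part of this patch): a
vendor-specific restriction that today needs its own ->*_supported() hook
could eventually be expressed directly in the vendor implementation of the
new hook. In the sketch below the chosen MSR (MSR_IA32_RTIT_CTL, an Intel
PT MSR that SVM never virtualizes) and the final shape of the code are
assumptions, not something this patch adds.

static bool svm_has_virtualized_msr(u32 index)
{
	switch (index) {
	case MSR_IA32_RTIT_CTL:
		/* Hypothetical: Intel PT is Intel-only, so SVM never virtualizes it. */
		return false;
	default:
		return true;
	}
}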

Comments

Vitaly Kuznetsov Feb. 5, 2020, 2:34 p.m. UTC | #1
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> Add a hook, ->has_virtualized_msr(), to allow moving vendor specific
> checks into SVM/VMX and ultimately facilitate the removal of the
> piecemeal ->*_supported() hooks.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 1 +
>  arch/x86/kvm/svm.c              | 6 ++++++
>  arch/x86/kvm/vmx/vmx.c          | 6 ++++++
>  arch/x86/kvm/x86.c              | 2 ++
>  4 files changed, 15 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 5c2ad3fa0980..8fb32c27fa44 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1050,6 +1050,7 @@ struct kvm_x86_ops {
>  	int (*hardware_setup)(void);               /* __init */
>  	void (*hardware_unsetup)(void);            /* __exit */
>  	bool (*cpu_has_accelerated_tpr)(void);
> +	bool (*has_virtualized_msr)(u32 index);
>  	bool (*has_emulated_msr)(u32 index);
>  	void (*cpuid_update)(struct kvm_vcpu *vcpu);
>  
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index a7b944a3a0e2..1f9323fbad81 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5985,6 +5985,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
>  	return false;
>  }
>  
> +static bool svm_has_virtualized_msr(u32 index)
> +{
> +	return true;
> +}
> +
>  static bool svm_has_emulated_msr(u32 index)
>  {
>  	switch (index) {
> @@ -7379,6 +7384,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
>  	.hardware_enable = svm_hardware_enable,
>  	.hardware_disable = svm_hardware_disable,
>  	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
> +	.has_virtualized_msr = svm_has_virtualized_msr,
>  	.has_emulated_msr = svm_has_emulated_msr,
>  
>  	.vcpu_create = svm_create_vcpu,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index f5bb1ad2e9fa..3f2c094434e8 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6274,6 +6274,11 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
>  		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
>  }
>  
> +static bool vmx_has_virtualized_msr(u32 index)
> +{
> +	return true;
> +}
> +
>  static bool vmx_has_emulated_msr(u32 index)
>  {
>  	switch (index) {
> @@ -7754,6 +7759,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
>  	.hardware_enable = hardware_enable,
>  	.hardware_disable = hardware_disable,
>  	.cpu_has_accelerated_tpr = report_flexpriority,
> +	.has_virtualized_msr = vmx_has_virtualized_msr,
>  	.has_emulated_msr = vmx_has_emulated_msr,
>  
>  	.vm_init = vmx_vm_init,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 3d4a5326d84e..94f90fe1c0de 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5279,6 +5279,8 @@ static void kvm_init_msr_list(void)
>  				continue;
>  			break;
>  		default:
> +			if (!kvm_x86_ops->has_virtualized_msr(msr_index))
> +				continue;
>  			break;
>  		}

Shouldn't break anything by itself, so

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Sean Christopherson Feb. 5, 2020, 2:59 p.m. UTC | #2
On Wed, Feb 05, 2020 at 03:34:29PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> 
> Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Stooooooop!  Everything from this point on is obsoleted by kvm_cpu_caps!
Vitaly Kuznetsov Feb. 5, 2020, 3:22 p.m. UTC | #3
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Wed, Feb 05, 2020 at 03:34:29PM +0100, Vitaly Kuznetsov wrote:
>> Sean Christopherson <sean.j.christopherson@intel.com> writes:
>> 
>> Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>
> Stooooooop!  Everything from this point on is obsoleted by kvm_cpu_caps!
>

Oops, this was only a week-old series! Patches are rotting fast
nowadays!
Sean Christopherson Feb. 5, 2020, 3:35 p.m. UTC | #4
On Wed, Feb 05, 2020 at 04:22:48PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> 
> > On Wed, Feb 05, 2020 at 03:34:29PM +0100, Vitaly Kuznetsov wrote:
> >> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> >> 
> >> Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> >
> > Stooooooop!  Everything from this point on is obsoleted by kvm_cpu_caps!
> >
> 
> Oops, this was only a week-old series! Patches are rotting fast
> nowadays!

Sorry :-(

I dug deeper into the CPUID crud after posting this series because I really
didn't like the end result for vendor-specific leafs, and ended up coming
up with (IMO) a much more elegant solution.

https://lkml.kernel.org/r/20200201185218.24473-1-sean.j.christopherson@intel.com/

or on patchwork

https://patchwork.kernel.org/cover/11361361/
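Roughly, the kvm_cpu_caps approach linked above replaces the per-feature
->*_supported() hooks with a single set of KVM-adjusted capability words:
common code seeds them from the host CPUID, the vendor module clears the
features it cannot virtualize once at hardware setup, and everything else
queries the cached caps. The sketch below is simplified; the kvm_cpu_cap_*
names follow the posted series, the example_* wrappers are purely
illustrative, and the merged code may differ.

u32 kvm_cpu_caps[NCAPINTS] __read_mostly;

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	kvm_cpu_caps[x86_feature / 32] &= ~BIT(x86_feature % 32);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return kvm_cpu_caps[x86_feature / 32] & BIT(x86_feature % 32);
}

/* Vendor setup, e.g. SVM dropping Intel-only features: */
static void example_svm_set_cpu_caps(void)
{
	kvm_cpu_cap_clear(X86_FEATURE_INTEL_PT);
}

/* Common code queries the caps instead of calling a dedicated hook: */
static bool example_pt_supported(void)
{
	return kvm_cpu_cap_has(X86_FEATURE_INTEL_PT);
}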
Vitaly Kuznetsov Feb. 5, 2020, 4:55 p.m. UTC | #5
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Wed, Feb 05, 2020 at 04:22:48PM +0100, Vitaly Kuznetsov wrote:
>> Sean Christopherson <sean.j.christopherson@intel.com> writes:
>> 
>> > On Wed, Feb 05, 2020 at 03:34:29PM +0100, Vitaly Kuznetsov wrote:
>> >> Sean Christopherson <sean.j.christopherson@intel.com> writes:
>> >> 
>> >> Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> >
>> > Stooooooop!  Everything from this point on is obsoleted by kvm_cpu_caps!
>> >
>> 
>> Oops, this was only a week-old series! Patches are rotting fast
>> nowadays!
>
> Sorry :-(
>
> I dug deeper into the CPUID crud after posting this series because I really
> didn't like the end result for vendor-specific leafs, and ended up coming
> up with (IMO) a much more elegant solution.
>
> https://lkml.kernel.org/r/20200201185218.24473-1-sean.j.christopherson@intel.com/
>
> or on patchwork
>
> https://patchwork.kernel.org/cover/11361361/
>

Thanks, I saw it. I tried applying it to kvm/next earlier today but
failed. Do you by any chance have a git branch somewhere? I'll try to
review it and test at least AMD stuff (if AMD people don't beat me to it
of course).
Sean Christopherson Feb. 5, 2020, 5:02 p.m. UTC | #6
On Wed, Feb 05, 2020 at 05:55:32PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> > I dug deeper into the CPUID crud after posting this series because I really
> > didn't like the end result for vendor-specific leafs, and ended up coming
> > up with (IMO) a much more elegant solution.
> >
> > https://lkml.kernel.org/r/20200201185218.24473-1-sean.j.christopherson@intel.com/
> >
> > or on patchwork
> >
> > https://patchwork.kernel.org/cover/11361361/
> >
> 
> Thanks, I saw it. I tried applying it to kvm/next earlier today but
> failed. Do you by any chance have a git branch somewhere? I'll try to
> review it and test at least AMD stuff (if AMD people don't beat me to it
> of course).

Have you tried kvm/queue?  I'm pretty sure I based the code on kvm/queue.
If that doesn't work, I'll push a tag to my github repo.

This is exactly why I usually note the base for large series.  *sigh*
Vitaly Kuznetsov Feb. 6, 2020, 12:08 p.m. UTC | #7
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Wed, Feb 05, 2020 at 05:55:32PM +0100, Vitaly Kuznetsov wrote:
>> Sean Christopherson <sean.j.christopherson@intel.com> writes:
>> > I dug deeper into the CPUID crud after posting this series because I really
>> > didn't like the end result for vendor-specific leafs, and ended up coming
>> > up with (IMO) a much more elegant solution.
>> >
>> > https://lkml.kernel.org/r/20200201185218.24473-1-sean.j.christopherson@intel.com/
>> >
>> > or on patchwork
>> >
>> > https://patchwork.kernel.org/cover/11361361/
>> >
>> 
>> Thanks, I saw it. I tried applying it to kvm/next earlier today but
>> failed. Do you by any chance have a git branch somewhere? I'll try to
>> review it and test at least AMD stuff (if AMD people don't beat me to it
>> of course).
>
> Have you tried kvm/queue?  I'm pretty sure I based the code on kvm/queue.
> If that doesn't work, I'll push a tag to my github repo.

My bad, kvm/queue worked like a charm!

>
> This is exactly why I usually note the base for large series.  *sigh*

Pull requests, anyone? :-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5c2ad3fa0980..8fb32c27fa44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1050,6 +1050,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
+	bool (*has_virtualized_msr)(u32 index);
 	bool (*has_emulated_msr)(u32 index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a7b944a3a0e2..1f9323fbad81 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5985,6 +5985,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
+static bool svm_has_virtualized_msr(u32 index)
+{
+	return true;
+}
+
 static bool svm_has_emulated_msr(u32 index)
 {
 	switch (index) {
@@ -7379,6 +7384,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+	.has_virtualized_msr = svm_has_virtualized_msr,
 	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f5bb1ad2e9fa..3f2c094434e8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6274,6 +6274,11 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
 		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
+static bool vmx_has_virtualized_msr(u32 index)
+{
+	return true;
+}
+
 static bool vmx_has_emulated_msr(u32 index)
 {
 	switch (index) {
@@ -7754,6 +7759,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
+	.has_virtualized_msr = vmx_has_virtualized_msr,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vm_init = vmx_vm_init,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d4a5326d84e..94f90fe1c0de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5279,6 +5279,8 @@ static void kvm_init_msr_list(void)
 				continue;
 			break;
 		default:
+			if (!kvm_x86_ops->has_virtualized_msr(msr_index))
+				continue;
 			break;
 		}
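For context on the x86.c hunk above: kvm_init_msr_list() walks the list of
candidate MSRs, and only entries that survive the switch are copied into
the msrs_to_save list reported to userspace, so a "continue" drops the MSR
entirely. A simplified sketch of the surrounding loop follows; the variable
and array names approximate the base this series was posted against rather
than quoting it verbatim.

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
		u32 msr_index = msrs_to_save_all[i];

		/* Skip MSRs the host CPU itself doesn't implement. */
		if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]) < 0)
			continue;

		switch (msr_index) {
		/* ... existing MSR-specific feature checks ... */
		default:
			/* New: let the vendor module veto MSRs it can't virtualize. */
			if (!kvm_x86_ops->has_virtualized_msr(msr_index))
				continue;
			break;
		}

		msrs_to_save[num_msrs_to_save++] = msr_index;
	}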