diff mbox series

[1/2] KVM: x86: Invert APICv/AVIC enablement check

Message ID 20210513113710.1740398-2-vkuznets@redhat.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86: hyper-v: Conditionally allow SynIC with APICv/AVIC | expand

Commit Message

Vitaly Kuznetsov May 13, 2021, 11:37 a.m. UTC
Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
for Intel, 'avic' module parameter for AMD) but there's no way to check
it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
invert kvm_apicv_init() (which now doesn't need to be called from arch-
specific code).

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/svm/svm.c          | 7 ++++++-
 arch/x86/kvm/vmx/vmx.c          | 7 ++++++-
 arch/x86/kvm/x86.c              | 6 +++---
 4 files changed, 16 insertions(+), 6 deletions(-)

Comments

Sean Christopherson May 17, 2021, 9:03 p.m. UTC | #1
On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> for Intel, 'avic' module parameter for AMD) but there's no way to check
> it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> invert kvm_apicv_init() (which now doesn't need to be called from arch-
> specific code).

Rather than add a new hook, just move the variable to x86.c, and export it so
that VMX and SVM can give it different module names.  The only hiccup is that
avic is off by default, but I don't see why that can't be changed.

On a related topic, the AVIC dependency on CONFIG_X86_LOCAL_APIC is dead code
since commit e42eef4ba388 ("KVM: add X86_LOCAL_APIC dependency").  Ditto for
cpu_has_vmx_posted_intr().


diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..bf5807d35339 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,7 @@ struct kvm_arch_async_pf {
 extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;

 #define KVM_X86_OP(func) \
@@ -1661,7 +1662,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
                              unsigned long bit);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..ec4aa804395b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -28,10 +28,7 @@
 #include "svm.h"

 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+module_param_named(avic, enable_apicv, bool, S_IRUGO);

 #define SVM_AVIC_DOORBELL      0xc001011b

@@ -126,7 +123,7 @@ void avic_vm_destroy(struct kvm *kvm)
        unsigned long flags;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +146,7 @@ int avic_vm_init(struct kvm *kvm)
        struct page *l_page;
        u32 vm_id;

-       if (!avic)
+       if (!enable_apicv)
                return 0;

        /* Allocating physical APIC ID table (4KB) */
@@ -571,7 +568,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
        int ret;
        struct kvm_vcpu *vcpu = &svm->vcpu;

-       if (!avic || !irqchip_in_kernel(vcpu->kvm))
+       if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = avic_init_backing_page(vcpu);
@@ -595,7 +592,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)

 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
 {
-       if (!avic || !lapic_in_kernel(vcpu))
+       if (!enable_apicv || !lapic_in_kernel(vcpu))
                return;

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +652,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        struct vmcb *vmcb = svm->vmcb;
        bool activated = kvm_vcpu_apicv_active(vcpu);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (activated) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dfa351e605de..e650d4c466e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1009,11 +1009,9 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
-                       avic = false;
+       if (enable_apicv) {
+               if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
+                       enable_apicv = false;
                } else {
                        pr_info("AVIC enabled\n");

@@ -4429,13 +4427,12 @@ static int svm_vm_init(struct kvm *kvm)
        if (!pause_filter_count || !pause_filter_thresh)
                kvm->arch.pause_in_guest = true;

-       if (avic) {
+       if (enable_apicv) {
                int ret = avic_vm_init(kvm);
                if (ret)
                        return ret;
        }

-       kvm_apicv_init(kvm, avic);
        return 0;
 }

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e44567ceb865..a514b490db4a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -479,8 +479,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..bf5807d35339 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,7 @@ struct kvm_arch_async_pf {
 extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;

 #define KVM_X86_OP(func) \
@@ -1661,7 +1662,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
                              unsigned long bit);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..ec4aa804395b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -28,10 +28,7 @@
 #include "svm.h"

 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+module_param_named(avic, enable_apicv, bool, S_IRUGO);

 #define SVM_AVIC_DOORBELL      0xc001011b

@@ -126,7 +123,7 @@ void avic_vm_destroy(struct kvm *kvm)
        unsigned long flags;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +146,7 @@ int avic_vm_init(struct kvm *kvm)
        struct page *l_page;
        u32 vm_id;

-       if (!avic)
+       if (!enable_apicv)
                return 0;

        /* Allocating physical APIC ID table (4KB) */
@@ -571,7 +568,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
        int ret;
        struct kvm_vcpu *vcpu = &svm->vcpu;

-       if (!avic || !irqchip_in_kernel(vcpu->kvm))
+       if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = avic_init_backing_page(vcpu);
@@ -595,7 +592,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)

 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
 {
-       if (!avic || !lapic_in_kernel(vcpu))
+       if (!enable_apicv || !lapic_in_kernel(vcpu))
                return;

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +652,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        struct vmcb *vmcb = svm->vmcb;
        bool activated = kvm_vcpu_apicv_active(vcpu);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (activated) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dfa351e605de..e650d4c466e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1009,11 +1009,9 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..bf5807d35339 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,7 @@ struct kvm_arch_async_pf {
 extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;

 #define KVM_X86_OP(func) \
@@ -1661,7 +1662,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
                              unsigned long bit);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..ec4aa804395b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -28,10 +28,7 @@
 #include "svm.h"

 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+module_param_named(avic, enable_apicv, bool, S_IRUGO);

 #define SVM_AVIC_DOORBELL      0xc001011b

@@ -126,7 +123,7 @@ void avic_vm_destroy(struct kvm *kvm)
        unsigned long flags;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +146,7 @@ int avic_vm_init(struct kvm *kvm)
        struct page *l_page;
        u32 vm_id;

-       if (!avic)
+       if (!enable_apicv)
                return 0;

        /* Allocating physical APIC ID table (4KB) */
@@ -571,7 +568,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
        int ret;
        struct kvm_vcpu *vcpu = &svm->vcpu;

-       if (!avic || !irqchip_in_kernel(vcpu->kvm))
+       if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = avic_init_backing_page(vcpu);
@@ -595,7 +592,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)

 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
 {
-       if (!avic || !lapic_in_kernel(vcpu))
+       if (!enable_apicv || !lapic_in_kernel(vcpu))
                return;

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +652,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        struct vmcb *vmcb = svm->vmcb;
        bool activated = kvm_vcpu_apicv_active(vcpu);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (activated) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dfa351e605de..e650d4c466e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1009,11 +1009,9 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
[... intervening diff context elided in this archived capture (pager artifact) ...]

 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL

-extern int avic;
-
 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
 {
        svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 8dee8a5fbc17..4705ad55abb5 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,7 +12,6 @@ extern bool __read_mostly enable_ept;
 extern bool __read_mostly enable_unrestricted_guest;
 extern bool __read_mostly enable_ept_ad_bits;
 extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_apicv;
 extern int __read_mostly pt_mode;

 #define PT_MODE_SYSTEM         0
@@ -90,8 +89,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)

 static inline bool cpu_has_vmx_posted_intr(void)
 {
-       return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
-               vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
 }

 static inline bool cpu_has_load_ia32_efer(void)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bceb5ca3a89..697dd54c7df8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -101,7 +101,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);

-bool __read_mostly enable_apicv = 1;
 module_param(enable_apicv, bool, S_IRUGO);

 /*
@@ -7001,7 +7000,6 @@ static int vmx_vm_init(struct kvm *kvm)
                        break;
                }
        }
-       kvm_apicv_init(kvm, enable_apicv);
        return 0;
 }

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b6bca616929..22a1e2b438c3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -209,6 +209,9 @@ EXPORT_SYMBOL_GPL(host_efer);
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

+bool __read_mostly enable_apicv = true;
+EXPORT_SYMBOL_GPL(enable_apicv);
+
 u64 __read_mostly host_xss;
 EXPORT_SYMBOL_GPL(host_xss);
 u64 __read_mostly supported_xss;
@@ -8342,16 +8345,15 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);

-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
 {
-       if (enable)
+       if (enable_apicv)
                clear_bit(APICV_INHIBIT_REASON_DISABLE,
                          &kvm->arch.apicv_inhibit_reasons);
        else
                set_bit(APICV_INHIBIT_REASON_DISABLE,
                        &kvm->arch.apicv_inhibit_reasons);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_init);

 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 {
@@ -10736,6 +10738,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

+       kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
        kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
Jim Mattson May 17, 2021, 9:09 p.m. UTC | #2
On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > specific code).
>
> Rather than add a new hook, just move the variable to x86.c, and export it so
> that VMX and SVM can give it different module names.  The only hiccup is that
> avic is off by default, but I don't see why that can't be changed.

See https://www.spinics.net/lists/kvm/msg208722.html.
Sean Christopherson May 17, 2021, 9:26 p.m. UTC | #3
On Mon, May 17, 2021, Jim Mattson wrote:
> On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
> >
> > On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > > specific code).
> >
> > Rather than add a new hook, just move the variable to x86.c, and export it so
> > that VMX and SVM can give it different module names.  The only hiccup is that
> > avic is off by default, but I don't see why that can't be changed.
> 
> See https://www.spinics.net/lists/kvm/msg208722.html.

Boo.  A common enable_apicv can still work, SVM just needs an intermediary
between the module param and enable_apicv.

--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -185,6 +185,10 @@ module_param(vls, int, 0444);
 static int vgif = true;
 module_param(vgif, int, 0444);

+/* enable / disable AVIC */
+static bool avic;
+module_param(avic, bool, S_IRUGO);
+
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);

@@ -1009,16 +1013,19 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
-                       avic = false;
-               } else {
-                       pr_info("AVIC enabled\n");
+       if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))
+               avic = false;

-                       amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
-               }
+       /*
+        * Override the common enable_apicv.  AVIC is disabled by default
+        * because Jim said so.
+        */
+       enable_apicv = avic;
+
+       if (enable_apicv) {
+               pr_info("AVIC enabled\n");
+
+               amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
        }

        if (vls) {
Jim Mattson May 17, 2021, 9:56 p.m. UTC | #4
On Mon, May 17, 2021 at 2:26 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, May 17, 2021, Jim Mattson wrote:
> > On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
> > >
> > > On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > > > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > > > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > > > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > > > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > > > specific code).
> > >
> > > Rather than add a new hook, just move the variable to x86.c, and export it so
> > > that VMX and SVM can give it different module names.  The only hiccup is that
> > > avic is off by default, but I don't see why that can't be changed.
> >
> > See https://www.spinics.net/lists/kvm/msg208722.html.
>
> Boo.  A common enable_apicv can still work, SVM just needs an intermediary
> between the module param and enable_apicv.
>
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -185,6 +185,10 @@ module_param(vls, int, 0444);
>  static int vgif = true;
>  module_param(vgif, int, 0444);
>
> +/* enable / disable AVIC */
> +static bool avic;
> +module_param(avic, bool, S_IRUGO);
> +
>  bool __read_mostly dump_invalid_vmcb;
>  module_param(dump_invalid_vmcb, bool, 0644);
>
> @@ -1009,16 +1013,19 @@ static __init int svm_hardware_setup(void)
>                         nrips = false;
>         }
>
> -       if (avic) {
> -               if (!npt_enabled ||
> -                   !boot_cpu_has(X86_FEATURE_AVIC) ||
> -                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
> -                       avic = false;
> -               } else {
> -                       pr_info("AVIC enabled\n");
> +       if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))
> +               avic = false;
>
> -                       amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
> -               }
> +       /*
> +        * Override the common enable_apicv.  AVIC is disabled by default
> +        * because Jim said so.
> +        */

Hey! I'm just the messenger. Wei Huang said so.

> +       enable_apicv = avic;
> +
> +       if (enable_apicv) {
> +               pr_info("AVIC enabled\n");
> +
> +               amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
>         }
>
>         if (vls) {
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..ffafdb7b24cb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1205,6 +1205,7 @@  struct kvm_x86_ops {
 	void (*hardware_unsetup)(void);
 	bool (*cpu_has_accelerated_tpr)(void);
 	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
+	bool (*apicv_supported)(void);
 	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
 
 	unsigned int vm_size;
@@ -1661,7 +1662,6 @@  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception);
 
 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
 			      unsigned long bit);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4dd9b7856e5b..360b3000c5a8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4470,16 +4470,21 @@  static int svm_vm_init(struct kvm *kvm)
 			return ret;
 	}
 
-	kvm_apicv_init(kvm, avic);
 	return 0;
 }
 
+static bool svm_avic_supported(void)
+{
+	return avic;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.hardware_unsetup = svm_hardware_teardown,
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
 	.has_emulated_msr = svm_has_emulated_msr,
+	.apicv_supported = svm_avic_supported,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f2fd447eed45..3b0f4f9c21b3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7034,7 +7034,6 @@  static int vmx_vm_init(struct kvm *kvm)
 			break;
 		}
 	}
-	kvm_apicv_init(kvm, enable_apicv);
 	return 0;
 }
 
@@ -7645,6 +7644,11 @@  static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 	return supported & BIT(bit);
 }
 
+static bool vmx_apicv_supported(void)
+{
+	return enable_apicv;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.hardware_unsetup = hardware_unsetup,
 
@@ -7652,6 +7656,7 @@  static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
+	.apicv_supported = vmx_apicv_supported,
 
 	.vm_size = sizeof(struct kvm_vmx),
 	.vm_init = vmx_vm_init,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5bd550eaf683..fe7248e11e13 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8342,16 +8342,15 @@  bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
 {
-	if (enable)
+	if (kvm_x86_ops.apicv_supported())
 		clear_bit(APICV_INHIBIT_REASON_DISABLE,
 			  &kvm->arch.apicv_inhibit_reasons);
 	else
 		set_bit(APICV_INHIBIT_REASON_DISABLE,
 			&kvm->arch.apicv_inhibit_reasons);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_init);
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 {
@@ -10727,6 +10726,7 @@  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
+	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
 	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);