
[v3,12/16] kvm: x86: Introduce struct kvm_x86_ops.apicv_eoi_accelerate

Message ID 1568401242-260374-13-git-send-email-suravee.suthikulpanit@amd.com (mailing list archive)
State New, archived
Series kvm: x86: Support AMD SVM AVIC w/ in-kernel irqchip mode

Commit Message

Suthikulpanit, Suravee Sept. 13, 2019, 7:01 p.m. UTC
AMD SVM AVIC accelerates write access to the APIC EOI register for
edge-triggered interrupts and does not trap the write. This breaks the
in-kernel irqchip, which relies on the EOI trap to invoke the ack
notifiers for the acked IRQ.

Introduce struct kvm_x86_ops.apicv_eoi_accelerate to allow checking
for such behavior.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/svm.c              | 10 ++++++++++
 arch/x86/kvm/x86.c              | 15 +++++++++++++++
 3 files changed, 27 insertions(+)
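
As an illustration of how the new hook is meant to be consumed, here is a
minimal, hypothetical caller sketch (not part of this patch; only
kvm_apicv_eoi_accelerate() is introduced by the hunks below, while the
surrounding function and the use of IOAPIC_EDGE_TRIG are illustrative
assumptions):

  /*
   * Hypothetical caller: decide whether the in-kernel irqchip can
   * depend on an EOI trap (and thus on irq ack notifiers) for a
   * given trigger mode.
   */
  static bool ioapic_eoi_trap_reliable(struct kvm *kvm, int trigger_mode)
  {
          bool edge_trig = (trigger_mode == IOAPIC_EDGE_TRIG);

          /*
           * If APICv/AVIC completes the EOI write without a VM-exit,
           * the irq ack notifier will never run, so the caller must
           * fall back to another mechanism (e.g. keeping APICv
           * deactivated for this route).
           */
          return !kvm_apicv_eoi_accelerate(kvm, edge_trig);
  }

A caller that gets false back knows the EOI write will still trap, so the
existing ack-notifier path keeps working.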

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 624e883..0bc8b29 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1105,6 +1105,7 @@  struct kvm_x86_ops {
 	void (*pre_update_apicv_exec_ctrl)(struct kvm_vcpu *vcpu,
 					   bool activate);
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
+	bool (*apicv_eoi_accelerate)(bool edge_trig);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
 	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
 	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
@@ -1438,6 +1439,7 @@  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu);
 void kvm_make_apicv_activate_request(struct kvm_vcpu *vcpu);
 void kvm_make_apicv_deactivate_request(struct kvm_vcpu *vcpu, bool disable);
+bool kvm_apicv_eoi_accelerate(struct kvm *kvm, bool edge_trig);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f04a17e..457ffe1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7357,6 +7357,15 @@  static void svm_pre_update_apicv_exec_ctrl(struct kvm_vcpu *vcpu, bool activate)
 		avic_destroy_access_page(vcpu);
 }
 
+static bool svm_apicv_eoi_accelerate(bool edge_trig)
+{
+	/*
+	 * AVIC accelerates write access to APIC EOI register for
+	 * edge-trigger interrupts.
+	 */
+	return edge_trig;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7435,6 +7444,7 @@  static void svm_pre_update_apicv_exec_ctrl(struct kvm_vcpu *vcpu, bool activate)
 	.get_enable_apicv = svm_get_enable_apicv,
 	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
 	.pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
+	.apicv_eoi_accelerate = svm_apicv_eoi_accelerate,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_irr_update = svm_hwapic_irr_update,
 	.hwapic_isr_update = svm_hwapic_isr_update,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1540629..fa55960 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7221,6 +7221,21 @@  void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_deactivate_apicv);
 
+bool kvm_apicv_eoi_accelerate(struct kvm *kvm, bool edge_trig)
+{
+	bool ret = false;
+
+	if (!kvm_x86_ops->apicv_eoi_accelerate)
+		return ret;
+
+	mutex_lock(&kvm->arch.apicv_lock);
+	if (kvm->arch.apicv_state == APICV_ACTIVATED)
+		ret = kvm_x86_ops->apicv_eoi_accelerate(edge_trig);
+	mutex_unlock(&kvm->arch.apicv_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_eoi_accelerate);
+
 static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
 {
 	struct kvm_vcpu *target = NULL;
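
For reference, the semantics implied by the SVM implementation above can be
summarized as follows (a summary, not additional patch content):

  With AVIC activated (kvm->arch.apicv_state == APICV_ACTIVATED):
    kvm_apicv_eoi_accelerate(kvm, true)  -> true   (edge-triggered EOI is not trapped)
    kvm_apicv_eoi_accelerate(kvm, false) -> false  (level-triggered EOI still traps)
  With APICv deactivated, or when no apicv_eoi_accelerate hook is set,
  the helper always returns false.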