===================================================================
@@ -1320,6 +1320,8 @@ struct kvm_x86_ops {
void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

+ int (*vcpu_check_block)(struct kvm_vcpu *vcpu);
+
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*start_assignment)(struct kvm *kvm, int device_count);
@@ -1801,6 +1803,15 @@ static inline bool kvm_irq_is_postable(s
irq->delivery_mode == APIC_DM_LOWEST);
}

+#define __KVM_HAVE_ARCH_VCPU_CHECK_BLOCK
+static inline int kvm_arch_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (kvm_x86_ops.vcpu_check_block)
+ return static_call(kvm_x86_vcpu_check_block)(vcpu);
+
+ return 0;
+}
+
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
===================================================================
@@ -7727,6 +7727,7 @@ static struct kvm_x86_ops vmx_x86_ops __

.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
+ .vcpu_check_block = NULL,

.pmu_ops = &intel_pmu_ops,
.nested_ops = &vmx_nested_ops,
===================================================================
@@ -98,6 +98,7 @@ KVM_X86_OP_NULL(pre_block)
KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
+KVM_X86_OP_NULL(vcpu_check_block)
KVM_X86_OP_NULL(update_pi_irte)
KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
===================================================================
@@ -4517,6 +4517,7 @@ static struct kvm_x86_ops svm_x86_ops __
.vcpu_put = svm_vcpu_put,
.vcpu_blocking = svm_vcpu_blocking,
.vcpu_unblocking = svm_vcpu_unblocking,
+ .vcpu_check_block = NULL,

.update_exception_bitmap = svm_update_exception_bitmap,
.get_msr_feature = svm_get_msr_feature,
Implement kvm_arch_vcpu_check_block for x86. The next patch will add a VMX
implementation of kvm_x86_ops.vcpu_check_block.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
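
For context (not part of this patch): the __KVM_HAVE_ARCH_VCPU_CHECK_BLOCK
define above only takes effect if the generic code provides a guarded no-op
fallback and calls the hook from the blocking path. A minimal sketch of that
assumed generic-side counterpart, following the usual __KVM_HAVE_ARCH_*
pattern; the exact shape of kvm_vcpu_check_block() in virt/kvm/kvm_main.c is
simplified here:

/* include/linux/kvm_host.h: no-op fallback for architectures that do
 * not provide kvm_arch_vcpu_check_block() (assumed companion change,
 * not shown in this patch).
 */
#ifndef __KVM_HAVE_ARCH_VCPU_CHECK_BLOCK
static inline int kvm_arch_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/* virt/kvm/kvm_main.c: simplified sketch of the caller. A non-zero
 * return from the arch hook keeps the vCPU from blocking, alongside
 * the existing kvm_arch_vcpu_runnable()/signal_pending() checks.
 */
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_vcpu_runnable(vcpu))
		return -EINTR;
	if (signal_pending(current))
		return -EINTR;
	if (kvm_arch_vcpu_check_block(vcpu))
		return -EINTR;

	return 0;
}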
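
A note on the static_call wiring: vcpu_check_block returns a value, so
kvm_arch_vcpu_check_block() cannot use static_call_cond(), which is only
valid for void ops; hence the explicit kvm_x86_ops.vcpu_check_block NULL
check before static_call(). For illustration, a vendor module would hook it
up roughly as below (vmx_vcpu_check_block is a hypothetical name here; the
actual implementation is deferred to the next patch in the series):

/* Hypothetical VMX wiring, illustration only. Returning non-zero
 * refuses to block the vCPU, e.g. when an event must be handled in
 * guest mode before halting.
 */
static int vmx_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	return 0;
}

and in vmx_x86_ops:

	.vcpu_check_block = vmx_vcpu_check_block,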