@@ -136,8 +136,9 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
-KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons)
KVM_X86_OP_OPTIONAL(get_untagged_addr)
+KVM_X86_OP_OPTIONAL(update_hfi)
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
@@ -121,6 +121,7 @@
KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_TLB_FLUSH \
KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFI_UPDATE KVM_ARCH_REQ(33)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1794,6 +1795,7 @@ struct kvm_x86_ops {
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
+ void (*update_hfi)(struct kvm_vcpu *vcpu);
};
struct kvm_x86_nested_ops {
@@ -1651,6 +1651,35 @@ static void vmx_update_hfi_table(struct kvm *kvm)
vmx_inject_therm_interrupt(kvm_get_vcpu(kvm, 0));
}
+static void vmx_dynamic_update_hfi_table(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
+	struct hfi_desc *kvm_vmx_hfi = &kvm_vmx->pkg_therm.hfi_desc;
+
+	if (!intel_hfi_enabled())
+		return;
+
+	mutex_lock(&kvm_vmx->pkg_therm.pkg_therm_lock);
+
+	/*
+	 * If the guest hasn't consumed the previous update yet, just set a
+	 * pending flag so KVM knows the host has more updates to sync later.
+	 */
+	if (kvm_vmx_hfi->hfi_update_status) {
+		kvm_vmx_hfi->hfi_update_pending = true;
+		mutex_unlock(&kvm_vmx->pkg_therm.pkg_therm_lock);
+		return;
+	}
+
+	/*
+	 * The virtual HFI table is maintained per VM so that all vCPUs of
+	 * the same VM share a single HFI table. Therefore, any one vCPU
+	 * can update the HFI table on behalf of the whole VM.
+	 */
+	vmx_update_hfi_table(vcpu->kvm);
+	mutex_unlock(&kvm_vmx->pkg_therm.pkg_therm_lock);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
@@ -8703,6 +8732,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
.get_untagged_addr = vmx_get_untagged_addr,
+ .update_hfi = vmx_dynamic_update_hfi_table,
};
static unsigned int vmx_handle_intel_pt_intr(void)
@@ -10850,6 +10850,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
+ if (kvm_check_request(KVM_REQ_HFI_UPDATE, vcpu))
+ static_call(kvm_x86_update_hfi)(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||