diff mbox series

[2/3] KVM: x86: Use actual kvm_cpuid.base for clearing KVM_FEATURE_PV_UNHALT

Message ID 20240228101837.93642-3-vkuznets@redhat.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86: Fix KVM_FEATURE_PV_UNHALT update logic | expand

Commit Message

Vitaly Kuznetsov Feb. 28, 2024, 10:18 a.m. UTC
Commit ee3a5f9e3d9b ("KVM: x86: Do runtime CPUID update before updating
vcpu->arch.cpuid_entries") moved tweaking of the supplied CPUID
data earlier in kvm_set_cpuid() but __kvm_update_cpuid_runtime() actually
uses 'vcpu->arch.kvm_cpuid' (through __kvm_find_kvm_cpuid_features()) which
gets set later in kvm_set_cpuid(). In some cases, e.g. when kvm_set_cpuid()
is called for the first time and 'vcpu->arch.kvm_cpuid' is clear,
__kvm_find_kvm_cpuid_features() fails to find KVM PV feature entry and the
logic which clears KVM_FEATURE_PV_UNHALT after enabling
KVM_X86_DISABLE_EXITS_HLT does not work.

The logic, introduced by the commit ee3a5f9e3d9b ("KVM: x86: Do runtime
CPUID update before updating vcpu->arch.cpuid_entries") must stay: the
supplied CPUID data is tweaked by KVM first (__kvm_update_cpuid_runtime())
and checked later (kvm_check_cpuid()) and the actual data
(vcpu->arch.cpuid_*, vcpu->arch.kvm_cpuid, vcpu->arch.xen.cpuid,..) is only
updated on success.

Switch to searching for KVM_SIGNATURE in the supplied CPUID data to
discover KVM PV feature entry instead of using stale 'vcpu->arch.kvm_cpuid'.

While at it, drop pointless "&& (best->eax & (1 << KVM_FEATURE_PV_UNHALT))"
check when clearing KVM_FEATURE_PV_UNHALT bit.

Fixes: ee3a5f9e3d9b ("KVM: x86: Do runtime CPUID update before updating vcpu->arch.cpuid_entries")
Reported-and-tested-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/cpuid.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

Comments

Sean Christopherson Feb. 28, 2024, 11:27 p.m. UTC | #1
On Wed, Feb 28, 2024, Vitaly Kuznetsov wrote:
> @@ -273,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
>  				       int nent)
>  {
>  	struct kvm_cpuid_entry2 *best;
> +	struct kvm_hypervisor_cpuid kvm_cpuid;
>  
>  	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
>  	if (best) {
> @@ -299,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
>  		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
>  		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
>  
> -	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
> -	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
> -		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
> -		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
> +	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
> +	if (kvm_cpuid.base) {
> +		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
> +		if (kvm_hlt_in_guest(vcpu->kvm) && best)
> +			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
> +	}
>  
>  	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
>  		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

Not now, as we need a minimal fix, but we need to fix the root problem, this is
way too brittle.  Multiple helpers take @vcpu, including __kvm_update_cpuid_runtime(),
before the incoming CPUID is set.  That's just asking for new bugs to crop up.

Am I missing something, or can we just swap() the new and old, update the new
in the context of the vCPU, and then undo the swap() if there's an issue?
vcpu->mutex is held, and accessing this state from a different task is wildly
unsafe, so I don't see any problem with temporarily having an in-flux state.

If we want to be paranoid, we can probably get away with killing the VM if the
vCPU has run and the incoming CPUID is "bad", e.g. to guard against something
in kvm_set_cpuid() consuming soon-to-be-stale state.  And that's actually a
feature of sorts, because _if_ something in kvm_set_cpuid() consumes the vCPU's
CPUID, then we have a bug _now_ that affects the happy path.

Completely untested (I haven't updated the myriad helpers), but this would allow
us to revert/remove all of the changes that allow peeking at a CPUID array that
lives outside of the vCPU.

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                        int nent)
{
	int r, i;

	swap(vcpu->arch.cpuid_entries, e2);
	swap(vcpu->arch.cpuid_nent, nent);

#ifdef CONFIG_KVM_HYPERV
	if (kvm_cpuid_has_hyperv(vcpu)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			goto err;
	}
#endif

	r = kvm_check_cpuid(vcpu);
	if (r)
		goto err;

	kvm_update_cpuid_runtime(vcpu);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
	 * the core vCPU model on the fly. It would've been better to forbid any
	 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
	 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
	 * whether the supplied CPUID data is equal to what's already set.
	 */
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			goto err;
	}

	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
#ifdef CONFIG_KVM_XEN
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
	kvm_vcpu_after_set_cpuid(vcpu);

	kvfree(e2);
	return 0;

err:
	swap(vcpu->arch.cpuid_entries, e2);
	swap(vcpu->arch.cpuid_nent, nent);
	return r;
}
Vitaly Kuznetsov Feb. 29, 2024, 1:20 p.m. UTC | #2
Sean Christopherson <seanjc@google.com> writes:

> On Wed, Feb 28, 2024, Vitaly Kuznetsov wrote:
>> @@ -273,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
>>  				       int nent)
>>  {
>>  	struct kvm_cpuid_entry2 *best;
>> +	struct kvm_hypervisor_cpuid kvm_cpuid;
>>  
>>  	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
>>  	if (best) {
>> @@ -299,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
>>  		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
>>  		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
>>  
>> -	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
>> -	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
>> -		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
>> -		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
>> +	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
>> +	if (kvm_cpuid.base) {
>> +		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
>> +		if (kvm_hlt_in_guest(vcpu->kvm) && best)
>> +			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
>> +	}
>>  
>>  	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
>>  		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
>
> Not now, as we need a minimal fix, but we need to fix the root problem, this is
> way to brittle.  Multiple helpers take @vcpu, including __kvm_update_cpuid_runtime(),
> before the incoming CPUID is set.  That's just asking for new bugs to
> crop up.

Yes, I'm all for making this all more robust but I think it would be
nice to have a smaller, backportable fix for the real, observed issue first.

>
> Am I missing something, or can we just swap() the new and old, update the new
> in the context of the vCPU, and then undo the swap() if there's an issue?
> vcpu->mutex is held, and accessing this state from a different task is wildly
> unsafe, so I don't see any problem with temporarily having an in-flux state.
>

I don't see why this approach shouldn't work and I agree it looks like
it would make things better but I can't say that I'm in love with
it. Ideally, I would want to see the following "atomic" workflow for all
updates:

- Check that the supplied data is correct, return an error if not. No
changes to the state on this step.
- Tweak the data if needed.
- Update the state and apply the side-effects of the update. Ideally,
there should be no errors on this step as rollback can be
problematic. In the real world we will have to handle e.g. failed
memory allocations here but in most cases the best course of action is
to kill the VM.

Well, kvm_set_cpuid() is not like that. At least:
- kvm_hv_vcpu_init() is a side-effect but we apply it before all checks
are complete. There's no way back.
- kvm_check_cpuid() sounds like a pure checker but in reality we end up
mangling guest FPU state in fpstate_realloc()

Both are probably "no big deal" but certainly break the atomicity.

> If we want to be paranoid, we can probably get away with killing the VM if the
> vCPU has run and the incoming CPUID is "bad", e.g. to guard against something
> in kvm_set_cpuid() consuming soon-to-be-stale state.  And that's actually a
> feature of sorts, because _if_ something in kvm_set_cpuid() consumes the vCPU's
> CPUID, then we have a bug _now_ that affects the happy path.
>
> Completely untested (I haven't updated the myriad helpers), but this would allow
> us to revert/remove all of the changes that allow peeking at a CPUID array that
> lives outside of the vCPU.

Thanks, assuming there's no urgency, let me take a look at this in the
course of the next week or so.

>
> static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
>                         int nent)
> {
> 	int r, i;
>
> 	swap(vcpu->arch.cpuid_entries, e2);
> 	swap(vcpu->arch.cpuid_nent, nent);
>
> #ifdef CONFIG_KVM_HYPERV
> 	if (kvm_cpuid_has_hyperv(vcpu)) {
> 		r = kvm_hv_vcpu_init(vcpu);
> 		if (r)
> 			goto err;
> 	}
> #endif
>
> 	r = kvm_check_cpuid(vcpu);
> 	if (r)
> 		goto err;
>
> 	kvm_update_cpuid_runtime(vcpu);
>
> 	/*
> 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
> 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
> 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
> 	 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
> 	 * the core vCPU model on the fly. It would've been better to forbid any
> 	 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
> 	 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
> 	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
> 	 * whether the supplied CPUID data is equal to what's already set.
> 	 */
> 	if (kvm_vcpu_has_run(vcpu)) {
> 		r = kvm_cpuid_check_equal(vcpu, e2, nent);
> 		if (r)
> 			goto err;
> 	}
>
> 	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
> #ifdef CONFIG_KVM_XEN
> 	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
> #endif
> 	kvm_vcpu_after_set_cpuid(vcpu);
>
> 	kvfree(e2);
> 	return 0;
>
> err:
> 	swap(vcpu->arch.cpuid_entries, e2);
> 	swap(vcpu->arch.cpuid_nent, nent);
> 	return r;
> }
>
Sean Christopherson Feb. 29, 2024, 6:54 p.m. UTC | #3
On Thu, Feb 29, 2024, Vitaly Kuznetsov wrote:
> Sean Christopherson <seanjc@google.com> writes:
> > Am I missing something, or can we just swap() the new and old, update the new
> > in the context of the vCPU, and then undo the swap() if there's an issue?
> > vcpu->mutex is held, and accessing this state from a different task is wildly
> > unsafe, so I don't see any problem with temporarily having an in-flux state.
> >
> 
> I don't see why this approach shouldn't work and I agree it looks like
> it would make things better but I can't say that I'm in love with
> it.

Agreed, but the lack of atomicity is a pre-existing problem, though as proposed,
my idea would make it worse.  More below.

> Ideally, I would want to see the following "atomic" workflow for all
> updates:
> 
> - Check that the supplied data is correct, return an error if not. No
> changes to the state on this step.
> - Tweak the data if needed.
> - Update the state and apply the side-effects of the update. Ideally,
> there should be no errors on this step as rollback can be
> problemmatic. In the real world we will have to handle e.g. failed
> memory allocations here but in most cases the best course of action is
> to kill the VM.
> 
> Well, kvm_set_cpuid() is not like that. At least:
> - kvm_hv_vcpu_init() is a side-effect but we apply it before all checks
> are complete. There's no way back.
> - kvm_check_cpuid() sounds like a pure checker but in reallity we end up
> mangling guest FPU state in fpstate_realloc()

Yeah, I really, really don't like the call to fpu_enable_guest_xfd_features().
But to not make it worse, that call could be hoisted out of kvm_check_cpuid()
so that it can be performed after kvm_cpuid_check_equal(), i.e. be kept dead last
(and with a comment saying it needs to be dead last due to side effects that are
visible to userspace).

> Both are probably "no big deal" but certainly break the atomicity.
>
> > If we want to be paranoid, we can probably get away with killing the VM if the
> > vCPU has run and the incoming CPUID is "bad", e.g. to guard against something
> > in kvm_set_cpuid() consuming soon-to-be-stale state.  And that's actually a
> > feature of sorts, because _if_ something in kvm_set_cpuid() consumes the vCPU's
> > CPUID, then we have a bug _now_ that affects the happy path.
> >
> > Completely untested (I haven't updated the myriad helpers), but this would allow
> > us to revert/remove all of the changes that allow peeking at a CPUID array that
> > lives outside of the vCPU.
> 
> Thanks, assuming there's no urgency

Definitely no urgency.

> let me take a look at this in the course of the next week or so.

No need, it was more of an "FYI, this is what I may go futz with".  Specifically,
it will impact what I want to do with guest cpu_caps[*], hopefully in a good way.
My plan is to play around with it when I get back to that series.

[*] https://lore.kernel.org/all/20231110235528.1561679-1-seanjc@google.com
diff mbox series

Patch

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0e1e3e5df6dd..bfc0bfcb2bc6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -224,22 +224,22 @@  static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp
 					  vcpu->arch.cpuid_nent, sig);
 }
 
-static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
-					      struct kvm_cpuid_entry2 *entries, int nent)
+static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
+							      int nent, u32 kvm_cpuid_base)
 {
-	u32 base = vcpu->arch.kvm_cpuid.base;
-
-	if (!base)
-		return NULL;
-
-	return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
+	return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
 				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 }
 
 static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
 {
-	return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
-					     vcpu->arch.cpuid_nent);
+	u32 base = vcpu->arch.kvm_cpuid.base;
+
+	if (!base)
+		return NULL;
+
+	return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
+					     vcpu->arch.cpuid_nent, base);
 }
 
 void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
@@ -273,6 +273,7 @@  static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 				       int nent)
 {
 	struct kvm_cpuid_entry2 *best;
+	struct kvm_hypervisor_cpuid kvm_cpuid;
 
 	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	if (best) {
@@ -299,10 +300,12 @@  static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
-	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
-		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
-		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
+	if (kvm_cpuid.base) {
+		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
+		if (kvm_hlt_in_guest(vcpu->kvm) && best)
+			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+	}
 
 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
 		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);