| Field | Value |
|---|---|
| Message ID | 20220811210605.402337-7-pbonzini@redhat.com |
| State | New, archived |
| Series | KVM: x86: never write to memory from kvm_vcpu_check_block |
On Thu, Aug 11, 2022, Paolo Bonzini wrote:
> Interrupts, NMIs etc. sent while in guest mode are already handled
> properly by the *_interrupt_allowed callbacks, but other events can
> cause a vCPU to be runnable that are specific to guest mode.
>
> In the case of VMX there are two, the preemption timer and the
> monitor trap. The VMX preemption timer is already special cased via
> the hv_timer_pending callback, but the purpose of the callback can be
> easily extended to MTF or in fact any other event that can occur only
> in guest mode.
>
> Rename the callback and add an MTF check; kvm_arch_vcpu_runnable()
> now will return true if an MTF is pending, without relying on
> kvm_vcpu_running()'s call to kvm_check_nested_events(). Until that call
> is removed, however, the patch introduces no functional change.
>
> Reported-by: Maxim Levitsky <mlevitsk@redhat.com>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 +-
>  arch/x86/kvm/vmx/nested.c       | 9 ++++++++-
>  arch/x86/kvm/x86.c              | 8 ++++----
>  3 files changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 5ffa578cafe1..293ff678fff5 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1636,7 +1636,7 @@ struct kvm_x86_nested_ops {
>  	int (*check_events)(struct kvm_vcpu *vcpu);
>  	bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
>  					     struct x86_exception *fault);
> -	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
> +	bool (*has_events)(struct kvm_vcpu *vcpu);
>  	void (*triple_fault)(struct kvm_vcpu *vcpu);
>  	int (*get_state)(struct kvm_vcpu *vcpu,
>  			 struct kvm_nested_state __user *user_kvm_nested_state,
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index ddd4367d4826..9631cdcdd058 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3876,6 +3876,13 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
>  		to_vmx(vcpu)->nested.preemption_timer_expired;
>  }
>
> +static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	return nested_vmx_preemption_timer_pending(vcpu) || vmx->nested.mtf_pending;

How about:

	return nested_vmx_preemption_timer_pending(vcpu) ||
	       to_vmx(vcpu)->nested.mtf_pending;

to use fewer lines and honor the 80 char soft-limit?
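For reference, the helper with that suggestion folded in would look roughly
like this (a sketch of the suggested form only; whitespace approximated, and
the final shape is of course the author's call):

	static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
	{
		/* No local 'vmx': both operands fit on two <80-column lines. */
		return nested_vmx_preemption_timer_pending(vcpu) ||
		       to_vmx(vcpu)->nested.mtf_pending;
	}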
On Thu, 2022-08-11 at 17:06 -0400, Paolo Bonzini wrote:
> Interrupts, NMIs etc. sent while in guest mode are already handled
> properly by the *_interrupt_allowed callbacks, but other events can
> cause a vCPU to be runnable that are specific to guest mode.
>
> In the case of VMX there are two, the preemption timer and the
> monitor trap. The VMX preemption timer is already special cased via
> the hv_timer_pending callback, but the purpose of the callback can be
> easily extended to MTF or in fact any other event that can occur only
> in guest mode.

I am just curious, can this happen with MTF? I see that
'vmx->nested.mtf_pending' is only set from
'vmx_update_emulated_instruction', which in turn should only be called
when we emulate an instruction, which implies that the guest is not
halted.

> Rename the callback and add an MTF check; kvm_arch_vcpu_runnable()
> now will return true if an MTF is pending, without relying on
> kvm_vcpu_running()'s call to kvm_check_nested_events(). Until that call
> is removed, however, the patch introduces no functional change.
>
> Reported-by: Maxim Levitsky <mlevitsk@redhat.com>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 +-
>  arch/x86/kvm/vmx/nested.c       | 9 ++++++++-
>  arch/x86/kvm/x86.c              | 8 ++++----
>  3 files changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 5ffa578cafe1..293ff678fff5 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1636,7 +1636,7 @@ struct kvm_x86_nested_ops {
>  	int (*check_events)(struct kvm_vcpu *vcpu);
>  	bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
>  					     struct x86_exception *fault);
> -	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
> +	bool (*has_events)(struct kvm_vcpu *vcpu);
>  	void (*triple_fault)(struct kvm_vcpu *vcpu);
>  	int (*get_state)(struct kvm_vcpu *vcpu,
>  			 struct kvm_nested_state __user *user_kvm_nested_state,
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index ddd4367d4826..9631cdcdd058 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3876,6 +3876,13 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
>  		to_vmx(vcpu)->nested.preemption_timer_expired;
>  }
>
> +static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	return nested_vmx_preemption_timer_pending(vcpu) || vmx->nested.mtf_pending;
> +}
> +
>  static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -6816,7 +6823,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
>  	.leave_nested = vmx_leave_nested,
>  	.check_events = vmx_check_nested_events,
>  	.handle_page_fault_workaround = nested_vmx_handle_page_fault_workaround,
> -	.hv_timer_pending = nested_vmx_preemption_timer_pending,
> +	.has_events = vmx_has_nested_events,
>  	.triple_fault = nested_vmx_triple_fault,
>  	.get_state = vmx_get_nested_state,
>  	.set_state = vmx_set_nested_state,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 7f084613fac8..0f9f24793b8a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9789,8 +9789,8 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
>  	}
>
>  	if (is_guest_mode(vcpu) &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
> +	    kvm_x86_ops.nested_ops->has_events &&
> +	    kvm_x86_ops.nested_ops->has_events(vcpu))
>  		*req_immediate_exit = true;
>
>  	WARN_ON(vcpu->arch.exception.pending);
> @@ -12562,8 +12562,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
>  		return true;
>
>  	if (is_guest_mode(vcpu) &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
> +	    kvm_x86_ops.nested_ops->has_events &&
> +	    kvm_x86_ops.nested_ops->has_events(vcpu))
>  		return true;
>
>  	if (kvm_xen_has_pending_events(vcpu))

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky
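For readers following along outside the kernel tree, the shape of the check
being renamed can be modeled standalone. A minimal sketch with hypothetical
stand-in types (not the real KVM structures); the pointer test before the
call mirrors the guard in kvm_vcpu_has_events(), since the hook may be left
unset by a vendor module:

	#include <stdbool.h>

	/* Hypothetical stand-ins for struct kvm_vcpu / kvm_x86_nested_ops. */
	struct vcpu {
		bool guest_mode;
		bool preemption_timer_expired;
		bool mtf_pending;
	};

	struct nested_ops {
		bool (*has_events)(struct vcpu *vcpu);	/* may be left unset */
	};

	/* Models vmx_has_nested_events(): any guest-mode-only event pending? */
	static bool model_vmx_has_nested_events(struct vcpu *vcpu)
	{
		return vcpu->preemption_timer_expired || vcpu->mtf_pending;
	}

	static const struct nested_ops model_vmx_nested_ops = {
		.has_events = model_vmx_has_nested_events,
	};

	/* Models the check in kvm_vcpu_has_events(): test the optional
	 * callback pointer before invoking it. */
	static bool vcpu_runnable_by_nested_event(const struct nested_ops *ops,
						  struct vcpu *vcpu)
	{
		return vcpu->guest_mode && ops->has_events &&
		       ops->has_events(vcpu);
	}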
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5ffa578cafe1..293ff678fff5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1636,7 +1636,7 @@ struct kvm_x86_nested_ops {
 	int (*check_events)(struct kvm_vcpu *vcpu);
 	bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
 					     struct x86_exception *fault);
-	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+	bool (*has_events)(struct kvm_vcpu *vcpu);
 	void (*triple_fault)(struct kvm_vcpu *vcpu);
 	int (*get_state)(struct kvm_vcpu *vcpu,
 			 struct kvm_nested_state __user *user_kvm_nested_state,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ddd4367d4826..9631cdcdd058 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3876,6 +3876,13 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
 		to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
+static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	return nested_vmx_preemption_timer_pending(vcpu) || vmx->nested.mtf_pending;
+}
+
 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6816,7 +6823,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.leave_nested = vmx_leave_nested,
 	.check_events = vmx_check_nested_events,
 	.handle_page_fault_workaround = nested_vmx_handle_page_fault_workaround,
-	.hv_timer_pending = nested_vmx_preemption_timer_pending,
+	.has_events = vmx_has_nested_events,
 	.triple_fault = nested_vmx_triple_fault,
 	.get_state = vmx_get_nested_state,
 	.set_state = vmx_set_nested_state,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7f084613fac8..0f9f24793b8a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9789,8 +9789,8 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
 	}
 
 	if (is_guest_mode(vcpu) &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+	    kvm_x86_ops.nested_ops->has_events &&
+	    kvm_x86_ops.nested_ops->has_events(vcpu))
 		*req_immediate_exit = true;
 
 	WARN_ON(vcpu->arch.exception.pending);
@@ -12562,8 +12562,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 		return true;
 
 	if (is_guest_mode(vcpu) &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+	    kvm_x86_ops.nested_ops->has_events &&
+	    kvm_x86_ops.nested_ops->has_events(vcpu))
 		return true;
 
 	if (kvm_xen_has_pending_events(vcpu))