
[v2,3/3] KVM: arm64: Use generic KVM xfer to guest work function

Message ID 20210729220916.1672875-4-oupton@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Use generic guest entry infrastructure

Commit Message

Oliver Upton July 29, 2021, 10:09 p.m. UTC
Clean up handling of checks for pending work by switching to the generic
infrastructure to do so.

We pick up handling for TIF_NOTIFY_RESUME from this switch, meaning that
task work will be correctly handled.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/kvm/Kconfig |  1 +
 arch/arm64/kvm/arm.c   | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 13 deletions(-)

Comments

Marc Zyngier July 30, 2021, 9:41 a.m. UTC | #1
Hi Oliver,

On Thu, 29 Jul 2021 23:09:16 +0100,
Oliver Upton <oupton@google.com> wrote:
> 
> Clean up handling of checks for pending work by switching to the generic
> infrastructure to do so.
> 
> We pick up handling for TIF_NOTIFY_RESUME from this switch, meaning that
> task work will be correctly handled.
> 
> Signed-off-by: Oliver Upton <oupton@google.com>
> ---
>  arch/arm64/kvm/Kconfig |  1 +
>  arch/arm64/kvm/arm.c   | 27 ++++++++++++++-------------
>  2 files changed, 15 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
> index a4eba0908bfa..8bc1fac5fa26 100644
> --- a/arch/arm64/kvm/Kconfig
> +++ b/arch/arm64/kvm/Kconfig
> @@ -26,6 +26,7 @@ menuconfig KVM
>  	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
>  	select KVM_MMIO
>  	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
> +	select KVM_XFER_TO_GUEST_WORK
>  	select SRCU
>  	select KVM_VFIO
>  	select HAVE_KVM_EVENTFD
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 60d0a546d7fd..9762e2129813 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -6,6 +6,7 @@
>  
>  #include <linux/bug.h>
>  #include <linux/cpu_pm.h>
> +#include <linux/entry-kvm.h>
>  #include <linux/errno.h>
>  #include <linux/err.h>
>  #include <linux/kvm_host.h>
> @@ -714,6 +715,13 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
>  		static_branch_unlikely(&arm64_mismatched_32bit_el0);
>  }
>  
> +static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_request_pending(vcpu) ||
> +			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> +			xfer_to_guest_mode_work_pending();

Here's what xfer_to_guest_mode_work_pending() says:

<quote>
 * Has to be invoked with interrupts disabled before the transition to
 * guest mode.
</quote>

At the point where you call this, we already are in guest mode, at
least in the KVM sense.

> +}
> +
>  /**
>   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
>   * @vcpu:	The VCPU pointer
> @@ -757,7 +765,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  		/*
>  		 * Check conditions before entering the guest
>  		 */
> -		cond_resched();
> +		if (__xfer_to_guest_mode_work_pending()) {
> +			ret = xfer_to_guest_mode_handle_work(vcpu);

xfer_to_guest_mode_handle_work() already does the exact equivalent of
__xfer_to_guest_mode_work_pending(). Why do we need to do it twice?
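i.e. the surrounding check can simply be dropped (a sketch, untested):

	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (!ret)
		ret = 1;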

> +			if (!ret)
> +				ret = 1;
> +		}
>  
>  		update_vmid(&vcpu->arch.hw_mmu->vmid);
>  
> @@ -776,16 +788,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  
>  		kvm_vgic_flush_hwstate(vcpu);
>  
> -		/*
> -		 * Exit if we have a signal pending so that we can deliver the
> -		 * signal to user space.
> -		 */
> -		if (signal_pending(current)) {
> -			ret = -EINTR;
> -			run->exit_reason = KVM_EXIT_INTR;
> -			++vcpu->stat.signal_exits;
> -		}
> -
>  		/*
>  		 * If we're using a userspace irqchip, then check if we need
>  		 * to tell a userspace irqchip about timer or PMU level
> @@ -809,8 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  		 */
>  		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
>  
> -		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> -		    kvm_request_pending(vcpu)) {
> +		if (ret <= 0 || kvm_vcpu_exit_request(vcpu)) {

If you are doing this, please move the userspace irqchip handling into
the helper as well, so that we have a single function dealing with
collecting exit reasons.
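Roughly along these lines (only a sketch of the shape, not tested; the
out-parameter and the reuse of kvm_timer_should_notify_user() and
kvm_pmu_should_notify_user() are just one way to do it):

static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	if (kvm_request_pending(vcpu) ||
	    need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
	    xfer_to_guest_mode_work_pending())
		return true;

	/*
	 * With a userspace irqchip, pending timer or PMU level changes
	 * have to be reported to userspace before entering the guest.
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
	    (kvm_timer_should_notify_user(vcpu) ||
	     kvm_pmu_should_notify_user(vcpu))) {
		*ret = -EINTR;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
		return true;
	}

	return false;
}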

>  			vcpu->mode = OUTSIDE_GUEST_MODE;
>  			isb(); /* Ensure work in x_flush_hwstate is committed */
>  			kvm_pmu_sync_hwstate(vcpu);

Thanks,

	M.
Oliver Upton July 30, 2021, 2:33 p.m. UTC | #2
Marc,

On Fri, Jul 30, 2021 at 2:41 AM Marc Zyngier <maz@kernel.org> wrote:
>
> Hi Oliver,
>
> On Thu, 29 Jul 2021 23:09:16 +0100,
> Oliver Upton <oupton@google.com> wrote:
> >
> > Clean up handling of checks for pending work by switching to the generic
> > infrastructure to do so.
> >
> > We pick up handling for TIF_NOTIFY_RESUME from this switch, meaning that
> > task work will be correctly handled.
> >
> > Signed-off-by: Oliver Upton <oupton@google.com>
> > ---
> >  arch/arm64/kvm/Kconfig |  1 +
> >  arch/arm64/kvm/arm.c   | 27 ++++++++++++++-------------
> >  2 files changed, 15 insertions(+), 13 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
> > index a4eba0908bfa..8bc1fac5fa26 100644
> > --- a/arch/arm64/kvm/Kconfig
> > +++ b/arch/arm64/kvm/Kconfig
> > @@ -26,6 +26,7 @@ menuconfig KVM
> >       select HAVE_KVM_ARCH_TLB_FLUSH_ALL
> >       select KVM_MMIO
> >       select KVM_GENERIC_DIRTYLOG_READ_PROTECT
> > +     select KVM_XFER_TO_GUEST_WORK
> >       select SRCU
> >       select KVM_VFIO
> >       select HAVE_KVM_EVENTFD
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 60d0a546d7fd..9762e2129813 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -6,6 +6,7 @@
> >
> >  #include <linux/bug.h>
> >  #include <linux/cpu_pm.h>
> > +#include <linux/entry-kvm.h>
> >  #include <linux/errno.h>
> >  #include <linux/err.h>
> >  #include <linux/kvm_host.h>
> > @@ -714,6 +715,13 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> >               static_branch_unlikely(&arm64_mismatched_32bit_el0);
> >  }
> >
> > +static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
> > +{
> > +     return kvm_request_pending(vcpu) ||
> > +                     need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> > +                     xfer_to_guest_mode_work_pending();
>
> Here's what xfer_to_guest_mode_work_pending() says:
>
> <quote>
>  * Has to be invoked with interrupts disabled before the transition to
>  * guest mode.
> </quote>
>
> At the point where you call this, we already are in guest mode, at
> least in the KVM sense.

I believe the comment refers to guest mode in the hardware sense, not
KVM's vcpu->mode designation. I looked at
arch/x86/kvm/x86.c:vcpu_enter_guest() to infer the author's
intentions.
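
For reference, the x86 helper is roughly the following (paraphrased
from arch/x86/kvm/x86.c, not a verbatim copy):

bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
	return vcpu->mode == EXITING_GUEST_MODE ||
	       kvm_request_pending(vcpu) ||
	       xfer_to_guest_mode_work_pending();
}

and vcpu_enter_guest() calls it with interrupts disabled, right before
the hardware world switch.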

>
> > +}
> > +
> >  /**
> >   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
> >   * @vcpu:    The VCPU pointer
> > @@ -757,7 +765,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> >               /*
> >                * Check conditions before entering the guest
> >                */
> > -             cond_resched();
> > +             if (__xfer_to_guest_mode_work_pending()) {
> > +                     ret = xfer_to_guest_mode_handle_work(vcpu);
>
> xfer_to_guest_mode_handle_work() already does the exact equivalent of
> __xfer_to_guest_mode_work_pending(). Why do we need to do it twice?

Right, there's no need to do the check twice.

>
> > +                     if (!ret)
> > +                             ret = 1;
> > +             }
> >
> >               update_vmid(&vcpu->arch.hw_mmu->vmid);
> >
> > @@ -776,16 +788,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> >
> >               kvm_vgic_flush_hwstate(vcpu);
> >
> > -             /*
> > -              * Exit if we have a signal pending so that we can deliver the
> > -              * signal to user space.
> > -              */
> > -             if (signal_pending(current)) {
> > -                     ret = -EINTR;
> > -                     run->exit_reason = KVM_EXIT_INTR;
> > -                     ++vcpu->stat.signal_exits;
> > -             }
> > -
> >               /*
> >                * If we're using a userspace irqchip, then check if we need
> >                * to tell a userspace irqchip about timer or PMU level
> > @@ -809,8 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> >                */
> >               smp_store_mb(vcpu->mode, IN_GUEST_MODE);
> >
> > -             if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> > -                 kvm_request_pending(vcpu)) {
> > +             if (ret <= 0 || kvm_vcpu_exit_request(vcpu)) {
>
> If you are doing this, please move the userspace irqchip handling into
> the helper as well, so that we have a single function dealing with
> collecting exit reasons.

Sure thing.

Thanks for the quick review, Marc!

--
Best,
Oliver
Sean Christopherson July 30, 2021, 4:56 p.m. UTC | #3
On Fri, Jul 30, 2021, Oliver Upton wrote:
> 
> On Fri, Jul 30, 2021 at 2:41 AM Marc Zyngier <maz@kernel.org> wrote:
> >
> > On Thu, 29 Jul 2021 23:09:16 +0100, Oliver Upton <oupton@google.com> wrote:
> > > @@ -714,6 +715,13 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> > >               static_branch_unlikely(&arm64_mismatched_32bit_el0);
> > >  }
> > >
> > > +static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
> > > +{
> > > +     return kvm_request_pending(vcpu) ||
> > > +                     need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> > > +                     xfer_to_guest_mode_work_pending();
> >
> > Here's what xfer_to_guest_mode_work_pending() says:
> >
> > <quote>
> >  * Has to be invoked with interrupts disabled before the transition to
> >  * guest mode.
> > </quote>
> >
> > At the point where you call this, we already are in guest mode, at
> > least in the KVM sense.
> 
> I believe the comment refers to guest mode in the hardware sense, not
> KVM's vcpu->mode designation. I looked at
> arch/x86/kvm/x86.c:vcpu_enter_guest() to infer the author's
> intentions.

Yeah, the comment is referring to hardware guest mode.  The intent is to verify
there is no work to be done before making the expensive world switch.  There's
no meaningful interaction with vcpu->mode; on x86 it's simply more convenient
from a code perspective to throw it into kvm_vcpu_exit_request().
Oliver Upton July 30, 2021, 5:52 p.m. UTC | #4
On Fri, Jul 30, 2021 at 9:56 AM Sean Christopherson <seanjc@google.com> wrote:
>
> On Fri, Jul 30, 2021, Oliver Upton wrote:
> >
> > On Fri, Jul 30, 2021 at 2:41 AM Marc Zyngier <maz@kernel.org> wrote:
> > >
> > > On Thu, 29 Jul 2021 23:09:16 +0100, Oliver Upton <oupton@google.com> wrote:
> > > > @@ -714,6 +715,13 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> > > >               static_branch_unlikely(&arm64_mismatched_32bit_el0);
> > > >  }
> > > >
> > > > +static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
> > > > +{
> > > > +     return kvm_request_pending(vcpu) ||
> > > > +                     need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
> > > > +                     xfer_to_guest_mode_work_pending();
> > >
> > > Here's what xfer_to_guest_mode_work_pending() says:
> > >
> > > <quote>
> > >  * Has to be invoked with interrupts disabled before the transition to
> > >  * guest mode.
> > > </quote>
> > >
> > > At the point where you call this, we already are in guest mode, at
> > > least in the KVM sense.
> >
> > I believe the comment refers to guest mode in the hardware sense, not
> > KVM's vcpu->mode designation. I looked at
> > arch/x86/kvm/x86.c:vcpu_enter_guest() to infer the author's
> > intentions.
>
> Yeah, the comment is referring to hardware guest mode.  The intent is to verify
> there is no work to be done before making the expensive world switch.  There's
> no meaningful interaction with vcpu->mode; on x86 it's simply more convenient
> from a code perspective to throw it into kvm_vcpu_exit_request().

Yep, the same is true for ARM as well; doing it the way it appears in
this patch lets us reuse the existing block that re-enables irqs and
preemption.
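
Concretely, a pending-work check that fails right before entry can
fall through to the existing bail-out path (abridged from
kvm_arch_vcpu_ioctl_run(), not verbatim):

	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	if (ret <= 0 || kvm_vcpu_exit_request(vcpu)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		isb(); /* Ensure work in x_flush_hwstate is committed */
		kvm_pmu_sync_hwstate(vcpu);
		/* ... remaining hwstate sync ... */
		local_irq_enable();
		preempt_enable();
		continue;
	}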

--
Thanks,
Oliver

Patch

diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a4eba0908bfa..8bc1fac5fa26 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -26,6 +26,7 @@ menuconfig KVM
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	select KVM_XFER_TO_GUEST_WORK
 	select SRCU
 	select KVM_VFIO
 	select HAVE_KVM_EVENTFD
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 60d0a546d7fd..9762e2129813 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -6,6 +6,7 @@ 
 
 #include <linux/bug.h>
 #include <linux/cpu_pm.h>
+#include <linux/entry-kvm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
@@ -714,6 +715,13 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
 		static_branch_unlikely(&arm64_mismatched_32bit_el0);
 }
 
+static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+{
+	return kvm_request_pending(vcpu) ||
+			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
+			xfer_to_guest_mode_work_pending();
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:	The VCPU pointer
@@ -757,7 +765,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/*
 		 * Check conditions before entering the guest
 		 */
-		cond_resched();
+		if (__xfer_to_guest_mode_work_pending()) {
+			ret = xfer_to_guest_mode_handle_work(vcpu);
+			if (!ret)
+				ret = 1;
+		}
 
 		update_vmid(&vcpu->arch.hw_mmu->vmid);
 
@@ -776,16 +788,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		kvm_vgic_flush_hwstate(vcpu);
 
-		/*
-		 * Exit if we have a signal pending so that we can deliver the
-		 * signal to user space.
-		 */
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.signal_exits;
-		}
-
 		/*
 		 * If we're using a userspace irqchip, then check if we need
 		 * to tell a userspace irqchip about timer or PMU level
@@ -809,8 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
-		    kvm_request_pending(vcpu)) {
+		if (ret <= 0 || kvm_vcpu_exit_request(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
 			kvm_pmu_sync_hwstate(vcpu);