diff mbox series

[7/7] ioreq: provide support for long-running operations...

Message ID 20190821145903.45934-8-roger.pau@citrix.com (mailing list archive)
State Superseded
Headers show
Series ioreq: add support for internal servers | expand

Commit Message

Roger Pau Monné Aug. 21, 2019, 2:59 p.m. UTC
...and switch vPCI to use this infrastructure for long-running
physmap modification operations.

This makes it possible to get rid of the vPCI-specific modifications done to
handle_hvm_io_completion and allows generalizing the support for
long-running operations to other internal ioreq servers. Such support
is implemented as a specific handler that can be registered by internal
ioreq servers and that will be called to check for pending work.
Returning true from this handler will prevent the vcpu from running
until the handler returns false.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/hvm/ioreq.c         | 55 ++++++++++++++++++++++++++++----
 xen/drivers/vpci/vpci.c          |  3 ++
 xen/include/asm-x86/hvm/domain.h |  1 +
 xen/include/asm-x86/hvm/ioreq.h  |  2 ++
 4 files changed, 55 insertions(+), 6 deletions(-)

Comments

Paul Durrant Aug. 22, 2019, 9:15 a.m. UTC | #1
> -----Original Message-----
> From: Roger Pau Monne <roger.pau@citrix.com>
> Sent: 21 August 2019 15:59
> To: xen-devel@lists.xenproject.org
> Cc: Roger Pau Monne <roger.pau@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Jan Beulich
> <jbeulich@suse.com>; Andrew Cooper <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; George Dunlap
> <George.Dunlap@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Julien Grall <julien.grall@arm.com>;
> Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>; Stefano Stabellini <sstabellini@kernel.org>; Tim
> (Xen.org) <tim@xen.org>
> Subject: [PATCH 7/7] ioreq: provide support for long-running operations...
> 
> ...and switch vPCI to use this infrastructure for long running
> physmap modification operations.
> 
> This allows to get rid of the vPCI specific modifications done to
> handle_hvm_io_completion and allows generalizing the support for
> long-running operations to other internal ioreq servers. Such support
> is implemented as a specific handler that can be registers by internal
> ioreq servers and that will be called to check for pending work.
> Returning true from this handler will prevent the vcpu from running
> until the handler returns false.

Rather than having another callback, can the handler not be re-called with the same ioreq? It could return different values depending on whether there is more work to do (requiring another call) or whether it is done and the vcpu can be resumed. Would that work?

  Paul

> 
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> ---
>  xen/arch/x86/hvm/ioreq.c         | 55 ++++++++++++++++++++++++++++----
>  xen/drivers/vpci/vpci.c          |  3 ++
>  xen/include/asm-x86/hvm/domain.h |  1 +
>  xen/include/asm-x86/hvm/ioreq.h  |  2 ++
>  4 files changed, 55 insertions(+), 6 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index b2582bd3a0..8e160a0a14 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -186,18 +186,29 @@ bool handle_hvm_io_completion(struct vcpu *v)
>      enum hvm_io_completion io_completion;
>      unsigned int id;
> 
> -    if ( has_vpci(d) && vpci_process_pending(v) )
> -    {
> -        raise_softirq(SCHEDULE_SOFTIRQ);
> -        return false;
> -    }
> -
>      FOR_EACH_IOREQ_SERVER(d, id, s)
>      {
>          struct hvm_ioreq_vcpu *sv;
> 
>          if ( s->internal )
> +        {
> +            if ( s->pending && s->pending(v) )
> +            {
> +                /*
> +                 * Need to raise a scheduler irq in order to prevent the guest
> +                 * vcpu from resuming execution.
> +                 *
> +                 * Note this is not required for external ioreq operations
> +                 * because in that case the vcpu is marked as blocked, but this
> +                 * cannot be done for long-running internal operations, since
> +                 * it would prevent the vcpu from being scheduled and thus the
> +                 * long running operation from finishing.
> +                 */
> +                raise_softirq(SCHEDULE_SOFTIRQ);
> +                return false;
> +            }
>              continue;
> +        }
> 
>          list_for_each_entry ( sv,
>                                &s->ioreq_vcpu_list,
> @@ -518,6 +529,38 @@ int hvm_add_ioreq_handler(struct domain *d, ioservid_t id,
>      return rc;
>  }
> 
> +int hvm_add_ioreq_pending_handler(struct domain *d, ioservid_t id,
> +                                  bool (*pending)(struct vcpu *v))
> +{
> +    struct hvm_ioreq_server *s;
> +    int rc = 0;
> +
> +    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
> +    s = get_ioreq_server(d, id);
> +    if ( !s )
> +    {
> +        rc = -ENOENT;
> +        goto out;
> +    }
> +    if ( !s->internal )
> +    {
> +        rc = -EINVAL;
> +        goto out;
> +    }
> +    if ( s->pending != NULL )
> +    {
> +        rc = -EBUSY;
> +        goto out;
> +    }
> +
> +    s->pending = pending;
> +
> + out:
> +    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
> +
> +    return rc;
> +}
> +
>  static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
>                                      struct hvm_ioreq_vcpu *sv)
>  {
> diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c
> index 510e3ee771..54b0f31612 100644
> --- a/xen/drivers/vpci/vpci.c
> +++ b/xen/drivers/vpci/vpci.c
> @@ -508,6 +508,9 @@ int vpci_register_ioreq(struct domain *d)
>          return rc;
> 
>      rc = hvm_add_ioreq_handler(d, id, ioreq_handler);
> +    if ( rc )
> +        return rc;
> +    rc = hvm_add_ioreq_pending_handler(d, id, vpci_process_pending);
>      if ( rc )
>          return rc;
> 
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index f0be303517..80a38ffe48 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -73,6 +73,7 @@ struct hvm_ioreq_server {
>          };
>          struct {
>              int (*handler)(struct vcpu *v, ioreq_t *);
> +            bool (*pending)(struct vcpu *v);
>          };
>      };
>  };
> diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> index 10b9586885..cc3e27d059 100644
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -57,6 +57,8 @@ void hvm_ioreq_init(struct domain *d);
> 
>  int hvm_add_ioreq_handler(struct domain *d, ioservid_t id,
>                            int (*handler)(struct vcpu *v, ioreq_t *));
> +int hvm_add_ioreq_pending_handler(struct domain *d, ioservid_t id,
> +                                  bool (*pending)(struct vcpu *v));
> 
>  int hvm_ioreq_register_mmcfg(struct domain *d, paddr_t addr,
>                               unsigned int start_bus, unsigned int end_bus,
> --
> 2.22.0
Roger Pau Monné Aug. 22, 2019, 12:55 p.m. UTC | #2
On Thu, Aug 22, 2019 at 11:15:50AM +0200, Paul Durrant wrote:
> > -----Original Message-----
> > From: Roger Pau Monne <roger.pau@citrix.com>
> > Sent: 21 August 2019 15:59
> > To: xen-devel@lists.xenproject.org
> > Cc: Roger Pau Monne <roger.pau@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Jan Beulich
> > <jbeulich@suse.com>; Andrew Cooper <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; George Dunlap
> > <George.Dunlap@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Julien Grall <julien.grall@arm.com>;
> > Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>; Stefano Stabellini <sstabellini@kernel.org>; Tim
> > (Xen.org) <tim@xen.org>
> > Subject: [PATCH 7/7] ioreq: provide support for long-running operations...
> > 
> > ...and switch vPCI to use this infrastructure for long running
> > physmap modification operations.
> > 
> > This allows to get rid of the vPCI specific modifications done to
> > handle_hvm_io_completion and allows generalizing the support for
> > long-running operations to other internal ioreq servers. Such support
> > is implemented as a specific handler that can be registers by internal
> > ioreq servers and that will be called to check for pending work.
> > Returning true from this handler will prevent the vcpu from running
> > until the handler returns false.
> 
> Rather than having another callback can the handler not be re-called with same ioreq? It could return different values depending on whether there is more work to do (requiring another call) or whether it is done and the vcpu can be resumed. Would that work?

I guess this would work also. The issue with this approach is that I
would have to find somewhere to store the ioreq while the operation is
being processed, which is not required with the proposed two handler
approach.

Thanks, Roger.
Paul Durrant Aug. 22, 2019, 1:07 p.m. UTC | #3
> -----Original Message-----
> From: Roger Pau Monne <roger.pau@citrix.com>
> Sent: 22 August 2019 13:55
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: xen-devel@lists.xenproject.org; Jan Beulich <jbeulich@suse.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; George Dunlap <George.Dunlap@citrix.com>; Ian
> Jackson <Ian.Jackson@citrix.com>; Julien Grall <julien.grall@arm.com>; Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com>; Stefano Stabellini <sstabellini@kernel.org>; Tim (Xen.org) <tim@xen.org>
> Subject: Re: [PATCH 7/7] ioreq: provide support for long-running operations...
> 
> On Thu, Aug 22, 2019 at 11:15:50AM +0200, Paul Durrant wrote:
> > > -----Original Message-----
> > > From: Roger Pau Monne <roger.pau@citrix.com>
> > > Sent: 21 August 2019 15:59
> > > To: xen-devel@lists.xenproject.org
> > > Cc: Roger Pau Monne <roger.pau@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Jan Beulich
> > > <jbeulich@suse.com>; Andrew Cooper <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; George
> Dunlap
> > > <George.Dunlap@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Julien Grall
> <julien.grall@arm.com>;
> > > Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>; Stefano Stabellini <sstabellini@kernel.org>; Tim
> > > (Xen.org) <tim@xen.org>
> > > Subject: [PATCH 7/7] ioreq: provide support for long-running operations...
> > >
> > > ...and switch vPCI to use this infrastructure for long running
> > > physmap modification operations.
> > >
> > > This allows to get rid of the vPCI specific modifications done to
> > > handle_hvm_io_completion and allows generalizing the support for
> > > long-running operations to other internal ioreq servers. Such support
> > > is implemented as a specific handler that can be registers by internal
> > > ioreq servers and that will be called to check for pending work.
> > > Returning true from this handler will prevent the vcpu from running
> > > until the handler returns false.
> >
> > Rather than having another callback can the handler not be re-called with same ioreq? It could
> return different values depending on whether there is more work to do (requiring another call) or
> whether it is done and the vcpu can be resumed. Would that work?
> 
> I guess this would work also. The issue with this approach is that I
> would have to find somewhere to store the ioreq while the operation is
> being processed, which is not required with the proposed two handler
> approach.

The ioreq already is stored in v->arch.hvm.hvm_io.io_req anyway, so can't you use that copy?

  Paul

> 
> Thanks, Roger.
diff mbox series

Patch

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index b2582bd3a0..8e160a0a14 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -186,18 +186,29 @@  bool handle_hvm_io_completion(struct vcpu *v)
     enum hvm_io_completion io_completion;
     unsigned int id;
 
-    if ( has_vpci(d) && vpci_process_pending(v) )
-    {
-        raise_softirq(SCHEDULE_SOFTIRQ);
-        return false;
-    }
-
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
 
         if ( s->internal )
+        {
+            if ( s->pending && s->pending(v) )
+            {
+                /*
+                 * Need to raise a scheduler irq in order to prevent the guest
+                 * vcpu from resuming execution.
+                 *
+                 * Note this is not required for external ioreq operations
+                 * because in that case the vcpu is marked as blocked, but this
+                 * cannot be done for long-running internal operations, since
+                 * it would prevent the vcpu from being scheduled and thus the
+                 * long running operation from finishing.
+                 */
+                raise_softirq(SCHEDULE_SOFTIRQ);
+                return false;
+            }
             continue;
+        }
 
         list_for_each_entry ( sv,
                               &s->ioreq_vcpu_list,
@@ -518,6 +529,38 @@  int hvm_add_ioreq_handler(struct domain *d, ioservid_t id,
     return rc;
 }
 
+int hvm_add_ioreq_pending_handler(struct domain *d, ioservid_t id,
+                                  bool (*pending)(struct vcpu *v))
+{
+    struct hvm_ioreq_server *s;
+    int rc = 0;
+
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    s = get_ioreq_server(d, id);
+    if ( !s )
+    {
+        rc = -ENOENT;
+        goto out;
+    }
+    if ( !s->internal )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+    if ( s->pending != NULL )
+    {
+        rc = -EBUSY;
+        goto out;
+    }
+
+    s->pending = pending;
+
+ out:
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+
+    return rc;
+}
+
 static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
                                     struct hvm_ioreq_vcpu *sv)
 {
diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c
index 510e3ee771..54b0f31612 100644
--- a/xen/drivers/vpci/vpci.c
+++ b/xen/drivers/vpci/vpci.c
@@ -508,6 +508,9 @@  int vpci_register_ioreq(struct domain *d)
         return rc;
 
     rc = hvm_add_ioreq_handler(d, id, ioreq_handler);
+    if ( rc )
+        return rc;
+    rc = hvm_add_ioreq_pending_handler(d, id, vpci_process_pending);
     if ( rc )
         return rc;
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f0be303517..80a38ffe48 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -73,6 +73,7 @@  struct hvm_ioreq_server {
         };
         struct {
             int (*handler)(struct vcpu *v, ioreq_t *);
+            bool (*pending)(struct vcpu *v);
         };
     };
 };
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index 10b9586885..cc3e27d059 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -57,6 +57,8 @@  void hvm_ioreq_init(struct domain *d);
 
 int hvm_add_ioreq_handler(struct domain *d, ioservid_t id,
                           int (*handler)(struct vcpu *v, ioreq_t *));
+int hvm_add_ioreq_pending_handler(struct domain *d, ioservid_t id,
+                                  bool (*pending)(struct vcpu *v));
 
 int hvm_ioreq_register_mmcfg(struct domain *d, paddr_t addr,
                              unsigned int start_bus, unsigned int end_bus,