[3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request pending

Message ID: ab92e0ec-d869-dae6-f47c-b7ac55bea326@suse.com
State: New, archived
Series: x86/hvm: follow-up to f7039ee41b3d

Commit Message

Jan Beulich July 15, 2020, 12:05 p.m. UTC
It seems pretty likely that the "break" in the loop being replaced in
handle_hvm_io_completion() was meant to exit both nested loops at the
same time. Re-purpose what was hvm_io_pending() to hand back the
struct hvm_ioreq_vcpu instance found, and use it to replace the two
nested loops.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
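
In C a "break" leaves only the innermost enclosing loop, so the code
being replaced merely moved on to the next ioreq server after finding
the pending vCPU. A minimal standalone sketch of that behaviour
(hypothetical loop bounds, not Xen code):

    #include <stdio.h>

    int main(void)
    {
        for ( int s = 0; s < 3; s++ )          /* outer: servers */
        {
            for ( int v = 0; v < 3; v++ )      /* inner: vcpus */
            {
                if ( s == 1 && v == 1 )
                    break;  /* exits the inner loop only; s == 2 still runs */
                printf("s=%d v=%d\n", s, v);
            }
        }
        return 0;
    }

Returning from a dedicated helper, as get_pending_vcpu() does in the
patch below, exits both levels at once without needing a goto.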

Comments

Paul Durrant July 15, 2020, 12:52 p.m. UTC | #1
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 15 July 2020 13:05
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>
> Subject: [PATCH 3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request
> pending
> 
> It seems pretty likely that the "break" in the loop being replaced in
> handle_hvm_io_completion() was meant to exit both nested loops at the
> same time. Re-purpose what was hvm_io_pending() to hand back the
> struct hvm_ioreq_vcpu instance found, and use it to replace the two
> nested loops.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Yes, much neater...

Reviewed-by: Paul Durrant <paul@xen.org>


Patch

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -81,7 +81,8 @@ static ioreq_t *get_ioreq(struct hvm_ior
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
-bool hvm_io_pending(struct vcpu *v)
+static struct hvm_ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
+                                               struct hvm_ioreq_server **srvp)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
@@ -96,11 +97,20 @@ bool hvm_io_pending(struct vcpu *v)
                               list_entry )
         {
             if ( sv->vcpu == v && sv->pending )
-                return true;
+            {
+                if ( srvp )
+                    *srvp = s;
+                return sv;
+            }
         }
     }
 
-    return false;
+    return NULL;
+}
+
+bool hvm_io_pending(struct vcpu *v)
+{
+    return get_pending_vcpu(v, NULL);
 }
 
 static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
@@ -165,8 +175,8 @@ bool handle_hvm_io_completion(struct vcp
     struct domain *d = v->domain;
     struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
     struct hvm_ioreq_server *s;
+    struct hvm_ioreq_vcpu *sv;
     enum hvm_io_completion io_completion;
-    unsigned int id;
 
     if ( has_vpci(d) && vpci_process_pending(v) )
     {
@@ -174,23 +184,9 @@ bool handle_hvm_io_completion(struct vcp
         return false;
     }
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
-    {
-        struct hvm_ioreq_vcpu *sv;
-
-        list_for_each_entry ( sv,
-                              &s->ioreq_vcpu_list,
-                              list_entry )
-        {
-            if ( sv->vcpu == v && sv->pending )
-            {
-                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
-                    return false;
-
-                break;
-            }
-        }
-    }
+    sv = get_pending_vcpu(v, &s);
+    if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
+        return false;
 
     vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
         STATE_IORESP_READY : STATE_IOREQ_NONE;
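
The split between get_pending_vcpu() and hvm_io_pending() follows a
common C idiom: the lookup helper returns the matching element (or
NULL) and reports the owning structure through an optional
out-parameter, while the boolean predicate becomes a thin wrapper
relying on the pointer's implicit conversion to bool. A self-contained
sketch of the shape, with illustrative names not taken from the Xen
tree:

    #include <stdbool.h>
    #include <stddef.h>

    struct server;                   /* stands in for hvm_ioreq_server */
    struct entry { bool pending; };  /* stands in for hvm_ioreq_vcpu   */

    /* Finder: return the match (or NULL); optionally report its owner. */
    static struct entry *find_pending(struct server **srvp)
    {
        /* The real search over all servers and entries is elided. */
        if ( srvp )
            *srvp = NULL;            /* set to the owning server on a hit */
        return NULL;
    }

    /* Predicate: a non-NULL pointer converts to true, NULL to false. */
    static bool is_pending(void)
    {
        return find_pending(NULL);
    }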