[v2,04/10] vm_event: Simplify vm_event interface

Message ID: 46863526d6b28433a75914399d52954c4ca19950.1563293545.git.ppircalabu@bitdefender.com (mailing list archive)
State: New, archived
Series: Per vcpu vm_event channels

Commit Message

Petre Ovidiu PIRCALABU July 16, 2019, 5:06 p.m. UTC
Remove the domain reference from calls to the vm_event interface functions
and use the backpointer from vm_event_domain instead (a before/after sketch
of the call-site change follows the diffstat below).

Affected functions:
- __vm_event_claim_slot / vm_event_claim_slot / vm_event_claim_slot_nosleep
- vm_event_cancel_slot
- vm_event_put_request

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
 xen/arch/x86/mm/mem_sharing.c |  5 ++---
 xen/arch/x86/mm/p2m.c         | 10 +++++-----
 xen/common/monitor.c          |  4 ++--
 xen/common/vm_event.c         | 37 ++++++++++++++++++-------------------
 xen/include/xen/vm_event.h    | 17 +++++++----------
 5 files changed, 34 insertions(+), 39 deletions(-)
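
For readers of the archive, the essence of the change: struct vm_event_domain
carries a back-pointer to its owning domain (the ved->d field read in the
vm_event.c hunks below, presumably introduced earlier in this series), so the
separate struct domain argument at every call site becomes redundant. A
minimal before/after sketch of the call-site change, using the monitor ring
as in monitor_traps() below; illustrative only:

    /* Before this patch: callers passed both the domain and its ring. */
    rc = vm_event_claim_slot(d, d->vm_event_monitor);
    ...
    vm_event_put_request(d, d->vm_event_monitor, req);

    /* After this patch: the ring alone is passed; the owning domain is
     * recovered inside xen/common/vm_event.c via the ved->d back-pointer. */
    rc = vm_event_claim_slot(d->vm_event_monitor);
    ...
    vm_event_put_request(d->vm_event_monitor, req);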

Comments

Tamas K Lengyel July 16, 2019, 9:04 p.m. UTC | #1
On Tue, Jul 16, 2019 at 11:06 AM Petre Pircalabu
<ppircalabu@bitdefender.com> wrote:
>
> Remove the domain reference from calls to vm_event interface function
> and use the backpointer from vm_event_domain.
>
> Affected functions:
> - __vm_event_claim_slot / vm_event_claim_slot / vm_event_claim_slot_nosleep
> - vm_event_cancel_slot
> - vm_event_put_request
>
> Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>

Acked-by: Tamas K Lengyel <tamas@tklengyel.com>

Alexandru Stefan ISAILA July 17, 2019, 11:13 a.m. UTC | #2
On 16.07.2019 20:06, Petre Pircalabu wrote:
> Remove the domain reference from calls to vm_event interface function
> and use the backpointer from vm_event_domain.
> 
> Affected functions:
> - __vm_event_claim_slot / vm_event_claim_slot / vm_event_claim_slot_nosleep
> - vm_event_cancel_slot
> - vm_event_put_request
> 
> Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>

Reviewed-by: Alexandru Isaila <aisaila@bitdefender.com>

Patch

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index f16a3f5..9d80389 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -557,8 +557,7 @@  int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         .u.mem_sharing.p2mt = p2m_ram_shared
     };
 
-    if ( (rc = __vm_event_claim_slot(d, 
-                        d->vm_event_share, allow_sleep)) < 0 )
+    if ( (rc = __vm_event_claim_slot(d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
     if ( v->domain == d )
@@ -567,7 +566,7 @@  int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         vm_event_vcpu_pause(v);
     }
 
-    vm_event_put_request(d, d->vm_event_share, &req);
+    vm_event_put_request(d->vm_event_share, &req);
 
     return 0;
 }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4c99548..85de64f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1652,7 +1652,7 @@  void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
      * correctness of the guest execution at this point.  If this is the only
      * page that happens to be paged-out, we'll be okay..  but it's likely the
      * guest will crash shortly anyways. */
-    int rc = vm_event_claim_slot(d, d->vm_event_paging);
+    int rc = vm_event_claim_slot(d->vm_event_paging);
     if ( rc < 0 )
         return;
 
@@ -1666,7 +1666,7 @@  void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
         /* Evict will fail now, tag this request for pager */
         req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-    vm_event_put_request(d, d->vm_event_paging, &req);
+    vm_event_put_request(d->vm_event_paging, &req);
 }
 
 /**
@@ -1704,7 +1704,7 @@  void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* We're paging. There should be a ring */
-    int rc = vm_event_claim_slot(d, d->vm_event_paging);
+    int rc = vm_event_claim_slot(d->vm_event_paging);
 
     if ( rc == -EOPNOTSUPP )
     {
@@ -1746,7 +1746,7 @@  void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     {
         /* gfn is already on its way back and vcpu is not paused */
     out_cancel:
-        vm_event_cancel_slot(d, d->vm_event_paging);
+        vm_event_cancel_slot(d->vm_event_paging);
         return;
     }
 
@@ -1754,7 +1754,7 @@  void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     req.u.mem_paging.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    vm_event_put_request(d, d->vm_event_paging, &req);
+    vm_event_put_request(d->vm_event_paging, &req);
 }
 
 /**
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index d5c9ff1..b8d33c4 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -93,7 +93,7 @@  int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     int rc;
     struct domain *d = v->domain;
 
-    rc = vm_event_claim_slot(d, d->vm_event_monitor);
+    rc = vm_event_claim_slot(d->vm_event_monitor);
     switch ( rc )
     {
     case 0:
@@ -125,7 +125,7 @@  int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     }
 
     vm_event_fill_regs(req);
-    vm_event_put_request(d, d->vm_event_monitor, req);
+    vm_event_put_request(d->vm_event_monitor, req);
 
     return rc;
 }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 787c61c..a235d25 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -119,10 +119,11 @@  static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
  * but need to be resumed where the ring is capable of processing at least
  * one event from them.
  */
-static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_wake_blocked(struct vm_event_domain *ved)
 {
     struct vcpu *v;
     unsigned int i, j, k, avail_req = vm_event_ring_available(ved);
+    struct domain *d = ved->d;
 
     if ( avail_req == 0 || ved->blocked == 0 )
         return;
@@ -154,7 +155,7 @@  static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
  * was unable to do so, it is queued on a wait queue.  These are woken as
  * needed, and take precedence over the blocked vCPUs.
  */
-static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
+static void vm_event_wake_queued(struct vm_event_domain *ved)
 {
     unsigned int avail_req = vm_event_ring_available(ved);
 
@@ -169,12 +170,12 @@  static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
  * call vm_event_wake() again, ensuring that any blocked vCPUs will get
  * unpaused once all the queued vCPUs have made it through.
  */
-void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
+void vm_event_wake(struct vm_event_domain *ved)
 {
     if ( !list_empty(&ved->wq.list) )
-        vm_event_wake_queued(d, ved);
+        vm_event_wake_queued(ved);
     else
-        vm_event_wake_blocked(d, ved);
+        vm_event_wake_blocked(ved);
 }
 
 static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
@@ -219,17 +220,16 @@  static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
     return 0;
 }
 
-static void vm_event_release_slot(struct domain *d,
-                                  struct vm_event_domain *ved)
+static void vm_event_release_slot(struct vm_event_domain *ved)
 {
     /* Update the accounting */
-    if ( current->domain == d )
+    if ( current->domain == ved->d )
         ved->target_producers--;
     else
         ved->foreign_producers--;
 
     /* Kick any waiters */
-    vm_event_wake(d, ved);
+    vm_event_wake(ved);
 }
 
 /*
@@ -251,8 +251,7 @@  static void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
  * overly full and its continued execution would cause stalling and excessive
  * waiting.  The vCPU will be automatically unpaused when the ring clears.
  */
-void vm_event_put_request(struct domain *d,
-                          struct vm_event_domain *ved,
+void vm_event_put_request(struct vm_event_domain *ved,
                           vm_event_request_t *req)
 {
     vm_event_front_ring_t *front_ring;
@@ -260,6 +259,7 @@  void vm_event_put_request(struct domain *d,
     unsigned int avail_req;
     RING_IDX req_prod;
     struct vcpu *curr = current;
+    struct domain *d = ved->d;
 
     if( !vm_event_check(ved) )
         return;
@@ -292,7 +292,7 @@  void vm_event_put_request(struct domain *d,
     RING_PUSH_REQUESTS(front_ring);
 
     /* We've actually *used* our reservation, so release the slot. */
-    vm_event_release_slot(d, ved);
+    vm_event_release_slot(ved);
 
     /* Give this vCPU a black eye if necessary, on the way out.
      * See the comments above wake_blocked() for more information
@@ -332,7 +332,7 @@  static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
 
     /* Kick any waiters -- since we've just consumed an event,
      * there may be additional space available in the ring. */
-    vm_event_wake(d, ved);
+    vm_event_wake(ved);
 
     rc = 1;
 
@@ -433,13 +433,13 @@  static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
     return 0;
 }
 
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
+void vm_event_cancel_slot(struct vm_event_domain *ved)
 {
     if( !vm_event_check(ved) )
         return;
 
     spin_lock(&ved->lock);
-    vm_event_release_slot(d, ved);
+    vm_event_release_slot(ved);
     spin_unlock(&ved->lock);
 }
 
@@ -507,16 +507,15 @@  bool vm_event_check(struct vm_event_domain *ved)
  *               0: a spot has been reserved
  *
  */
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
-                          bool allow_sleep)
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
 {
     if ( !vm_event_check(ved) )
         return -EOPNOTSUPP;
 
-    if ( (current->domain == d) && allow_sleep )
+    if ( (current->domain == ved->d) && allow_sleep )
         return vm_event_wait_slot(ved);
     else
-        return vm_event_grab_slot(ved, current->domain != d);
+        return vm_event_grab_slot(ved, current->domain != ved->d);
 }
 
 #ifdef CONFIG_HAS_MEM_PAGING
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 381be0b..ff30999 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -45,23 +45,20 @@  bool vm_event_check(struct vm_event_domain *ved);
  * cancel_slot(), both of which are guaranteed to
  * succeed.
  */
-int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
-                          bool allow_sleep);
-static inline int vm_event_claim_slot(struct domain *d,
-                                      struct vm_event_domain *ved)
+int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep);
+static inline int vm_event_claim_slot(struct vm_event_domain *ved)
 {
-    return __vm_event_claim_slot(d, ved, true);
+    return __vm_event_claim_slot(ved, true);
 }
 
-static inline int vm_event_claim_slot_nosleep(struct domain *d,
-                                              struct vm_event_domain *ved)
+static inline int vm_event_claim_slot_nosleep(struct vm_event_domain *ved)
 {
-    return __vm_event_claim_slot(d, ved, false);
+    return __vm_event_claim_slot(ved, false);
 }
 
-void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
+void vm_event_cancel_slot(struct vm_event_domain *ved);
 
-void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
+void vm_event_put_request(struct vm_event_domain *ved,
                           vm_event_request_t *req);
 
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
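
As a usage note on the reworked declarations above: an in-hypervisor producer
now needs only the ring pointer. The sketch below is a hypothetical caller,
not part of the patch (example_send_event() and its choice of the monitor
ring are assumptions); it mirrors the claim/put pattern of
mem_sharing_notify_enomem() in the first hunk:

    /* Hypothetical caller of the simplified interface (not in the patch). */
    static int example_send_event(struct vcpu *v, vm_event_request_t *req)
    {
        struct vm_event_domain *ved = v->domain->vm_event_monitor;
        int rc;

        /* Reserve a slot; fails with -EOPNOTSUPP if no ring is set up. */
        if ( (rc = vm_event_claim_slot(ved)) < 0 )
            return rc;

        req->vcpu_id = v->vcpu_id;

        /* No struct domain argument: vm_event.c uses the ved->d back-pointer. */
        vm_event_put_request(ved, req);

        return 0;
    }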