diff mbox

[v8,for-4.9,1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around

Message ID 1492783552-29472-1-git-send-email-jennifer.herbert@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Jennifer Herbert April 21, 2017, 2:05 p.m. UTC
From: Jennifer Herbert <Jennifer.Herbert@citrix.com>

No functional change.

Signed-off-by: Jennifer Herbert <Jennifer.Herbert@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

--
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <JBeulich@suse.com>
CC: Julien Grall <julien.grall@arm.com>
---
No change.
---
 xen/arch/x86/hvm/dm.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

Comments

Julien Grall April 21, 2017, 2:17 p.m. UTC | #1
Hi Jennifer,

I don't see any cover letter for this series, so I will answer here.

Looking at the code, it looks like a new feature rather than a bug fix. 
Am I right?

Could you explain what would be the benefits and risks to get this code 
in the release?

I would also like to hear the opinion of the x86 maintainers about getting 
this code in Xen 4.9.

Cheers,

On 21/04/17 15:05, jennifer.herbert@citrix.com wrote:
> From: Jennifer Herbert <Jennifer.Herbert@citrix.com>
>
> No functional change.
>
> Signed-off-by: Jennifer Herbert <Jennifer.Herbert@citrix.com>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
>
> --
> CC: Paul Durrant <paul.durrant@citrix.com>
> CC: Andrew Cooper <andrew.cooper3@citrix.com>
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Julien Grall <julien.grall@arm.com>
> ---
> No change.
> ---
>  xen/arch/x86/hvm/dm.c | 49 +++++++++++++++++++++++++++++--------------------
>  1 file changed, 29 insertions(+), 20 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index d72b7bd..e583e41 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -25,6 +25,13 @@
>
>  #include <xsm/xsm.h>
>
> +struct dmop_args {
> +    domid_t domid;
> +    unsigned int nr_bufs;
> +    /* Reserve enough buf elements for all current hypercalls. */
> +    struct xen_dm_op_buf buf[2];
> +};
> +
>  static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
>                                  unsigned int nr_bufs, void *dst,
>                                  unsigned int idx, size_t dst_size)
> @@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
>  }
>
>  static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
> -                            unsigned int nr, struct xen_dm_op_buf *buf)
> +                            unsigned int nr, const struct xen_dm_op_buf *buf)
>  {
>      if ( nr > (GB(1) >> PAGE_SHIFT) )
>          return -EINVAL;
> @@ -287,16 +294,14 @@ static int inject_event(struct domain *d,
>      return 0;
>  }
>
> -static int dm_op(domid_t domid,
> -                 unsigned int nr_bufs,
> -                 xen_dm_op_buf_t bufs[])
> +static int dm_op(const struct dmop_args *op_args)
>  {
>      struct domain *d;
>      struct xen_dm_op op;
>      bool const_op = true;
>      long rc;
>
> -    rc = rcu_lock_remote_domain_by_id(domid, &d);
> +    rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
>      if ( rc )
>          return rc;
>
> @@ -307,7 +312,7 @@ static int dm_op(domid_t domid,
>      if ( rc )
>          goto out;
>
> -    if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
> +    if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
>      {
>          rc = -EFAULT;
>          goto out;
> @@ -466,10 +471,10 @@ static int dm_op(domid_t domid,
>          if ( data->pad )
>              break;
>
> -        if ( nr_bufs < 2 )
> +        if ( op_args->nr_bufs < 2 )
>              break;
>
> -        rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
> +        rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
>          break;
>      }
>
> @@ -564,7 +569,7 @@ static int dm_op(domid_t domid,
>
>      if ( (!rc || rc == -ERESTART) &&
>           !const_op &&
> -         !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
> +         !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
>          rc = -EFAULT;
>
>   out:
> @@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type;
>  CHECK_dm_op_inject_event;
>  CHECK_dm_op_inject_msi;
>
> -#define MAX_NR_BUFS 2
> -
>  int compat_dm_op(domid_t domid,
>                   unsigned int nr_bufs,
>                   XEN_GUEST_HANDLE_PARAM(void) bufs)
>  {
> -    struct xen_dm_op_buf nat[MAX_NR_BUFS];
> +    struct dmop_args args;
>      unsigned int i;
>      int rc;
>
> -    if ( nr_bufs > MAX_NR_BUFS )
> +    if ( nr_bufs > ARRAY_SIZE(args.buf) )
>          return -E2BIG;
>
> -    for ( i = 0; i < nr_bufs; i++ )
> +    args.domid = domid;
> +    args.nr_bufs = nr_bufs;
> +
> +    for ( i = 0; i < args.nr_bufs; i++ )
>      {
>          struct compat_dm_op_buf cmp;
>
> @@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid,
>  #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
>          guest_from_compat_handle((_d_)->h, (_s_)->h)
>
> -        XLAT_dm_op_buf(&nat[i], &cmp);
> +        XLAT_dm_op_buf(&args.buf[i], &cmp);
>
>  #undef XLAT_dm_op_buf_HNDL_h
>      }
>
> -    rc = dm_op(domid, nr_bufs, nat);
> +    rc = dm_op(&args);
>
>      if ( rc == -ERESTART )
>          rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
> @@ -628,16 +634,19 @@ long do_dm_op(domid_t domid,
>                unsigned int nr_bufs,
>                XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
>  {
> -    struct xen_dm_op_buf nat[MAX_NR_BUFS];
> +    struct dmop_args args;
>      int rc;
>
> -    if ( nr_bufs > MAX_NR_BUFS )
> +    if ( nr_bufs > ARRAY_SIZE(args.buf) )
>          return -E2BIG;
>
> -    if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
> +    args.domid = domid;
> +    args.nr_bufs = nr_bufs;
> +
> +    if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
>          return -EFAULT;
>
> -    rc = dm_op(domid, nr_bufs, nat);
> +    rc = dm_op(&args);
>
>      if ( rc == -ERESTART )
>          rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
>
Andrew Cooper April 21, 2017, 2:42 p.m. UTC | #2
On 21/04/17 15:17, Julien Grall wrote:
> Hi Jennifer,
>
> I don't see any cover letter for this series, so I will answer here.
>
> Looking at the code, it looks like a new feature rather than a bug
> fix. Am I right?
>
> Could you explain what would be the benefits and risks to get this
> code in the release?
>
> I also like to hear the opinion of the x86 maintainers about getting
> this code in Xen 4.9.

Patch 1 fixes a bug in the existing implementation, which absolutely needs
fixing.

Patch 4 is a correction to the DM OP ABI (which is still modifiable,
before becoming properly stable when 4.9 releases).  If that were
postponed to 4.10, we'd have to burn the existing modified_memory subop
and introduce a new corrected one.

The intermediate patches are fallout from previous rounds of review.


One item on my TODO list is to re-review all dmops for proper
continuability before the 4.9 release, so I make no promises that there
won't be further bugfixes needing to get into 4.9.

~Andrew
Jennifer Herbert April 21, 2017, 2:44 p.m. UTC | #3
Hi Julien,

This is extending an existing feature.
Once 4.9 is released, the existing feature will be frozen, and the only 
way to later get the extra functionality would be to create a completely 
new dm_op, which does something very similar to an existing one.
Although not the end of the world, this wouldn't look so nice.

The benefits of the feature are that a VM can request multiple extents 
to be marked as modified at once, without having to loop through them, 
calling the existing call many times.  This will be more efficient and 
faster.
As an extra, additional accessors have been created for dm_op 
operations, which new dm_ops can take advantage of.

The benefit of introducing the feature for 4.9, as opposed to later, is 
that we won't have to support the same feature with multiple dm_ops 
with varying parameters - which, as well as looking less good, also 
unnecessarily bloats the code.

I think the risks are low and minor, affecting dm_op operations only.  
The core change, in 5/5, will only affect the modified memory call, which 
has been tested.  The remaining patches are to tidy up and fix existing 
behaviour.

-jenny

On 21/04/17 15:17, Julien Grall wrote:
> Hi Jennifer,
>
> I don't see any cover letter for this series, so I will answer here.
>
> Looking at the code, it looks like a new feature rather than a bug 
> fix. Am I right?
>
> Could you explain what would be the benefits and risks to get this 
> code in the release?
>
> I also like to hear the opinion of the x86 maintainers about getting 
> this code in Xen 4.9.
>
> Cheers,
>
> On 21/04/17 15:05, jennifer.herbert@citrix.com wrote:
>> From: Jennifer Herbert <Jennifer.Herbert@citrix.com>
>>
>> No functional change.
>>
>> Signed-off-by: Jennifer Herbert <Jennifer.Herbert@citrix.com>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> Reviewed-by: Jan Beulich <jbeulich@suse.com>
>> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
>>
>> -- 
>> CC: Paul Durrant <paul.durrant@citrix.com>
>> CC: Andrew Cooper <andrew.cooper3@citrix.com>
>> CC: Jan Beulich <JBeulich@suse.com>
>> CC: Julien Grall <julien.grall@arm.com>
>> ---
>> No change.
>> ---
>>  xen/arch/x86/hvm/dm.c | 49 
>> +++++++++++++++++++++++++++++--------------------
>>  1 file changed, 29 insertions(+), 20 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
>> index d72b7bd..e583e41 100644
>> --- a/xen/arch/x86/hvm/dm.c
>> +++ b/xen/arch/x86/hvm/dm.c
>> @@ -25,6 +25,13 @@
>>
>>  #include <xsm/xsm.h>
>>
>> +struct dmop_args {
>> +    domid_t domid;
>> +    unsigned int nr_bufs;
>> +    /* Reserve enough buf elements for all current hypercalls. */
>> +    struct xen_dm_op_buf buf[2];
>> +};
>> +
>>  static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
>>                                  unsigned int nr_bufs, void *dst,
>>                                  unsigned int idx, size_t dst_size)
>> @@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t 
>> bufs[],
>>  }
>>
>>  static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
>> -                            unsigned int nr, struct xen_dm_op_buf *buf)
>> +                            unsigned int nr, const struct 
>> xen_dm_op_buf *buf)
>>  {
>>      if ( nr > (GB(1) >> PAGE_SHIFT) )
>>          return -EINVAL;
>> @@ -287,16 +294,14 @@ static int inject_event(struct domain *d,
>>      return 0;
>>  }
>>
>> -static int dm_op(domid_t domid,
>> -                 unsigned int nr_bufs,
>> -                 xen_dm_op_buf_t bufs[])
>> +static int dm_op(const struct dmop_args *op_args)
>>  {
>>      struct domain *d;
>>      struct xen_dm_op op;
>>      bool const_op = true;
>>      long rc;
>>
>> -    rc = rcu_lock_remote_domain_by_id(domid, &d);
>> +    rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
>>      if ( rc )
>>          return rc;
>>
>> @@ -307,7 +312,7 @@ static int dm_op(domid_t domid,
>>      if ( rc )
>>          goto out;
>>
>> -    if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
>> +    if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, 
>> &op, 0, sizeof(op)) )
>>      {
>>          rc = -EFAULT;
>>          goto out;
>> @@ -466,10 +471,10 @@ static int dm_op(domid_t domid,
>>          if ( data->pad )
>>              break;
>>
>> -        if ( nr_bufs < 2 )
>> +        if ( op_args->nr_bufs < 2 )
>>              break;
>>
>> -        rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
>> +        rc = track_dirty_vram(d, data->first_pfn, data->nr, 
>> &op_args->buf[1]);
>>          break;
>>      }
>>
>> @@ -564,7 +569,7 @@ static int dm_op(domid_t domid,
>>
>>      if ( (!rc || rc == -ERESTART) &&
>>           !const_op &&
>> -         !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
>> +         !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, 
>> &op, sizeof(op)) )
>>          rc = -EFAULT;
>>
>>   out:
>> @@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type;
>>  CHECK_dm_op_inject_event;
>>  CHECK_dm_op_inject_msi;
>>
>> -#define MAX_NR_BUFS 2
>> -
>>  int compat_dm_op(domid_t domid,
>>                   unsigned int nr_bufs,
>>                   XEN_GUEST_HANDLE_PARAM(void) bufs)
>>  {
>> -    struct xen_dm_op_buf nat[MAX_NR_BUFS];
>> +    struct dmop_args args;
>>      unsigned int i;
>>      int rc;
>>
>> -    if ( nr_bufs > MAX_NR_BUFS )
>> +    if ( nr_bufs > ARRAY_SIZE(args.buf) )
>>          return -E2BIG;
>>
>> -    for ( i = 0; i < nr_bufs; i++ )
>> +    args.domid = domid;
>> +    args.nr_bufs = nr_bufs;
>> +
>> +    for ( i = 0; i < args.nr_bufs; i++ )
>>      {
>>          struct compat_dm_op_buf cmp;
>>
>> @@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid,
>>  #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
>>          guest_from_compat_handle((_d_)->h, (_s_)->h)
>>
>> -        XLAT_dm_op_buf(&nat[i], &cmp);
>> +        XLAT_dm_op_buf(&args.buf[i], &cmp);
>>
>>  #undef XLAT_dm_op_buf_HNDL_h
>>      }
>>
>> -    rc = dm_op(domid, nr_bufs, nat);
>> +    rc = dm_op(&args);
>>
>>      if ( rc == -ERESTART )
>>          rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
>> @@ -628,16 +634,19 @@ long do_dm_op(domid_t domid,
>>                unsigned int nr_bufs,
>>                XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
>>  {
>> -    struct xen_dm_op_buf nat[MAX_NR_BUFS];
>> +    struct dmop_args args;
>>      int rc;
>>
>> -    if ( nr_bufs > MAX_NR_BUFS )
>> +    if ( nr_bufs > ARRAY_SIZE(args.buf) )
>>          return -E2BIG;
>>
>> -    if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
>> +    args.domid = domid;
>> +    args.nr_bufs = nr_bufs;
>> +
>> +    if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
>>          return -EFAULT;
>>
>> -    rc = dm_op(domid, nr_bufs, nat);
>> +    rc = dm_op(&args);
>>
>>      if ( rc == -ERESTART )
>>          rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
>>
>
Julien Grall April 24, 2017, 10:23 a.m. UTC | #4
On 21/04/17 15:44, Jennifer Herbert wrote:
> Hi Julien,

Hello Jenny,

> This is extending an existing feature.
> Once 4.9 is released, the existing feature will be frozen, and the only
> way to later get the
> extra functionality would be to created a completely new dm_op, which
> does something very similar
> to an existing one.  Although not the end of the world, this wouldnt
> look so nice.
>
> The benefits of the feature are that a VM can request multiple extents
> to be marked as modified at once,
> without having to loop though them, calling the existing call many many
> times.  This will be more efficient and faster.
> As an extra, additional accessors have been created for dm_op
> operations, which new dm_ops can take advantage of.
>
> The benefits of introducing the feature for 4.9 as opposed to later is
> that we wont' have to support the same feature, with multiple dm_opts
> with varying parameters - which as well as looking less good, also
> unnesseserily bloats the code.
>
> I think risks are low, with a minor, affecting dm_op operations only.
> The core change, in 5/5 will only affect the modified memory call, which
> has been tested.  The remaining patches are to tidy up and fix existing
> behaviour.

It would have been useful to have a cover letter explaining that.

Anyway, I think I agree it would be better to get the DM OP ABI in shape 
for continuability before it becomes stable. Although, it would be nice 
if we could get that done in the early RCs.

Release-acked-by: Julien Grall <julien.grall@arm.com>

Cheers,
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d72b7bd..e583e41 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -25,6 +25,13 @@ 
 
 #include <xsm/xsm.h>
 
+struct dmop_args {
+    domid_t domid;
+    unsigned int nr_bufs;
+    /* Reserve enough buf elements for all current hypercalls. */
+    struct xen_dm_op_buf buf[2];
+};
+
 static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[],
                                 unsigned int nr_bufs, void *dst,
                                 unsigned int idx, size_t dst_size)
@@ -56,7 +63,7 @@  static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[],
 }
 
 static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
-                            unsigned int nr, struct xen_dm_op_buf *buf)
+                            unsigned int nr, const struct xen_dm_op_buf *buf)
 {
     if ( nr > (GB(1) >> PAGE_SHIFT) )
         return -EINVAL;
@@ -287,16 +294,14 @@  static int inject_event(struct domain *d,
     return 0;
 }
 
-static int dm_op(domid_t domid,
-                 unsigned int nr_bufs,
-                 xen_dm_op_buf_t bufs[])
+static int dm_op(const struct dmop_args *op_args)
 {
     struct domain *d;
     struct xen_dm_op op;
     bool const_op = true;
     long rc;
 
-    rc = rcu_lock_remote_domain_by_id(domid, &d);
+    rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
     if ( rc )
         return rc;
 
@@ -307,7 +312,7 @@  static int dm_op(domid_t domid,
     if ( rc )
         goto out;
 
-    if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) )
+    if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) )
     {
         rc = -EFAULT;
         goto out;
@@ -466,10 +471,10 @@  static int dm_op(domid_t domid,
         if ( data->pad )
             break;
 
-        if ( nr_bufs < 2 )
+        if ( op_args->nr_bufs < 2 )
             break;
 
-        rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
+        rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
         break;
     }
 
@@ -564,7 +569,7 @@  static int dm_op(domid_t domid,
 
     if ( (!rc || rc == -ERESTART) &&
          !const_op &&
-         !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
+         !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) )
         rc = -EFAULT;
 
  out:
@@ -587,20 +592,21 @@  CHECK_dm_op_set_mem_type;
 CHECK_dm_op_inject_event;
 CHECK_dm_op_inject_msi;
 
-#define MAX_NR_BUFS 2
-
 int compat_dm_op(domid_t domid,
                  unsigned int nr_bufs,
                  XEN_GUEST_HANDLE_PARAM(void) bufs)
 {
-    struct xen_dm_op_buf nat[MAX_NR_BUFS];
+    struct dmop_args args;
     unsigned int i;
     int rc;
 
-    if ( nr_bufs > MAX_NR_BUFS )
+    if ( nr_bufs > ARRAY_SIZE(args.buf) )
         return -E2BIG;
 
-    for ( i = 0; i < nr_bufs; i++ )
+    args.domid = domid;
+    args.nr_bufs = nr_bufs;
+
+    for ( i = 0; i < args.nr_bufs; i++ )
     {
         struct compat_dm_op_buf cmp;
 
@@ -610,12 +616,12 @@  int compat_dm_op(domid_t domid,
 #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
         guest_from_compat_handle((_d_)->h, (_s_)->h)
 
-        XLAT_dm_op_buf(&nat[i], &cmp);
+        XLAT_dm_op_buf(&args.buf[i], &cmp);
 
 #undef XLAT_dm_op_buf_HNDL_h
     }
 
-    rc = dm_op(domid, nr_bufs, nat);
+    rc = dm_op(&args);
 
     if ( rc == -ERESTART )
         rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
@@ -628,16 +634,19 @@  long do_dm_op(domid_t domid,
               unsigned int nr_bufs,
               XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
 {
-    struct xen_dm_op_buf nat[MAX_NR_BUFS];
+    struct dmop_args args;
     int rc;
 
-    if ( nr_bufs > MAX_NR_BUFS )
+    if ( nr_bufs > ARRAY_SIZE(args.buf) )
         return -E2BIG;
 
-    if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
+    args.domid = domid;
+    args.nr_bufs = nr_bufs;
+
+    if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
         return -EFAULT;
 
-    rc = dm_op(domid, nr_bufs, nat);
+    rc = dm_op(&args);
 
     if ( rc == -ERESTART )
         rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",