[2/2] x86/HVM: fix forwarding of internally cached requests (part 2)

Message ID 5721F50602000078000E6ACB@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich April 28, 2016, 9:33 a.m. UTC
Commit 96ae556569 ("x86/HVM: fix forwarding of internally cached
requests") wasn't quite complete: hvmemul_do_io() also needs to
propagate up the clipped count. (I really should have re-tested the
forward port resulting in the earlier change, instead of relying on the
testing done on the older version of Xen which the fix was first needed
for.)

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -137,7 +137,7 @@ static int hvmemul_do_io(
         if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
              (p.addr != addr) ||
              (p.size != size) ||
-             (p.count != *reps) ||
+             (p.count > *reps) ||
              (p.dir != dir) ||
              (p.df != df) ||
              (p.data_is_ptr != data_is_addr) )
@@ -145,6 +145,8 @@ static int hvmemul_do_io(
 
         if ( data_is_addr )
             return X86EMUL_UNHANDLEABLE;
+
+        *reps = p.count;
         goto finish_access;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -162,6 +164,13 @@ static int hvmemul_do_io(
 
     rc = hvm_io_intercept(&p);
 
+    /*
+     * p.count may have got reduced (see hvm_process_io_intercept()) - inform
+     * our callers and mirror this into latched state.
+     */
+    ASSERT(p.count <= *reps);
+    *reps = vio->io_req.count = p.count;
+
     switch ( rc )
     {
     case X86EMUL_OKAY:
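
For context, the pattern the fix enforces: an intercept handler may accept
fewer repetitions than the emulator requested, and that clipped count has to
be written back through the caller's rep pointer (and mirrored into the
latched request state) so only the remainder gets retried. Below is a
minimal, self-contained C sketch of that pattern; it is not Xen code, and all
names (io_req, handle_intercept, do_io, MAX_REPS_PER_PASS) are hypothetical.

/* Illustrative sketch only - hypothetical names, not Xen internals. */
#include <assert.h>
#include <stdint.h>

#define MAX_REPS_PER_PASS 4   /* pretend a handler takes at most 4 reps */

struct io_req {
    uint64_t addr;
    unsigned long count;      /* repetitions the handler actually accepted */
};

/*
 * Hypothetical intercept: clips the count, in the way the patch's comment
 * says hvm_process_io_intercept() may, when a repeated access runs past
 * the range a handler covers.
 */
static int handle_intercept(struct io_req *req)
{
    if ( req->count > MAX_REPS_PER_PASS )
        req->count = MAX_REPS_PER_PASS;
    return 0;                 /* handled OK */
}

static int do_io(uint64_t addr, unsigned long *reps)
{
    struct io_req req = { .addr = addr, .count = *reps };
    int rc = handle_intercept(&req);

    /*
     * The point of the patch: propagate the possibly clipped count up so
     * the caller knows how many repetitions were really consumed.
     */
    assert(req.count <= *reps);
    *reps = req.count;

    return rc;
}

With that in place, a caller looping over a repetition count (think of a REP
string instruction being emulated) can advance by whatever *reps comes back
as, instead of wrongly assuming the full count completed in one pass.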

Comments

Paul Durrant April 28, 2016, 9:49 a.m. UTC | #1
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 28 April 2016 10:33
> To: xen-devel
> Cc: Paul Durrant; Wei Liu
> Subject: [PATCH 2/2] x86/HVM: fix forwarding of internally cached requests (part 2)
> 
> Commit 96ae556569 ("x86/HVM: fix forwarding of internally cached
> requests") wasn't quite complete: hvmemul_do_io() also needs to
> propagate up the clipped count. (I really should have re-tested the
> forward port resulting in the earlier change, instead of relying on the
> testing done on the older version of Xen which the fix was first needed
> for.)
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -137,7 +137,7 @@ static int hvmemul_do_io(
>          if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
>               (p.addr != addr) ||
>               (p.size != size) ||
> -             (p.count != *reps) ||
> +             (p.count > *reps) ||
>               (p.dir != dir) ||
>               (p.df != df) ||
>               (p.data_is_ptr != data_is_addr) )
> @@ -145,6 +145,8 @@ static int hvmemul_do_io(
> 
>          if ( data_is_addr )
>              return X86EMUL_UNHANDLEABLE;
> +
> +        *reps = p.count;
>          goto finish_access;
>      default:
>          return X86EMUL_UNHANDLEABLE;
> @@ -162,6 +164,13 @@ static int hvmemul_do_io(
> 
>      rc = hvm_io_intercept(&p);
> 
> +    /*
> +     * p.count may have got reduced (see hvm_process_io_intercept()) - inform
> +     * our callers and mirror this into latched state.
> +     */
> +    ASSERT(p.count <= *reps);
> +    *reps = vio->io_req.count = p.count;
> +
>      switch ( rc )
>      {
>      case X86EMUL_OKAY:
> 
>
Andrew Cooper April 28, 2016, 9:52 a.m. UTC | #2
On 28/04/16 10:33, Jan Beulich wrote:
> Commit 96ae556569 ("x86/HVM: fix forwarding of internally cached
> requests") wasn't quite complete: hvmemul_do_io() also needs to
> propagate up the clipped count. (I really should have re-tested the
> forward port resulting in the earlier change, instead of relying on the
> testing done on the older version of Xen which the fix was first needed
> for.)
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>