
[V3,11/23] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu

Message ID 1606732298-22107-12-git-send-email-olekstysh@gmail.com (mailing list archive)
State New, archived
Series [V3,01/23] x86/ioreq: Prepare IOREQ feature for making it common

Commit Message

Oleksandr Tyshchenko Nov. 30, 2020, 10:31 a.m. UTC
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

IOREQ is a common feature now and these fields will be used
on Arm as-is. Move them to the common struct vcpu as part of the new
struct vcpu_io and drop the duplicated "io" prefixes. Also move
enum hvm_io_completion to xen/sched.h and remove the "hvm" prefixes.

This patch completely removes the layering violation in the common code.
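
For reference, the common definitions this patch adds to xen/include/xen/sched.h
(reproduced from the hunk at the end of this mail) are shown below; struct vcpu
then gains a "struct vcpu_io io;" member under CONFIG_IOREQ_SERVER. ioreq_t is
the public ioreq structure from public/hvm/ioreq.h.

    enum io_completion {
        IO_no_completion,
        IO_mmio_completion,
        IO_pio_completion,
    #ifdef CONFIG_X86
        IO_realmode_completion,
    #endif
    };

    struct vcpu_io {
        /* I/O request in flight to device model. */
        enum io_completion   completion;
        ioreq_t              req;
    };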

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
CC: Julien Grall <julien.grall@arm.com>

---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"

Changes V1 -> V2:
   - new patch

Changes V2 -> V3:
   - update the patch to reflect that the "legacy interface" is x86-specific
   - update patch description
   - drop the "io" prefixes from the field names
   - wrap IO_realmode_completion
---
 xen/arch/x86/hvm/emulate.c        | 72 +++++++++++++++++++--------------------
 xen/arch/x86/hvm/hvm.c            |  2 +-
 xen/arch/x86/hvm/io.c             |  8 ++---
 xen/arch/x86/hvm/ioreq.c          |  4 +--
 xen/arch/x86/hvm/svm/nestedsvm.c  |  2 +-
 xen/arch/x86/hvm/vmx/realmode.c   |  6 ++--
 xen/common/ioreq.c                | 22 ++++++------
 xen/include/asm-x86/hvm/emulate.h |  2 +-
 xen/include/asm-x86/hvm/ioreq.h   |  2 +-
 xen/include/asm-x86/hvm/vcpu.h    | 11 ------
 xen/include/xen/sched.h           | 19 +++++++++++
 11 files changed, 79 insertions(+), 71 deletions(-)

Comments

Jan Beulich Dec. 7, 2020, 12:32 p.m. UTC | #1
On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -142,8 +142,8 @@ void hvmemul_cancel(struct vcpu *v)
>  {
>      struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
>  
> -    vio->io_req.state = STATE_IOREQ_NONE;
> -    vio->io_completion = HVMIO_no_completion;
> +    v->io.req.state = STATE_IOREQ_NONE;
> +    v->io.completion = IO_no_completion;
>      vio->mmio_cache_count = 0;
>      vio->mmio_insn_bytes = 0;
>      vio->mmio_access = (struct npfec){};
> @@ -159,7 +159,7 @@ static int hvmemul_do_io(
>  {
>      struct vcpu *curr = current;
>      struct domain *currd = curr->domain;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
> +    struct vcpu_io *vio = &curr->io;

Taking just these two hunks: "vio" would now stand for two entirely
different things. I realize the name is applicable to both, but I
wonder if such naming isn't going to risk confusion. Despite being
relatively familiar with the involved code, I've been repeatedly
unsure what exactly "vio" covers, and needed to go back to the
header. So together with the name possible adjustment mentioned
further down, maybe "vcpu_io" also wants its name changed, such that
the variable then also could sensibly be named (slightly)
differently? struct vcpu_io_state maybe? Or alternatively rename
variables of type struct hvm_vcpu_io * to hvio or hio? Otoh the
savings aren't very big for just ->io, so maybe better to stick to
the prior name with the prior type, and not introduce local
variables at all for the new field, like you already have it in the
former case?
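
As an illustration of the "hvio" rename floated above (a sketch only; the V3
patch keeps the plain "vio" name), hvmemul_cancel() from the first hunk would
then keep the two kinds of state visibly apart:

    void hvmemul_cancel(struct vcpu *v)
    {
        /* x86/HVM-specific emulation state, renamed for clarity. */
        struct hvm_vcpu_io *hvio = &v->arch.hvm.hvm_io;

        /* Common in-flight ioreq state now lives directly in struct vcpu. */
        v->io.req.state = STATE_IOREQ_NONE;
        v->io.completion = IO_no_completion;

        hvio->mmio_cache_count = 0;
        hvio->mmio_insn_bytes = 0;
        hvio->mmio_access = (struct npfec){};
        /* ... remaining resets unchanged ... */
    }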

> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -145,6 +145,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
>  
>  struct waitqueue_vcpu;
>  
> +enum io_completion {
> +    IO_no_completion,
> +    IO_mmio_completion,
> +    IO_pio_completion,
> +#ifdef CONFIG_X86
> +    IO_realmode_completion,
> +#endif
> +};

I'm not entirely happy with io_ / IO_ here - they seem a little
too generic. How about ioreq_ / IOREQ_ respectively?

> +struct vcpu_io {
> +    /* I/O request in flight to device model. */
> +    enum io_completion   completion;
> +    ioreq_t              req;
> +};
> +
>  struct vcpu
>  {
>      int              vcpu_id;
> @@ -256,6 +271,10 @@ struct vcpu
>      struct vpci_vcpu vpci;
>  
>      struct arch_vcpu arch;
> +
> +#ifdef CONFIG_IOREQ_SERVER
> +    struct vcpu_io io;
> +#endif
>  };

I don't have a good solution in mind, and I'm also not meaning to
necessarily request a change here, but I'd like to point out that
this does away (for this part of it only, of course) with the
overlaying of the PV and HVM sub-structs on x86. As long as the
HVM part is the far bigger one, that's not a problem, but I wanted
to mention the aspect nevertheless.

Jan
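
For context, the overlaying referred to above is the anonymous union inside
x86's struct arch_vcpu, which keeps the PV and HVM per-vCPU state in the same
storage; simplified sketch (field list heavily abbreviated):

    struct arch_vcpu {
        /* ... */
        union {
            struct pv_vcpu  pv;   /* PV guest state */
            struct hvm_vcpu hvm;  /* HVM guest state, contains struct hvm_vcpu_io */
        };
        /* ... */
    };

Anything moved from the HVM side into the common struct vcpu (such as the new
"io" member) no longer shares this space with the PV state.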
Oleksandr Tyshchenko Dec. 7, 2020, 8:59 p.m. UTC | #2
On 07.12.20 14:32, Jan Beulich wrote:

Hi Jan, Paul.

> On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
>> --- a/xen/arch/x86/hvm/emulate.c
>> +++ b/xen/arch/x86/hvm/emulate.c
>> @@ -142,8 +142,8 @@ void hvmemul_cancel(struct vcpu *v)
>>   {
>>       struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
>>   
>> -    vio->io_req.state = STATE_IOREQ_NONE;
>> -    vio->io_completion = HVMIO_no_completion;
>> +    v->io.req.state = STATE_IOREQ_NONE;
>> +    v->io.completion = IO_no_completion;
>>       vio->mmio_cache_count = 0;
>>       vio->mmio_insn_bytes = 0;
>>       vio->mmio_access = (struct npfec){};
>> @@ -159,7 +159,7 @@ static int hvmemul_do_io(
>>   {
>>       struct vcpu *curr = current;
>>       struct domain *currd = curr->domain;
>> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>> +    struct vcpu_io *vio = &curr->io;
> Taking just these two hunks: "vio" would now stand for two entirely
> different things. I realize the name is applicable to both, but I
> wonder if such naming isn't going to risk confusion. Despite being
> relatively familiar with the involved code, I've been repeatedly
> unsure what exactly "vio" covers, and needed to go back to the

  Good comment... Agree that with the naming scheme in current patch the 
code became a little bit confusing to read.


> header. So together with the name possible adjustment mentioned
> further down, maybe "vcpu_io" also wants its name changed, such that
> the variable then also could sensibly be named (slightly)
> differently? struct vcpu_io_state maybe? Or alternatively rename
> variables of type struct hvm_vcpu_io * to hvio or hio? Otoh the
> savings aren't very big for just ->io, so maybe better to stick to
> the prior name with the prior type, and not introduce local
> variables at all for the new field, like you already have it in the
> former case?
I would much prefer the last suggestion which is "not introduce local
variables at all for the new field" (I admit I was thinking almost the 
same, but haven't chosen this direction).
But I am OK with any suggestions here. Paul what do you think?


>
>> --- a/xen/include/xen/sched.h
>> +++ b/xen/include/xen/sched.h
>> @@ -145,6 +145,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
>>   
>>   struct waitqueue_vcpu;
>>   
>> +enum io_completion {
>> +    IO_no_completion,
>> +    IO_mmio_completion,
>> +    IO_pio_completion,
>> +#ifdef CONFIG_X86
>> +    IO_realmode_completion,
>> +#endif
>> +};
> I'm not entirely happy with io_ / IO_ here - they seem a little
> too generic. How about ioreq_ / IOREQ_ respectively?

I am OK, would like to hear Paul's opinion on both questions.


>
>> +struct vcpu_io {
>> +    /* I/O request in flight to device model. */
>> +    enum io_completion   completion;
>> +    ioreq_t              req;
>> +};
>> +
>>   struct vcpu
>>   {
>>       int              vcpu_id;
>> @@ -256,6 +271,10 @@ struct vcpu
>>       struct vpci_vcpu vpci;
>>   
>>       struct arch_vcpu arch;
>> +
>> +#ifdef CONFIG_IOREQ_SERVER
>> +    struct vcpu_io io;
>> +#endif
>>   };
> I don't have a good solution in mind, and I'm also not meaning to
> necessarily request a change here, but I'd like to point out that
> this does away (for this part of it only, of course) with the
> overlaying of the PV and HVM sub-structs on x86. As long as the
> HVM part is the far bigger one, that's not a problem, but I wanted
> to mention the aspect nevertheless.
>
> Jan
Paul Durrant Dec. 8, 2020, 7:52 a.m. UTC | #3
> -----Original Message-----
> From: Oleksandr <olekstysh@gmail.com>
> Sent: 07 December 2020 21:00
> To: Jan Beulich <jbeulich@suse.com>; Paul Durrant <paul@xen.org>
> Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>; Andrew Cooper <andrew.cooper3@citrix.com>;
> Roger Pau Monné <roger.pau@citrix.com>; Wei Liu <wl@xen.org>; George Dunlap
> <george.dunlap@citrix.com>; Ian Jackson <iwj@xenproject.org>; Julien Grall <julien@xen.org>; Stefano
> Stabellini <sstabellini@kernel.org>; Jun Nakajima <jun.nakajima@intel.com>; Kevin Tian
> <kevin.tian@intel.com>; Julien Grall <julien.grall@arm.com>; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH V3 11/23] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu
> 
> 
> On 07.12.20 14:32, Jan Beulich wrote:
> 
> Hi Jan, Paul.
> 
> > On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
> >> --- a/xen/arch/x86/hvm/emulate.c
> >> +++ b/xen/arch/x86/hvm/emulate.c
> >> @@ -142,8 +142,8 @@ void hvmemul_cancel(struct vcpu *v)
> >>   {
> >>       struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
> >>
> >> -    vio->io_req.state = STATE_IOREQ_NONE;
> >> -    vio->io_completion = HVMIO_no_completion;
> >> +    v->io.req.state = STATE_IOREQ_NONE;
> >> +    v->io.completion = IO_no_completion;
> >>       vio->mmio_cache_count = 0;
> >>       vio->mmio_insn_bytes = 0;
> >>       vio->mmio_access = (struct npfec){};
> >> @@ -159,7 +159,7 @@ static int hvmemul_do_io(
> >>   {
> >>       struct vcpu *curr = current;
> >>       struct domain *currd = curr->domain;
> >> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
> >> +    struct vcpu_io *vio = &curr->io;
> > Taking just these two hunks: "vio" would now stand for two entirely
> > different things. I realize the name is applicable to both, but I
> > wonder if such naming isn't going to risk confusion. Despite being
> > relatively familiar with the involved code, I've been repeatedly
> > unsure what exactly "vio" covers, and needed to go back to the
> 
>   Good comment... Agree that with the naming scheme in current patch the
> code became a little bit confusing to read.
> 
> 
> > header. So together with the name possible adjustment mentioned
> > further down, maybe "vcpu_io" also wants its name changed, such that
> > the variable then also could sensibly be named (slightly)
> > differently? struct vcpu_io_state maybe? Or alternatively rename
> > variables of type struct hvm_vcpu_io * to hvio or hio? Otoh the
> > savings aren't very big for just ->io, so maybe better to stick to
> > the prior name with the prior type, and not introduce local
> > variables at all for the new field, like you already have it in the
> > former case?
> I would much prefer the last suggestion which is "not introduce local
> variables at all for the new field" (I admit I was thinking almost the
> same, but haven't chosen this direction).
> But I am OK with any suggestions here. Paul what do you think?
> 

I personally don't think there is that much risk of confusion. If there is a desire to disambiguate though, I would go the route of naming hvm_vcpu_io locals 'hvio'.

> 
> >
> >> --- a/xen/include/xen/sched.h
> >> +++ b/xen/include/xen/sched.h
> >> @@ -145,6 +145,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy
> */
> >>
> >>   struct waitqueue_vcpu;
> >>
> >> +enum io_completion {
> >> +    IO_no_completion,
> >> +    IO_mmio_completion,
> >> +    IO_pio_completion,
> >> +#ifdef CONFIG_X86
> >> +    IO_realmode_completion,
> >> +#endif
> >> +};
> > I'm not entirely happy with io_ / IO_ here - they seem a little
> > too generic. How about ioreq_ / IOREQ_ respectively?
> 
> I am OK, would like to hear Paul's opinion on both questions.
> 

No, I think the 'IO_' prefix is better. They relate to a field in the vcpu_io struct. An alternative might be 'VIO_'...

> 
> >
> >> +struct vcpu_io {
> >> +    /* I/O request in flight to device model. */
> >> +    enum io_completion   completion;

... in which case, you could also name the enum 'vio_completion'.

  Paul

> >> +    ioreq_t              req;
> >> +};
> >> +
Jan Beulich Dec. 8, 2020, 9:35 a.m. UTC | #4
On 08.12.2020 08:52, Paul Durrant wrote:
>> From: Oleksandr <olekstysh@gmail.com>
>> Sent: 07 December 2020 21:00
>>
>> On 07.12.20 14:32, Jan Beulich wrote:
>>> On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
>>>> --- a/xen/include/xen/sched.h
>>>> +++ b/xen/include/xen/sched.h
>>>> @@ -145,6 +145,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy
>> */
>>>>
>>>>   struct waitqueue_vcpu;
>>>>
>>>> +enum io_completion {
>>>> +    IO_no_completion,
>>>> +    IO_mmio_completion,
>>>> +    IO_pio_completion,
>>>> +#ifdef CONFIG_X86
>>>> +    IO_realmode_completion,
>>>> +#endif
>>>> +};
>>> I'm not entirely happy with io_ / IO_ here - they seem a little
>>> too generic. How about ioreq_ / IOREQ_ respectively?
>>
>> I am OK, would like to hear Paul's opinion on both questions.
>>
> 
> No, I think the 'IO_' prefix is better. They relate to a field in the vcpu_io struct. An alternative might be 'VIO_'...
> 
>>
>>>
>>>> +struct vcpu_io {
>>>> +    /* I/O request in flight to device model. */
>>>> +    enum io_completion   completion;
> 
> ... in which case, you could also name the enum 'vio_completion'.

I'd be okay with these - still better than just "io".

Jan
Oleksandr Tyshchenko Dec. 8, 2020, 6:21 p.m. UTC | #5
On 08.12.20 09:52, Paul Durrant wrote:

Hi Paul, Jan

>>> On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
>>>> --- a/xen/arch/x86/hvm/emulate.c
>>>> +++ b/xen/arch/x86/hvm/emulate.c
>>>> @@ -142,8 +142,8 @@ void hvmemul_cancel(struct vcpu *v)
>>>>    {
>>>>        struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
>>>>
>>>> -    vio->io_req.state = STATE_IOREQ_NONE;
>>>> -    vio->io_completion = HVMIO_no_completion;
>>>> +    v->io.req.state = STATE_IOREQ_NONE;
>>>> +    v->io.completion = IO_no_completion;
>>>>        vio->mmio_cache_count = 0;
>>>>        vio->mmio_insn_bytes = 0;
>>>>        vio->mmio_access = (struct npfec){};
>>>> @@ -159,7 +159,7 @@ static int hvmemul_do_io(
>>>>    {
>>>>        struct vcpu *curr = current;
>>>>        struct domain *currd = curr->domain;
>>>> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>>>> +    struct vcpu_io *vio = &curr->io;
>>> Taking just these two hunks: "vio" would now stand for two entirely
>>> different things. I realize the name is applicable to both, but I
>>> wonder if such naming isn't going to risk confusion. Despite being
>>> relatively familiar with the involved code, I've been repeatedly
>>> unsure what exactly "vio" covers, and needed to go back to the
>>    Good comment... Agree that with the naming scheme in current patch the
>> code became a little bit confusing to read.
>>
>>
>>> header. So together with the name possible adjustment mentioned
>>> further down, maybe "vcpu_io" also wants its name changed, such that
>>> the variable then also could sensibly be named (slightly)
>>> differently? struct vcpu_io_state maybe? Or alternatively rename
>>> variables of type struct hvm_vcpu_io * to hvio or hio? Otoh the
>>> savings aren't very big for just ->io, so maybe better to stick to
>>> the prior name with the prior type, and not introduce local
>>> variables at all for the new field, like you already have it in the
>>> former case?
>> I would much prefer the last suggestion which is "not introduce local
>> variables at all for the new field" (I admit I was thinking almost the
>> same, but haven't chosen this direction).
>> But I am OK with any suggestions here. Paul what do you think?
>>
> I personally don't think there is that much risk of confusion. If there is a desire to disambiguate though, I would go the route of naming hvm_vcpu_io locals 'hvio'.
Well, I assume I should rename all hvm_vcpu_io locals in the code to
"hvio" (even in places this patch didn't touch so far, since no new
vcpu_io fields were involved there).
I am OK with that, although I expect a lot of places will need touching here...


>
>>>> --- a/xen/include/xen/sched.h
>>>> +++ b/xen/include/xen/sched.h
>>>> @@ -145,6 +145,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy
>> */
>>>>    struct waitqueue_vcpu;
>>>>
>>>> +enum io_completion {
>>>> +    IO_no_completion,
>>>> +    IO_mmio_completion,
>>>> +    IO_pio_completion,
>>>> +#ifdef CONFIG_X86
>>>> +    IO_realmode_completion,
>>>> +#endif
>>>> +};
>>> I'm not entirely happy with io_ / IO_ here - they seem a little
>>> too generic. How about ioreq_ / IOREQ_ respectively?
>> I am OK, would like to hear Paul's opinion on both questions.
>>
> No, I think the 'IO_' prefix is better. They relate to a field in the vcpu_io struct. An alternative might be 'VIO_'...
>
>>>> +struct vcpu_io {
>>>> +    /* I/O request in flight to device model. */
>>>> +    enum io_completion   completion;
> ... in which case, you could also name the enum 'vio_completion'.
>
>    Paul

OK, I will follow the new renaming scheme: IO_ -> VIO_ (io_ -> vio_).
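
Concretely, with the agreed rename the common definitions would take roughly
the following shape (the hunk below still carries the V3 IO_ naming):

    enum vio_completion {
        VIO_no_completion,
        VIO_mmio_completion,
        VIO_pio_completion,
    #ifdef CONFIG_X86
        VIO_realmode_completion,
    #endif
    };

    struct vcpu_io {
        /* I/O request in flight to device model. */
        enum vio_completion  completion;
        ioreq_t              req;
    };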

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4746d5a..04e4994 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -142,8 +142,8 @@  void hvmemul_cancel(struct vcpu *v)
 {
     struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
 
-    vio->io_req.state = STATE_IOREQ_NONE;
-    vio->io_completion = HVMIO_no_completion;
+    v->io.req.state = STATE_IOREQ_NONE;
+    v->io.completion = IO_no_completion;
     vio->mmio_cache_count = 0;
     vio->mmio_insn_bytes = 0;
     vio->mmio_access = (struct npfec){};
@@ -159,7 +159,7 @@  static int hvmemul_do_io(
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &curr->io;
     ioreq_t p = {
         .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
@@ -184,13 +184,13 @@  static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    switch ( vio->io_req.state )
+    switch ( vio->req.state )
     {
     case STATE_IOREQ_NONE:
         break;
     case STATE_IORESP_READY:
-        vio->io_req.state = STATE_IOREQ_NONE;
-        p = vio->io_req;
+        vio->req.state = STATE_IOREQ_NONE;
+        p = vio->req;
 
         /* Verify the emulation request has been correctly re-issued */
         if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
@@ -238,7 +238,7 @@  static int hvmemul_do_io(
     }
     ASSERT(p.count);
 
-    vio->io_req = p;
+    vio->req = p;
 
     rc = hvm_io_intercept(&p);
 
@@ -247,12 +247,12 @@  static int hvmemul_do_io(
      * our callers and mirror this into latched state.
      */
     ASSERT(p.count <= *reps);
-    *reps = vio->io_req.count = p.count;
+    *reps = vio->req.count = p.count;
 
     switch ( rc )
     {
     case X86EMUL_OKAY:
-        vio->io_req.state = STATE_IOREQ_NONE;
+        vio->req.state = STATE_IOREQ_NONE;
         break;
     case X86EMUL_UNHANDLEABLE:
     {
@@ -305,7 +305,7 @@  static int hvmemul_do_io(
                 if ( s == NULL )
                 {
                     rc = X86EMUL_RETRY;
-                    vio->io_req.state = STATE_IOREQ_NONE;
+                    vio->req.state = STATE_IOREQ_NONE;
                     break;
                 }
 
@@ -316,7 +316,7 @@  static int hvmemul_do_io(
                 if ( dir == IOREQ_READ )
                 {
                     rc = hvm_process_io_intercept(&ioreq_server_handler, &p);
-                    vio->io_req.state = STATE_IOREQ_NONE;
+                    vio->req.state = STATE_IOREQ_NONE;
                     break;
                 }
             }
@@ -329,14 +329,14 @@  static int hvmemul_do_io(
         if ( !s )
         {
             rc = hvm_process_io_intercept(&null_handler, &p);
-            vio->io_req.state = STATE_IOREQ_NONE;
+            vio->req.state = STATE_IOREQ_NONE;
         }
         else
         {
             rc = hvm_send_ioreq(s, &p, 0);
             if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
-                vio->io_req.state = STATE_IOREQ_NONE;
-            else if ( !ioreq_needs_completion(&vio->io_req) )
+                vio->req.state = STATE_IOREQ_NONE;
+            else if ( !ioreq_needs_completion(&vio->req) )
                 rc = X86EMUL_OKAY;
         }
         break;
@@ -1854,7 +1854,7 @@  static int hvmemul_rep_movs(
           * cheaper than multiple round trips through the device model. Yet
           * when processing a response we can always re-use the translation.
           */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
            PAGE_SIZE - (saddr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
         sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
@@ -1870,7 +1870,7 @@  static int hvmemul_rep_movs(
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gla == (daddr & PAGE_MASK)) &&
          /* See comment above. */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
            PAGE_SIZE - (daddr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
         dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
@@ -2007,7 +2007,7 @@  static int hvmemul_rep_stos(
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gla == (addr & PAGE_MASK)) &&
          /* See respective comment in MOVS processing. */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
            PAGE_SIZE - (addr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
         gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
@@ -2613,13 +2613,13 @@  static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
 };
 
 /*
- * Note that passing HVMIO_no_completion into this function serves as kind
+ * Note that passing IO_no_completion into this function serves as kind
  * of (but not fully) an "auto select completion" indicator.  When there's
  * no completion needed, the passed in value will be ignored in any case.
  */
 static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     const struct x86_emulate_ops *ops,
-    enum hvm_io_completion completion)
+    enum io_completion completion)
 {
     const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
@@ -2634,11 +2634,11 @@  static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
      */
     if ( vio->cache->num_ents > vio->cache->max_ents )
     {
-        ASSERT(vio->io_req.state == STATE_IOREQ_NONE);
+        ASSERT(curr->io.req.state == STATE_IOREQ_NONE);
         vio->cache->num_ents = 0;
     }
     else
-        ASSERT(vio->io_req.state == STATE_IORESP_READY);
+        ASSERT(curr->io.req.state == STATE_IORESP_READY);
 
     hvm_emulate_init_per_insn(hvmemul_ctxt, vio->mmio_insn,
                               vio->mmio_insn_bytes);
@@ -2649,25 +2649,25 @@  static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     if ( rc == X86EMUL_OKAY && vio->mmio_retry )
         rc = X86EMUL_RETRY;
 
-    if ( !ioreq_needs_completion(&vio->io_req) )
-        completion = HVMIO_no_completion;
-    else if ( completion == HVMIO_no_completion )
-        completion = (vio->io_req.type != IOREQ_TYPE_PIO ||
-                      hvmemul_ctxt->is_mem_access) ? HVMIO_mmio_completion
-                                                   : HVMIO_pio_completion;
+    if ( !ioreq_needs_completion(&curr->io.req) )
+        completion = IO_no_completion;
+    else if ( completion == IO_no_completion )
+        completion = (curr->io.req.type != IOREQ_TYPE_PIO ||
+                      hvmemul_ctxt->is_mem_access) ? IO_mmio_completion
+                                                   : IO_pio_completion;
 
-    switch ( vio->io_completion = completion )
+    switch ( curr->io.completion = completion )
     {
-    case HVMIO_no_completion:
-    case HVMIO_pio_completion:
+    case IO_no_completion:
+    case IO_pio_completion:
         vio->mmio_cache_count = 0;
         vio->mmio_insn_bytes = 0;
         vio->mmio_access = (struct npfec){};
         hvmemul_cache_disable(curr);
         break;
 
-    case HVMIO_mmio_completion:
-    case HVMIO_realmode_completion:
+    case IO_mmio_completion:
+    case IO_realmode_completion:
         BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
         vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
@@ -2716,7 +2716,7 @@  static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
 
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-    enum hvm_io_completion completion)
+    enum io_completion completion)
 {
     return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion);
 }
@@ -2754,7 +2754,7 @@  int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
                           guest_cpu_user_regs());
     ctxt.ctxt.data = &mmio_ro_ctxt;
 
-    switch ( rc = _hvm_emulate_one(&ctxt, ops, HVMIO_no_completion) )
+    switch ( rc = _hvm_emulate_one(&ctxt, ops, IO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
@@ -2782,7 +2782,7 @@  void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     {
     case EMUL_KIND_NOWRITE:
         rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write,
-                              HVMIO_no_completion);
+                              IO_no_completion);
         break;
     case EMUL_KIND_SET_CONTEXT_INSN: {
         struct vcpu *curr = current;
@@ -2803,7 +2803,7 @@  void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     /* Fall-through */
     default:
         ctx.set_context = (kind == EMUL_KIND_SET_CONTEXT_DATA);
-        rc = hvm_emulate_one(&ctx, HVMIO_no_completion);
+        rc = hvm_emulate_one(&ctx, IO_no_completion);
     }
 
     switch ( rc )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 54e32e4..cc46909 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3800,7 +3800,7 @@  void hvm_ud_intercept(struct cpu_user_regs *regs)
         return;
     }
 
-    switch ( hvm_emulate_one(&ctxt, HVMIO_no_completion) )
+    switch ( hvm_emulate_one(&ctxt, IO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index b220d6b..327a6a2 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -85,7 +85,7 @@  bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
 
     hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
 
-    switch ( rc = hvm_emulate_one(&ctxt, HVMIO_no_completion) )
+    switch ( rc = hvm_emulate_one(&ctxt, IO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
         hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
@@ -122,7 +122,7 @@  bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
 bool handle_pio(uint16_t port, unsigned int size, int dir)
 {
     struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &curr->io;
     unsigned int data;
     int rc;
 
@@ -135,8 +135,8 @@  bool handle_pio(uint16_t port, unsigned int size, int dir)
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 
-    if ( ioreq_needs_completion(&vio->io_req) )
-        vio->io_completion = HVMIO_pio_completion;
+    if ( ioreq_needs_completion(&vio->req) )
+        vio->completion = IO_pio_completion;
 
     switch ( rc )
     {
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 009a95a..7808b75 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -41,11 +41,11 @@  bool ioreq_complete_mmio(void)
     return handle_mmio();
 }
 
-bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion)
+bool arch_vcpu_ioreq_completion(enum io_completion io_completion)
 {
     switch ( io_completion )
     {
-    case HVMIO_realmode_completion:
+    case IO_realmode_completion:
     {
         struct hvm_emulate_ctxt ctxt;
 
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index fcfccf7..6d90630 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1266,7 +1266,7 @@  enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
+        if ( v->io.req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
         if ( !nv->nv_vmexit_pending && n2vmcb->exit_int_info.v )
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 768f01e..3033143 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -101,7 +101,7 @@  void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 
     perfc_incr(realmode_emulations);
 
-    rc = hvm_emulate_one(hvmemul_ctxt, HVMIO_realmode_completion);
+    rc = hvm_emulate_one(hvmemul_ctxt, IO_realmode_completion);
 
     if ( rc == X86EMUL_UNHANDLEABLE )
     {
@@ -188,7 +188,7 @@  void vmx_realmode(struct cpu_user_regs *regs)
 
         vmx_realmode_emulate_one(&hvmemul_ctxt);
 
-        if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
+        if ( curr->io.req.state != STATE_IOREQ_NONE || vio->mmio_retry )
             break;
 
         /* Stop emulating unless our segment state is not safe */
@@ -202,7 +202,7 @@  void vmx_realmode(struct cpu_user_regs *regs)
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( vio->io_req.state != STATE_IOREQ_NONE )
+    if ( curr->io.req.state != STATE_IOREQ_NONE )
         curr->arch.hvm.vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index b7c2d5a..caf4543 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -159,7 +159,7 @@  static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
         break;
     }
 
-    p = &sv->vcpu->arch.hvm.hvm_io.io_req;
+    p = &sv->vcpu->io.req;
     if ( ioreq_needs_completion(p) )
         p->data = data;
 
@@ -171,10 +171,10 @@  static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
 bool handle_hvm_io_completion(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &v->io;
     struct ioreq_server *s;
     struct ioreq_vcpu *sv;
-    enum hvm_io_completion io_completion;
+    enum io_completion io_completion;
 
     if ( has_vpci(d) && vpci_process_pending(v) )
     {
@@ -186,26 +186,26 @@  bool handle_hvm_io_completion(struct vcpu *v)
     if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
         return false;
 
-    vio->io_req.state = ioreq_needs_completion(&vio->io_req) ?
+    vio->req.state = ioreq_needs_completion(&vio->req) ?
         STATE_IORESP_READY : STATE_IOREQ_NONE;
 
     msix_write_completion(v);
     vcpu_end_shutdown_deferral(v);
 
-    io_completion = vio->io_completion;
-    vio->io_completion = HVMIO_no_completion;
+    io_completion = vio->completion;
+    vio->completion = IO_no_completion;
 
     switch ( io_completion )
     {
-    case HVMIO_no_completion:
+    case IO_no_completion:
         break;
 
-    case HVMIO_mmio_completion:
+    case IO_mmio_completion:
         return ioreq_complete_mmio();
 
-    case HVMIO_pio_completion:
-        return handle_pio(vio->io_req.addr, vio->io_req.size,
-                          vio->io_req.dir);
+    case IO_pio_completion:
+        return handle_pio(vio->req.addr, vio->req.size,
+                          vio->req.dir);
 
     default:
         return arch_vcpu_ioreq_completion(io_completion);
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 1620cc7..131cdf4 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -65,7 +65,7 @@  bool __nonnull(1, 2) hvm_emulate_one_insn(
     const char *descr);
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-    enum hvm_io_completion completion);
+    enum io_completion completion);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
     unsigned int errcode);
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index 854dc77..ca3bf29 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -21,7 +21,7 @@ 
 
 #include <xen/ioreq.h>
 
-bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion);
+bool arch_vcpu_ioreq_completion(enum io_completion io_completion);
 int arch_ioreq_server_map_pages(struct ioreq_server *s);
 void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
 void arch_ioreq_server_enable(struct ioreq_server *s);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 6c1feda..8adf455 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -28,13 +28,6 @@ 
 #include <asm/mtrr.h>
 #include <public/hvm/ioreq.h>
 
-enum hvm_io_completion {
-    HVMIO_no_completion,
-    HVMIO_mmio_completion,
-    HVMIO_pio_completion,
-    HVMIO_realmode_completion
-};
-
 struct hvm_vcpu_asid {
     uint64_t generation;
     uint32_t asid;
@@ -52,10 +45,6 @@  struct hvm_mmio_cache {
 };
 
 struct hvm_vcpu_io {
-    /* I/O request in flight to device model. */
-    enum hvm_io_completion io_completion;
-    ioreq_t                io_req;
-
     /*
      * HVM emulation:
      *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 62cbcdb..8269f84 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -145,6 +145,21 @@  void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
 
 struct waitqueue_vcpu;
 
+enum io_completion {
+    IO_no_completion,
+    IO_mmio_completion,
+    IO_pio_completion,
+#ifdef CONFIG_X86
+    IO_realmode_completion,
+#endif
+};
+
+struct vcpu_io {
+    /* I/O request in flight to device model. */
+    enum io_completion   completion;
+    ioreq_t              req;
+};
+
 struct vcpu
 {
     int              vcpu_id;
@@ -256,6 +271,10 @@  struct vcpu
     struct vpci_vcpu vpci;
 
     struct arch_vcpu arch;
+
+#ifdef CONFIG_IOREQ_SERVER
+    struct vcpu_io io;
+#endif
 };
 
 struct sched_unit {