
[V1,01/16] x86/ioreq: Prepare IOREQ feature for making it common

Message ID 1599769330-17656-2-git-send-email-olekstysh@gmail.com (mailing list archive)
State Superseded
Series IOREQ feature (+ virtio-mmio) on Arm

Commit Message

Oleksandr Tyshchenko Sept. 10, 2020, 8:21 p.m. UTC
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

As a lot of x86 code can be re-used on Arm later on, this patch
prepares the IOREQ support before moving it to common code. This way
the subsequent code movement will be an almost verbatim copy.

This support is going to be used on Arm to be able to run device
emulators outside of the Xen hypervisor.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"

Changes RFC -> V1:
   - new patch, was split from:
     "[RFC PATCH V1 01/12] hvm/ioreq: Make x86's IOREQ feature common"
   - fold the check of p->type into hvm_get_ioreq_server_range_type()
     and make it return success/failure
   - remove relocate_portio_handler() call from arch_hvm_ioreq_destroy()
     in arch/x86/hvm/ioreq.c
   - introduce arch_hvm_destroy_ioreq_server()/arch_handle_hvm_io_completion()
---
---
 xen/arch/x86/hvm/ioreq.c        | 117 ++++++++++++++++++++++++++--------------
 xen/include/asm-x86/hvm/ioreq.h |  16 ++++++
 2 files changed, 93 insertions(+), 40 deletions(-)

Comments

Jan Beulich Sept. 14, 2020, 1:52 p.m. UTC | #1
On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> As a lot of x86 code can be re-used on Arm later on, this patch
> prepares IOREQ support before moving to the common code. This way
> we will get almost a verbatim copy for a code movement.
> 
> This support is going to be used on Arm to be able run device
> emulator outside of Xen hypervisor.

This is all fine, but you should then go on and explain what you're
doing, and why (at which point it may become obvious that it would
be more helpful to split this into a couple of steps). In particular
something as suspicious as ...

> Changes RFC -> V1:
>    - new patch, was split from:
>      "[RFC PATCH V1 01/12] hvm/ioreq: Make x86's IOREQ feature common"
>    - fold the check of p->type into hvm_get_ioreq_server_range_type()
>      and make it return success/failure
>    - remove relocate_portio_handler() call from arch_hvm_ioreq_destroy()
>      in arch/x86/hvm/ioreq.c

... this (see below).

> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -170,6 +170,29 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>      return true;
>  }
>  
> +bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion)

Do we need "handle" in here? Without it, I'd also not have to ask about
moving hvm further to the front of the name...

> @@ -836,6 +848,12 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
>      return rc;
>  }
>  
> +/* Called when target domain is paused */
> +int arch_hvm_destroy_ioreq_server(struct hvm_ioreq_server *s)
> +{
> +    return p2m_set_ioreq_server(s->target, 0, s);
> +}

Why return "int" when ...

> @@ -855,7 +873,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>  
>      domain_pause(d);
>  
> -    p2m_set_ioreq_server(d, 0, s);
> +    arch_hvm_destroy_ioreq_server(s);

... the result has been ignored anyway? Or otherwise I guess you'd
want to add error handling here (but then the result of
p2m_set_ioreq_server() should still get ignored, for backwards
compatibility).
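
I.e. something along these lines (a sketch only, assuming the helper is
switched to void; not necessarily the final shape):

/* Called when target domain is paused */
void arch_hvm_destroy_ioreq_server(struct hvm_ioreq_server *s)
{
    /* The return value was ignored before, so keep ignoring it here. */
    p2m_set_ioreq_server(s->target, 0, s);
}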

> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>      struct hvm_ioreq_server *s;
>      unsigned int id;
>  
> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
> -        return;
> +    arch_hvm_ioreq_destroy(d);

So the call to relocate_portio_handler() simply goes away. No
replacement, no explanation?

> @@ -1239,19 +1256,15 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>      spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
>  }
>  
> -struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
> -                                                 ioreq_t *p)
> +int hvm_get_ioreq_server_range_type(struct domain *d,
> +                                    ioreq_t *p,

At least p, but perhaps also d can gain const?

> +                                    uint8_t *type,
> +                                    uint64_t *addr)

By the name the function returns a type for a range (albeit without
it being clear where the two bounds of such a range actually live).
By the implementation it looks more like you mean "range_and_type",
albeit still without there really being a range passed back to the
caller. Therefore I think I need some clarification on what's
intended before even being able to suggest something. From ...

> +struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
> +                                                 ioreq_t *p)
> +{
> +    struct hvm_ioreq_server *s;
> +    uint8_t type;
> +    uint64_t addr;
> +    unsigned int id;
> +
> +    if ( hvm_get_ioreq_server_range_type(d, p, &type, &addr) )
> +        return NULL;

... this use - maybe hvm_ioreq_server_get_type_addr() (albeit I
don't like this name very much)?
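
Something like this, perhaps (const added as suggested above; the name is
only a placeholder):

int hvm_ioreq_server_get_type_addr(const struct domain *d,
                                   const ioreq_t *p,
                                   uint8_t *type,
                                   uint64_t *addr);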

> @@ -1351,7 +1378,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>      pg = iorp->va;
>  
>      if ( !pg )
> -        return X86EMUL_UNHANDLEABLE;
> +        return IOREQ_IO_UNHANDLED;

At this example - why the IO infix, duplicating the prefix? I'd
suggest to either drop it (if the remaining identifiers are deemed
unambiguous enough) or use e.g. IOREQ_STATUS_*.
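
I.e. (just the rename, values unchanged):

#define IOREQ_STATUS_HANDLED     X86EMUL_OKAY
#define IOREQ_STATUS_UNHANDLED   X86EMUL_UNHANDLEABLE
#define IOREQ_STATUS_RETRY       X86EMUL_RETRY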

> @@ -1515,11 +1542,21 @@ static int hvm_access_cf8(
>      return X86EMUL_UNHANDLEABLE;
>  }
>  
> +void arch_hvm_ioreq_init(struct domain *d)
> +{
> +    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
> +}
> +
> +void arch_hvm_ioreq_destroy(struct domain *d)
> +{
> +
> +}

Stray blank line?

Jan
Oleksandr Tyshchenko Sept. 21, 2020, 12:22 p.m. UTC | #2
On 14.09.20 16:52, Jan Beulich wrote:

Hi Jan

> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>> As a lot of x86 code can be re-used on Arm later on, this patch
>> prepares IOREQ support before moving to the common code. This way
>> we will get almost a verbatim copy for a code movement.
>>
>> This support is going to be used on Arm to be able run device
>> emulator outside of Xen hypervisor.
> This is all fine, but you should then go on and explain what you're
> doing, and why (at which point it may become obvious that it would
> be more helpful to split this into a couple of steps).

Got it. Will add an explanation.


> In particular
> something as suspicious as ...
>
>> Changes RFC -> V1:
>>     - new patch, was split from:
>>       "[RFC PATCH V1 01/12] hvm/ioreq: Make x86's IOREQ feature common"
>>     - fold the check of p->type into hvm_get_ioreq_server_range_type()
>>       and make it return success/failure
>>     - remove relocate_portio_handler() call from arch_hvm_ioreq_destroy()
>>       in arch/x86/hvm/ioreq.c
> ... this (see below).
>
>> --- a/xen/arch/x86/hvm/ioreq.c
>> +++ b/xen/arch/x86/hvm/ioreq.c
>> @@ -170,6 +170,29 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>>       return true;
>>   }
>>   
>> +bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion)
> Do we need "handle" in here? Without it, I'd also not have to ask about
> moving hvm further to the front of the name...
For me, without "handle" it will sound a bit confusing, because the enum
type has a similar name but without the "arch" prefix:
bool arch_hvm_io_completion(enum hvm_io_completion io_completion)


Shall I then move "hvm" to the front of the name here?


>
>> @@ -836,6 +848,12 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
>>       return rc;
>>   }
>>   
>> +/* Called when target domain is paused */
>> +int arch_hvm_destroy_ioreq_server(struct hvm_ioreq_server *s)
>> +{
>> +    return p2m_set_ioreq_server(s->target, 0, s);
>> +}
> Why return "int" when ...
>
>> @@ -855,7 +873,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>>   
>>       domain_pause(d);
>>   
>> -    p2m_set_ioreq_server(d, 0, s);
>> +    arch_hvm_destroy_ioreq_server(s);
> ... the result has been ignored anyway? Or otherwise I guess you'd
> want to add error handling here (but then the result of
> p2m_set_ioreq_server() should still get ignored, for backwards
> compatibility).

I didn't plan to add error handling here. Agreed, I will make
arch_hvm_destroy_ioreq_server() return void.


>
>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>       struct hvm_ioreq_server *s;
>>       unsigned int id;
>>   
>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>> -        return;
>> +    arch_hvm_ioreq_destroy(d);
> So the call to relocate_portio_handler() simply goes away. No
> replacement, no explanation?
As I understand from the review comment on that part of the RFC patch,
there is not much point in keeping this. Indeed, looking at the code I
came to the same opinion.
I should have added an explanation in the commit description at least.
Or shall I bring the call back?


>
>> @@ -1239,19 +1256,15 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>       spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
>>   }
>>   
>> -struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>> -                                                 ioreq_t *p)
>> +int hvm_get_ioreq_server_range_type(struct domain *d,
>> +                                    ioreq_t *p,
> At least p, but perhaps also d can gain const?

Agree, will add.


>
>> +                                    uint8_t *type,
>> +                                    uint64_t *addr)
> By the name the function returns a type for a range (albeit without
> it being clear where the two bounds of such a range actually live).
> By the implementation is looks more like you mean "range_and_type",
> albeit still without there really being a range passed back to the
> caller. Therefore I think I need some clarification on what's
> intended before even being able to suggest something.

This function is just an attempt to split the arch-specific bits (cf8
handling) out of the "common" hvm_select_ioreq_server().


>  From ...
>
>> +struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>> +                                                 ioreq_t *p)
>> +{
>> +    struct hvm_ioreq_server *s;
>> +    uint8_t type;
>> +    uint64_t addr;
>> +    unsigned int id;
>> +
>> +    if ( hvm_get_ioreq_server_range_type(d, p, &type, &addr) )
>> +        return NULL;
> ... this use - maybe hvm_ioreq_server_get_type_addr() (albeit I
> don't like this name very much)?

Yes, hvm_ioreq_server_get_type_addr() better represents what the function does.


>
>> @@ -1351,7 +1378,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>>       pg = iorp->va;
>>   
>>       if ( !pg )
>> -        return X86EMUL_UNHANDLEABLE;
>> +        return IOREQ_IO_UNHANDLED;
> At this example - why the IO infix, duplicating the prefix? I'd
> suggest to either drop it (if the remaining identifiers are deemed
> unambiguous enough) or use e.g. IOREQ_STATUS_*.

Agree, I would prefer IOREQ_STATUS_*


>> @@ -1515,11 +1542,21 @@ static int hvm_access_cf8(
>>       return X86EMUL_UNHANDLEABLE;
>>   }
>>   
>> +void arch_hvm_ioreq_init(struct domain *d)
>> +{
>> +    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
>> +}
>> +
>> +void arch_hvm_ioreq_destroy(struct domain *d)
>> +{
>> +
>> +}
> Stray blank line?

Will remove.
Jan Beulich Sept. 21, 2020, 12:31 p.m. UTC | #3
On 21.09.2020 14:22, Oleksandr wrote:
> On 14.09.20 16:52, Jan Beulich wrote:
>> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>>> --- a/xen/arch/x86/hvm/ioreq.c
>>> +++ b/xen/arch/x86/hvm/ioreq.c
>>> @@ -170,6 +170,29 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>>>       return true;
>>>   }
>>>   
>>> +bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion)
>> Do we need "handle" in here? Without it, I'd also not have to ask about
>> moving hvm further to the front of the name...
> For me without "handle" it will sound a bit confusing because of the 
> enum type which
> has a similar name but without "arch" prefix:
> bool arch_hvm_io_completion(enum hvm_io_completion io_completion)

Every function handles something; there's no point including
"handle" in every name. Or else we'd have handle_memset()
instead of just memset(), for example.

> Shall I then move "hvm" to the front of the name here?

As per comments on later patches, I think we want to consider dropping
hvm prefixes or infixes altogether from the functions involved here.
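
E.g. something like (purely illustrative; the final naming is to be
settled across the series):

bool arch_io_completion(enum hvm_io_completion io_completion);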

>>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>>       struct hvm_ioreq_server *s;
>>>       unsigned int id;
>>>   
>>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>>> -        return;
>>> +    arch_hvm_ioreq_destroy(d);
>> So the call to relocate_portio_handler() simply goes away. No
>> replacement, no explanation?
> As I understand from the review comment on that for the RFC patch, there 
> is no
> a lot of point of keeping this. Indeed, looking at the code I got the 
> same opinion.
> I should have added an explanation in the commit description at least.
> Or shall I return the call back?

If there's a reason to drop it (which I can't see, but I also
don't recall seeing the discussion you're mentioning), then doing
so should be a separate patch with suitable reasoning. In the
patch here you definitely should only transform what's already
there.

Jan
Oleksandr Tyshchenko Sept. 21, 2020, 12:47 p.m. UTC | #4
On 21.09.20 15:31, Jan Beulich wrote:

Hi

> On 21.09.2020 14:22, Oleksandr wrote:
>> On 14.09.20 16:52, Jan Beulich wrote:
>>> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>>>> --- a/xen/arch/x86/hvm/ioreq.c
>>>> +++ b/xen/arch/x86/hvm/ioreq.c
>>>> @@ -170,6 +170,29 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>>>>        return true;
>>>>    }
>>>>    
>>>> +bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion)
>>> Do we need "handle" in here? Without it, I'd also not have to ask about
>>> moving hvm further to the front of the name...
>> For me without "handle" it will sound a bit confusing because of the
>> enum type which
>> has a similar name but without "arch" prefix:
>> bool arch_hvm_io_completion(enum hvm_io_completion io_completion)
> Every function handles something; there's no point including
> "handle" in every name. Or else we'd have handle_memset()
> instead of just memset(), for example.

Got it. Will remove "handle" here.


>
>> Shall I then move "hvm" to the front of the name here?
> As per comments on later patches, I think we want to consider dropping
> hvm prefixes or infixes altogether from the functions involved here.
>>>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>>>        struct hvm_ioreq_server *s;
>>>>        unsigned int id;
>>>>    
>>>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>>>> -        return;
>>>> +    arch_hvm_ioreq_destroy(d);
>>> So the call to relocate_portio_handler() simply goes away. No
>>> replacement, no explanation?
>> As I understand from the review comment on that for the RFC patch, there
>> is no
>> a lot of point of keeping this. Indeed, looking at the code I got the
>> same opinion.
>> I should have added an explanation in the commit description at least.
>> Or shall I return the call back?
> If there's a reason to drop it (which I can't see, but I also
> don't recall seeing the discussion you're mentioning), then doing
> so should be a separate patch with suitable reasoning. In the
> patch here you definitely should only transform what's already
> there.
Sounds reasonable. Please see the comment below 
relocate_portio_handler() here:
https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg78512.html

However, I might have interpreted the request incorrectly.
Jan Beulich Sept. 21, 2020, 1:29 p.m. UTC | #5
On 21.09.2020 14:47, Oleksandr wrote:
> On 21.09.20 15:31, Jan Beulich wrote:
>> On 21.09.2020 14:22, Oleksandr wrote:
>>> On 14.09.20 16:52, Jan Beulich wrote:
>>>> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>>>>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>>>>        struct hvm_ioreq_server *s;
>>>>>        unsigned int id;
>>>>>    
>>>>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>>>>> -        return;
>>>>> +    arch_hvm_ioreq_destroy(d);
>>>> So the call to relocate_portio_handler() simply goes away. No
>>>> replacement, no explanation?
>>> As I understand from the review comment on that for the RFC patch, there
>>> is no
>>> a lot of point of keeping this. Indeed, looking at the code I got the
>>> same opinion.
>>> I should have added an explanation in the commit description at least.
>>> Or shall I return the call back?
>> If there's a reason to drop it (which I can't see, but I also
>> don't recall seeing the discussion you're mentioning), then doing
>> so should be a separate patch with suitable reasoning. In the
>> patch here you definitely should only transform what's already
>> there.
> Sounds reasonable. Please see the comment below 
> relocate_portio_handler() here:
> https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg78512.html
> 
> However, I might interpret the request incorrectly.

I'm afraid you do: The way you've coded it the function was a no-op.
But that's because you broke the caller by not bailing from
hvm_destroy_all_ioreq_servers() if relocate_portio_handler() returned
false. IOW you did assume that moving the "return" statement into an
inline function would have an effect on its caller(s). For questions
like this one it also often helps to look at the commit introducing
the construct in question (b3344bb1cae0 in this case): Chances are
that the description helps, albeit I agree there are many cases
(particularly the farther you get into distant past) where it isn't
of much help.
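
To illustrate the point (a hypothetical sketch of what the RFC variant
amounted to, not actual code from the series):

void arch_hvm_ioreq_destroy(struct domain *d)
{
    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
        return; /* returns from this helper only, not from the caller */
}

which leaves hvm_destroy_all_ioreq_servers() running to completion either
way.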

Jan
Oleksandr Tyshchenko Sept. 21, 2020, 2:43 p.m. UTC | #6
On 21.09.20 16:29, Jan Beulich wrote:

Hi

> On 21.09.2020 14:47, Oleksandr wrote:
>> On 21.09.20 15:31, Jan Beulich wrote:
>>> On 21.09.2020 14:22, Oleksandr wrote:
>>>> On 14.09.20 16:52, Jan Beulich wrote:
>>>>> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>>>>>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>>>>>         struct hvm_ioreq_server *s;
>>>>>>         unsigned int id;
>>>>>>     
>>>>>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>>>>>> -        return;
>>>>>> +    arch_hvm_ioreq_destroy(d);
>>>>> So the call to relocate_portio_handler() simply goes away. No
>>>>> replacement, no explanation?
>>>> As I understand from the review comment on that for the RFC patch, there
>>>> is no
>>>> a lot of point of keeping this. Indeed, looking at the code I got the
>>>> same opinion.
>>>> I should have added an explanation in the commit description at least.
>>>> Or shall I return the call back?
>>> If there's a reason to drop it (which I can't see, but I also
>>> don't recall seeing the discussion you're mentioning), then doing
>>> so should be a separate patch with suitable reasoning. In the
>>> patch here you definitely should only transform what's already
>>> there.
>> Sounds reasonable. Please see the comment below
>> relocate_portio_handler() here:
>> https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg78512.html
>>
>> However, I might interpret the request incorrectly.
> I'm afraid you do: The way you've coded it the function was a no-op.
> But that's because you broke the caller by not bailing from
> hvm_destroy_all_ioreq_servers() if relocate_portio_handler() returned
> false. IOW you did assume that moving the "return" statement into an
> inline function would have an effect on its caller(s). For questions
> like this one it also often helps to look at the commit introducing
> the construct in question (b3344bb1cae0 in this case): Chances are
> that the description helps, albeit I agree there are many cases
> (particularly the farther you get into distant past) where it isn't
> of much help.


Hmm, now it's clear to me what I did wrong. By calling
relocate_portio_handler() here we don't really want to relocate anything;
we just use it as a flag to indicate whether we actually need to release
the IOREQ resources further down in hvm_destroy_all_ioreq_servers().
Thank you for the explanation, it wasn't obvious to me at the beginning.
But now the question is how to do this correctly while retaining the
current behaviour (so as not to break callers)?

I see two options here:
1. Place the check of relocate_portio_handler() in arch_hvm_ioreq_destroy()
   and make the latter return bool. The "common"
   hvm_destroy_all_ioreq_servers() will then check the return value and
   bail out if it is false (see the sketch below).
2. Don't use relocate_portio_handler(); instead introduce a flag into
   struct hvm_domain's ioreq_server sub-structure.


Personally I don't much like option 1, and option 2 is a bit of overhead.

What do you think?
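
A minimal sketch of option 1 (only to illustrate the shape; names are
still open):

/* x86: report whether there is anything to tear down */
bool arch_hvm_ioreq_destroy(struct domain *d)
{
    return relocate_portio_handler(d, 0xcf8, 0xcf8, 4);
}

and in the common hvm_destroy_all_ioreq_servers():

    if ( !arch_hvm_ioreq_destroy(d) )
        return;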
Jan Beulich Sept. 21, 2020, 3:28 p.m. UTC | #7
On 21.09.2020 16:43, Oleksandr wrote:
> On 21.09.20 16:29, Jan Beulich wrote:
>> On 21.09.2020 14:47, Oleksandr wrote:
>>> On 21.09.20 15:31, Jan Beulich wrote:
>>>> On 21.09.2020 14:22, Oleksandr wrote:
>>>>> On 14.09.20 16:52, Jan Beulich wrote:
>>>>>> On 10.09.2020 22:21, Oleksandr Tyshchenko wrote:
>>>>>>> @@ -1215,8 +1233,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>>>>>>>         struct hvm_ioreq_server *s;
>>>>>>>         unsigned int id;
>>>>>>>     
>>>>>>> -    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
>>>>>>> -        return;
>>>>>>> +    arch_hvm_ioreq_destroy(d);
>>>>>> So the call to relocate_portio_handler() simply goes away. No
>>>>>> replacement, no explanation?
>>>>> As I understand from the review comment on that for the RFC patch, there
>>>>> is no
>>>>> a lot of point of keeping this. Indeed, looking at the code I got the
>>>>> same opinion.
>>>>> I should have added an explanation in the commit description at least.
>>>>> Or shall I return the call back?
>>>> If there's a reason to drop it (which I can't see, but I also
>>>> don't recall seeing the discussion you're mentioning), then doing
>>>> so should be a separate patch with suitable reasoning. In the
>>>> patch here you definitely should only transform what's already
>>>> there.
>>> Sounds reasonable. Please see the comment below
>>> relocate_portio_handler() here:
>>> https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg78512.html
>>>
>>> However, I might interpret the request incorrectly.
>> I'm afraid you do: The way you've coded it the function was a no-op.
>> But that's because you broke the caller by not bailing from
>> hvm_destroy_all_ioreq_servers() if relocate_portio_handler() returned
>> false. IOW you did assume that moving the "return" statement into an
>> inline function would have an effect on its caller(s). For questions
>> like this one it also often helps to look at the commit introducing
>> the construct in question (b3344bb1cae0 in this case): Chances are
>> that the description helps, albeit I agree there are many cases
>> (particularly the farther you get into distant past) where it isn't
>> of much help.
> 
> 
> Hmm, now it's clear to me what I did wrong. By calling 
> relocate_portio_handler() here we don't really want to relocate 
> something, we just use it as a flag to indicate whether we need to 
> actually release IOREQ resources down the 
> hvm_destroy_all_ioreq_servers(). Thank you for the explanation, it 
> wasn't obvious to me at the beginning. But, now the question is how to 
> do it in a correct way and retain current behavior (to not break callers)?
> 
> I see two options here:
> 1. Place the check of relocate_portio_handler() in 
> arch_hvm_ioreq_destroy() and make the later returning bool.
>      The "common" hvm_destroy_all_ioreq_servers() will check for the 
> return value and bail out if false.
> 2. Don't use relocate_portio_handler(), instead introduce a flag into 
> struct hvm_domain's ioreq_server sub-structure.
> 
> 
> Personally I don't like much the option 1 and option 2 is a little bit 
> overhead.

Well, 1 is what matches current behavior, so I'd advocate for you
not changing the abstract model. Or else, again, change the abstract
model in a separate prereq patch.

Jan
Julien Grall Sept. 23, 2020, 5:22 p.m. UTC | #8
Hi,

On 10/09/2020 21:21, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> As a lot of x86 code can be re-used on Arm later on, this patch
> prepares IOREQ support before moving to the common code. This way
> we will get almost a verbatim copy for a code movement.

FWIW, I agree with Jan that we need more details on what you are doing
and why. It would be worth considering splitting this into smaller patches.

> 
> This support is going to be used on Arm to be able run device
> emulator outside of Xen hypervisor.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Usually the first signed-off is the author of the patch. However, this
patch looks quite far off from what I originally wrote.

So I don't feel my signed-off-by is actually warranted here. If you want
to credit me, then you can mention it in the commit message.

> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 

Cheers,
Oleksandr Tyshchenko Sept. 23, 2020, 6:08 p.m. UTC | #9
On 23.09.20 20:22, Julien Grall wrote:
> Hi,

Hi Julien


>
> On 10/09/2020 21:21, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>> As a lot of x86 code can be re-used on Arm later on, this patch
>> prepares IOREQ support before moving to the common code. This way
>> we will get almost a verbatim copy for a code movement.
>
> FWIW, I agree with Jan that we need more details what and why you are 
> going it. It would be worth considering to split in smaller patch.

ok


>
>
>>
>> This support is going to be used on Arm to be able run device
>> emulator outside of Xen hypervisor.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>
> Usually the first signed-off is the author of the patch. However, this 
> patch look quite far off from what I originally wrote.
>
> So I don't feel my signed-off-by is actually warrant here. If you want 
> to credit me, then you can mention it in the commit message.
This is related to all patches in this series. This patch series is the
second attempt (the first was the RFC) to make the IOREQ support common,
and it became quite different from the initial commits.
I am sorry, I got completely lost as to whether a particular patch in this
series is close to or far from what you originally wrote, i.e. whether I
should retain your SoB or drop it. So, in order *not to make a mistake* in
such an important question, I decided to add your SoB to each patch in
this series and also add a note to each patch describing where this
series came from.


>
>> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>
> Cheers,
>

Patch

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 1cc27df..d912655 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -170,6 +170,29 @@  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
     return true;
 }
 
+bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion)
+{
+    switch ( io_completion )
+    {
+    case HVMIO_realmode_completion:
+    {
+        struct hvm_emulate_ctxt ctxt;
+
+        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
+        vmx_realmode_emulate_one(&ctxt);
+        hvm_emulate_writeback(&ctxt);
+
+        break;
+    }
+
+    default:
+        ASSERT_UNREACHABLE();
+        break;
+    }
+
+    return true;
+}
+
 bool handle_hvm_io_completion(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -209,19 +232,8 @@  bool handle_hvm_io_completion(struct vcpu *v)
         return handle_pio(vio->io_req.addr, vio->io_req.size,
                           vio->io_req.dir);
 
-    case HVMIO_realmode_completion:
-    {
-        struct hvm_emulate_ctxt ctxt;
-
-        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
-        vmx_realmode_emulate_one(&ctxt);
-        hvm_emulate_writeback(&ctxt);
-
-        break;
-    }
     default:
-        ASSERT_UNREACHABLE();
-        break;
+        return arch_handle_hvm_io_completion(io_completion);
     }
 
     return true;
@@ -836,6 +848,12 @@  int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     return rc;
 }
 
+/* Called when target domain is paused */
+int arch_hvm_destroy_ioreq_server(struct hvm_ioreq_server *s)
+{
+    return p2m_set_ioreq_server(s->target, 0, s);
+}
+
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 {
     struct hvm_ioreq_server *s;
@@ -855,7 +873,7 @@  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
     domain_pause(d);
 
-    p2m_set_ioreq_server(d, 0, s);
+    arch_hvm_destroy_ioreq_server(s);
 
     hvm_ioreq_server_disable(s);
 
@@ -1215,8 +1233,7 @@  void hvm_destroy_all_ioreq_servers(struct domain *d)
     struct hvm_ioreq_server *s;
     unsigned int id;
 
-    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
-        return;
+    arch_hvm_ioreq_destroy(d);
 
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
@@ -1239,19 +1256,15 @@  void hvm_destroy_all_ioreq_servers(struct domain *d)
     spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 }
 
-struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
-                                                 ioreq_t *p)
+int hvm_get_ioreq_server_range_type(struct domain *d,
+                                    ioreq_t *p,
+                                    uint8_t *type,
+                                    uint64_t *addr)
 {
-    struct hvm_ioreq_server *s;
-    uint32_t cf8;
-    uint8_t type;
-    uint64_t addr;
-    unsigned int id;
+    uint32_t cf8 = d->arch.hvm.pci_cf8;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return NULL;
-
-    cf8 = d->arch.hvm.pci_cf8;
+        return -EINVAL;
 
     if ( p->type == IOREQ_TYPE_PIO &&
          (p->addr & ~3) == 0xcfc &&
@@ -1264,8 +1277,8 @@  struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);
 
         /* PCI config data cycle */
-        type = XEN_DMOP_IO_RANGE_PCI;
-        addr = ((uint64_t)sbdf.sbdf << 32) | reg;
+        *type = XEN_DMOP_IO_RANGE_PCI;
+        *addr = ((uint64_t)sbdf.sbdf << 32) | reg;
         /* AMD extended configuration space access? */
         if ( CF8_ADDR_HI(cf8) &&
              d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
@@ -1277,16 +1290,30 @@  struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
 
             if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
                  (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
-                addr |= CF8_ADDR_HI(cf8);
+                *addr |= CF8_ADDR_HI(cf8);
         }
     }
     else
     {
-        type = (p->type == IOREQ_TYPE_PIO) ?
-                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
-        addr = p->addr;
+        *type = (p->type == IOREQ_TYPE_PIO) ?
+                 XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
+        *addr = p->addr;
     }
 
+    return 0;
+}
+
+struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
+                                                 ioreq_t *p)
+{
+    struct hvm_ioreq_server *s;
+    uint8_t type;
+    uint64_t addr;
+    unsigned int id;
+
+    if ( hvm_get_ioreq_server_range_type(d, p, &type, &addr) )
+        return NULL;
+
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct rangeset *r;
@@ -1351,7 +1378,7 @@  static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     pg = iorp->va;
 
     if ( !pg )
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_IO_UNHANDLED;
 
     /*
      * Return 0 for the cases we can't deal with:
@@ -1381,7 +1408,7 @@  static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_IO_UNHANDLED;
     }
 
     spin_lock(&s->bufioreq_lock);
@@ -1391,7 +1418,7 @@  static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     {
         /* The queue is full: send the iopacket through the normal path. */
         spin_unlock(&s->bufioreq_lock);
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_IO_UNHANDLED;
     }
 
     pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -1422,7 +1449,7 @@  static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     notify_via_xen_event_channel(d, s->bufioreq_evtchn);
     spin_unlock(&s->bufioreq_lock);
 
-    return X86EMUL_OKAY;
+    return IOREQ_IO_HANDLED;
 }
 
 int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
@@ -1438,7 +1465,7 @@  int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
         return hvm_send_buffered_ioreq(s, proto_p);
 
     if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
-        return X86EMUL_RETRY;
+        return IOREQ_IO_RETRY;
 
     list_for_each_entry ( sv,
                           &s->ioreq_vcpu_list,
@@ -1478,11 +1505,11 @@  int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
             notify_via_xen_event_channel(d, port);
 
             sv->pending = true;
-            return X86EMUL_RETRY;
+            return IOREQ_IO_RETRY;
         }
     }
 
-    return X86EMUL_UNHANDLEABLE;
+    return IOREQ_IO_UNHANDLED;
 }
 
 unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
@@ -1496,7 +1523,7 @@  unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
         if ( !s->enabled )
             continue;
 
-        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+        if ( hvm_send_ioreq(s, p, buffered) == IOREQ_IO_UNHANDLED )
             failed++;
     }
 
@@ -1515,11 +1542,21 @@  static int hvm_access_cf8(
     return X86EMUL_UNHANDLEABLE;
 }
 
+void arch_hvm_ioreq_init(struct domain *d)
+{
+    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+}
+
+void arch_hvm_ioreq_destroy(struct domain *d)
+{
+
+}
+
 void hvm_ioreq_init(struct domain *d)
 {
     spin_lock_init(&d->arch.hvm.ioreq_server.lock);
 
-    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+    arch_hvm_ioreq_init(d);
 }
 
 /*
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index e2588e9..151b92b 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -55,6 +55,22 @@  unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
 void hvm_ioreq_init(struct domain *d);
 
+int arch_hvm_destroy_ioreq_server(struct hvm_ioreq_server *s);
+
+bool arch_handle_hvm_io_completion(enum hvm_io_completion io_completion);
+
+int hvm_get_ioreq_server_range_type(struct domain *d,
+                                    ioreq_t *p,
+                                    uint8_t *type,
+                                    uint64_t *addr);
+
+void arch_hvm_ioreq_init(struct domain *d);
+void arch_hvm_ioreq_destroy(struct domain *d);
+
+#define IOREQ_IO_HANDLED     X86EMUL_OKAY
+#define IOREQ_IO_UNHANDLED   X86EMUL_UNHANDLEABLE
+#define IOREQ_IO_RETRY       X86EMUL_RETRY
+
 #endif /* __ASM_X86_HVM_IOREQ_H__ */
 
 /*