[2/4] x86/svm: split svm_intercept_msr() into svm_{set,clear}_msr_intercept()

Message ID 20230227075652.3782973-3-burzalodowa@gmail.com (mailing list archive)
State New, archived
Series hvm: add hvm_funcs hooks for msr intercept handling

Commit Message

Xenia Ragiadakou Feb. 27, 2023, 7:56 a.m. UTC
This change aims to render the control interface of MSR intercepts identical
between SVM and VMX code, so that the control of the MSR intercept in common
code can be done through an hvm_funcs callback.

Create two new functions:
- svm_set_msr_intercept(), enables interception of read/write accesses to the
  corresponding MSR, by setting the corresponding read/write bits in the MSRPM
  based on the flags
- svm_clear_msr_intercept(), disables interception of read/write accesses to
  the corresponding MSR, by clearing the corresponding read/write bits in the
  MSRPM based on the flags

More specifically:
- if flag is MSR_R, the functions {set,clear} the MSRPM bit that controls read
  access to the MSR
- if flag is MSR_W, the functions {set,clear} the MSRPM bit that controls write
  access to the MSR
- if flag is MSR_RW, the functions {set,clear} both MSRPM bits
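
As a side note — inferred from the bit arithmetic in the patch rather than
stated there — each MSR owns two adjacent bits in the MSRPM, and the flags
are plain masks over them:

    /* Per MSR offset, after masking with 0x1fff: */
    read_bit  = msr * 2;      /* set => read accesses are intercepted */
    write_bit = msr * 2 + 1;  /* set => write accesses are intercepted */

With the definitions added to asm/hvm/hvm.h, MSR_R evaluates to 1, MSR_W to
2 and MSR_RW to 3, so e.g. svm_set_msr_intercept(v, msr, MSR_W) performs
only the single __set_bit(msr * 2 + 1, msr_bit).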

Place the definitions of the flags in asm/hvm/hvm.h because they are
intended to be used by VMX code as well.

Remove svm_intercept_msr() and MSR_INTERCEPT_* definitions, and use the new
functions and flags instead.
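
For illustration — a sketch of the conversion, matching the call sites
changed by the patch rather than adding anything new:

    svm_intercept_msr(v, msr, MSR_INTERCEPT_NONE);   /* old */
    svm_clear_msr_intercept(v, msr, MSR_RW);         /* new */

    svm_intercept_msr(v, msr, MSR_INTERCEPT_RW);     /* old */
    svm_set_msr_intercept(v, msr, MSR_RW);           /* new */

    svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);  /* old */
    svm_set_msr_intercept(v, msr, MSR_W);            /* new */
    svm_clear_msr_intercept(v, msr, MSR_R);          /* new */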

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
 xen/arch/x86/cpu/vpmu_amd.c             |  9 +--
 xen/arch/x86/hvm/svm/svm.c              | 80 ++++++++++++++++---------
 xen/arch/x86/include/asm/hvm/hvm.h      |  4 ++
 xen/arch/x86/include/asm/hvm/svm/vmcb.h | 13 ++--
 4 files changed, 66 insertions(+), 40 deletions(-)

Comments

Jan Beulich Feb. 28, 2023, 2:20 p.m. UTC | #1
On 27.02.2023 08:56, Xenia Ragiadakou wrote:
> This change aims to render the control interface of MSR intercepts identical
> between SVM and VMX code, so that the control of the MSR intercept in common
> code can be done through an hvm_funcs callback.
> 
> Create two new functions:
> - svm_set_msr_intercept(), enables interception of read/write accesses to the
>   corresponding MSR, by setting the corresponding read/write bits in the MSRPM
>   based on the flags
> - svm_clear_msr_intercept(), disables interception of read/write accesses to
>   the corresponding MSR, by clearing the corresponding read/write bits in the
>   MSRPM based on the flags

In how far is VMX'es present model better than SVM's? They both have pros and
cons, depending on the specific use. I'm not asking to do it the other way
around (at least not yet), I'd merely like to understand why we're going to
gain two new hooks (if I'm not mistaken) when we could also get away with
just one.

> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -165,8 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
> -        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
> +        svm_clear_msr_intercept(v, counters[i], MSR_RW);
> +        svm_set_msr_intercept(v, ctrls[i], MSR_W);
> +        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
>      }
>  
>      msr_bitmap_on(vpmu);
> @@ -179,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
> -        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
> +        svm_set_msr_intercept(v, counters[i], MSR_RW);
> +        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
>      }

This, aiui, restores the original state (I question the condition that the
caller uses, but that's a separate issue). Is the single "set" in the
earlier function therefore actually needed?

> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -288,23 +288,34 @@ svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
>      return msr_bit;
>  }
>  
> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)

Can the last parameter become "unsigned int", please?

>  {
> -    unsigned long *msr_bit;
> -    const struct domain *d = v->domain;
> +    unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
> +
> +    if ( msr_bit == NULL )
> +        return;
>  
> -    msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
> -    BUG_ON(msr_bit == NULL);

The conversion from BUG_ON() to "return" needs explanation; I don't see
why that's warranted here. From all I can tell the case is impossible
due to the way construct_vmcb() works, and hence BUG_ON() is appropriate
(and personally I would also be fine with no check at all, provided I'm
not overlooking anything).
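
For reference, svm_msrbit() — assuming the current implementation, which
this patch leaves untouched — returns a pointer into one of three 2048-MSR
blocks of the MSRPM, and NULL only for MSRs outside those ranges:

    static unsigned long *
    svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
    {
        unsigned long *msr_bit = NULL;

        if ( msr <= 0x1fff )
            msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG;
        else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
            msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG;
        else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
            msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG;

        return msr_bit;
    }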

> @@ -312,8 +323,10 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
>  {
>      struct vcpu *v;
>  
> -    for_each_vcpu ( d, v )
> -        svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
> +    for_each_vcpu ( d, v ) {

Nit: Brace placement.

> @@ -595,22 +608,31 @@ static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
>      vmcb_set_exception_intercepts(vmcb, bitmap);
>  
>      /* Give access to MSR_SPEC_CTRL if the guest has been told about it. */
> -    svm_intercept_msr(v, MSR_SPEC_CTRL,
> -                      cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
> +    if ( cp->extd.ibrs )
> +        svm_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
> +    else
> +        svm_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
>  
>      /*
>       * Always trap write accesses to VIRT_SPEC_CTRL in order to cache the guest
>       * setting and avoid having to perform a rdmsr on vmexit to get the guest
>       * setting even if VIRT_SSBD is offered to Xen itself.
>       */
> -    svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
> -                      cp->extd.virt_ssbd && cpu_has_virt_ssbd &&
> -                      !cpu_has_amd_ssbd ?
> -                      MSR_INTERCEPT_WRITE : MSR_INTERCEPT_RW);
> +    if ( cp->extd.virt_ssbd && cpu_has_virt_ssbd && !cpu_has_amd_ssbd )
> +    {
> +        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_W);
> +        svm_clear_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_R);
> +    }
> +    else
> +    {
> +        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_RW);
> +    }

Preferably omit the braces for "else" here, just like you do above and ...

>      /* Give access to MSR_PRED_CMD if the guest has been told about it. */
> -    svm_intercept_msr(v, MSR_PRED_CMD,
> -                      cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
> +    if ( cp->extd.ibpb )
> +        svm_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
> +    else
> +        svm_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);

... here.

> --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
> +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
> @@ -585,13 +585,12 @@ void svm_destroy_vmcb(struct vcpu *v);
>  
>  void setup_vmcb_dump(void);
>  
> -#define MSR_INTERCEPT_NONE    0
> -#define MSR_INTERCEPT_READ    1
> -#define MSR_INTERCEPT_WRITE   2
> -#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
> -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
> -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
> +void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
> +#define svm_disable_intercept_for_msr(v, msr) \
> +    svm_clear_msr_intercept((v), (msr), MSR_RW)
> +#define svm_enable_intercept_for_msr(v, msr) \
> +    svm_set_msr_intercept((v), (msr), MSR_RW)

Please avoid excess parentheses. Also could you clarify why you retain
these shorthands when you don't use them in the conversion that you're
doing (e.g. amd_vpmu_unset_msr_bitmap())? Are you intending them to go
away down the road?

Jan
Xenia Ragiadakou Feb. 28, 2023, 3:05 p.m. UTC | #2
Hi Jan,

On 2/28/23 16:20, Jan Beulich wrote:
> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>> This change aims to render the control interface of MSR intercepts identical
>> between SVM and VMX code, so that the control of the MSR intercept in common
>> code can be done through an hvm_funcs callback.
>>
>> Create two new functions:
>> - svm_set_msr_intercept(), enables interception of read/write accesses to the
>>    corresponding MSR, by setting the corresponding read/write bits in the MSRPM
>>    based on the flags
>> - svm_clear_msr_intercept(), disables interception of read/write accesses to
>>    the corresponding MSR, by clearing the corresponding read/write bits in the
>>    MSRPM based on the flags
> 
> In how far is VMX'es present model better than SVM's? They both have pros and
> cons, depending on the specific use. I'm not asking to do it the other way
> around (at least not yet), I'd merely like to understand why we're going to
> gain two new hooks (if I'm not mistaken) when we could also get away with
> just one.

SVM approach always touches both read/write bits (either by setting or 
clearing them). I thought that using the SVM approach on VMX could be 
considered a functional change (because there are parts where VMX 
assumes that a bit is already set or cleared and does not touch it).
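
Roughly, as a sketch — the vmx_* names stand in for the VMX-side helpers
this series introduces and are an assumption here, not existing code:

    /* SVM's old model: one call states the full intercept mode, so it
     * always writes both bits. */
    svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE); /* sets W, clears R */

    /* VMX's model: each operation touches only the requested bits and
     * leaves the other bit in whatever state it already had. */
    vmx_set_msr_intercept(v, msr, MSR_W);   /* sets W; R untouched */
    vmx_clear_msr_intercept(v, msr, MSR_R); /* clears R; W untouched */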

> 
>> --- a/xen/arch/x86/cpu/vpmu_amd.c
>> +++ b/xen/arch/x86/cpu/vpmu_amd.c
>> @@ -165,8 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>>   
>>       for ( i = 0; i < num_counters; i++ )
>>       {
>> -        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
>> -        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
>> +        svm_clear_msr_intercept(v, counters[i], MSR_RW);
>> +        svm_set_msr_intercept(v, ctrls[i], MSR_W);
>> +        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>       }
>>   
>>       msr_bitmap_on(vpmu);
>> @@ -179,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>>   
>>       for ( i = 0; i < num_counters; i++ )
>>       {
>> -        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
>> -        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
>> +        svm_set_msr_intercept(v, counters[i], MSR_RW);
>> +        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>       }
> 
> This, aiui, restores the original state (I question the condition that the
> caller uses, but that's a separate issue). Is the single "set" in the
> earlier function therefore actually needed?

This is what svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE) does,
i.e. it sets the WRITE bit and clears the READ bit. The "set" is not
needed if the bit is already set, but in my opinion the redundant parts
should be removed in another patch.
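
As a hypothetical follow-up cleanup — not something this patch does — if
the ctrls[] write-intercept bit were known to be set already at this point,
the amd_vpmu_set_msr_bitmap() loop could shrink to:

    for ( i = 0; i < num_counters; i++ )
    {
        svm_clear_msr_intercept(v, counters[i], MSR_RW);
        /* The ctrls[i] write intercept is assumed to be set already,
         * so only read accesses need opening up. */
        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
    }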

> 
>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -288,23 +288,34 @@ svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
>>       return msr_bit;
>>   }
>>   
>> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
>> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
> 
> Can the last parameter become "unsigned int", please?
> 
>>   {
>> -    unsigned long *msr_bit;
>> -    const struct domain *d = v->domain;
>> +    unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
>> +
>> +    if ( msr_bit == NULL )
>> +        return;
>>   
>> -    msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
>> -    BUG_ON(msr_bit == NULL);
> 
> The conversion from BUG_ON() to "return" needs explanation; I don't see
> why that's warranted here. From all I can tell the case is impossible
> due to the way construct_vmcb() works, and hence BUG_ON() is appropriate
> (and personally I would also be fine with no check at all, provided I'm
> not overlooking anything).

It was my mistake; I should not have removed it.

> 
>> @@ -312,8 +323,10 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
>>   {
>>       struct vcpu *v;
>>   
>> -    for_each_vcpu ( d, v )
>> -        svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
>> +    for_each_vcpu ( d, v ) {
> 
> Nit: Brace placement.

Sorry. I will fix.

> 
>> @@ -595,22 +608,31 @@ static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
>>       vmcb_set_exception_intercepts(vmcb, bitmap);
>>   
>>       /* Give access to MSR_SPEC_CTRL if the guest has been told about it. */
>> -    svm_intercept_msr(v, MSR_SPEC_CTRL,
>> -                      cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
>> +    if ( cp->extd.ibrs )
>> +        svm_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
>> +    else
>> +        svm_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
>>   
>>       /*
>>        * Always trap write accesses to VIRT_SPEC_CTRL in order to cache the guest
>>        * setting and avoid having to perform a rdmsr on vmexit to get the guest
>>        * setting even if VIRT_SSBD is offered to Xen itself.
>>        */
>> -    svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
>> -                      cp->extd.virt_ssbd && cpu_has_virt_ssbd &&
>> -                      !cpu_has_amd_ssbd ?
>> -                      MSR_INTERCEPT_WRITE : MSR_INTERCEPT_RW);
>> +    if ( cp->extd.virt_ssbd && cpu_has_virt_ssbd && !cpu_has_amd_ssbd )
>> +    {
>> +        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_W);
>> +        svm_clear_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_R);
>> +    }
>> +    else
>> +    {
>> +        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_RW);
>> +    }
> 
> Preferably omit the braces for "else" here, just like you do above and ...

I added them for symmetry, since the first branch has them. I find it 
easier to follow, personally. I can omit them.

> 
>>       /* Give access to MSR_PRED_CMD if the guest has been told about it. */
>> -    svm_intercept_msr(v, MSR_PRED_CMD,
>> -                      cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
>> +    if ( cp->extd.ibpb )
>> +        svm_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
>> +    else
>> +        svm_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
> 
> ... here.
> 
>> --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>> +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>> @@ -585,13 +585,12 @@ void svm_destroy_vmcb(struct vcpu *v);
>>   
>>   void setup_vmcb_dump(void);
>>   
>> -#define MSR_INTERCEPT_NONE    0
>> -#define MSR_INTERCEPT_READ    1
>> -#define MSR_INTERCEPT_WRITE   2
>> -#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
>> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
>> -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
>> -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
>> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>> +void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>> +#define svm_disable_intercept_for_msr(v, msr) \
>> +    svm_clear_msr_intercept((v), (msr), MSR_RW)
>> +#define svm_enable_intercept_for_msr(v, msr) \
>> +    svm_set_msr_intercept((v), (msr), MSR_RW)
> 
> Please avoid excess parentheses. Also could you clarify why you retain
> these shorthands when you don't use them in the conversion that you're
>> doing (e.g. amd_vpmu_unset_msr_bitmap())? Are you intending them to go
> away down the road?

Ok.
I did not understand the question. Which shorthands?

> 
> Jan
Jan Beulich Feb. 28, 2023, 3:10 p.m. UTC | #3
On 28.02.2023 16:05, Xenia Ragiadakou wrote:
> On 2/28/23 16:20, Jan Beulich wrote:
>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>> This change aims to render the control interface of MSR intercepts identical
>>> between SVM and VMX code, so that the control of the MSR intercept in common
>>> code can be done through an hvm_funcs callback.
>>>
>>> Create two new functions:
>>> - svm_set_msr_intercept(), enables interception of read/write accesses to the
>>>    corresponding MSR, by setting the corresponding read/write bits in the MSRPM
>>>    based on the flags
>>> - svm_clear_msr_intercept(), disables interception of read/write accesses to
>>>    the corresponding MSR, by clearing the corresponding read/write bits in the
>>>    MSRPM based on the flags
>>
>> In how far is VMX'es present model better than SVM's? They both have pros and
>> cons, depending on the specific use. I'm not asking to do it the other way
>> around (at least not yet), I'd merely like to understand why we're going to
>> gain two new hooks (if I'm not mistaken) when we could also get away with
>> just one.
> 
> SVM approach always touches both read/write bits (either by setting or 
> clearing them). I thought that using the SVM approach on VMX could be 
> considered a functional change (because there are parts where VMX 
> assumes that a bit is already set or cleared and does not touch it).

As per my comment on the last patch, a question is whether both actually
need to become uniform. But if they do, then the new model should imo be
followed right away, and VMX's practice of simply leaving bits alone when
they are already in a known state should change accordingly.

>>> --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>> +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>> @@ -585,13 +585,12 @@ void svm_destroy_vmcb(struct vcpu *v);
>>>   
>>>   void setup_vmcb_dump(void);
>>>   
>>> -#define MSR_INTERCEPT_NONE    0
>>> -#define MSR_INTERCEPT_READ    1
>>> -#define MSR_INTERCEPT_WRITE   2
>>> -#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
>>> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
>>> -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
>>> -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
>>> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>> +void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>> +#define svm_disable_intercept_for_msr(v, msr) \
>>> +    svm_clear_msr_intercept((v), (msr), MSR_RW)
>>> +#define svm_enable_intercept_for_msr(v, msr) \
>>> +    svm_set_msr_intercept((v), (msr), MSR_RW)
>>
>> Please avoid excess parentheses. Also could you clarify why you retain
>> these shorthands when you don't use them in the conversion that you're
>> doing (e.g. amd_vpmu_unset_msr_bitmap())? Are you intending them to go
>> away down the road?
> 
> Ok.
> I did not understand the question. Which shorthands?

svm_disable_intercept_for_msr() and svm_enable_intercept_for_msr().

Jan
Xenia Ragiadakou Feb. 28, 2023, 3:17 p.m. UTC | #4
On 2/28/23 17:10, Jan Beulich wrote:
> On 28.02.2023 16:05, Xenia Ragiadakou wrote:
>> On 2/28/23 16:20, Jan Beulich wrote:
>>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>>> This change aims to render the control interface of MSR intercepts identical
>>>> between SVM and VMX code, so that the control of the MSR intercept in common
>>>> code can be done through an hvm_funcs callback.
>>>>
>>>> Create two new functions:
>>>> - svm_set_msr_intercept(), enables interception of read/write accesses to the
>>>>     corresponding MSR, by setting the corresponding read/write bits in the MSRPM
>>>>     based on the flags
>>>> - svm_clear_msr_intercept(), disables interception of read/write accesses to
>>>>     the corresponding MSR, by clearing the corresponding read/write bits in the
>>>>     MSRPM based on the flags
>>>
>>> In how far is VMX'es present model better than SVM's? They both have pros and
>>> cons, depending on the specific use. I'm not asking to do it the other way
>>> around (at least not yet), I'd merely like to understand why we're going to
>>> gain two new hooks (if I'm not mistaken) when we could also get away with
>>> just one.
>>
>> SVM approach always touches both read/write bits (either by setting or
>> clearing them). I thought that using the SVM approach on VMX could be
>> considered a functional change (because there are parts where VMX
>> assumes that a bit is already set or cleared and does not touch it).
> 
> As per my comment on the last patch a question is whether both actually
> need to become uniform. But if they do, then the new model should imo
> be followed right away, and that VMX'es simply leaving bits alone when
> already in known state.

But the SVM implementation does not make that assumption. I can do it and 
remove the "no functional change" part.

> 
>>>> --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>>> +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>>> @@ -585,13 +585,12 @@ void svm_destroy_vmcb(struct vcpu *v);
>>>>    
>>>>    void setup_vmcb_dump(void);
>>>>    
>>>> -#define MSR_INTERCEPT_NONE    0
>>>> -#define MSR_INTERCEPT_READ    1
>>>> -#define MSR_INTERCEPT_WRITE   2
>>>> -#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
>>>> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
>>>> -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
>>>> -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
>>>> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>>> +void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>>> +#define svm_disable_intercept_for_msr(v, msr) \
>>>> +    svm_clear_msr_intercept((v), (msr), MSR_RW)
>>>> +#define svm_enable_intercept_for_msr(v, msr) \
>>>> +    svm_set_intercept_msr((v), (msr), MSR_RW)
>>>
>>> Please avoid excess parentheses. Also could you clarify why you retain
>>> these shorthands when you don't use them in the conversion that you're
>>> doing (e.g. amd_vpmu_unset_msr_bitmap())? Are you intending them to go
>>> away down the road?
>>
>> Ok.
>> I did not understand the question. Which shorthands?
> 
> svm_disable_intercept_for_msr() and svm_enable_intercept_for_msr().

Are you suggesting to replace svm_{en,dis}able_intercept_for_msr() with 
svm_{set,clear}_msr_intercept()? svm_disable_intercept_for_msr() is 
used in svm.c and vmcb.c.
Jan Beulich Feb. 28, 2023, 4:14 p.m. UTC | #5
On 28.02.2023 16:17, Xenia Ragiadakou wrote:
> On 2/28/23 17:10, Jan Beulich wrote:
>> On 28.02.2023 16:05, Xenia Ragiadakou wrote:
>>> On 2/28/23 16:20, Jan Beulich wrote:
>>>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>>>> --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>>>> +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
>>>>> @@ -585,13 +585,12 @@ void svm_destroy_vmcb(struct vcpu *v);
>>>>>    
>>>>>    void setup_vmcb_dump(void);
>>>>>    
>>>>> -#define MSR_INTERCEPT_NONE    0
>>>>> -#define MSR_INTERCEPT_READ    1
>>>>> -#define MSR_INTERCEPT_WRITE   2
>>>>> -#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
>>>>> -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
>>>>> -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
>>>>> -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
>>>>> +void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>>>> +void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
>>>>> +#define svm_disable_intercept_for_msr(v, msr) \
>>>>> +    svm_clear_msr_intercept((v), (msr), MSR_RW)
>>>>> +#define svm_enable_intercept_for_msr(v, msr) \
>>>>> +    svm_set_msr_intercept((v), (msr), MSR_RW)
>>>>
>>>> Please avoid excess parentheses. Also could you clarify why you retain
>>>> these shorthands when you don't use them in the conversion that you're
>>>> doing (e.g. amd_vpmu_unset_msr_bitmap())? Are you intending them to go
>>>> away down the road?
>>>
>>> Ok.
>>> I did not understand the question. Which shorthands?
>>
>> svm_disable_intercept_for_msr() and svm_enable_intercept_for_msr().
> 
> Are you suggesting to replace svm_{en,dis}able_intercept_for_msr() with 
> svm_{ser,clear}_msr_intercept()?  svm_disable_intercept_for_msr() is 
> used in svm.c and vmcb.c.

I'm suggesting one of two possible routes leading to consistent use:
1) drop the shorthands
2) retain the shorthands and don't ever open-code them
Depending on which route we want to go, either your code adjustments in
this regard are fine (and only a remark would want adding that they're
retained until the remaining uses can be cleaned up), or you want to use
the shorthands in your changes wherever possible.
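
As a sketch of route 2 — purely illustrative, using only the shorthands as
declared in vmcb.h — the amd_vpmu_unset_msr_bitmap() loop would become:

    for ( i = 0; i < num_counters; i++ )
    {
        /* Expands to svm_set_msr_intercept(v, ..., MSR_RW). */
        svm_enable_intercept_for_msr(v, counters[i]);
        svm_enable_intercept_for_msr(v, ctrls[i]);
    }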

Jan

Patch

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 9df739aa3f..ed6706959e 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -165,8 +165,9 @@  static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
+        svm_clear_msr_intercept(v, counters[i], MSR_RW);
+        svm_set_msr_intercept(v, ctrls[i], MSR_W);
+        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
     }
 
     msr_bitmap_on(vpmu);
@@ -179,8 +180,8 @@  static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
-        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
+        svm_set_msr_intercept(v, counters[i], MSR_RW);
+        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
     }
 
     msr_bitmap_off(vpmu);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a43bcf2e92..eb144272f4 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -288,23 +288,34 @@  svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
     return msr_bit;
 }
 
-void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
+void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
 {
-    unsigned long *msr_bit;
-    const struct domain *d = v->domain;
+    unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
+
+    if ( msr_bit == NULL )
+        return;
 
-    msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
-    BUG_ON(msr_bit == NULL);
     msr &= 0x1fff;
 
-    if ( flags & MSR_INTERCEPT_READ )
+    if ( flags & MSR_R )
          __set_bit(msr * 2, msr_bit);
-    else if ( !monitored_msr(d, msr) )
-         __clear_bit(msr * 2, msr_bit);
-
-    if ( flags & MSR_INTERCEPT_WRITE )
+    if ( flags & MSR_W )
         __set_bit(msr * 2 + 1, msr_bit);
-    else if ( !monitored_msr(d, msr) )
+}
+
+void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
+{
+    unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
+
+    if ( msr_bit == NULL )
+        return;
+
+    msr &= 0x1fff;
+
+    if ( monitored_msr(v->domain, msr) )
+        return;
+
+    if ( flags & MSR_R )
+        __clear_bit(msr * 2, msr_bit);
+    if ( flags & MSR_W )
         __clear_bit(msr * 2 + 1, msr_bit);
 }
 
@@ -312,8 +323,10 @@  static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     struct vcpu *v;
 
-    for_each_vcpu ( d, v )
-        svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
+    for_each_vcpu ( d, v ) {
+        svm_set_msr_intercept(v, msr, MSR_W);
+        svm_clear_msr_intercept(v, msr, MSR_R);
+    }
 }
 
 static void svm_save_dr(struct vcpu *v)
@@ -330,10 +343,10 @@  static void svm_save_dr(struct vcpu *v)
 
     if ( v->domain->arch.cpuid->extd.dbext )
     {
-        svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_RW);
-        svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_RW);
-        svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
-        svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
+        svm_set_msr_intercept(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_RW);
+        svm_set_msr_intercept(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_RW);
+        svm_set_msr_intercept(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_RW);
+        svm_set_msr_intercept(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_RW);
 
         rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
         rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
@@ -361,10 +374,10 @@  static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
 
     if ( v->domain->arch.cpuid->extd.dbext )
     {
-        svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+        svm_clear_msr_intercept(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_RW);
+        svm_clear_msr_intercept(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_RW);
+        svm_clear_msr_intercept(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_RW);
+        svm_clear_msr_intercept(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_RW);
 
         wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
         wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
@@ -595,22 +608,31 @@  static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
     vmcb_set_exception_intercepts(vmcb, bitmap);
 
     /* Give access to MSR_SPEC_CTRL if the guest has been told about it. */
-    svm_intercept_msr(v, MSR_SPEC_CTRL,
-                      cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+    if ( cp->extd.ibrs )
+        svm_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
+    else
+        svm_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
 
     /*
      * Always trap write accesses to VIRT_SPEC_CTRL in order to cache the guest
      * setting and avoid having to perform a rdmsr on vmexit to get the guest
      * setting even if VIRT_SSBD is offered to Xen itself.
      */
-    svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
-                      cp->extd.virt_ssbd && cpu_has_virt_ssbd &&
-                      !cpu_has_amd_ssbd ?
-                      MSR_INTERCEPT_WRITE : MSR_INTERCEPT_RW);
+    if ( cp->extd.virt_ssbd && cpu_has_virt_ssbd && !cpu_has_amd_ssbd )
+    {
+        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_W);
+        svm_clear_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_R);
+    }
+    else
+    {
+        svm_set_msr_intercept(v, MSR_VIRT_SPEC_CTRL, MSR_RW);
+    }
 
     /* Give access to MSR_PRED_CMD if the guest has been told about it. */
-    svm_intercept_msr(v, MSR_PRED_CMD,
-                      cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+    if ( cp->extd.ibpb )
+        svm_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
+    else
+        svm_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
 }
 
 void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 43d3fc2498..f853e2f3e8 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -261,6 +261,10 @@  extern struct hvm_function_table hvm_funcs;
 extern bool_t hvm_enabled;
 extern s8 hvm_port80_allowed;
 
+#define MSR_R       BIT(0, U)
+#define MSR_W       BIT(1, U)
+#define MSR_RW      (MSR_W | MSR_R)
+
 extern const struct hvm_function_table *start_svm(void);
 extern const struct hvm_function_table *start_vmx(void);
 
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index e87728fa81..ed2e55e5cf 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -585,13 +585,12 @@  void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-#define MSR_INTERCEPT_NONE    0
-#define MSR_INTERCEPT_READ    1
-#define MSR_INTERCEPT_WRITE   2
-#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
-void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
-#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
-#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
+void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+#define svm_disable_intercept_for_msr(v, msr) \
+    svm_clear_msr_intercept((v), (msr), MSR_RW)
+#define svm_enable_intercept_for_msr(v, msr) \
+    svm_set_msr_intercept((v), (msr), MSR_RW)
 
 /*
  * VMCB accessor functions.