
[v3,1/9] KVM: x86: Rename cr4_reserved/rsvd_* variables to be more readable

Message ID: 20221209044557.1496580-2-robert.hu@linux.intel.com
State: New, archived
Series: Linear Address Masking (LAM) KVM Enabling

Commit Message

Robert Hoo Dec. 9, 2022, 4:45 a.m. UTC
kvm_vcpu_arch::cr4_guest_owned_bits and kvm_vcpu_arch::cr4_guest_rsvd_bits
look confusing. Rename the latter to cr4_host_rsvd_bits, because it in fact
describes the effective host-reserved CR4 bits from the vCPU's perspective.

Meanwhile, rename other related variables/macros to be more descriptive:
* CR4_RESERVED_BITS --> CR4_HOST_RESERVED_BITS, which describes the host's
bare-metal CR4 reserved bits.

* cr4_reserved_bits --> cr4_kvm_reserved_bits, which describes
CR4_HOST_RESERVED_BITS + !kvm_cap_has() = KVM-level CR4 reserved bits.

* __cr4_reserved_bits() --> __cr4_calc_reserved_bits(), which calculates
the effective CR4 reserved bits at the KVM or VM level, depending on the
corresponding x_cpu_has() input.

Thus, by these renames, the hierarchical relations of those reserved CR4
bits are clearer.

Just renames, no functional changes intended.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |  4 ++--
 arch/x86/kvm/cpuid.c            |  4 ++--
 arch/x86/kvm/vmx/vmx.c          |  2 +-
 arch/x86/kvm/x86.c              | 12 ++++++------
 arch/x86/kvm/x86.h              |  4 ++--
 5 files changed, 13 insertions(+), 13 deletions(-)
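
For orientation, here is a standalone sketch of the layering these renames
are meant to convey: architecture/host-reserved bits, plus KVM-level
restrictions, plus per-vCPU restrictions. The mask values and helper names
below are invented for illustration; only the shape mirrors the KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_CR4_PKE (1ull << 22)	/* example feature bit */

/* Level 1: bits the bare-metal architecture reserves (illustrative value). */
#define CR4_HOST_RESERVED_BITS (~((1ull << 25) - 1))

/* Level 2: host-reserved bits plus features this KVM build can't expose. */
static uint64_t kvm_level_reserved(bool kvm_has_pke)
{
	uint64_t bits = CR4_HOST_RESERVED_BITS;

	if (!kvm_has_pke)
		bits |= X86_CR4_PKE;
	return bits;
}

/* Level 3: KVM-level bits plus features hidden from this vCPU's CPUID. */
static uint64_t vcpu_level_reserved(bool kvm_has_pke, bool guest_has_pke)
{
	uint64_t bits = kvm_level_reserved(kvm_has_pke);

	if (!guest_has_pke)
		bits |= X86_CR4_PKE;
	return bits;
}

int main(void)
{
	/* KVM supports PKE, but this vCPU's CPUID hides it: CR4.PKE is
	 * reserved at the vCPU level but not at the KVM level. */
	printf("kvm  rsvd: %#llx\n", (unsigned long long)kvm_level_reserved(true));
	printf("vcpu rsvd: %#llx\n", (unsigned long long)vcpu_level_reserved(true, false));
	return 0;
}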

Comments

Binbin Wu Dec. 28, 2022, 3:37 a.m. UTC | #1
On 12/9/2022 12:45 PM, Robert Hoo wrote:
> kvm_vcpu_arch::cr4_guest_owned_bits and kvm_vcpu_arch::cr4_guest_rsvd_bits
> look confusing. Rename the latter to cr4_host_rsvd_bits, because it in fact
> describes the effective host-reserved CR4 bits from the vCPU's perspective.

IMO, the current name cr4_guest_rsvd_bits is OK because it shows that
these bits are reserved bits from the point of view of the guest.

Changing to *host* is OK, but it doesn't seem easier to understand.


> [...]
Robert Hoo Dec. 29, 2022, 1:42 a.m. UTC | #2
On Wed, 2022-12-28 at 11:37 +0800, Binbin Wu wrote:
> On 12/9/2022 12:45 PM, Robert Hoo wrote:
> > kvm_vcpu_arch::cr4_guest_owned_bits and
> > kvm_vcpu_arch::cr4_guest_rsvd_bits look confusing. Rename the latter
> > to cr4_host_rsvd_bits, because it in fact describes the effective
> > host-reserved CR4 bits from the vCPU's perspective.
> 
> IMO, the current name cr4_guest_rsvd_bits is OK because it shows that
> these bits are reserved bits from the point of view of the guest.

Actually, it's cr4_guest_owned_bits that is from the perspective of the guest.

> 
cr4_guest_owned_bits and cr4_guest_rsvd_bits together look quite
confusing. The first reads like "the guest owns these CR4 bits"; the
latter reads like "the guest reserves these bits". My first reaction on
seeing them is: hey, what's the difference between guest-owned and
guest-reserved?

Then, looking at their calculations, we find that cr4_guest_owned_bits is
derived from cr4_guest_rsvd_bits, roughly by a "~" (complement)
relationship.

vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
			~vcpu->arch.cr4_guest_rsvd_bits;

The set bits in cr4_guest_owned_bits mean the guest owns them;
the set bits in cr4_guest_rsvd_bits mean the HOST owns them. So the ~
calculation follows naturally.

The fuller hierarchical relationships among these macros/structure
elements surrounding CR4 virtualization can be found below.

(P.S. more story: this isn't the first time I've dug into this code. I
had gotten clear on the relationships long ago, but when I came back to
it this time, I got confused by the names again. Therefore, I would
rather cook this patch now to avoid a next time ;-))
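
To make that complement relationship concrete, here is a toy calculation
(the mask values below are invented; only the formula matches
set_cr4_guest_host_mask()):

#include <stdio.h>

#define KVM_POSSIBLE_CR4_GUEST_BITS 0x00ff0000ul	/* invented filter */

int main(void)
{
	unsigned long cr4_guest_rsvd_bits = 0x00f00000ul;	/* invented */
	unsigned long cr4_guest_owned_bits;

	/* Same formula as set_cr4_guest_host_mask(): the guest owns
	 * whatever passes the filter and is not host-reserved. */
	cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
			       ~cr4_guest_rsvd_bits;

	/* The VMCS guest/host mask is the complement of the owned bits. */
	printf("guest-owned: %#lx\n", cr4_guest_owned_bits);	/* 0xf0000 */
	printf("host mask:   %#lx\n", ~cr4_guest_owned_bits);
	return 0;
}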

> Changing to *host* is OK, but it doesn't seem easier to understand.
> 
Perhaps "host_reserved" isn't the best name; other suggestions are
welcome. But guest_owned vs. guest_rsvd is really confusing, or even
misleading.
> [...]
Sean Christopherson Jan. 7, 2023, 12:35 a.m. UTC | #3
On Thu, Dec 29, 2022, Robert Hoo wrote:
> On Wed, 2022-12-28 at 11:37 +0800, Binbin Wu wrote:
> > On 12/9/2022 12:45 PM, Robert Hoo wrote:
> > > kvm_vcpu_arch::cr4_guest_owned_bits and
> > > kvm_vcpu_arch::cr4_guest_rsvd_bits look confusing. Rename the latter
> > > to cr4_host_rsvd_bits, because it in fact describes the effective
> > > host-reserved CR4 bits from the vCPU's perspective.
> > 
> > IMO, the current name cr4_guest_rsvd_bits is OK because it shows that these
> > bits are reserved bits from the point of view of the guest.
> 
> Actually, it's cr4_guest_owned_bits that is from the perspective of the guest.

No, cr4_guest_owned_bits is KVM's view of things.  It tracks which bits have
effectively been passed through to the guest and so need to be read out of the
VMCS after running the vCPU.
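
Concretely, that refresh looks roughly like the following (a paraphrased
sketch of the VCPU_EXREG_CR4 handling in vmx_cache_reg(); the standalone
function and its parameters here are stand-ins, not the upstream
signature):

/*
 * Bits the guest owns can change without a VM-exit, so after running the
 * vCPU they must be re-read from the VMCS; everything else stays as KVM
 * last shadowed it.
 */
static unsigned long refresh_cached_cr4(unsigned long cached_cr4,
					unsigned long guest_owned_bits,
					unsigned long vmcs_guest_cr4)
{
	cached_cr4 &= ~guest_owned_bits;
	cached_cr4 |= vmcs_guest_cr4 & guest_owned_bits;
	return cached_cr4;
}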

> cr4_guest_owned_bits and cr4_guest_rsvd_bits together look quite
> confusing.

I disagree, KVM (and the SDM and the APM) uses "reserved" or "rsvd" all over the
place to indicate reserved bits/values/fields.

> > > * cr4_reserved_bits --> cr4_kvm_reserved_bits, which describes

Hard no.  They aren't just KVM reserved, many of those bits are reserved by
hardware, which is 100% dependent on the host.

> > > CR4_HOST_RESERVED_BITS + !kvm_cap_has() = KVM-level CR4 reserved
> > > bits.
> > > 
> > > * __cr4_reserved_bits() --> __cr4_calc_reserved_bits(), which
> > > calculates the effective CR4 reserved bits at the KVM or VM level,
> > > depending on the corresponding x_cpu_has() input.
> > > 
> > > Thus, by these renames, the hierarchical relations of those reserved
> > > CR4 bits are clearer.

Sorry, but I don't like any of the changes in this patch.  At best, some of the
changes are a wash (neither better nor worse), and in that case the churn,
however minor, isn't worth it.
Robert Hoo Jan. 7, 2023, 1:30 p.m. UTC | #4
On Sat, 2023-01-07 at 00:35 +0000, Sean Christopherson wrote:
> On Thu, Dec 29, 2022, Robert Hoo wrote:
> > On Wed, 2022-12-28 at 11:37 +0800, Binbin Wu wrote:
> > > On 12/9/2022 12:45 PM, Robert Hoo wrote:
> > > > kvm_vcpu_arch::cr4_guest_owned_bits and
> > > > kvm_vcpu_arch::cr4_guest_rsvd_bits look confusing. Rename the
> > > > latter to cr4_host_rsvd_bits, because it in fact describes the
> > > > effective host-reserved CR4 bits from the vCPU's perspective.
> > > 
> > > IMO, the current name cr4_guest_rsvd_bits is OK because it shows
> > > that these bits are reserved bits from the point of view of the
> > > guest.
> > 
> > Actually, it's cr4_guest_owned_bits that is from the perspective of
> > the guest.
> 
> No, cr4_guest_owned_bits is KVM's view of things.  

That's all right; perhaps my expression wasn't very accurate. I should
have said "cr4_guest_owned_bits stands on the guest's point of view: as
it reads, the guest owns these (set) bits". Whereas "cr4_guest_rsvd_bits"
doesn't read literally as the words suggest: its set bits don't mean "the
guest reserved these bits" but the opposite, those set bits are reserved
by the host:

set_cr4_guest_host_mask()
{
	...
	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
					  ~vcpu->arch.cr4_guest_rsvd_bits;
	/*
	 * cr4_guest_owned_bits = (~host-owned bits) &
	 * KVM_POSSIBLE_CR4_GUEST_BITS (the filter); cr4_guest_owned_bits
	 * and cr4_guest_rsvd_bits are in a generally complementary
	 * relationship.
	 */
	...

	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
	/*
	 * The complement of the guest-owned bits is what's effectively
	 * written to CR4_GUEST_HOST_MASK.
	 */
}

This code implements SDM 25.6.6 "Guest/Host Masks and Read Shadows for
CR0 and CR4":
"In general, bits set to 1 in a guest/host mask correspond to bits
“owned” by the host."
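
A simplified model of those SDM semantics (illustrative only; real
hardware behavior also involves exit controls and more detail): for a
host-owned bit, guest reads see the read shadow, and a guest write that
would change a host-owned bit away from its shadow value causes a
VM-exit.

struct cr4_virt {
	unsigned long guest_host_mask;	/* 1 = bit owned by the host */
	unsigned long read_shadow;	/* guest-visible value of host-owned bits */
	unsigned long guest_cr4;	/* the VMCS guest CR4 field */
};

/* Guest MOV from CR4: host-owned bits come from the read shadow. */
static unsigned long guest_read_cr4(const struct cr4_virt *v)
{
	return (v->read_shadow & v->guest_host_mask) |
	       (v->guest_cr4 & ~v->guest_host_mask);
}

/* Guest MOV to CR4: returns 1 (VM-exit) if a host-owned bit would change
 * relative to the read shadow; otherwise guest-owned bits take effect. */
static int guest_write_cr4(struct cr4_virt *v, unsigned long val)
{
	if ((val ^ v->read_shadow) & v->guest_host_mask)
		return 1;
	v->guest_cr4 = (v->guest_cr4 & v->guest_host_mask) |
		       (val & ~v->guest_host_mask);
	return 0;
}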

> It tracks which bits have effectively been passed through to the guest
> and so need to be read out of the VMCS after running the vCPU.

Yes, as above, ~cr4_guest_owned_bits is the effective final guest/host
CR4 mask that's written to VMCS.CR4_GUEST_HOST_MASK.
> 
> > cr4_guest_owned_bits and cr4_guest_rsvd_bits together look quite
> > confusing.
> 
> I disagree, KVM (and the SDM and the APM) uses "reserved" or "rsvd" all
> over the place to indicate reserved bits/values/fields.

I don't object to the word "reserved". I was objecting that
"cr4_guest_rsvd_bits" reads as guest-reserved, when it actually contains
host-reserved bits. ;)
> 
> > > > * cr4_reserved_bits --> cr4_kvm_reserved_bits, which describes
> 
> Hard no.  They aren't just KVM reserved, many of those bits are reserved
> by hardware, which is 100% dependent on the host.

That's right. KVM stands on top of HW, then the host, doesn't it? ;)
My interpretation, which is also the theme of this patch, is that those
xxx_cr4_reserved consts/variables form layered relationships.
> 
> > > > [...]
> 
> Sorry, but I don't like any of the changes in this patch.  At best, some
> of the changes are a wash (neither better nor worse), and in that case
> the churn, however minor, isn't worth it.

That's all right; it's regretful that I couldn't convince you. This patch
just reflects the interpretation I formed when I got confused by the
code, then dug into it and the SDM. Anyway, it's not a LAM necessity;
I'll drop this patch in the next version.
Xiaoyao Li Jan. 8, 2023, 2:18 p.m. UTC | #5
On 1/7/2023 9:30 PM, Robert Hoo wrote:
> On Sat, 2023-01-07 at 00:35 +0000, Sean Christopherson wrote:
>> On Thu, Dec 29, 2022, Robert Hoo wrote:
>>> [...]
>>>
>>> Actually, it's cr4_guest_owned_bits that is from the perspective of
>>> the guest.
>>
>> No, cr4_guest_owned_bits is KVM's view of things.
> 
> That's all right; perhaps my expression wasn't very accurate. I should
> have said "cr4_guest_owned_bits stands on the guest's point of view: as
> it reads, the guest owns these (set) bits". Whereas "cr4_guest_rsvd_bits"
> doesn't read literally as the words suggest: its set bits don't mean "the
> guest reserved these bits" but the opposite, those set bits are reserved
> by the host:
>

I think you can interpret guest_rsvd_bits as bits reserved *for* the
guest instead of *by* the guest.
Robert Hoo Jan. 9, 2023, 3:07 a.m. UTC | #6
On Sun, 2023-01-08 at 22:18 +0800, Xiaoyao Li wrote:
> On 1/7/2023 9:30 PM, Robert Hoo wrote:
> > On Sat, 2023-01-07 at 00:35 +0000, Sean Christopherson wrote:
> > > On Thu, Dec 29, 2022, Robert Hoo wrote:
> > > > [...]
> > > > Actually, it's cr4_guest_owned_bits that is from the perspective
> > > > of the guest.
> > > 
> > > No, cr4_guest_owned_bits is KVM's view of things.
> > 
> > That's all right. Perhaps my expression wasn't very accurate.
> > Perhaps I
> > would have said "cr4_guest_owned_bits stands on guest's points, as
> > it
> > reads, guest owns these (set) bits". Whereas, "cr4_guest_rsvd_bits"
> > doesn't literally as the word reads, its set bits doesn't mean
> > "guest
> > reserved these bits" but the opposite, those set bits are reserved
> > by
> > host:
> > 
> 
> I think you can interpret guest_rsvd_bits as bits reserved *for* the
> guest instead of *by* the guest.
> 
I think you mean reserved-by guest. OK, I buy in, along with Binbin's
interpretation.

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f05ebaa26f0f..3c736e00b6b1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -114,7 +114,7 @@ 
 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
 			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 
-#define CR4_RESERVED_BITS                                               \
+#define CR4_HOST_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
@@ -671,7 +671,7 @@  struct kvm_vcpu_arch {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
-	unsigned long cr4_guest_rsvd_bits;
+	unsigned long cr4_host_rsvd_bits;
 	unsigned long cr8;
 	u32 host_pkru;
 	u32 pkru;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c92c49a0b35b..01e2b93ef563 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -352,8 +352,8 @@  static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 
 	kvm_pmu_refresh(vcpu);
-	vcpu->arch.cr4_guest_rsvd_bits =
-	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
+	vcpu->arch.cr4_host_rsvd_bits =
+	    __cr4_calc_reserved_bits(guest_cpuid_has, vcpu);
 
 	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
 						    vcpu->arch.cpuid_nent));
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 63247c57c72c..cfa06c7c062e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4250,7 +4250,7 @@  void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 	struct kvm_vcpu *vcpu = &vmx->vcpu;
 
 	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
-					  ~vcpu->arch.cr4_guest_rsvd_bits;
+					  ~vcpu->arch.cr4_host_rsvd_bits;
 	if (!enable_ept) {
 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 69227f77b201..eb1f2c20e19e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,7 +108,7 @@  u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
-static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+static u64 __read_mostly cr4_kvm_reserved_bits = CR4_HOST_RESERVED_BITS;
 
 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
 
@@ -1102,10 +1102,10 @@  EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
 
 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	if (cr4 & cr4_reserved_bits)
+	if (cr4 & cr4_kvm_reserved_bits)
 		return false;
 
-	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
+	if (cr4 & vcpu->arch.cr4_host_rsvd_bits)
 		return false;
 
 	return true;
@@ -12290,7 +12290,7 @@  int kvm_arch_hardware_setup(void *opaque)
 		kvm_caps.supported_xss = 0;
 
 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
-	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
+	cr4_kvm_reserved_bits = __cr4_calc_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
 #undef __kvm_cpu_cap_has
 
 	if (kvm_caps.has_tsc_control) {
@@ -12323,8 +12323,8 @@  int kvm_arch_check_processor_compat(void *opaque)
 
 	WARN_ON(!irqs_disabled());
 
-	if (__cr4_reserved_bits(cpu_has, c) !=
-	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
+	if (__cr4_calc_reserved_bits(cpu_has, c) !=
+	    __cr4_calc_reserved_bits(cpu_has, &boot_cpu_data))
 		return -EIO;
 
 	return ops->check_processor_compatibility();
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 829d3134c1eb..d92e580768e5 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -452,9 +452,9 @@  bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 #define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
 #define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */
 
-#define __cr4_reserved_bits(__cpu_has, __c)             \
+#define __cr4_calc_reserved_bits(__cpu_has, __c)             \
 ({                                                      \
-	u64 __reserved_bits = CR4_RESERVED_BITS;        \
+	u64 __reserved_bits = CR4_HOST_RESERVED_BITS;        \
                                                         \
 	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
 		__reserved_bits |= X86_CR4_OSXSAVE;     \