[v1,1/2] KVM: VMX: cleanup EPTP definitions

Message ID 20170810133512.13442-2-david@redhat.com (mailing list archive)
State New, archived

Commit Message

David Hildenbrand Aug. 10, 2017, 1:35 p.m. UTC
Don't use shifts, tag them correctly as EPTP and use better matching
names (PWL vs. GAW).

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 arch/x86/include/asm/vmx.h | 11 ++++++-----
 arch/x86/kvm/vmx.c         | 25 +++++++++++--------------
 2 files changed, 17 insertions(+), 19 deletions(-)
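
For context, the rename follows the SDM's own terminology: bits 5:3 of the
EPTP hold the "EPT page-walk length" (PWL), encoded as length - 1, whereas
the old names borrowed "guest address width" (GAW). Distilled from the diff
below, construct_eptp() goes from building the field with a shift,

	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;

to pre-shifted constants:

	u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;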

Comments

Bandan Das Aug. 10, 2017, 7:38 p.m. UTC | #1
David Hildenbrand <david@redhat.com> writes:

> Don't use shifts, tag them correctly as EPTP and use better matching
> names (PWL vs. GAW).

Fair enough. This reminds me, I also don't particularly like the vmx->nested.nested...
accesses that are spread all throughout :)

> [...]
David Hildenbrand Aug. 10, 2017, 7:53 p.m. UTC | #2
On 10.08.2017 21:38, Bandan Das wrote:
> David Hildenbrand <david@redhat.com> writes:
> 
>> Don't use shifts, tag them correctly as EPTP and use better matching
>> names (PWL vs. GAW).
> 
> Fair enough. This reminds me, I also don't particularly like the vmx->nested.nested...
> accesses that are spread all throughout :)

Me too. I've got a bunch of patches lying around that abstract VMX features
into something like X86 features.

#define VMX_FEATURE_BASIC_INOUT                (0*32 + 18)

#define VMX_FEATURE_PREEMPTION_TIMER           (1*32 + 6)
#define VMX_FEATURE_POSTED_INTR                (1*32 + 7)

#define VMX_FEATURE_TPR_SHADOW                 (2*32 + 21)
#define VMX_FEATURE_MSR_BITMAP                 (2*32 + 28)
#define VMX_FEATURE_SECONDARY_EXEC_CTRLS       (2*32 + 31)

#define VMX_FEATURE_VIRTUALIZE_APIC_ACCESSES   (3*32 + 0)
...
#define VMX_FEATURE_EXIT_LOAD_IA32_PAT         (4*32 + 19)
...
#define VMX_FEATURE_ENTRY_LOAD_IA32_PAT        (5*32 + 14)
...
#define VMX_FEATURE_EPT_EXECUTE_ONLY           (6*32 + 0)
...
#define VMX_FEATURE_INVVPID                    (7*32 + 0)
...

/* whether our CPU supports a certain VMX feature */
bool vmx_has(unsigned int feature);
  -> replaces most cpu_has_vmx_*

/* whether our nested CPU is allowed to use a certain VMX feature */
bool nested_vmx_has(struct kvm_vcpu *vcpu, unsigned int feature);
  -> replaces most checks against
     vmx->nested.nested_vmx_secondary_ctls_high and friends

/* whether L1 enabled a certain VMX feature for L2 */
bool nested_cpu_has(struct vmcs12 *vmcs12, unsigned int feature);
  -> merges/replaces most checks like nested_cpu_has/nested_cpu_has2 ...
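
For illustration, a minimal sketch of how the first lookup could be wired
up (hypothetical names, modeled on the x86 feature-word scheme; not taken
from any posted patch):

/* feature bits are packed into 32-bit words, so the (word*32 + bit)
 * defines above index directly into the array */
#define NVMXFEATWORDS 8

struct vmx_feature_words {
	u32 words[NVMXFEATWORDS];	/* filled once from the VMX capability MSRs */
};

static struct vmx_feature_words host_vmx_features;

/* whether our CPU supports a certain VMX feature */
static inline bool vmx_has(unsigned int feature)
{
	return host_vmx_features.words[feature / 32] & (1u << (feature % 32));
}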


I am not yet happy with the end result. Is anybody interested, or does
anyone have any suggestions?
David Hildenbrand Aug. 10, 2017, 8:58 p.m. UTC | #3
On 10.08.2017 15:35, David Hildenbrand wrote:
> Don't use shifts, tag them correctly as EPTP and use better matching
> names (PWL vs. GAW).
> 
> [...]
> -#define VMX_EPT_DEFAULT_GAW			3
> -#define VMX_EPT_MAX_GAW				0x4
>  #define VMX_EPT_MT_EPTE_SHIFT			3
> -#define VMX_EPT_GAW_EPTP_SHIFT			3
> -#define VMX_EPT_AD_ENABLE_BIT			(1ull << 6)
> -#define VMX_EPT_DEFAULT_MT			0x6ull
> +#define VMX_EPTP_PWL_MASK			0x38ull
> +#define VMX_EPTP_PWL_4				0x38ull

stupid typo, VMX_EPTP_PWL_4 should be 0x18ull (otherwise you'll get a
kernel panic ...)
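
For reference, a minimal sketch of the EPTP low-bit layout (as defined in
the Intel SDM) showing why 0x38ull is wrong here:

/*
 * EPTP low bits (Intel SDM, extended-page-table pointer format):
 *   bits 2:0  EPT paging-structure memory type (0 = UC, 6 = WB)
 *   bits 5:3  EPT page-walk length, encoded as (length - 1)
 *   bit  6    enable accessed/dirty flags
 */
#define VMX_EPTP_PWL_MASK	0x38ull		/* bits 5:3 all set */
#define VMX_EPTP_PWL_4		(3ull << 3)	/* (4 - 1) << 3 == 0x18ull */

/*
 * 0x38ull would instead encode a page-walk length of 8, which the CPU
 * rejects at VM entry -- hence the panic mentioned above.
 */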

Patch

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 5f63a2e..929b3fc 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -468,12 +468,13 @@ enum vmcs_field {
 #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT      (1ull << 10) /* (42 - 32) */
 #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT   (1ull << 11) /* (43 - 32) */
 
-#define VMX_EPT_DEFAULT_GAW			3
-#define VMX_EPT_MAX_GAW				0x4
 #define VMX_EPT_MT_EPTE_SHIFT			3
-#define VMX_EPT_GAW_EPTP_SHIFT			3
-#define VMX_EPT_AD_ENABLE_BIT			(1ull << 6)
-#define VMX_EPT_DEFAULT_MT			0x6ull
+#define VMX_EPTP_PWL_MASK			0x38ull
+#define VMX_EPTP_PWL_4				0x38ull
+#define VMX_EPTP_AD_ENABLE_BIT			(1ull << 6)
+#define VMX_EPTP_MT_MASK			0x7ull
+#define VMX_EPTP_MT_WB				0x6ull
+#define VMX_EPTP_MT_UC				0x0ull
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c483f99..f6638ed 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4300,14 +4300,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 {
-	u64 eptp;
+	u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
 
 	/* TODO write the value reading from MSR */
-	eptp = VMX_EPT_DEFAULT_MT |
-		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
 	if (enable_ept_ad_bits &&
 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
-		eptp |= VMX_EPT_AD_ENABLE_BIT;
+		eptp |= VMX_EPTP_AD_ENABLE_BIT;
 	eptp |= (root_hpa & PAGE_MASK);
 
 	return eptp;
@@ -7879,16 +7877,15 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 mask = address & 0x7;
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	/* Check for memory type validity */
-	switch (mask) {
-	case 0:
+	switch (address & VMX_EPTP_MT_MASK) {
+	case VMX_EPTP_MT_UC:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
 			return false;
 		break;
-	case 6:
+	case VMX_EPTP_MT_WB:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
 			return false;
 		break;
@@ -7896,8 +7893,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 		return false;
 	}
 
-	/* Bits 5:3 must be 3 */
-	if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
+	/* only a 4-level page-walk length is valid */
+	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
 		return false;
 
 	/* Reserved bits should not be set */
@@ -7905,7 +7902,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 		return false;
 
 	/* AD, if set, should be supported */
-	if ((address & VMX_EPT_AD_ENABLE_BIT)) {
+	if (address & VMX_EPTP_AD_ENABLE_BIT) {
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
 			return false;
 	}
@@ -7933,7 +7930,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
 				     &address, index * 8, 8))
 		return 1;
 
-	accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);
+	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
 
 	/*
 	 * If the (L2) guest does a vmfunc to the currently
@@ -9499,7 +9496,7 @@ static void __init vmx_check_processor_compat(void *rtn)
 
 static int get_ept_level(void)
 {
-	return VMX_EPT_DEFAULT_GAW + 1;
+	return 4;
 }
 
 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
@@ -9702,7 +9699,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 
 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
 {
-	return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
 }
 
 /* Callbacks for nested_ept_init_mmu_context: */
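
As a closing aside, a tiny stand-alone check (hypothetical, not part of
this series) that encodes the constraints above and would have caught the
VMX_EPTP_PWL_4 typo flagged in the comments:

#include <assert.h>

#define VMX_EPTP_PWL_MASK	0x38ull
#define VMX_EPTP_PWL_4		0x18ull	/* corrected value: (4 - 1) << 3 */
#define VMX_EPTP_MT_MASK	0x7ull
#define VMX_EPTP_MT_WB		0x6ull

int main(void)
{
	/* the PWL value must lie entirely within the PWL field... */
	assert((VMX_EPTP_PWL_4 & ~VMX_EPTP_PWL_MASK) == 0);
	/* ...and must encode a page-walk length of 4 (field holds length - 1) */
	assert(((VMX_EPTP_PWL_4 >> 3) & 0x7) == 4 - 1);
	/* the WB memory type must fit within the MT field */
	assert((VMX_EPTP_MT_WB & ~VMX_EPTP_MT_MASK) == 0);
	return 0;
}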