
[RESEND,v10,02/10] vmx: spp: Add control flags for Sub-Page Protection(SPP)

Message ID 20200102061319.10077-3-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable Sub-Page Write Protection Support

Commit Message

Yang, Weijiang Jan. 2, 2020, 6:13 a.m. UTC
Check the SPP capability in MSR_IA32_VMX_PROCBASED_CTLS2; bit 23
indicates SPP support. Enable the SPP feature bit in the CPU
capabilities bitmap if it's supported.

Co-developed-by: He Chen <he.chen@linux.intel.com>
Signed-off-by: He Chen <he.chen@linux.intel.com>
Co-developed-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Signed-off-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/include/asm/vmx.h      | 1 +
 arch/x86/kvm/mmu.h              | 2 ++
 arch/x86/kvm/vmx/capabilities.h | 5 +++++
 arch/x86/kvm/vmx/vmx.c          | 9 +++++++++
 4 files changed, 17 insertions(+)
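
For context, a minimal sketch (not part of the patch) of where the capability
bit lives. In the patch itself the check is funneled through
setup_vmcs_config() and cached in vmcs_config.cpu_based_2nd_exec_ctrl, which
the new cpu_has_vmx_ept_spp() helper reads; the helper name below is only
illustrative:

	/*
	 * Illustrative only: bit 23 of the allowed-1 settings (high dword)
	 * of MSR_IA32_VMX_PROCBASED_CTLS2 corresponds to
	 * SECONDARY_EXEC_ENABLE_SPP (0x00800000).
	 */
	static __init bool spp_capability_present(void)
	{
		u32 vmx_msr_low, vmx_msr_high;

		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx_msr_low, vmx_msr_high);
		return vmx_msr_high & SECONDARY_EXEC_ENABLE_SPP;
	}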

Comments

Sean Christopherson Jan. 10, 2020, 4:58 p.m. UTC | #1
On Thu, Jan 02, 2020 at 02:13:11PM +0800, Yang Weijiang wrote:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index e3394c839dea..5713e8a6224c 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -60,6 +60,7 @@
>  #include "vmcs12.h"
>  #include "vmx.h"
>  #include "x86.h"
> +#include "../mmu/spp.h"

The ".." should be unnecessary, e.g. x86.h is obviously a level up.

>  MODULE_AUTHOR("Qumranet");
>  MODULE_LICENSE("GPL");
> @@ -111,6 +112,7 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
>  
>  static bool __read_mostly dump_invalid_vmcs = 0;
>  module_param(dump_invalid_vmcs, bool, 0644);
> +static bool __read_mostly spp_supported = 0;

s/spp_supported/enable_spp to be consistent with all the other booleans.

Is there a reason this isn't exposed as a module param?

And if this is to be on by default, then the flag itself should be
initialized to '1' so that it's clear to readers that the feature is
enabled by default (if it's supported).  Looking at only this code, I would
think that SPP is forced off and can't be enabled.

That being said, turning on the enable_spp control flag should be the last
patch in the series, i.e. it shouldn't be turned on until all the
underlying support code is in place.  So, I would keep this as is, but
invert the code in hardware_setup() below.  That way the flag exists and
is checked, but can't be turned on without modifying the code.  Then when
all is said and done, you can add a patch to introduce the module param
and turn on the flag by default (if that's indeed what we want).
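
A rough sketch of the end state Sean describes (flag and param names are
assumed, not taken from the patch): the flag reads as enabled-by-default,
hardware_setup() only ever clears it, and the module param is introduced by
the final patch in the series:

	/* Sketch only: default-on flag, exposed as a module param last. */
	static bool __read_mostly enable_spp = 1;
	module_param_named(spp, enable_spp, bool, 0444);

	/* In hardware_setup(): clear the flag when prerequisites are missing. */
	if (!cpu_has_vmx_ept_spp() || !enable_ept)
		enable_spp = 0;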

>  #define MSR_BITMAP_MODE_X2APIC		1
>  #define MSR_BITMAP_MODE_X2APIC_APICV	2
> @@ -2391,6 +2393,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
>  			SECONDARY_EXEC_RDSEED_EXITING |
>  			SECONDARY_EXEC_RDRAND_EXITING |
>  			SECONDARY_EXEC_ENABLE_PML |
> +			SECONDARY_EXEC_ENABLE_SPP |
>  			SECONDARY_EXEC_TSC_SCALING |
>  			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
>  			SECONDARY_EXEC_PT_USE_GPA |
> @@ -4039,6 +4042,9 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
>  	if (!enable_pml)
>  		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
>  
> +	if (!spp_supported)
> +		exec_control &= ~SECONDARY_EXEC_ENABLE_SPP;
> +
>  	if (vmx_xsaves_supported()) {
>  		/* Exposing XSAVES only when XSAVE is exposed */
>  		bool xsaves_enabled =
> @@ -7630,6 +7636,9 @@ static __init int hardware_setup(void)
>  	if (!cpu_has_vmx_flexpriority())
>  		flexpriority_enabled = 0;
>  
> +	if (cpu_has_vmx_ept_spp() && enable_ept)
> +		spp_supported = 1;

As above, invert this to disable spp when it's not supported, or when EPT
is disabled (or not supported).

> +
>  	if (!cpu_has_virtual_nmis())
>  		enable_vnmi = 0;
>  
> -- 
> 2.17.2
>
Yang, Weijiang Jan. 13, 2020, 5:44 a.m. UTC | #2
On Fri, Jan 10, 2020 at 08:58:59AM -0800, Sean Christopherson wrote:
> On Thu, Jan 02, 2020 at 02:13:11PM +0800, Yang Weijiang wrote:
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index e3394c839dea..5713e8a6224c 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -60,6 +60,7 @@
> >  #include "vmcs12.h"
> >  #include "vmx.h"
> >  #include "x86.h"
> > +#include "../mmu/spp.h"
> 
> The ".." should be unnecessary, e.g. x86.h is obviously a level up.
>
Sean, thanks a lot for the feedback! Will change this.
> >  MODULE_AUTHOR("Qumranet");
> >  MODULE_LICENSE("GPL");
> > @@ -111,6 +112,7 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
> >  
> >  static bool __read_mostly dump_invalid_vmcs = 0;
> >  module_param(dump_invalid_vmcs, bool, 0644);
> > +static bool __read_mostly spp_supported = 0;
> 
> s/spp_supported/enable_spp to be consistent with all the other booleans.
> 
> Is there a reason this isn't exposed as a module param?
> 
Yes, in earlier versions SPP was enabled by a module param, the so-called
"static enable". Since the pre-allocated SPP bitmap is fairly large, from v3
it was changed to "dynamic enable", i.e., the user application needs to
enable SPP via the init_spp IOCTL (later changed to ENABLE_CAP) so the
pre-allocation can be avoided. The flag is now used to cross-check SPP
status between functions. Will change the name.
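
To illustrate the "dynamic enable" flow described above, a hedged userspace
sketch; the capability name KVM_CAP_X86_SPP comes from this series and is not
in mainline headers, so treat it as an assumption:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical: ask KVM to enable SPP for a VM via KVM_ENABLE_CAP. */
	static int enable_spp_cap(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_SPP };

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}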

> And if this is to be on by default, then the flag itself should be
> initialized to '1' so that it's clear to readers that the feature is
> enabled by default (if it's supported).  Looking at only this code, I would
> think that SPP is forced off and can't be enabled.
> 
> That being said, turning on the enable_spp control flag should be the last
> patch in the series, i.e. it shouldn't be turned on until all the
> underlying support code is in place.  So, I would keep this as is, but
> invert the code in hardware_setup() below.  That way the flag exists and
> is checked, but can't be turned on without modifying the code.  Then when
> all is said and done, you can add a patch to introduce the module param
> and turn on the flag by default (if that's indeed what we want).
> 
You're right, I'll re-order the series so the SPP bit is enabled in the
last patch, thanks!

> >  #define MSR_BITMAP_MODE_X2APIC		1
> >  #define MSR_BITMAP_MODE_X2APIC_APICV	2
> > @@ -2391,6 +2393,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
> >  			SECONDARY_EXEC_RDSEED_EXITING |
> >  			SECONDARY_EXEC_RDRAND_EXITING |
> >  			SECONDARY_EXEC_ENABLE_PML |
> > +			SECONDARY_EXEC_ENABLE_SPP |
> >  			SECONDARY_EXEC_TSC_SCALING |
> >  			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
> >  			SECONDARY_EXEC_PT_USE_GPA |
> > @@ -4039,6 +4042,9 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
> >  	if (!enable_pml)
> >  		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
> >  
> > +	if (!spp_supported)
> > +		exec_control &= ~SECONDARY_EXEC_ENABLE_SPP;
> > +
> >  	if (vmx_xsaves_supported()) {
> >  		/* Exposing XSAVES only when XSAVE is exposed */
> >  		bool xsaves_enabled =
> > @@ -7630,6 +7636,9 @@ static __init int hardware_setup(void)
> >  	if (!cpu_has_vmx_flexpriority())
> >  		flexpriority_enabled = 0;
> >  
> > +	if (cpu_has_vmx_ept_spp() && enable_ept)
> > +		spp_supported = 1;
> 
> As above, invert this to disable spp when it's not supported, or when EPT
> is disabled (or not supported).
>
Sure, thank you!
> > +
> >  	if (!cpu_has_virtual_nmis())
> >  		enable_vnmi = 0;
> >  
> > -- 
> > 2.17.2
> >

Patch

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 1835767aa335..de79a4de0723 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -68,6 +68,7 @@ 
 #define SECONDARY_EXEC_XSAVES			0x00100000
 #define SECONDARY_EXEC_PT_USE_GPA		0x01000000
 #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	0x00400000
+#define SECONDARY_EXEC_ENABLE_SPP		0x00800000
 #define SECONDARY_EXEC_TSC_SCALING              0x02000000
 #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE	0x04000000
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d55674f44a18..5b2807222465 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -26,6 +26,8 @@ 
 #define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
+#define PT_SPP_SHIFT 61
+#define PT_SPP_MASK (1ULL << PT_SPP_SHIFT)
 #define PT64_NX_SHIFT 63
 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
 
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 7aa69716d516..3d79fd116687 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -241,6 +241,11 @@  static inline bool cpu_has_vmx_pml(void)
 	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
 }
 
+static inline bool cpu_has_vmx_ept_spp(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_SPP;
+}
+
 static inline bool vmx_xsaves_supported(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e3394c839dea..5713e8a6224c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -60,6 +60,7 @@ 
 #include "vmcs12.h"
 #include "vmx.h"
 #include "x86.h"
+#include "../mmu/spp.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -111,6 +112,7 @@  module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 static bool __read_mostly dump_invalid_vmcs = 0;
 module_param(dump_invalid_vmcs, bool, 0644);
+static bool __read_mostly spp_supported = 0;
 
 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
@@ -2391,6 +2393,7 @@  static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 			SECONDARY_EXEC_RDSEED_EXITING |
 			SECONDARY_EXEC_RDRAND_EXITING |
 			SECONDARY_EXEC_ENABLE_PML |
+			SECONDARY_EXEC_ENABLE_SPP |
 			SECONDARY_EXEC_TSC_SCALING |
 			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
 			SECONDARY_EXEC_PT_USE_GPA |
@@ -4039,6 +4042,9 @@  static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
 	if (!enable_pml)
 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
 
+	if (!spp_supported)
+		exec_control &= ~SECONDARY_EXEC_ENABLE_SPP;
+
 	if (vmx_xsaves_supported()) {
 		/* Exposing XSAVES only when XSAVE is exposed */
 		bool xsaves_enabled =
@@ -7630,6 +7636,9 @@  static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (cpu_has_vmx_ept_spp() && enable_ept)
+		spp_supported = 1;
+
 	if (!cpu_has_virtual_nmis())
 		enable_vnmi = 0;