diff mbox

[nVMX,1/2] x86: Enforce NMI controls on vmentry of L2 guests

Message ID 20171223001358.19629-2-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Krish Sadhukhan Dec. 23, 2017, 12:13 a.m. UTC
According to Intel SDM 26.2.1.1, the following rules should be enforced
on vmentry:

 *  If the "NMI exiting" VM-execution control is 0, the "virtual NMIs"
    VM-execution control must be 0.
 *  If the "virtual NMIs" VM-execution control is 0, the "NMI-window
    exiting" VM-execution control must be 0.

This patch enforces these rules when entering an L2 guest.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Liran Alon <liran.alon@oracle.com>
---
 arch/x86/kvm/vmx.c | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

Comments

Jim Mattson Jan. 9, 2018, 9:49 p.m. UTC | #1
Reviewed-by: Jim Mattson <jmattson@google.com>

On Fri, Dec 22, 2017 at 4:13 PM, Krish Sadhukhan
<krish.sadhukhan@oracle.com> wrote:
> According to Intel SDM 26.2.1.1, the following rules should be enforced
> on vmentry:
>
>  *  If the "NMI exiting" VM-execution control is 0, "Virtual NMIs"
>     VM-execution control must be 0.
>  *  If the “virtual NMIs” VM-execution control is 0, the “NMI-window
>     exiting” VM-execution control must be 0.
>
> This patch enforces these rules when entering an L2 guest.
>
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Liran Alon <liran.alon@oracle.com>
> ---
>  arch/x86/kvm/vmx.c | 29 +++++++++++++++++++++++++++--
>  1 file changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 8eba631..24b88db 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1366,6 +1366,16 @@ static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
>                 PIN_BASED_VMX_PREEMPTION_TIMER;
>  }
>
> +static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
> +{
> +       return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
> +}
> +
> +static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
> +{
> +       return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
> +}
> +
>  static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
>  {
>         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
> @@ -5667,8 +5677,7 @@ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
>
>  static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
>  {
> -       return get_vmcs12(vcpu)->pin_based_vm_exec_control &
> -               PIN_BASED_NMI_EXITING;
> +       return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
>  }
>
>  static void enable_irq_window(struct kvm_vcpu *vcpu)
> @@ -10752,6 +10761,19 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
>         return 0;
>  }
>
> +static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
> +{
> +       if (!nested_cpu_has_nmi_exiting(vmcs12) &&
> +           nested_cpu_has_virtual_nmis(vmcs12))
> +               return -EINVAL;
> +
> +       if (!nested_cpu_has_virtual_nmis(vmcs12) &&
> +           nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
> +               return -EINVAL;
> +
> +       return 0;
> +}
> +
>  static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  {
>         struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -10796,6 +10818,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>                                 vmx->nested.nested_vmx_entry_ctls_high))
>                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>
> +       if (nested_vmx_check_nmi_controls(vmcs12))
> +               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
>         if (nested_cpu_has_vmfunc(vmcs12)) {
>                 if (vmcs12->vm_function_control &
>                     ~vmx->nested.nested_vmx_vmfunc_controls)
> --
> 2.9.5
>
Paolo Bonzini July 15, 2018, 4:14 p.m. UTC | #2
On 23/12/2017 01:13, Krish Sadhukhan wrote:
> According to Intel SDM 26.2.1.1, the following rules should be enforced
> on vmentry:
> 
>  *  If the "NMI exiting" VM-execution control is 0, "Virtual NMIs"
>     VM-execution control must be 0.
>  *  If the “virtual NMIs” VM-execution control is 0, the “NMI-window
>     exiting” VM-execution control must be 0.
> 
> This patch enforces these rules when entering an L2 guest.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Liran Alon <liran.alon@oracle.com>
> ---
>  arch/x86/kvm/vmx.c | 29 +++++++++++++++++++++++++++--
>  1 file changed, 27 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 8eba631..24b88db 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1366,6 +1366,16 @@ static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
>  		PIN_BASED_VMX_PREEMPTION_TIMER;
>  }
>  
> +static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
> +{
> +	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
> +}
> +
> +static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
> +{
> +	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
> +}
> +
>  static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
>  {
>  	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
> @@ -5667,8 +5677,7 @@ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
>  
>  static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
>  {
> -	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
> -		PIN_BASED_NMI_EXITING;
> +	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
>  }
>  
>  static void enable_irq_window(struct kvm_vcpu *vcpu)
> @@ -10752,6 +10761,19 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
>  	return 0;
>  }
>  
> +static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
> +{
> +	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
> +	    nested_cpu_has_virtual_nmis(vmcs12))
> +		return -EINVAL;
> +
> +	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
> +	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
>  static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -10796,6 +10818,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  				vmx->nested.nested_vmx_entry_ctls_high))
>  		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
>  
> +	if (nested_vmx_check_nmi_controls(vmcs12))
> +		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
>  	if (nested_cpu_has_vmfunc(vmcs12)) {
>  		if (vmcs12->vm_function_control &
>  		    ~vmx->nested.nested_vmx_vmfunc_controls)
> 

Queued (also in the "better late than never" department), thanks.

Paolo
diff mbox

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8eba631..24b88db 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1366,6 +1366,16 @@  static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
 		PIN_BASED_VMX_PREEMPTION_TIMER;
 }
 
+static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
+{
+	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
+}
+
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
+{
+	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+}
+
 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
 {
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
@@ -5667,8 +5677,7 @@  static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
 
 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
 {
-	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
-		PIN_BASED_NMI_EXITING;
+	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -10752,6 +10761,19 @@  static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
+static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
+{
+	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
+	    nested_cpu_has_virtual_nmis(vmcs12))
+		return -EINVAL;
+
+	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
+	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -10796,6 +10818,9 @@  static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 				vmx->nested.nested_vmx_entry_ctls_high))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+	if (nested_vmx_check_nmi_controls(vmcs12))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (nested_cpu_has_vmfunc(vmcs12)) {
 		if (vmcs12->vm_function_control &
 		    ~vmx->nested.nested_vmx_vmfunc_controls)