[v2,3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite

Message ID 20191206193144.33209-3-jmattson@google.com (mailing list archive)
State New, archived
Series [v2,1/3] kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field

Commit Message

Jim Mattson Dec. 6, 2019, 7:31 p.m. UTC
Apply reverse fir tree declaration order, wrap long lines, reformat a
block comment, delete an extra blank line, and use BIT_ULL(10) instead
of (1u << 10).
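
For reference, "reverse fir tree" order sorts a function's local
declarations from the longest line down to the shortest, as in this
excerpt from the reordered handle_vmread:

	struct x86_exception e;
	unsigned long field;
	u64 field_value;
	gva_t gva = 0;
	short offset;
	int len;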

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx/nested.c | 47 +++++++++++++++++++++------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

Comments

Sean Christopherson Dec. 6, 2019, 9:23 p.m. UTC | #1
On Fri, Dec 06, 2019 at 11:31:44AM -0800, Jim Mattson wrote:
> Apply reverse fir tree declaration order, wrap long lines, reformat a
> block comment, delete an extra blank line, and use BIT_ULL(10) instead
> of (1u << 10).
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Peter Shier <pshier@google.com>
> Reviewed-by: Oliver Upton <oupton@google.com>
> Reviewed-by: Jon Cargille <jcargill@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/vmx/nested.c | 47 +++++++++++++++++++++------------------
>  1 file changed, 25 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 94ec089d6d1a..aff163192369 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4751,17 +4751,17 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
>  
>  static int handle_vmread(struct kvm_vcpu *vcpu)
>  {
> -	unsigned long field;
> -	u64 field_value;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> -	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> -	int len;
> -	gva_t gva = 0;
>  	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
>  						    : get_vmcs12(vcpu);
> +	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> +	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	struct x86_exception e;
> +	unsigned long field;
> +	u64 field_value;
> +	gva_t gva = 0;
>  	short offset;
> +	int len;
>  
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
> @@ -4776,7 +4776,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  		return nested_vmx_failInvalid(vcpu);
>  
>  	/* Decode instruction info and find the field to read */
> -	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> +	field = kvm_register_readl(vcpu,
> +				   (((vmx_instruction_info) >> 28) & 0xf));

I find the current version to be more readable.  Alternatively, rename the
local variable to instr_info and eliminate several of these in one shot.
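
Something like the following untested sketch (instr_info is the suggested
new name, not what this patch applies):

	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	...
	/* Now fits in 80 columns with no wrapping. */
	field = kvm_register_readl(vcpu, (instr_info >> 28) & 0xf);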

>  
>  	offset = vmcs_field_to_offset(field);
>  	if (offset < 0)
> @@ -4794,7 +4795,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  	 * Note that the number of bits actually copied is 32 or 64 depending
>  	 * on the guest's mode (32 or 64 bit), not on the given field's length.
>  	 */
> -	if (vmx_instruction_info & (1u << 10)) {
> +	if (vmx_instruction_info & BIT_ULL(10)) {

BIT_ULL() is overkill and inaccurate in a sense since instr_info is a
32-bit value.
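
For context, the helpers are defined along these lines in
include/linux/bits.h (the exact spelling varies by kernel version):

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))

BIT(10) is already wide enough to mask a u32 such as vmx_instruction_info,
both here and in the identical check in handle_vmwrite:

	if (vmx_instruction_info & BIT(10)) {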

>  		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
>  			field_value);
>  	} else {
> @@ -4803,7 +4804,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  				vmx_instruction_info, true, len, &gva))
>  			return 1;
>  		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
> -		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
> +		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
> +						len, &e))

I'd prefer to let this poke out.
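
I.e. keep the call on one line even though it runs past 80 columns, as in
the pre-patch code:

	if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))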

>  			kvm_inject_page_fault(vcpu, &e);
>  	}
>  
> @@ -4836,24 +4838,25 @@ static bool is_shadow_field_ro(unsigned long field)
>  
>  static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  {
> -	unsigned long field;
> -	int len;
> -	gva_t gva;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
> +						    : get_vmcs12(vcpu);
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	struct x86_exception e;
> +	unsigned long field;
> +	short offset;
> +	gva_t gva;
> +	int len;
>  
> -	/* The value to write might be 32 or 64 bits, depending on L1's long
> +	/*
> +	 * The value to write might be 32 or 64 bits, depending on L1's long
>  	 * mode, and eventually we need to write that into a field of several
>  	 * possible lengths. The code below first zero-extends the value to 64
>  	 * bit (field_value), and then copies only the appropriate number of
>  	 * bits into the vmcs12 field.
>  	 */
>  	u64 field_value = 0;
> -	struct x86_exception e;
> -	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
> -						    : get_vmcs12(vcpu);
> -	short offset;
>  
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
> @@ -4867,7 +4870,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
>  		return nested_vmx_failInvalid(vcpu);
>  
> -	if (vmx_instruction_info & (1u << 10))
> +	if (vmx_instruction_info & BIT_ULL(10))

Same thing here, BIT() is sufficient.

>  		field_value = kvm_register_readl(vcpu,
>  			(((vmx_instruction_info) >> 3) & 0xf));
>  	else {
> @@ -4881,8 +4884,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  		}
>  	}
>  
> -
> -	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> +	field = kvm_register_readl(vcpu,
> +				   (((vmx_instruction_info) >> 28) & 0xf));
>  
>  	offset = vmcs_field_to_offset(field);
>  	if (offset < 0)
> -- 
> 2.24.0.393.g34dc348eaf-goog
>

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 94ec089d6d1a..aff163192369 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4751,17 +4751,17 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
 
 static int handle_vmread(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	u64 field_value;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	int len;
-	gva_t gva = 0;
 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
 						    : get_vmcs12(vcpu);
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
+	unsigned long field;
+	u64 field_value;
+	gva_t gva = 0;
 	short offset;
+	int len;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4776,7 +4776,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		return nested_vmx_failInvalid(vcpu);
 
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu,
+				   (((vmx_instruction_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -4794,7 +4795,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	 * Note that the number of bits actually copied is 32 or 64 depending
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
-	if (vmx_instruction_info & (1u << 10)) {
+	if (vmx_instruction_info & BIT_ULL(10)) {
 		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
 			field_value);
 	} else {
@@ -4803,7 +4804,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 				vmx_instruction_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+						len, &e))
 			kvm_inject_page_fault(vcpu, &e);
 	}
 
@@ -4836,24 +4838,25 @@ static bool is_shadow_field_ro(unsigned long field)
 
 static int handle_vmwrite(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	int len;
-	gva_t gva;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct x86_exception e;
+	unsigned long field;
+	short offset;
+	gva_t gva;
+	int len;
 
-	/* The value to write might be 32 or 64 bits, depending on L1's long
+	/*
+	 * The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
 	 * bit (field_value), and then copies only the appropriate number of
 	 * bits into the vmcs12 field.
 	 */
 	u64 field_value = 0;
-	struct x86_exception e;
-	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
-						    : get_vmcs12(vcpu);
-	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4867,7 +4870,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (vmx_instruction_info & (1u << 10))
+	if (vmx_instruction_info & BIT_ULL(10))
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
@@ -4881,8 +4884,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		}
 	}
 
-
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu,
+				   (((vmx_instruction_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)