[v7,09/26] KVM: x86: Rename kvm_{g,s}et_msr() to manifest emulation operations

Message ID 20231124055330.138870-10-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable CET Virtualization

Commit Message

Yang, Weijiang Nov. 24, 2023, 5:53 a.m. UTC
Rename kvm_{g,s}et_msr() to kvm_emulate_msr_{read,write}() to make it
more obvious that KVM uses these helpers to emulate guest behavior,
i.e., that they hard-code host_initiated == false.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  4 ++--
 arch/x86/kvm/smm.c              |  4 ++--
 arch/x86/kvm/vmx/nested.c       | 13 +++++++------
 arch/x86/kvm/x86.c              | 10 +++++-----
 4 files changed, 16 insertions(+), 15 deletions(-)
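
For context, the new names encode the host_initiated distinction that is
visible in the x86.c hunk below: both helpers simply pass
host_initiated == false to the *_ignored_check() functions, while
__kvm_get_msr() (declared in the kvm_host.h hunk) takes it as an explicit
parameter. A minimal sketch of the two access paths; the demo_* wrappers
are hypothetical, added here for illustration only and not part of the
patch:

/*
 * Minimal sketch, not part of the patch: the renamed helpers hard-code
 * host_initiated == false, i.e. the access is handled as if the guest
 * had executed RDMSR/WRMSR, whereas __kvm_get_msr() lets the caller
 * pass host_initiated explicitly.
 */
static int demo_emulated_guest_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	/* Guest-emulated read: host_initiated == false internally. */
	return kvm_emulate_msr_read(vcpu, index, data);
}

static int demo_host_initiated_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	/* Host-initiated read, e.g. on behalf of a userspace ioctl. */
	return __kvm_get_msr(vcpu, index, data, true);
}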

Comments

Maxim Levitsky Nov. 30, 2023, 5:36 p.m. UTC | #1
On Fri, 2023-11-24 at 00:53 -0500, Yang Weijiang wrote:
> Rename kvm_{g,s}et_msr() to kvm_emulate_msr_{read,write}() to make it
> more obvious that KVM uses these helpers to emulate guest behavior,
> i.e., that they hard-code host_initiated == false.
> 
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
>
> [...]

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d7036982332e..5cfa18aaf33f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1967,8 +1967,8 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index dc3d95fdca7d..45c855389ea7 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -535,7 +535,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 
 	vcpu->arch.smbase =         smstate->smbase;
 
-	if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
+	if (kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
 		return X86EMUL_UNHANDLEABLE;
 
 	rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
@@ -626,7 +626,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 
 		/* And finally go back to 32-bit mode.  */
 		efer = 0;
-		kvm_set_msr(vcpu, MSR_EFER, efer);
+		kvm_emulate_msr_write(vcpu, MSR_EFER, efer);
 	}
 #endif
 
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c5ec0ef51ff7..2034337681f9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -927,7 +927,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			goto fail;
 		}
-		if (kvm_set_msr(vcpu, e.index, e.value)) {
+		if (kvm_emulate_msr_write(vcpu, e.index, e.value)) {
 			pr_debug_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
 				__func__, i, e.index, e.value);
@@ -963,7 +963,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	if (kvm_get_msr(vcpu, msr_index, data)) {
+	if (kvm_emulate_msr_read(vcpu, msr_index, data)) {
 		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
 			msr_index);
 		return false;
@@ -2649,7 +2649,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
-	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+	    WARN_ON_ONCE(kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
 				     vmcs12->guest_ia32_perf_global_ctrl))) {
 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
 		return -EINVAL;
@@ -4524,8 +4524,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	}
 	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
-		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-					 vmcs12->host_ia32_perf_global_ctrl));
+		WARN_ON_ONCE(kvm_emulate_msr_write(vcpu,
+					MSR_CORE_PERF_GLOBAL_CTRL,
+					vmcs12->host_ia32_perf_global_ctrl));
 
 	/* Set L1 segment info according to Intel SDM
 	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -4700,7 +4701,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 				goto vmabort;
 			}
 
-			if (kvm_set_msr(vcpu, h.index, h.value)) {
+			if (kvm_emulate_msr_write(vcpu, h.index, h.value)) {
 				pr_debug_ratelimited(
 					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
 					__func__, j, h.index, h.value);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c924075f6f1..b9c2c0cd4cf5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1973,17 +1973,17 @@ static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
 
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
 	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_read);
 
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
 	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_write);
 
 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
 {
@@ -8329,7 +8329,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 *pdata)
 {
-	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+	return kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
 }
 
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
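
As a usage illustration, a simplified, hypothetical version of the
VM-entry MSR-load loop from the nested.c hunk above (assuming the
struct vmx_msr_entry layout with index/value fields from asm/vmx.h;
demo_load_msr_list() itself is not part of the patch):

/*
 * Hypothetical, simplified sketch of nested_vmx_load_msr(): each list
 * entry is written as though the guest itself executed WRMSR, which is
 * exactly what the kvm_emulate_msr_write() name now conveys.
 */
static int demo_load_msr_list(struct kvm_vcpu *vcpu,
			      const struct vmx_msr_entry *e, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		/* A failure here mirrors the "goto fail" path above. */
		if (kvm_emulate_msr_write(vcpu, e[i].index, e[i].value))
			return -EINVAL;
	}
	return 0;
}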