diff mbox series

[1/2] KVM: VMX: Keep registers read/write consistent with definition

Message ID 20210422093436.78683-2-yang.zhong@intel.com (mailing list archive)
State New, archived
Headers show
Series Cleanup the registers read/write access | expand

Commit Message

Yang Zhong April 22, 2021, 9:34 a.m. UTC
The kvm_cache_regs.h file defines inline functions for the general-purpose
register and pointer register read/write operations; we need to keep the
related register operations on the VMX side consistent with the definitions
in that header file.

Signed-off-by: Yang Zhong <yang.zhong@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

Comments

Paolo Bonzini April 22, 2021, 10:38 a.m. UTC | #1
On 22/04/21 11:34, Yang Zhong wrote:
> The kvm_cache_regs.h file has defined inline functions for those general
> purpose registers and pointer register read/write operations, we need keep
> those related registers operations consistent with header file definition
> in the VMX side.
> 
> Signed-off-by: Yang Zhong <yang.zhong@intel.com>
> ---
>   arch/x86/kvm/vmx/vmx.c | 11 ++++++-----
>   1 file changed, 6 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 29b40e092d13..d56505fc7a71 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2266,10 +2266,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
>   
>   	switch (reg) {
>   	case VCPU_REGS_RSP:
> -		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
> +		kvm_rsp_write(vcpu, vmcs_readl(GUEST_RSP));
>   		break;
>   	case VCPU_REGS_RIP:
> -		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
> +		kvm_rip_write(vcpu, vmcs_readl(GUEST_RIP));
>   		break;

This is on purpose, because you don't want to mark those register dirty.

Likewise, in the case below it's more confusing to go through the helper 
because it checks kvm_register_is_available and calls vmx_cache_reg if 
false.

Because these functions are the ones that handle the caching, it makes 
sense for them not to use the helpers.

Paolo

>   	case VCPU_EXREG_PDPTR:
>   		if (enable_ept)
> @@ -4432,7 +4432,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>   
>   	vmx->msr_ia32_umwait_control = 0;
>   
> -	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
> +	kvm_rdx_write(&vmx->vcpu, get_rdx_init_val());
>   	vmx->hv_deadline_tsc = -1;
>   	kvm_set_cr8(vcpu, 0);
>   
> @@ -6725,9 +6725,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
>   	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
>   
>   	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
> -		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
> +		vmcs_writel(GUEST_RSP, kvm_rsp_read(vcpu));
> +
>   	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
> -		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
> +		vmcs_writel(GUEST_RIP, kvm_rip_read(vcpu));
>   
>   	cr3 = __get_current_cr3_fast();
>   	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
>
Yang Zhong April 24, 2021, 12:31 p.m. UTC | #2
On Thu, Apr 22, 2021 at 12:38:19PM +0200, Paolo Bonzini wrote:
> On 22/04/21 11:34, Yang Zhong wrote:
> >The kvm_cache_regs.h file has defined inline functions for those general
> >purpose registers and pointer register read/write operations, we need keep
> >those related registers operations consistent with header file definition
> >in the VMX side.
> >
> >Signed-off-by: Yang Zhong <yang.zhong@intel.com>
> >---
> >  arch/x86/kvm/vmx/vmx.c | 11 ++++++-----
> >  1 file changed, 6 insertions(+), 5 deletions(-)
> >
> >diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> >index 29b40e092d13..d56505fc7a71 100644
> >--- a/arch/x86/kvm/vmx/vmx.c
> >+++ b/arch/x86/kvm/vmx/vmx.c
> >@@ -2266,10 +2266,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
> >  	switch (reg) {
> >  	case VCPU_REGS_RSP:
> >-		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
> >+		kvm_rsp_write(vcpu, vmcs_readl(GUEST_RSP));
> >  		break;
> >  	case VCPU_REGS_RIP:
> >-		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
> >+		kvm_rip_write(vcpu, vmcs_readl(GUEST_RIP));
> >  		break;
> 
> This is on purpose, because you don't want to mark those register dirty.
> 
> Likewise, in the case below it's more confusing to go through the
> helper because it checks kvm_register_is_available and calls
> vmx_cache_reg if false.
> 
> Because these functions are the ones that handle the caching, it
> makes sense for them not to use the helpers.
> 

  Paolo, thanks for pointing out this issue, I will drop those two patches, thanks!

  Yang
 
> Paolo
> 
> >  	case VCPU_EXREG_PDPTR:
> >  		if (enable_ept)
> >@@ -4432,7 +4432,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> >  	vmx->msr_ia32_umwait_control = 0;
> >-	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
> >+	kvm_rdx_write(&vmx->vcpu, get_rdx_init_val());
> >  	vmx->hv_deadline_tsc = -1;
> >  	kvm_set_cr8(vcpu, 0);
> >@@ -6725,9 +6725,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >  	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
> >  	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
> >-		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
> >+		vmcs_writel(GUEST_RSP, kvm_rsp_read(vcpu));
> >+
> >  	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
> >-		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
> >+		vmcs_writel(GUEST_RIP, kvm_rip_read(vcpu));
> >  	cr3 = __get_current_cr3_fast();
> >  	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
> >
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 29b40e092d13..d56505fc7a71 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2266,10 +2266,10 @@  static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 
 	switch (reg) {
 	case VCPU_REGS_RSP:
-		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+		kvm_rsp_write(vcpu, vmcs_readl(GUEST_RSP));
 		break;
 	case VCPU_REGS_RIP:
-		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+		kvm_rip_write(vcpu, vmcs_readl(GUEST_RIP));
 		break;
 	case VCPU_EXREG_PDPTR:
 		if (enable_ept)
@@ -4432,7 +4432,7 @@  static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vmx->msr_ia32_umwait_control = 0;
 
-	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	kvm_rdx_write(&vmx->vcpu, get_rdx_init_val());
 	vmx->hv_deadline_tsc = -1;
 	kvm_set_cr8(vcpu, 0);
 
@@ -6725,9 +6725,10 @@  static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
 
 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
-		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+		vmcs_writel(GUEST_RSP, kvm_rsp_read(vcpu));
+
 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
-		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+		vmcs_writel(GUEST_RIP, kvm_rip_read(vcpu));
 
 	cr3 = __get_current_cr3_fast();
 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {