
KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field

Message ID 20131125133713.GB959@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov Nov. 25, 2013, 1:37 p.m. UTC
VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
entry, but most of the time this can be avoided since the values do not change.
Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
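
For readers skimming the diff below, here is a minimal standalone sketch
(plain C with hypothetical names, not the kernel code) of the write-through
caching pattern this patch introduces: the costly field write is skipped
whenever the cached value already matches.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the costly vmcs_write32(); here it just logs. */
static void fake_vmwrite(uint32_t val)
{
	printf("VMWRITE %#x\n", val);
}

struct ctrl_shadow {
	uint32_t val;		/* last value actually written to the field */
};

static void ctrl_init(struct ctrl_shadow *s, uint32_t val)
{
	fake_vmwrite(val);	/* unconditional write */
	s->val = val;
}

static void ctrl_set(struct ctrl_shadow *s, uint32_t val)
{
	if (s->val != val)	/* skip the write when nothing changed */
		ctrl_init(s, val);
}

int main(void)
{
	struct ctrl_shadow s;

	ctrl_init(&s, 0x000011ff);
	ctrl_set(&s, 0x000011ff);		/* no VMWRITE: value unchanged */
	ctrl_set(&s, 0x000011ff | (1u << 9));	/* VMWRITE: a bit was added */
	return 0;
}

The patch keeps one such shadow per control field (vm_entry_controls_shadow
and vm_exit_controls_shadow) and uses the _init variant wherever the
in-memory copy may have gone stale, e.g. after switching the active vmcs.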

Signed-off-by: Gleb Natapov <gleb@redhat.com>
--
			Gleb.

Comments

Paolo Bonzini Nov. 27, 2013, 4:58 p.m. UTC | #1
On 25/11/2013 14:37, Gleb Natapov wrote:
> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
> entry, but most of the time this can be avoided since the values do not change.
> Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
> 
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index b2fe1c2..29c1c7f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -418,6 +418,8 @@ struct vcpu_vmx {
>  	u64 		      msr_host_kernel_gs_base;
>  	u64 		      msr_guest_kernel_gs_base;
>  #endif
> +	u32 vm_entry_controls_shadow;
> +	u32 vm_exit_controls_shadow;
>  	/*
>  	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
>  	 * non-nested (L1) guest, it always points to vmcs01. For a nested
> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
>  	vmcs_writel(field, vmcs_readl(field) | mask);
>  }
>  
> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vmcs_write32(VM_ENTRY_CONTROLS, val);
> +	vmx->vm_entry_controls_shadow = val;
> +}
> +
> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
> +{
> +	if (vmx->vm_entry_controls_shadow != val)
> +		vm_entry_controls_init(vmx, val);
> +}
> +
> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
> +{
> +	return vmx->vm_entry_controls_shadow;
> +}
> +
> +
> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
> +}
> +
> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
> +}
> +
> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vmcs_write32(VM_EXIT_CONTROLS, val);
> +	vmx->vm_exit_controls_shadow = val;
> +}
> +
> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
> +{
> +	if (vmx->vm_exit_controls_shadow != val)
> +		vm_exit_controls_init(vmx, val);
> +}
> +
> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
> +{
> +	return vmx->vm_exit_controls_shadow;
> +}
> +
> +
> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
> +}
> +
> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> +{
> +	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
> +}
> +
>  static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>  {
>  	vmx->segment_cache.bitmask = 0;
> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
>  	vmcs_write32(EXCEPTION_BITMAP, eb);
>  }
>  
> -static void clear_atomic_switch_msr_special(unsigned long entry,
> -		unsigned long exit)
> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> +		unsigned long entry, unsigned long exit)
>  {
> -	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
> -	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
> +	vm_entry_controls_clearbit(vmx, entry);
> +	vm_exit_controls_clearbit(vmx, exit);
>  }
>  
>  static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>  	switch (msr) {
>  	case MSR_EFER:
>  		if (cpu_has_load_ia32_efer) {
> -			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> +			clear_atomic_switch_msr_special(vmx,
> +					VM_ENTRY_LOAD_IA32_EFER,
>  					VM_EXIT_LOAD_IA32_EFER);
>  			return;
>  		}
>  		break;
>  	case MSR_CORE_PERF_GLOBAL_CTRL:
>  		if (cpu_has_load_perf_global_ctrl) {
> -			clear_atomic_switch_msr_special(
> +			clear_atomic_switch_msr_special(vmx,
>  					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>  					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
>  			return;
> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>  	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
>  }
>  
> -static void add_atomic_switch_msr_special(unsigned long entry,
> -		unsigned long exit, unsigned long guest_val_vmcs,
> -		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> +		unsigned long entry, unsigned long exit,
> +		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
> +		u64 guest_val, u64 host_val)
>  {
>  	vmcs_write64(guest_val_vmcs, guest_val);
>  	vmcs_write64(host_val_vmcs, host_val);
> -	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
> -	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
> +	vm_entry_controls_setbit(vmx, entry);
> +	vm_exit_controls_setbit(vmx, exit);
>  }
>  
>  static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>  	switch (msr) {
>  	case MSR_EFER:
>  		if (cpu_has_load_ia32_efer) {
> -			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> +			add_atomic_switch_msr_special(vmx,
> +					VM_ENTRY_LOAD_IA32_EFER,
>  					VM_EXIT_LOAD_IA32_EFER,
>  					GUEST_IA32_EFER,
>  					HOST_IA32_EFER,
> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>  		break;
>  	case MSR_CORE_PERF_GLOBAL_CTRL:
>  		if (cpu_has_load_perf_global_ctrl) {
> -			add_atomic_switch_msr_special(
> +			add_atomic_switch_msr_special(vmx,
>  					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>  					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
>  					GUEST_IA32_PERF_GLOBAL_CTRL,
> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>  	vmx_load_host_state(to_vmx(vcpu));
>  	vcpu->arch.efer = efer;
>  	if (efer & EFER_LMA) {
> -		vmcs_write32(VM_ENTRY_CONTROLS,
> -			     vmcs_read32(VM_ENTRY_CONTROLS) |
> -			     VM_ENTRY_IA32E_MODE);
> +		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>  		msr->data = efer;
>  	} else {
> -		vmcs_write32(VM_ENTRY_CONTROLS,
> -			     vmcs_read32(VM_ENTRY_CONTROLS) &
> -			     ~VM_ENTRY_IA32E_MODE);
> +		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>  
>  		msr->data = efer & ~EFER_LME;
>  	}
> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>  
>  static void exit_lmode(struct kvm_vcpu *vcpu)
>  {
> -	vmcs_write32(VM_ENTRY_CONTROLS,
> -		     vmcs_read32(VM_ENTRY_CONTROLS)
> -		     & ~VM_ENTRY_IA32E_MODE);
> +	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>  	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
>  }
>  
> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  		++vmx->nmsrs;
>  	}
>  
> -	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
> +
> +	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>  
>  	/* 22.2.1, 20.8.1 */
> -	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
> +	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>  
>  	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>  	set_cr4_guest_host_mask(vmx);
> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  	exit_control = vmcs_config.vmexit_ctrl;
>  	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
>  		exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
> -	vmcs_write32(VM_EXIT_CONTROLS, exit_control);
> +	vm_exit_controls_init(vmx, exit_control);
>  
>  	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
>  	 * emulated by vmx_set_efer(), below.
>  	 */
> -	vmcs_write32(VM_ENTRY_CONTROLS,
> +	vm_entry_controls_init(vmx, 
>  		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
>  			~VM_ENTRY_IA32E_MODE) |
>  		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  
>  	vmcs12->vm_entry_controls =
>  		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
> -		(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
> +		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>  
>  	/* TODO: These cannot have changed unless we have MSR bitmaps and
>  	 * the relevant bit asks not to trap the change */
> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
>  	vcpu->cpu = cpu;
>  	put_cpu();
>  
> +	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
>  	vmx_segment_cache_clear(vmx);
>  
>  	/* if no vmcs02 cache requested, remove the one we used */
> --
> 			Gleb.

Applied to kvm/queue, thanks.

Paolo
Jan Kiszka Dec. 8, 2013, 1 p.m. UTC | #2
On 2013-11-27 17:58, Paolo Bonzini wrote:
> On 25/11/2013 14:37, Gleb Natapov wrote:
>> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
>> entry, but most of the time this can be avoided since the values do not change.
>> Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
>>
>> Signed-off-by: Gleb Natapov <gleb@redhat.com>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index b2fe1c2..29c1c7f 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -418,6 +418,8 @@ struct vcpu_vmx {
>>  	u64 		      msr_host_kernel_gs_base;
>>  	u64 		      msr_guest_kernel_gs_base;
>>  #endif
>> +	u32 vm_entry_controls_shadow;
>> +	u32 vm_exit_controls_shadow;
>>  	/*
>>  	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
>>  	 * non-nested (L1) guest, it always points to vmcs01. For a nested
>> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
>>  	vmcs_writel(field, vmcs_readl(field) | mask);
>>  }
>>  
>> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vmcs_write32(VM_ENTRY_CONTROLS, val);
>> +	vmx->vm_entry_controls_shadow = val;
>> +}
>> +
>> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	if (vmx->vm_entry_controls_shadow != val)
>> +		vm_entry_controls_init(vmx, val);
>> +}
>> +
>> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
>> +{
>> +	return vmx->vm_entry_controls_shadow;
>> +}
>> +
>> +
>> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
>> +}
>> +
>> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
>> +}
>> +
>> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vmcs_write32(VM_EXIT_CONTROLS, val);
>> +	vmx->vm_exit_controls_shadow = val;
>> +}
>> +
>> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	if (vmx->vm_exit_controls_shadow != val)
>> +		vm_exit_controls_init(vmx, val);
>> +}
>> +
>> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
>> +{
>> +	return vmx->vm_exit_controls_shadow;
>> +}
>> +
>> +
>> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
>> +}
>> +
>> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> +	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
>> +}
>> +
>>  static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>>  {
>>  	vmx->segment_cache.bitmask = 0;
>> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
>>  	vmcs_write32(EXCEPTION_BITMAP, eb);
>>  }
>>  
>> -static void clear_atomic_switch_msr_special(unsigned long entry,
>> -		unsigned long exit)
>> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>> +		unsigned long entry, unsigned long exit)
>>  {
>> -	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
>> -	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
>> +	vm_entry_controls_clearbit(vmx, entry);
>> +	vm_exit_controls_clearbit(vmx, exit);
>>  }
>>  
>>  static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>  	switch (msr) {
>>  	case MSR_EFER:
>>  		if (cpu_has_load_ia32_efer) {
>> -			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>> +			clear_atomic_switch_msr_special(vmx,
>> +					VM_ENTRY_LOAD_IA32_EFER,
>>  					VM_EXIT_LOAD_IA32_EFER);
>>  			return;
>>  		}
>>  		break;
>>  	case MSR_CORE_PERF_GLOBAL_CTRL:
>>  		if (cpu_has_load_perf_global_ctrl) {
>> -			clear_atomic_switch_msr_special(
>> +			clear_atomic_switch_msr_special(vmx,
>>  					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>  					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
>>  			return;
>> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>  	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
>>  }
>>  
>> -static void add_atomic_switch_msr_special(unsigned long entry,
>> -		unsigned long exit, unsigned long guest_val_vmcs,
>> -		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
>> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>> +		unsigned long entry, unsigned long exit,
>> +		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
>> +		u64 guest_val, u64 host_val)
>>  {
>>  	vmcs_write64(guest_val_vmcs, guest_val);
>>  	vmcs_write64(host_val_vmcs, host_val);
>> -	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
>> -	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
>> +	vm_entry_controls_setbit(vmx, entry);
>> +	vm_exit_controls_setbit(vmx, exit);
>>  }
>>  
>>  static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>  	switch (msr) {
>>  	case MSR_EFER:
>>  		if (cpu_has_load_ia32_efer) {
>> -			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>> +			add_atomic_switch_msr_special(vmx,
>> +					VM_ENTRY_LOAD_IA32_EFER,
>>  					VM_EXIT_LOAD_IA32_EFER,
>>  					GUEST_IA32_EFER,
>>  					HOST_IA32_EFER,
>> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>  		break;
>>  	case MSR_CORE_PERF_GLOBAL_CTRL:
>>  		if (cpu_has_load_perf_global_ctrl) {
>> -			add_atomic_switch_msr_special(
>> +			add_atomic_switch_msr_special(vmx,
>>  					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>  					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
>>  					GUEST_IA32_PERF_GLOBAL_CTRL,
>> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>>  	vmx_load_host_state(to_vmx(vcpu));
>>  	vcpu->arch.efer = efer;
>>  	if (efer & EFER_LMA) {
>> -		vmcs_write32(VM_ENTRY_CONTROLS,
>> -			     vmcs_read32(VM_ENTRY_CONTROLS) |
>> -			     VM_ENTRY_IA32E_MODE);
>> +		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>  		msr->data = efer;
>>  	} else {
>> -		vmcs_write32(VM_ENTRY_CONTROLS,
>> -			     vmcs_read32(VM_ENTRY_CONTROLS) &
>> -			     ~VM_ENTRY_IA32E_MODE);
>> +		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>  
>>  		msr->data = efer & ~EFER_LME;
>>  	}
>> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>>  
>>  static void exit_lmode(struct kvm_vcpu *vcpu)
>>  {
>> -	vmcs_write32(VM_ENTRY_CONTROLS,
>> -		     vmcs_read32(VM_ENTRY_CONTROLS)
>> -		     & ~VM_ENTRY_IA32E_MODE);
>> +	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>  	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
>>  }
>>  
>> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>  		++vmx->nmsrs;
>>  	}
>>  
>> -	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>> +
>> +	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>>  
>>  	/* 22.2.1, 20.8.1 */
>> -	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>> +	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>>  
>>  	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>>  	set_cr4_guest_host_mask(vmx);
>> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>  	exit_control = vmcs_config.vmexit_ctrl;
>>  	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
>>  		exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
>> -	vmcs_write32(VM_EXIT_CONTROLS, exit_control);
>> +	vm_exit_controls_init(vmx, exit_control);
>>  
>>  	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
>>  	 * emulated by vmx_set_efer(), below.
>>  	 */
>> -	vmcs_write32(VM_ENTRY_CONTROLS,
>> +	vm_entry_controls_init(vmx, 
>>  		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
>>  			~VM_ENTRY_IA32E_MODE) |
>>  		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
>> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>  
>>  	vmcs12->vm_entry_controls =
>>  		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
>> -		(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
>> +		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>>  
>>  	/* TODO: These cannot have changed unless we have MSR bitmaps and
>>  	 * the relevant bit asks not to trap the change */
>> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
>>  	vcpu->cpu = cpu;
>>  	put_cpu();
>>  
>> +	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
>>  	vmx_segment_cache_clear(vmx);
>>  
>>  	/* if no vmcs02 cache requested, remove the one we used */
>> --
>> 			Gleb.
> 
> Applied to kvm/queue, thanks.

This seems to have or cause an issue with nVMX. The (still not merged) vmx
unit test breaks here after applying this commit:

enabling apic
paging enabled
cr0 = 80010011
cr3 = 7fff000
cr4 = 20
PASS: test vmx capability
PASS: test vmxon
PASS: test vmptrld
PASS: test vmclear
PASS: test vmptrst
        Hello World, this is null_guest_main!
PASS: test vmxoff

Test suite : vmenter
PASS: test vmlaunch
PASS: test vmresume

Test suite : preemption timer
KVM: entry failed, hardware error 0x7
RAX=000000002ced133a RBX=0000000000000000 RCX=0000000000000000 RDX=0000000000000000
RSI=0000000000000000 RDI=0000000000000000 RBP=0000000000000000 RSP=000000000044f9a0
R8 =0000000000000000 R9 =0000000000000000 R10=0000000000000000 R11=0000000000000000
R12=0000000000000000 R13=0000000000000000 R14=0000000000000000 R15=0000000000000000
RIP=0000000000400c63 RFL=00000002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS   [-WA]
CS =0008 0000000000000000 ffffffff 00a09b00 DPL=0 CS64 [-RA]
SS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS   [-WA]
DS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS   [-WA]
FS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS   [-WA]
GS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS   [-WA]
LDT=0000 0000000000000000 0000ffff 00008200 DPL=0 LDT
TR =0048 000000000040b052 00000067 00008b00 DPL=0 TSS64-busy
GDT=     000000000040b000 00000447
IDT=     0000000000400296 00000fff
CR0=80010031 CR2=0000000000000000 CR3=0000000007fff000 CR4=00002021
DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000 
DR6=00000000ffff0ff0 DR7=0000000000000400
EFER=0000000000000500
Code=34 25 00 0c 45 00 9d 0f 01 c2 eb 03 0f 01 c3 0f 96 44 24 0c <48> 87 04 25 40 0c 45 00 48 87 1c 25 48 0c 45 00 48 87 0c 25 50 0c 45 00 48 87 14 25 58 0c

Jan

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c2..29c1c7f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -418,6 +418,8 @@  struct vcpu_vmx {
 	u64 		      msr_host_kernel_gs_base;
 	u64 		      msr_guest_kernel_gs_base;
 #endif
+	u32 vm_entry_controls_shadow;
+	u32 vm_exit_controls_shadow;
 	/*
 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
 	 * non-nested (L1) guest, it always points to vmcs01. For a nested
@@ -1326,6 +1328,62 @@  static void vmcs_set_bits(unsigned long field, u32 mask)
 	vmcs_writel(field, vmcs_readl(field) | mask);
 }
 
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+	vmcs_write32(VM_ENTRY_CONTROLS, val);
+	vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+	if (vmx->vm_entry_controls_shadow != val)
+		vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+	return vmx->vm_entry_controls_shadow;
+}
+
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+	vmcs_write32(VM_EXIT_CONTROLS, val);
+	vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+	if (vmx->vm_exit_controls_shadow != val)
+		vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+	return vmx->vm_exit_controls_shadow;
+}
+
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
+
 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 {
 	vmx->segment_cache.bitmask = 0;
@@ -1410,11 +1468,11 @@  static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
-static void clear_atomic_switch_msr_special(unsigned long entry,
-		unsigned long exit)
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+		unsigned long entry, unsigned long exit)
 {
-	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
-	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+	vm_entry_controls_clearbit(vmx, entry);
+	vm_exit_controls_clearbit(vmx, exit);
 }
 
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
@@ -1425,14 +1483,15 @@  static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	switch (msr) {
 	case MSR_EFER:
 		if (cpu_has_load_ia32_efer) {
-			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+			clear_atomic_switch_msr_special(vmx,
+					VM_ENTRY_LOAD_IA32_EFER,
 					VM_EXIT_LOAD_IA32_EFER);
 			return;
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (cpu_has_load_perf_global_ctrl) {
-			clear_atomic_switch_msr_special(
+			clear_atomic_switch_msr_special(vmx,
 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
 			return;
@@ -1453,14 +1512,15 @@  static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
-static void add_atomic_switch_msr_special(unsigned long entry,
-		unsigned long exit, unsigned long guest_val_vmcs,
-		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+		unsigned long entry, unsigned long exit,
+		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+		u64 guest_val, u64 host_val)
 {
 	vmcs_write64(guest_val_vmcs, guest_val);
 	vmcs_write64(host_val_vmcs, host_val);
-	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
-	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+	vm_entry_controls_setbit(vmx, entry);
+	vm_exit_controls_setbit(vmx, exit);
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
@@ -1472,7 +1532,8 @@  static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	switch (msr) {
 	case MSR_EFER:
 		if (cpu_has_load_ia32_efer) {
-			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+			add_atomic_switch_msr_special(vmx,
+					VM_ENTRY_LOAD_IA32_EFER,
 					VM_EXIT_LOAD_IA32_EFER,
 					GUEST_IA32_EFER,
 					HOST_IA32_EFER,
@@ -1482,7 +1543,7 @@  static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (cpu_has_load_perf_global_ctrl) {
-			add_atomic_switch_msr_special(
+			add_atomic_switch_msr_special(vmx,
 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
 					GUEST_IA32_PERF_GLOBAL_CTRL,
@@ -3182,14 +3243,10 @@  static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	vmx_load_host_state(to_vmx(vcpu));
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-			     vmcs_read32(VM_ENTRY_CONTROLS) |
-			     VM_ENTRY_IA32E_MODE);
+		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 		msr->data = efer;
 	} else {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-			     vmcs_read32(VM_ENTRY_CONTROLS) &
-			     ~VM_ENTRY_IA32E_MODE);
+		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 
 		msr->data = efer & ~EFER_LME;
 	}
@@ -3217,9 +3274,7 @@  static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vmcs_write32(VM_ENTRY_CONTROLS,
-		     vmcs_read32(VM_ENTRY_CONTROLS)
-		     & ~VM_ENTRY_IA32E_MODE);
+	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
@@ -4346,10 +4401,11 @@  static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		++vmx->nmsrs;
 	}
 
-	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+
+	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
 
 	/* 22.2.1, 20.8.1 */
-	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	set_cr4_guest_host_mask(vmx);
@@ -7759,12 +7815,12 @@  static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	exit_control = vmcs_config.vmexit_ctrl;
 	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
 		exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
-	vmcs_write32(VM_EXIT_CONTROLS, exit_control);
+	vm_exit_controls_init(vmx, exit_control);
 
 	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
 	 * emulated by vmx_set_efer(), below.
 	 */
-	vmcs_write32(VM_ENTRY_CONTROLS,
+	vm_entry_controls_init(vmx, 
 		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
 			~VM_ENTRY_IA32E_MODE) |
 		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
@@ -8186,7 +8242,7 @@  static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	vmcs12->vm_entry_controls =
 		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
-		(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
+		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
 
 	/* TODO: These cannot have changed unless we have MSR bitmaps and
 	 * the relevant bit asks not to trap the change */
@@ -8390,6 +8446,7 @@  static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 	vcpu->cpu = cpu;
 	put_cpu();
 
+	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
 	vmx_segment_cache_clear(vmx);
 
 	/* if no vmcs02 cache requested, remove the one we used */