
[v3,4/8] KVM-HV: Add VCPU running/pre-empted state for guest

Message ID 20120731104838.16662.82021.stgit@abhimanyu.in.ibm.com (mailing list archive)
State New, archived

Commit Message

Nikunj A. Dadhania July 31, 2012, 10:48 a.m. UTC
From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>

Hypervisor code to indicate guest running/preempted status through an
MSR. The page is now pinned at MSR write time, and
kmap_atomic()/kunmap_atomic() are used to access the shared vcpu_state area.

Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
---
 arch/x86/include/asm/kvm_host.h |    7 ++++
 arch/x86/kvm/cpuid.c            |    1 +
 arch/x86/kvm/x86.c              |   71 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 77 insertions(+), 2 deletions(-)



Comments

Marcelo Tosatti Aug. 2, 2012, 7:56 p.m. UTC | #1
On Tue, Jul 31, 2012 at 04:18:41PM +0530, Nikunj A. Dadhania wrote:
> From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> 
> Hypervisor code to indicate guest running/preempted status through an
> MSR. The page is now pinned at MSR write time, and
> kmap_atomic()/kunmap_atomic() are used to access the shared vcpu_state area.
> 
> Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
> Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    7 ++++
>  arch/x86/kvm/cpuid.c            |    1 +
>  arch/x86/kvm/x86.c              |   71 ++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 77 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..441348f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -429,6 +429,13 @@ struct kvm_vcpu_arch {
>  		struct kvm_steal_time steal;
>  	} st;
>  
> +	/* indicates vcpu is running or preempted */
> +	struct {
> +		u64 msr_val;
> +		struct page *vs_page;
> +		unsigned int vs_offset;
> +	} v_state;
> +
>  	u64 last_guest_tsc;
>  	u64 last_kernel_ns;
>  	u64 last_host_tsc;
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 0595f13..37ab364 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -411,6 +411,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
>  			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
>  			     (1 << KVM_FEATURE_ASYNC_PF) |
>  			     (1 << KVM_FEATURE_PV_EOI) |
> +			     (1 << KVM_FEATURE_VCPU_STATE) |
>  			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
>  
>  		if (sched_info_on())
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 59b5950..580abcf 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -806,13 +806,13 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
>   * kvm-specific. Those are put in the beginning of the list.
>   */
>  
> -#define KVM_SAVE_MSRS_BEGIN	9
> +#define KVM_SAVE_MSRS_BEGIN	10
>  static u32 msrs_to_save[] = {
>  	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
>  	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
>  	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
>  	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
> -	MSR_KVM_PV_EOI_EN,
> +	MSR_KVM_VCPU_STATE, MSR_KVM_PV_EOI_EN,
>  	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
>  	MSR_STAR,
>  #ifdef CONFIG_X86_64
> @@ -1557,6 +1557,53 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> +static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
> +{
> +	int loop = 1000000;
> +	while (1) {
> +		if (cmpxchg(addr, old, new) == old)
> +			break;
> +		loop--;
> +		if (!loop) {
> +			pr_info("atomic cur: %lx old: %lx new: %lx\n",
> +				*addr, old, new);
> +			break;
> +		}
> +	}
> +}

A generic "kvm_set_atomic" would need that loop, but in the particular
TLB flush case we know that the only information being transmitted is 
a TLB flush.

So this idea should work:

old = *addr;
if (cmpxchg(addr, old, IN_GUEST_MODE) != old) {
	kvm_x86_ops->tlb_flush();
	atomic_set(addr, IN_GUEST_MODE);
} else {
	if (old & TLB_SHOULD_FLUSH)
		kvm_x86_ops->tlb_flush();
}

(the actual pseudocode above is pretty ugly and must be improved,
but it should be enough to transmit the idea).

Of course, that works as long as you make sure the atomic_set does
not overwrite information.
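
For concreteness, a self-contained sketch of that idea in kernel C. The
VS_IN_GUEST/VS_SHOULD_FLUSH bit names here are hypothetical (the series
itself uses KVM_VCPU_STATE_IN_GUEST_MODE); this only illustrates the
protocol, it is not meant as final code:

/*
 * Illustrative sketch only -- the bit layout is hypothetical.
 * Bit 0: vcpu is in guest mode.
 * Bit 1: a TLB flush was requested while the vcpu was outside guest mode.
 */
#define VS_IN_GUEST	(1ULL << 0)
#define VS_SHOULD_FLUSH	(1ULL << 1)

static void kvm_enter_guest_state(struct kvm_vcpu *vcpu, u64 *addr)
{
	u64 old = *addr;

	if (cmpxchg(addr, old, VS_IN_GUEST) != old) {
		/*
		 * Lost a race: someone set VS_SHOULD_FLUSH after we read
		 * *addr.  Flush unconditionally, then publish the new
		 * state.  Per the caveat above, this store must not be
		 * allowed to overwrite yet another concurrently-set flag.
		 */
		kvm_x86_ops->tlb_flush(vcpu);
		*addr = VS_IN_GUEST;
	} else if (old & VS_SHOULD_FLUSH) {
		/* The flush request was folded into the value we replaced. */
		kvm_x86_ops->tlb_flush(vcpu);
	}
}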


> +	char *kaddr;
> +
> +	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> +		!vcpu->arch.v_state.vs_page)
> +		return;

If it's not enabled, vs_page should be NULL?

> +
> +	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> +	kaddr += vcpu->arch.v_state.vs_offset;
> +	vs = kaddr;
> +	kvm_set_atomic(&vs->state, 0, 1 << KVM_VCPU_STATE_IN_GUEST_MODE);
> +	kunmap_atomic(kaddr);
> +}
> +
> +static void kvm_clear_vcpu_state(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_vcpu_state *vs;
> +	char *kaddr;
> +
> +	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> +		!vcpu->arch.v_state.vs_page)
> +		return;

Like above.

> +	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> +	kaddr += vcpu->arch.v_state.vs_offset;
> +	vs = kaddr;
> +	kvm_set_atomic(&vs->state, 1 << KVM_VCPU_STATE_IN_GUEST_MODE, 0);
> +	kunmap_atomic(kaddr);
> +}
> +
>  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  {
>  	bool pr = false;
> @@ -1676,6 +1723,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  			return 1;
>  		break;
>  
> +	case MSR_KVM_VCPU_STATE:
> +		vcpu->arch.v_state.vs_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
> +		vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);

Assign vs_offset after success.

> +
> +		if (is_error_page(vcpu->arch.v_state.vs_page)) {
> +			kvm_release_page_clean(vcpu->arch.time_page);
> +			vcpu->arch.v_state.vs_page = NULL;
> +			pr_info("KVM: VCPU_STATE - Unable to pin the page\n");

Missing break or return;

> +		}
> +		vcpu->arch.v_state.msr_val = data;
> +		break;
> +
>  	case MSR_IA32_MCG_CTL:

Please verify this code carefully again.

Also, this leaks the page reference: a previously pinned vs_page is never released.
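
Folding those points together, the corrected case might look roughly
like this (an untested sketch: it drops any previously pinned page
first, assigns vs_offset and msr_val only after pinning succeeds, and
fails the MSR write otherwise; whether _dirty or _clean release is
appropriate here is a guess):

	case MSR_KVM_VCPU_STATE: {
		struct page *page;

		/* Drop a page pinned by an earlier write to avoid the leak. */
		if (vcpu->arch.v_state.vs_page) {
			kvm_release_page_dirty(vcpu->arch.v_state.vs_page);
			vcpu->arch.v_state.vs_page = NULL;
		}

		page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
		if (is_error_page(page)) {
			pr_info("KVM: VCPU_STATE - Unable to pin the page\n");
			return 1;
		}

		/* Assign only after the page was pinned successfully. */
		vcpu->arch.v_state.vs_page = page;
		vcpu->arch.v_state.vs_offset =
			data & ~(PAGE_MASK | KVM_MSR_ENABLED);
		vcpu->arch.v_state.msr_val = data;
		break;
	}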

>  	vcpu->arch.apf.msr_val = 0;
>  	vcpu->arch.st.msr_val = 0;
> +	vcpu->arch.v_state.msr_val = 0;

Add a newline and comment (or even better a new helper).
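
For instance, a hypothetical helper next to kvmclock_reset() could also
take care of the pinned page, addressing the reference leak above:

static void kvm_vcpu_state_reset(struct kvm_vcpu *vcpu)
{
	/* Forget the registration and drop the pinned page, if any. */
	vcpu->arch.v_state.msr_val = 0;
	if (vcpu->arch.v_state.vs_page) {
		kvm_release_page_dirty(vcpu->arch.v_state.vs_page);
		vcpu->arch.v_state.vs_page = NULL;
	}
}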
>  
>  	kvmclock_reset(vcpu);
Nikunj A. Dadhania Aug. 3, 2012, 5:17 a.m. UTC | #2
On Thu, 2 Aug 2012 16:56:28 -0300, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Tue, Jul 31, 2012 at 04:18:41PM +0530, Nikunj A. Dadhania wrote:
> > From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > 
> > Hypervisor code to indicate guest running/preempted status through an
> > MSR. The page is now pinned at MSR write time, and
> > kmap_atomic()/kunmap_atomic() are used to access the shared vcpu_state area.
> > 
> > Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
> > Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |    7 ++++
> >  arch/x86/kvm/cpuid.c            |    1 +
> >  arch/x86/kvm/x86.c              |   71 ++++++++++++++++++++++++++++++++++++++-
> >  3 files changed, 77 insertions(+), 2 deletions(-)

[...]

> > +static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
> > +{
> > +	int loop = 1000000;
> > +	while (1) {
> > +		if (cmpxchg(addr, old, new) == old)
> > +			break;
> > +		loop--;
> > +		if (!loop) {
> > +			pr_info("atomic cur: %lx old: %lx new: %lx\n",
> > +				*addr, old, new);
> > +			break;
> > +		}
> > +	}
> > +}
> 
> A generic "kvm_set_atomic" would need that loop, but in the particular
> TLB flush case we know that the only information being transmitted is 
> a TLB flush.
> 
Yes, so the next patch gets rid of this in a neater way.

> So this idea should work:
> 
> old = *addr;
> if (cmpxchg(addr, old, IN_GUEST_MODE) != old) {
> 	kvm_x86_ops->tlb_flush();
> 	atomic_set(addr, IN_GUEST_MODE);
> } else {
> 	if (old & TLB_SHOULD_FLUSH)
> 		kvm_x86_ops->tlb_flush();
> }
> 
> (the actual pseudocode above is pretty ugly and must be improved,
> but it should be enough to transmit the idea).
> 
> Of course, that works as long as you make sure the atomic_set does
> not overwrite information.
> 
> 
> > +	char *kaddr;
> > +
> > +	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> > +		!vcpu->arch.v_state.vs_page)
> > +		return;
>
> If it's not enabled, vs_page should be NULL?
> 
Yes, it should be:

if (!(enabled && vs_page))
   return;

> > +
> > +	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> > +	kaddr += vcpu->arch.v_state.vs_offset;
> > +	vs = kaddr;
> > +	kvm_set_atomic(&vs->state, 0, 1 << KVM_VCPU_STATE_IN_GUEST_MODE);
> > +	kunmap_atomic(kaddr);
> > +}
> > +
> > +static void kvm_clear_vcpu_state(struct kvm_vcpu *vcpu)
> > +{
> > +	struct kvm_vcpu_state *vs;
> > +	char *kaddr;
> > +
> > +	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> > +		!vcpu->arch.v_state.vs_page)
> > +		return;
> 
> Like above.
> 
> > +	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> > +	kaddr += vcpu->arch.v_state.vs_offset;
> > +	vs = kaddr;
> > +	kvm_set_atomic(&vs->state, 1 << KVM_VCPU_STATE_IN_GUEST_MODE, 0);
> > +	kunmap_atomic(kaddr);
> > +}
> > +
> >  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> >  {
> >  	bool pr = false;
> > @@ -1676,6 +1723,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> >  			return 1;
> >  		break;
> >  
> > +	case MSR_KVM_VCPU_STATE:
> > +		vcpu->arch.v_state.vs_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
> > +		vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);
> 
> Assign vs_offset after success.
>
Will do that.
 
> > +
> > +		if (is_error_page(vcpu->arch.v_state.vs_page)) {
> > +			kvm_release_page_clean(vcpu->arch.time_page);
Copy-and-paste error :( It should be:
kvm_release_page_clean(vcpu->arch.v_state.vs_page);

> > +			vcpu->arch.v_state.vs_page = NULL;
> > +			pr_info("KVM: VCPU_STATE - Unable to pin the page\n");
> 
> Missing break or return;
> 
Right

> > +		}
> > +		vcpu->arch.v_state.msr_val = data;
> > +		break;
> > +
> >  	case MSR_IA32_MCG_CTL:
> 
> Please verify this code carefully again.
> 
> Also, this leaks the page reference: a previously pinned vs_page is never released.
> 
> >  	vcpu->arch.apf.msr_val = 0;
> >  	vcpu->arch.st.msr_val = 0;
> > +	vcpu->arch.v_state.msr_val = 0;
> 
> Add a newline and comment (or even better a new helper).
>
Will do.

Thanks for the detailed review.

Nikunj


Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..441348f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -429,6 +429,13 @@  struct kvm_vcpu_arch {
 		struct kvm_steal_time steal;
 	} st;
 
+	/* indicates vcpu is running or preempted */
+	struct {
+		u64 msr_val;
+		struct page *vs_page;
+		unsigned int vs_offset;
+	} v_state;
+
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
 	u64 last_host_tsc;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0595f13..37ab364 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -411,6 +411,7 @@  static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
 			     (1 << KVM_FEATURE_ASYNC_PF) |
 			     (1 << KVM_FEATURE_PV_EOI) |
+			     (1 << KVM_FEATURE_VCPU_STATE) |
 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
 
 		if (sched_info_on())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b5950..580abcf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,13 +806,13 @@  EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	9
+#define KVM_SAVE_MSRS_BEGIN	10
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
-	MSR_KVM_PV_EOI_EN,
+	MSR_KVM_VCPU_STATE, MSR_KVM_PV_EOI_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1557,6 +1557,53 @@  static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
+static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
+{
+	int loop = 1000000;
+	while (1) {
+		if (cmpxchg(addr, old, new) == old)
+			break;
+		loop--;
+		if (!loop) {
+			pr_info("atomic cur: %lx old: %lx new: %lx\n",
+				*addr, old, new);
+			break;
+		}
+	}
+}
+
+static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_state *vs;
+	char *kaddr;
+
+	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
+		!vcpu->arch.v_state.vs_page)
+		return;
+
+	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
+	kaddr += vcpu->arch.v_state.vs_offset;
+	vs = kaddr;
+	kvm_set_atomic(&vs->state, 0, 1 << KVM_VCPU_STATE_IN_GUEST_MODE);
+	kunmap_atomic(kaddr);
+}
+
+static void kvm_clear_vcpu_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_state *vs;
+	char *kaddr;
+
+	if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
+		!vcpu->arch.v_state.vs_page)
+		return;
+
+	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
+	kaddr += vcpu->arch.v_state.vs_offset;
+	vs = kaddr;
+	kvm_set_atomic(&vs->state, 1 << KVM_VCPU_STATE_IN_GUEST_MODE, 0);
+	kunmap_atomic(kaddr);
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	bool pr = false;
@@ -1676,6 +1723,18 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 			return 1;
 		break;
 
+	case MSR_KVM_VCPU_STATE:
+		vcpu->arch.v_state.vs_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);
+
+		if (is_error_page(vcpu->arch.v_state.vs_page)) {
+			kvm_release_page_clean(vcpu->arch.time_page);
+			vcpu->arch.v_state.vs_page = NULL;
+			pr_info("KVM: VCPU_STATE - Unable to pin the page\n");
+		}
+		vcpu->arch.v_state.msr_val = data;
+		break;
+
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1996,6 +2055,9 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_KVM_STEAL_TIME:
 		data = vcpu->arch.st.msr_val;
 		break;
+	case MSR_KVM_VCPU_STATE:
+		data = vcpu->arch.v_state.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -5312,6 +5374,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		kvm_load_guest_fpu(vcpu);
 	kvm_load_guest_xcr0(vcpu);
 
+	kvm_set_vcpu_state(vcpu);
+
 	vcpu->mode = IN_GUEST_MODE;
 
 	/* We should set ->mode before check ->requests,
@@ -5323,6 +5387,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
+		kvm_clear_vcpu_state(vcpu);
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();
@@ -5361,6 +5426,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
 
+	kvm_clear_vcpu_state(vcpu);
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 	local_irq_enable();
@@ -6043,6 +6109,7 @@  int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;
 	vcpu->arch.st.msr_val = 0;
+	vcpu->arch.v_state.msr_val = 0;
 
 	kvmclock_reset(vcpu);
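
For context, the guest-side registration is not part of this patch (it
lives elsewhere in the series). Purely as an illustration of the ABI the
handler above implies, a guest would write the physical address of its
per-cpu kvm_vcpu_state area plus the enable bit, along the lines of the
existing MSR_KVM_STEAL_TIME registration; a hypothetical sketch:

/*
 * Hypothetical guest-side registration, modeled on MSR_KVM_STEAL_TIME;
 * the real code is in the guest patches of this series.
 */
static DEFINE_PER_CPU(struct kvm_vcpu_state, vcpu_state) __aligned(64);

static void kvm_register_vcpu_state(void)
{
	struct kvm_vcpu_state *vs = &__get_cpu_var(vcpu_state);

	wrmsrl(MSR_KVM_VCPU_STATE, __pa(vs) | KVM_MSR_ENABLED);
}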