
[v4,4/8] KVM-HV: Add VCPU running/pre-empted state for guest

Message ID: 20120821112640.3512.43771.stgit@abhimanyu
State: New, archived

Commit Message

Nikunj A. Dadhania Aug. 21, 2012, 11:26 a.m. UTC
From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>

Hypervisor code to indicate the guest's running/preempted status through
an MSR. The page is now pinned at MSR write time, and
kmap_atomic/kunmap_atomic are used to access the shared vcpu_state area.

Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
---
 arch/x86/include/asm/kvm_host.h |    7 +++
 arch/x86/kvm/cpuid.c            |    1 
 arch/x86/kvm/x86.c              |   88 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 94 insertions(+), 2 deletions(-)
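
For orientation: the value written to MSR_KVM_VCPU_STATE packs the guest
physical address of the per-vCPU state area together with an enable bit;
the host recovers the gfn as data >> PAGE_SHIFT and the in-page offset as
data & ~(PAGE_MASK | KVM_MSR_ENABLED). Below is a minimal guest-side
registration sketch (the real guest enablement lives in another patch of
this series; the helper name and per-cpu variable here are illustrative
only, assuming <asm/kvm_para.h> and <asm/msr.h>):

static DEFINE_PER_CPU(struct kvm_vcpu_state, vcpu_state);

static void kvm_register_vcpu_state(void)
{
	/* Physical address of this CPU's state area; assumes the per-cpu
	 * area is reachable through the linear mapping. */
	u64 pa = __pa(this_cpu_ptr(&vcpu_state));

	if (!kvm_para_has_feature(KVM_FEATURE_VCPU_STATE))
		return;

	/* Bit 0 is KVM_MSR_ENABLED; the rest encodes page + offset. */
	wrmsrl(MSR_KVM_VCPU_STATE, pa | KVM_MSR_ENABLED);
}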



Comments

Marcelo Tosatti Aug. 23, 2012, 11:46 a.m. UTC | #1
On Tue, Aug 21, 2012 at 04:56:43PM +0530, Nikunj A. Dadhania wrote:
> From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> 
> Hypervisor code to indicate the guest's running/preempted status through
> an MSR. The page is now pinned at MSR write time, and
> kmap_atomic/kunmap_atomic are used to access the shared vcpu_state area.
> 
> Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
> Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    7 +++
>  arch/x86/kvm/cpuid.c            |    1 
>  arch/x86/kvm/x86.c              |   88 ++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 94 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..441348f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -429,6 +429,13 @@ struct kvm_vcpu_arch {
>  		struct kvm_steal_time steal;
>  	} st;
>  
> +	/* indicates vcpu is running or preempted */
> +	struct {
> +		u64 msr_val;
> +		struct page *vs_page;
> +		unsigned int vs_offset;
> +	} v_state;
> +
>  	u64 last_guest_tsc;
>  	u64 last_kernel_ns;
>  	u64 last_host_tsc;
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 0595f13..37ab364 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -411,6 +411,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
>  			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
>  			     (1 << KVM_FEATURE_ASYNC_PF) |
>  			     (1 << KVM_FEATURE_PV_EOI) |
> +			     (1 << KVM_FEATURE_VCPU_STATE) |
>  			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
>  
>  		if (sched_info_on())
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 59b5950..43f2c19 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -806,13 +806,13 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
>   * kvm-specific. Those are put in the beginning of the list.
>   */
>  
> -#define KVM_SAVE_MSRS_BEGIN	9
> +#define KVM_SAVE_MSRS_BEGIN	10
>  static u32 msrs_to_save[] = {
>  	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
>  	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
>  	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
>  	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
> -	MSR_KVM_PV_EOI_EN,
> +	MSR_KVM_VCPU_STATE, MSR_KVM_PV_EOI_EN,
>  	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
>  	MSR_STAR,
>  #ifdef CONFIG_X86_64
> @@ -1557,6 +1557,63 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> +static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
> +{
> +	int loop = 1000000;
> +	while (1) {
> +		if (cmpxchg(addr, old, new) == old)
> +			break;
> +		loop--;
> +		if (!loop) {
> +			pr_info("atomic cur: %llx old: %llx new: %llx\n",
> +				*addr, old, new);
> +			break;
> +		}
> +	}
> +}
> +
> +static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_vcpu_state *vs;
> +	char *kaddr;
> +
> +	if (!((vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) &&
> +			vcpu->arch.v_state.vs_page))
> +		return;

Was it agreed that a valid vs_page is needed only if the MSR is
enabled? Or was that a misunderstanding?

Looks good otherwise.

Nikunj A. Dadhania Aug. 24, 2012, 5:19 a.m. UTC | #2
On Thu, 23 Aug 2012 08:46:22 -0300, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Tue, Aug 21, 2012 at 04:56:43PM +0530, Nikunj A. Dadhania wrote:
> > From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > 
> > Hypervisor code to indicate the guest's running/preempted status through
> > an MSR. The page is now pinned at MSR write time, and
> > kmap_atomic/kunmap_atomic are used to access the shared vcpu_state area.
> > 
> > Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
> > Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > ---

[...]

> > +
> > +static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
> > +{
> > +	struct kvm_vcpu_state *vs;
> > +	char *kaddr;
> > +
> > +	if (!((vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) &&
> > +			vcpu->arch.v_state.vs_page))
> > +		return;
> 
> Was it agreed that a valid vs_page is needed only if the MSR is
> enabled? Or was that a misunderstanding?
>
There is a case where the MSR is enabled but vs_page is NULL; this
guards against that case. The check is now:

if (!(msr_enabled && vs_page))
   return;

I had proposed that here:
http://www.spinics.net/lists/kvm/msg77147.html

Regards
Nikunj

Marcelo Tosatti Aug. 24, 2012, 3:02 p.m. UTC | #3
On Fri, Aug 24, 2012 at 10:49:26AM +0530, Nikunj A Dadhania wrote:
> On Thu, 23 Aug 2012 08:46:22 -0300, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> > On Tue, Aug 21, 2012 at 04:56:43PM +0530, Nikunj A. Dadhania wrote:
> > > From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > > 
> > > Hypervisor code to indicate the guest's running/preempted status through
> > > an MSR. The page is now pinned at MSR write time, and
> > > kmap_atomic/kunmap_atomic are used to access the shared vcpu_state area.
> > > 
> > > Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
> > > Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
> > > ---
> 
> [...]
> 
> > > +
> > > +static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
> > > +{
> > > +	struct kvm_vcpu_state *vs;
> > > +	char *kaddr;
> > > +
> > > +	if (!((vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) &&
> > > +			vcpu->arch.v_state.vs_page))
> > > +		return;
> > 
> > Was it agreed that a valid vs_page is needed only if the MSR is
> > enabled? Or was that a misunderstanding?
> >
> There is a case where the MSR is enabled but vs_page is NULL; this
> guards against that case. The check is now:
> 
> if (!(msr_enabled && vs_page))
>    return;
> 
> I had proposed that here:
> http://www.spinics.net/lists/kvm/msg77147.html

OK, it's fine.


Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..441348f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -429,6 +429,13 @@  struct kvm_vcpu_arch {
 		struct kvm_steal_time steal;
 	} st;
 
+	/* indicates vcpu is running or preempted */
+	struct {
+		u64 msr_val;
+		struct page *vs_page;
+		unsigned int vs_offset;
+	} v_state;
+
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
 	u64 last_host_tsc;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0595f13..37ab364 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -411,6 +411,7 @@  static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
 			     (1 << KVM_FEATURE_ASYNC_PF) |
 			     (1 << KVM_FEATURE_PV_EOI) |
+			     (1 << KVM_FEATURE_VCPU_STATE) |
 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
 
 		if (sched_info_on())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b5950..43f2c19 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,13 +806,13 @@  EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	9
+#define KVM_SAVE_MSRS_BEGIN	10
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
-	MSR_KVM_PV_EOI_EN,
+	MSR_KVM_VCPU_STATE, MSR_KVM_PV_EOI_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1557,6 +1557,63 @@  static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
+static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
+{
+	int loop = 1000000;
+	while (1) {
+		if (cmpxchg(addr, old, new) == old)
+			break;
+		loop--;
+		if (!loop) {
+		pr_info("atomic cur: %llx old: %llx new: %llx\n",
+				*addr, old, new);
+			break;
+		}
+	}
+}
+
+static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_state *vs;
+	char *kaddr;
+
+	if (!((vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) &&
+			vcpu->arch.v_state.vs_page))
+		return;
+
+	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
+	kaddr += vcpu->arch.v_state.vs_offset;
+	vs = kaddr;
+	kvm_set_atomic(&vs->state, 0, 1 << KVM_VCPU_STATE_IN_GUEST_MODE);
+	kunmap_atomic(kaddr);
+}
+
+static void kvm_clear_vcpu_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_state *vs;
+	char *kaddr;
+
+	if (!((vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) &&
+			vcpu->arch.v_state.vs_page))
+		return;
+
+	kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
+	kaddr += vcpu->arch.v_state.vs_offset;
+	vs = kaddr;
+	kvm_set_atomic(&vs->state, 1 << KVM_VCPU_STATE_IN_GUEST_MODE, 0);
+	kunmap_atomic(kaddr);
+}
+
+static void kvm_vcpu_state_reset(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.v_state.msr_val = 0;
+	vcpu->arch.v_state.vs_offset = 0;
+	if (vcpu->arch.v_state.vs_page) {
+		kvm_release_page_dirty(vcpu->arch.v_state.vs_page);
+		vcpu->arch.v_state.vs_page = NULL;
+	}
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	bool pr = false;
@@ -1676,6 +1733,24 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 			return 1;
 		break;
 
+	case MSR_KVM_VCPU_STATE:
+		kvm_vcpu_state_reset(vcpu);
+
+		if (!(data & KVM_MSR_ENABLED))
+			break;
+
+		vcpu->arch.v_state.vs_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+
+		if (is_error_page(vcpu->arch.v_state.vs_page)) {
+			kvm_release_page_clean(vcpu->arch.v_state.vs_page);
+			vcpu->arch.v_state.vs_page = NULL;
+			pr_info("KVM: VCPU_STATE - Unable to pin the page\n");
+			break;
+		}
+		vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);
+		vcpu->arch.v_state.msr_val = data;
+		break;
+
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1996,6 +2071,9 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_KVM_STEAL_TIME:
 		data = vcpu->arch.st.msr_val;
 		break;
+	case MSR_KVM_VCPU_STATE:
+		data = vcpu->arch.v_state.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -5312,6 +5390,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		kvm_load_guest_fpu(vcpu);
 	kvm_load_guest_xcr0(vcpu);
 
+	kvm_set_vcpu_state(vcpu);
+
 	vcpu->mode = IN_GUEST_MODE;
 
 	/* We should set ->mode before check ->requests,
@@ -5323,6 +5403,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
+		kvm_clear_vcpu_state(vcpu);
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();
@@ -5361,6 +5442,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
 
+	kvm_clear_vcpu_state(vcpu);
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 	local_irq_enable();
@@ -5987,6 +6069,7 @@  void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvmclock_reset(vcpu);
+	kvm_vcpu_state_reset(vcpu);
 
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 	fx_free(vcpu);
@@ -6045,6 +6128,7 @@  int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.st.msr_val = 0;
 
 	kvmclock_reset(vcpu);
+	kvm_vcpu_state_reset(vcpu);
 
 	kvm_clear_async_pf_completion_queue(vcpu);
 	kvm_async_pf_hash_reset(vcpu);
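
For completeness, a sketch of the intended consumer on the guest side
(illustrative only; in this series the user is the paravirtualized
flush_tlb_others path, and kvm_vcpu_is_running() below is a made-up
helper, reusing the hypothetical per-cpu vcpu_state from the sketch in
the commit message section above):

static bool kvm_vcpu_is_running(int cpu)
{
	struct kvm_vcpu_state *vs = &per_cpu(vcpu_state, cpu);

	/* The host sets this bit in kvm_set_vcpu_state() just before
	 * VM entry and clears it in kvm_clear_vcpu_state() on exit, so
	 * a clear bit means the vCPU is preempted or otherwise outside
	 * guest mode. */
	return ACCESS_ONCE(vs->state) & (1 << KVM_VCPU_STATE_IN_GUEST_MODE);
}

A remote flusher can then skip the IPI for vCPUs whose bit is clear,
deferring the flush through a mechanism provided elsewhere in the series.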