
[v3,3/5] KVM: Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached()

Message ID CY1PR08MB19929BD2AC47A291FD680E83F04F0@CY1PR08MB1992.namprd08.prod.outlook.com (mailing list archive)
State New, archived

Commit Message

Cao, Lei Feb. 3, 2017, 8:04 p.m. UTC
Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() to
take vcpu as a parameter instead of kvm. Rename the two functions to
kvm_vcpu_write_*. This is to allow dirty pages to be logged in the vcpu
dirty ring, instead of the global dirty ring, for ring-based dirty
memory tracking.

Signed-off-by: Lei Cao <lei.cao@stratus.com>
---
 arch/x86/kvm/lapic.c     |  4 ++--
 arch/x86/kvm/x86.c       | 16 ++++++++--------
 include/linux/kvm_host.h |  4 ++--
 virt/kvm/kvm_main.c      | 16 ++++++++--------
 4 files changed, 20 insertions(+), 20 deletions(-)
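
In short, the interface change (condensed from the hunks below), with
apf_put_user() shown as an example of a converted caller:

/* Before: kvm-based */
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, int offset, unsigned long len);

/* After: vcpu-based */
int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
			   void *data, int offset, unsigned long len);

/* Callers pass the vcpu itself rather than vcpu->kvm, e.g.: */
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
					   sizeof(val));
}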

Comments

Paolo Bonzini Feb. 4, 2017, 4:53 a.m. UTC | #1
On 03/02/2017 12:04, Cao, Lei wrote:
> Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() to
> take vcpu as a parameter instead of kvm. Rename the two functions to
> kvm_vcpu_write_*. This is to allow dirty pages to be logged in the vcpu
> dirty ring, instead of the global dirty ring, for ring-based dirty
> memory tracking.
> 
> Signed-off-by: Lei Cao <lei.cao@stratus.com>

I think I prefer to change everything to be vCPU-based, including
kvm_read_guest_cached and kvm_gfn_to_hva_cache_init.  That is, using
kvm_vcpu_memslots and so on.

However, the slots->generation check can currently have false positives
if the kvm_memslots address space id differs.  I'll take care of that
and post a small patch series.

Paolo
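
For illustration only, a rough sketch of that direction: the cached-read
helper would take a vcpu and resolve slots through kvm_vcpu_memslots(),
mirroring the existing kvm_read_guest_cached().  The name
kvm_vcpu_read_guest_cached below is assumed, and the cache-init path would
need the same conversion so the generation check is made against the right
address space.

int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
	int r;

	BUG_ON(len > ghc->len);

	/* Init is still kvm-based here; this is where the address space
	 * mismatch mentioned above comes in. */
	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}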


Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2f6ef51..6275369 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -501,7 +501,7 @@  int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
 
-	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
 				      sizeof(val));
 }
 
@@ -2273,7 +2273,7 @@  void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
 				sizeof(u32));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1889f62..378fc98 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1793,7 +1793,7 @@  static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock.version));
 
@@ -1809,14 +1809,14 @@  static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock));
 
 	smp_wmb();
 
 	vcpu->hv_clock.version++;
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock.version));
 }
@@ -2081,7 +2081,7 @@  static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
@@ -2090,14 +2090,14 @@  static void record_steal_time(struct kvm_vcpu *vcpu)
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2835,7 +2835,7 @@  static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.preempted = 1;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
 			&vcpu->arch.st.steal.preempted,
 			offsetof(struct kvm_steal_time, preempted),
 			sizeof(vcpu->arch.st.steal.preempted));
@@ -8422,7 +8422,7 @@  static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
 
-	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
 				      sizeof(val));
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 33d9974..65561bf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -648,9 +648,9 @@  int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
 			   void *data, int offset, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 016be4d..417c0ff 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1974,20 +1974,20 @@  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
 			   void *data, int offset, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(v->kvm);
 	int r;
 	gpa_t gpa = ghc->gpa + offset;
 
 	BUG_ON(len + offset > ghc->len);
 
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+		kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_write_guest(kvm, gpa, data, len);
+		return kvm_write_guest(v->kvm, gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1999,14 +1999,14 @@  int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
 
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len)
 {
-	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+	return kvm_vcpu_write_guest_offset_cached(v, ghc, data, 0, len);
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
 
 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len)
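
On the motivation side, a hypothetical sketch (not this patch) of how the
vcpu parameter can be put to use once the ring machinery is in place: the
tail of the cached write can push the dirty gfn into that vCPU's ring
instead of only the global dirty bitmap.  mark_page_dirty_in_ring() is an
assumed name standing in for whatever ring-push helper the rest of the
series provides; everything else mirrors the function as patched above.

int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
			   void *data, int offset, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	gpa_t gpa = ghc->gpa + offset;
	int r;

	BUG_ON(len + offset > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(v->kvm, gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
	if (r)
		return -EFAULT;

	/* With the vcpu in hand, log into this vCPU's dirty ring
	 * (assumed helper) rather than only the global bitmap. */
	mark_page_dirty_in_ring(v, ghc->memslot, gpa >> PAGE_SHIFT);

	return 0;
}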