[v3,03/11] KVM: MMU: introduce kvm_mmu_slot_gfn_write_protect

Message ID 1455449503-20993-4-git-send-email-guangrong.xiao@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong Feb. 14, 2016, 11:31 a.m. UTC
Split rmap_write_protect() and introduce a function that abstracts write
protection based on the memslot.

This function will be used in a later patch.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 16 +++++++++++-----
 arch/x86/kvm/mmu.h |  2 ++
 2 files changed, 13 insertions(+), 5 deletions(-)
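
The point of the split is that write protection no longer requires a vcpu:
any path that already has the kvm instance and the memslot in hand can call
the new helper directly. A minimal sketch of such a caller, assuming
kvm->mmu_lock is held around the rmap walk as elsewhere in mmu.c (the
function name is illustrative, not part of this series):

static void sketch_write_protect_gfn(struct kvm *kvm,
				     struct kvm_memory_slot *slot, u64 gfn)
{
	spin_lock(&kvm->mmu_lock);

	/*
	 * The helper returns true if any spte was write-protected, in
	 * which case remote TLBs must be flushed before the protection
	 * can be relied on.
	 */
	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
}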

Comments

Paolo Bonzini Feb. 19, 2016, 11:18 a.m. UTC | #1
On 14/02/2016 12:31, Xiao Guangrong wrote:
> Split rmap_write_protect() and introduce a function that abstracts write
> protection based on the memslot.
> 
> This function will be used in a later patch.
> 
> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
> [diff snipped; see the patch below]

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e1bb66c..edad3c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1336,23 +1336,29 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+				    struct kvm_memory_slot *slot, u64 gfn)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
+		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
 	return write_protected;
 }
 
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+}
+
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index de92bed..58fe98a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -177,4 +177,6 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+				    struct kvm_memory_slot *slot, u64 gfn);
 #endif