[v3,02/13] nEPT: Move gpte_access() and prefetch_invalid_gpte() to paging_tmpl.h

Message ID 1368939152-11406-2-git-send-email-jun.nakajima@intel.com (mailing list archive)
State New, archived

Commit Message

Nakajima, Jun May 19, 2013, 4:52 a.m. UTC
From: Nadav Har'El <nyh@il.ibm.com>

In preparation for nested EPT support, move gpte_access() and prefetch_invalid_gpte() from mmu.c to paging_tmpl.h so that they become per-paging-mode FNAME() functions. No functional change is intended.

Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
---
 arch/x86/kvm/mmu.c         | 30 ------------------------------
 arch/x86/kvm/paging_tmpl.h | 40 +++++++++++++++++++++++++++++++++++-----
 2 files changed, 35 insertions(+), 35 deletions(-)
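
Background on the FNAME() naming in the diff: paging_tmpl.h is a template that mmu.c includes once per guest paging mode, and FNAME() pastes a mode prefix onto every function the template defines. A simplified sketch of the mechanism (details elided; the real macros live in arch/x86/kvm/mmu.c and arch/x86/kvm/paging_tmpl.h and may differ slightly by kernel version):

/* In mmu.c, roughly: the template is compiled once per mode. */
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

/* In paging_tmpl.h: FNAME() prefixes each definition with the mode. */
#if PTTYPE == 64
	#define FNAME(name) paging##64_##name
#elif PTTYPE == 32
	#define FNAME(name) paging##32_##name
#endif

A single definition such as FNAME(gpte_access) therefore compiles into both paging64_gpte_access() and paging32_gpte_access(), each built against that mode's pt_element_t and mask definitions. Moving gpte_access() and prefetch_invalid_gpte() into the template gives every mode its own copy, which is what later allows an EPT instantiation of the template to supply EPT-specific variants.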

Comments

Paolo Bonzini May 20, 2013, 12:34 p.m. UTC | #1
On 19/05/2013 06:52, Jun Nakajima wrote:
> From: Nadav Har'El <nyh@il.ibm.com>
> 
> In preparation for nested EPT support, move gpte_access() and prefetch_invalid_gpte() from mmu.c to paging_tmpl.h so that they become per-paging-mode FNAME() functions. No functional change is intended.
> 
> [...]

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 004cc87..117233f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2488,26 +2488,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
-static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp, u64 *spte,
-				  u64 gpte)
-{
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
-	if (!is_present_gpte(gpte))
-		goto no_present;
-
-	if (!(gpte & PT_ACCESSED_MASK))
-		goto no_present;
-
-	return false;
-
-no_present:
-	drop_spte(vcpu->kvm, spte);
-	return true;
-}
-
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
@@ -3408,16 +3388,6 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 	return false;
 }
 
-static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-	access &= ~(gpte >> PT64_NX_SHIFT);
-
-	return access;
-}
-
 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
 {
 	unsigned index;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860..df34d4a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -103,6 +103,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
+static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 					     struct kvm_mmu *mmu,
 					     struct guest_walker *walker,
@@ -225,7 +255,7 @@ retry_walk:
 		}
 
 		accessed_dirty &= pte;
-		pte_access = pt_access & gpte_access(vcpu, pte);
+		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
@@ -309,13 +339,13 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	gfn_t gfn;
 	pfn_t pfn;
 
-	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return false;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 
 	gfn = gpte_to_gfn(gpte);
-	pte_access = sp->role.access & gpte_access(vcpu, gpte);
+	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
@@ -782,14 +812,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					  sizeof(pt_element_t)))
 			return -EINVAL;
 
-		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= gpte_access(vcpu, gpte);
+		pte_access &= FNAME(gpte_access)(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
 		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
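
For reference, the access computation in FNAME(gpte_access) can be exercised in isolation. Below is a minimal standalone sketch, assuming the mask values from the x86 KVM headers of this era (PT_WRITABLE_MASK = bit 1, PT_USER_MASK = bit 2, PT64_NX_SHIFT = 63, ACC_EXEC_MASK = 1); the constants are copied here for illustration only and are not a substitute for the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the kernel mask values. */
#define PT_WRITABLE_MASK (1ULL << 1)
#define PT_USER_MASK     (1ULL << 2)
#define PT64_NX_SHIFT    63
#define ACC_EXEC_MASK    1u

/* Same logic as FNAME(gpte_access) in the patch above. */
static unsigned gpte_access(uint64_t gpte)
{
	unsigned access;

	/* Take the gpte's writable/user bits and assume executable. */
	access = (unsigned)(gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	/*
	 * gpte >> 63 is 1 exactly when NX is set; masking with the
	 * complement clears only ACC_EXEC_MASK and leaves the rest alone.
	 */
	access &= ~(unsigned)(gpte >> PT64_NX_SHIFT);

	return access;
}

int main(void)
{
	/* Writable user page, NX clear: exec|write|user = 0x7. */
	printf("%#x\n", gpte_access(PT_WRITABLE_MASK | PT_USER_MASK));
	/* Same pte with NX set: the exec bit is dropped, giving 0x6. */
	printf("%#x\n", gpte_access((1ULL << PT64_NX_SHIFT) |
				    PT_WRITABLE_MASK | PT_USER_MASK));
	return 0;
}

The second print shows why the shift works as an NX filter: access holds only low-order permission bits, so ANDing with the complement of the shifted NX bit can only ever clear the execute bit.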