Message ID | 1374750001-28527-5-git-send-email-gleb@redhat.com (mailing list archive)
---|---
State | New, archived
On 07/25/2013 06:59 PM, Gleb Natapov wrote:
> From: Nadav Har'El <nyh@il.ibm.com>
>
> For preparation, we just move gpte_access(), prefetch_invalid_gpte(),
> is_rsvd_bits_set(), protect_clean_gpte() and is_dirty_gpte() from mmu.c
> to paging_tmpl.h.
>
> Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
> Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
> Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> ---
>  arch/x86/kvm/mmu.c         | 55 ------------------------------
>  arch/x86/kvm/paging_tmpl.h | 80 +++++++++++++++++++++++++++++++++++++-------
>  2 files changed, 68 insertions(+), 67 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 3a9493a..4c4274d 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
>  	return pte & PT_PAGE_SIZE_MASK;
>  }
>
> -static int is_dirty_gpte(unsigned long pte)
> -{
> -	return pte & PT_DIRTY_MASK;
> -}
> -
>  static int is_rmap_spte(u64 pte)
>  {
>  	return is_shadow_present_pte(pte);
> @@ -2574,14 +2569,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
>  	mmu_free_roots(vcpu);
>  }
>
> -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
> -{
> -	int bit7;
> -
> -	bit7 = (gpte >> 7) & 1;
> -	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> -}
> -
>  static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
>  				     bool no_dirty_log)
>  {
> @@ -2594,26 +2581,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
>  	return gfn_to_pfn_memslot_atomic(slot, gfn);
>  }
>
> -static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
> -				  struct kvm_mmu_page *sp, u64 *spte,
> -				  u64 gpte)
> -{
> -	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> -		goto no_present;
> -
> -	if (!is_present_gpte(gpte))
> -		goto no_present;
> -
> -	if (!(gpte & PT_ACCESSED_MASK))
> -		goto no_present;
> -
> -	return false;
> -
> -no_present:
> -	drop_spte(vcpu->kvm, spte);
> -	return true;
> -}
> -
>  static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
>  				    struct kvm_mmu_page *sp,
>  				    u64 *start, u64 *end)
> @@ -3501,18 +3468,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
>  	nonpaging_free(vcpu);
>  }
>
> -static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
> -{
> -	unsigned mask;
> -
> -	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> -
> -	mask = (unsigned)~ACC_WRITE_MASK;
> -	/* Allow write access to dirty gptes */
> -	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> -	*access &= mask;
> -}
> -
>  static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
>  			   unsigned access, int *nr_present)
>  {
> @@ -3530,16 +3485,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
>  	return false;
>  }
>
> -static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
> -{
> -	unsigned access;
> -
> -	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> -	access &= ~(gpte >> PT64_NX_SHIFT);
> -
> -	return access;
> -}
> -
>  static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
>  {
>  	unsigned index;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 7769699..fb26ca9 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -80,6 +80,31 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
>  	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
>  }
>
> +static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> +{
> +	unsigned mask;
> +
> +	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> +
> +	mask = (unsigned)~ACC_WRITE_MASK;
> +	/* Allow write access to dirty gptes */
> +	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> +	*access &= mask;
> +}
> +
> +static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> +{
> +	int bit7;
> +
> +	bit7 = (gpte >> 7) & 1;
> +	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> +}
> +
> +static inline int FNAME(is_present_gpte)(unsigned long pte)
> +{
> +	return is_present_gpte(pte);
> +}

This adds a new function rather than just moving one, and the change log
fails to mention it. :)

> +
>  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  			       pt_element_t __user *ptep_user, unsigned index,
>  			       pt_element_t orig_pte, pt_element_t new_pte)
> @@ -103,6 +128,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  	return (ret != orig_pte);
>  }
>
> +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> +				  struct kvm_mmu_page *sp, u64 *spte,
> +				  u64 gpte)
> +{
> +	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> +		goto no_present;
> +
> +	if (!FNAME(is_present_gpte)(gpte))
> +		goto no_present;
> +
> +	if (!(gpte & PT_ACCESSED_MASK))
> +		goto no_present;
> +
> +	return false;
> +
> +no_present:
> +	drop_spte(vcpu->kvm, spte);
> +	return true;
> +}
> +
> +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> +{
> +	unsigned access;
> +
> +	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> +	access &= ~(gpte >> PT64_NX_SHIFT);
> +
> +	return access;
> +}
> +
>  static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
>  					     struct kvm_mmu *mmu,
>  					     struct guest_walker *walker,
> @@ -123,7 +178,8 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
>  		trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
>  		pte |= PT_ACCESSED_MASK;
>  	}
> -	if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
> +	if (level == walker->level && write_fault &&
> +	    !(pte & PT_DIRTY_MASK)) {

Why use the raw code instead of the function? Since this is its only user,
should we just drop the function?

Others look good to me.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
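[Context for readers outside the thread: paging_tmpl.h is #included by mmu.c
once per guest paging mode, and the FNAME() macro token-pastes a per-mode
prefix (paging64_, paging32_, ...) onto every function the template defines.
The FNAME(is_present_gpte) wrapper Xiao points at gives the template a
per-mode symbol to call, presumably so a later nEPT patch can give the EPT
instantiation its own presence test. A minimal standalone sketch of that
pattern — a hypothetical demo file, not the kernel source:]

/*
 * fname_demo.c -- illustrates the FNAME() token-pasting trick used by
 * paging_tmpl.h; mmu.c does the equivalent by #including the template
 * once per PTTYPE.  Hypothetical demo, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK (1ULL << 0)

/* First "inclusion": the 64-bit variant. */
#define FNAME(name) paging64_##name
static int FNAME(is_present_gpte)(uint64_t pte)
{
        return (pte & PT_PRESENT_MASK) != 0;
}
#undef FNAME

/* Second "inclusion": the 32-bit variant, from the same definition. */
#define FNAME(name) paging32_##name
static int FNAME(is_present_gpte)(uint64_t pte)
{
        return (pte & PT_PRESENT_MASK) != 0;
}
#undef FNAME

int main(void)
{
        /* Two distinct symbols were generated from one function body. */
        printf("%d %d\n", paging64_is_present_gpte(0x1),
               paging32_is_present_gpte(0x0));
        return 0;
}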
On Wed, Jul 31, 2013 at 04:02:49PM +0800, Xiao Guangrong wrote:
> On 07/25/2013 06:59 PM, Gleb Natapov wrote:
> > From: Nadav Har'El <nyh@il.ibm.com>
> >
> > For preparation, we just move gpte_access(), prefetch_invalid_gpte(),
> > is_rsvd_bits_set(), protect_clean_gpte() and is_dirty_gpte() from mmu.c
> > to paging_tmpl.h.
> >
> > Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
> > Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
> > Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
> > Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> > Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> >  arch/x86/kvm/mmu.c         | 55 ------------------------------
> >  arch/x86/kvm/paging_tmpl.h | 80 +++++++++++++++++++++++++++++++++++++-------
> >  2 files changed, 68 insertions(+), 67 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index 3a9493a..4c4274d 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
> >  	return pte & PT_PAGE_SIZE_MASK;
> >  }
> >
> > -static int is_dirty_gpte(unsigned long pte)
> > -{
> > -	return pte & PT_DIRTY_MASK;
> > -}
> > -
> >  static int is_rmap_spte(u64 pte)
> >  {
> >  	return is_shadow_present_pte(pte);
> > @@ -2574,14 +2569,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
> >  	mmu_free_roots(vcpu);
> >  }
> >
> > -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
> > -{
> > -	int bit7;
> > -
> > -	bit7 = (gpte >> 7) & 1;
> > -	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> > -}
> > -
> >  static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
> >  				     bool no_dirty_log)
> >  {
> > @@ -2594,26 +2581,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
> >  	return gfn_to_pfn_memslot_atomic(slot, gfn);
> >  }
> >
> > -static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
> > -				  struct kvm_mmu_page *sp, u64 *spte,
> > -				  u64 gpte)
> > -{
> > -	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> > -		goto no_present;
> > -
> > -	if (!is_present_gpte(gpte))
> > -		goto no_present;
> > -
> > -	if (!(gpte & PT_ACCESSED_MASK))
> > -		goto no_present;
> > -
> > -	return false;
> > -
> > -no_present:
> > -	drop_spte(vcpu->kvm, spte);
> > -	return true;
> > -}
> > -
> >  static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
> >  				    struct kvm_mmu_page *sp,
> >  				    u64 *start, u64 *end)
> > @@ -3501,18 +3468,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
> >  	nonpaging_free(vcpu);
> >  }
> >
> > -static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
> > -{
> > -	unsigned mask;
> > -
> > -	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> > -
> > -	mask = (unsigned)~ACC_WRITE_MASK;
> > -	/* Allow write access to dirty gptes */
> > -	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> > -	*access &= mask;
> > -}
> > -
> >  static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
> >  			   unsigned access, int *nr_present)
> >  {
> > @@ -3530,16 +3485,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
> >  	return false;
> >  }
> >
> > -static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
> > -{
> > -	unsigned access;
> > -
> > -	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> > -	access &= ~(gpte >> PT64_NX_SHIFT);
> > -
> > -	return access;
> > -}
> > -
> >  static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
> >  {
> >  	unsigned index;
> > diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> > index 7769699..fb26ca9 100644
> > --- a/arch/x86/kvm/paging_tmpl.h
> > +++ b/arch/x86/kvm/paging_tmpl.h
> > @@ -80,6 +80,31 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
> >  	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
> >  }
> >
> > +static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> > +{
> > +	unsigned mask;
> > +
> > +	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> > +
> > +	mask = (unsigned)~ACC_WRITE_MASK;
> > +	/* Allow write access to dirty gptes */
> > +	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> > +	*access &= mask;
> > +}
> > +
> > +static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> > +{
> > +	int bit7;
> > +
> > +	bit7 = (gpte >> 7) & 1;
> > +	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> > +}
> > +
> > +static inline int FNAME(is_present_gpte)(unsigned long pte)
> > +{
> > +	return is_present_gpte(pte);
> > +}
>
> This adds a new function rather than just moving one, and the change log
> fails to mention it. :)
>
> > +
> >  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> >  			       pt_element_t __user *ptep_user, unsigned index,
> >  			       pt_element_t orig_pte, pt_element_t new_pte)
> > @@ -103,6 +128,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> >  	return (ret != orig_pte);
> >  }
> >
> > +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> > +				  struct kvm_mmu_page *sp, u64 *spte,
> > +				  u64 gpte)
> > +{
> > +	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> > +		goto no_present;
> > +
> > +	if (!FNAME(is_present_gpte)(gpte))
> > +		goto no_present;
> > +
> > +	if (!(gpte & PT_ACCESSED_MASK))
> > +		goto no_present;
> > +
> > +	return false;
> > +
> > +no_present:
> > +	drop_spte(vcpu->kvm, spte);
> > +	return true;
> > +}
> > +
> > +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> > +{
> > +	unsigned access;
> > +
> > +	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> > +	access &= ~(gpte >> PT64_NX_SHIFT);
> > +
> > +	return access;
> > +}
> > +
> >  static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
> >  					     struct kvm_mmu *mmu,
> >  					     struct guest_walker *walker,
> > @@ -123,7 +178,8 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
> >  		trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
> >  		pte |= PT_ACCESSED_MASK;
> >  	}
> > -	if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
> > +	if (level == walker->level && write_fault &&
> > +	    !(pte & PT_DIRTY_MASK)) {
>
> Why use the raw code instead of the function? Since this is its only user,
> should we just drop the function?
>
Yes, I have two choices: either move the function to paging_tmpl.h and make
it FNAME(is_dirty_gpte)(), or just drop it, since the other bits we check in
the pte are open coded anyway. Dropping the function seems more consistent
with the rest of the code.

> Others look good to me.
>
> Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>

--
			Gleb.
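[Context: both the open-coded dirty-bit test Gleb settles on and the
protect_clean_gpte() helper being moved lean on the x86 PTE layout, where
the writable bit is bit 1 and the dirty bit is bit 6. A userspace sketch of
the branch-free trick inside the helper, assuming those shift values; this
is an illustration, not the kernel source:]

/* protect_clean_demo.c -- userspace sketch of protect_clean_gpte(). */
#include <assert.h>

#define PT_WRITABLE_SHIFT 1
#define PT_DIRTY_SHIFT    6
#define PT_WRITABLE_MASK  (1u << PT_WRITABLE_SHIFT)
#define PT_DIRTY_MASK     (1u << PT_DIRTY_SHIFT)
#define ACC_WRITE_MASK    PT_WRITABLE_MASK  /* the BUILD_BUG_ON's premise */

static void protect_clean_gpte(unsigned *access, unsigned gpte)
{
        unsigned mask = ~ACC_WRITE_MASK;

        /*
         * Shift the gpte's dirty bit (bit 6) down onto the writable
         * position (bit 1): a dirty gpte re-enables write permission,
         * a clean one contributes nothing, so write access is stripped
         * without any branch.
         */
        mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
        *access &= mask;
}

int main(void)
{
        unsigned clean = ACC_WRITE_MASK, dirty = ACC_WRITE_MASK;

        protect_clean_gpte(&clean, 0);              /* D=0: write removed */
        protect_clean_gpte(&dirty, PT_DIRTY_MASK);  /* D=1: write kept    */
        assert(clean == 0 && dirty == ACC_WRITE_MASK);
        return 0;
}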
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3a9493a..4c4274d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_dirty_gpte(unsigned long pte)
-{
-	return pte & PT_DIRTY_MASK;
-}
-
 static int is_rmap_spte(u64 pte)
 {
 	return is_shadow_present_pte(pte);
@@ -2574,14 +2569,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7;
-
-	bit7 = (gpte >> 7) & 1;
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
@@ -2594,26 +2581,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
-static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp, u64 *spte,
-				  u64 gpte)
-{
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
-	if (!is_present_gpte(gpte))
-		goto no_present;
-
-	if (!(gpte & PT_ACCESSED_MASK))
-		goto no_present;
-
-	return false;
-
-no_present:
-	drop_spte(vcpu->kvm, spte);
-	return true;
-}
-
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
@@ -3501,18 +3468,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
 	nonpaging_free(vcpu);
 }
 
-static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
-{
-	unsigned mask;
-
-	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
-
-	mask = (unsigned)~ACC_WRITE_MASK;
-	/* Allow write access to dirty gptes */
-	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
-	*access &= mask;
-}
-
 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
 			   unsigned access, int *nr_present)
 {
@@ -3530,16 +3485,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
 	return false;
 }
 
-static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-	access &= ~(gpte >> PT64_NX_SHIFT);
-
-	return access;
-}
-
 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
 {
 	unsigned index;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7769699..fb26ca9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -80,6 +80,31 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
 }
 
+static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
+{
+	unsigned mask;
+
+	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
+
+	mask = (unsigned)~ACC_WRITE_MASK;
+	/* Allow write access to dirty gptes */
+	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
+	*access &= mask;
+}
+
+static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
+static inline int FNAME(is_present_gpte)(unsigned long pte)
+{
+	return is_present_gpte(pte);
+}
+
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
@@ -103,6 +128,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!FNAME(is_present_gpte)(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
+static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 					     struct kvm_mmu *mmu,
 					     struct guest_walker *walker,
@@ -123,7 +178,8 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 		trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
 		pte |= PT_ACCESSED_MASK;
 	}
-	if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
+	if (level == walker->level && write_fault &&
+	    !(pte & PT_DIRTY_MASK)) {
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 		pte |= PT_DIRTY_MASK;
 	}
@@ -170,7 +226,7 @@ retry_walk:
 	if (walker->level == PT32E_ROOT_LEVEL) {
 		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
-		if (!is_present_gpte(pte))
+		if (!FNAME(is_present_gpte)(pte))
 			goto error;
 		--walker->level;
 	}
@@ -215,17 +271,17 @@ retry_walk:
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
-		if (unlikely(!is_present_gpte(pte)))
+		if (unlikely(!FNAME(is_present_gpte)(pte)))
 			goto error;
 
-		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
+		if (unlikely(FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, pte,
 					      walker->level))) {
 			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 			goto error;
 		}
 
 		accessed_dirty &= pte;
-		pte_access = pt_access & gpte_access(vcpu, pte);
+		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
@@ -248,7 +304,7 @@ retry_walk:
 	walker->gfn = real_gpa >> PAGE_SHIFT;
 
 	if (!write_fault)
-		protect_clean_gpte(&pte_access, pte);
+		FNAME(protect_clean_gpte)(&pte_access, pte);
 	else
 		/*
 		 * On a write fault, fold the dirty bit into accessed_dirty by
@@ -309,14 +365,14 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	gfn_t gfn;
 	pfn_t pfn;
 
-	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return false;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 
 	gfn = gpte_to_gfn(gpte);
-	pte_access = sp->role.access & gpte_access(vcpu, gpte);
-	protect_clean_gpte(&pte_access, gpte);
+	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	FNAME(protect_clean_gpte)(&pte_access, gpte);
 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
 	if (is_error_pfn(pfn))
@@ -785,15 +841,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					  sizeof(pt_element_t)))
 			return -EINVAL;
 
-		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= gpte_access(vcpu, gpte);
-		protect_clean_gpte(&pte_access, gpte);
+		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		FNAME(protect_clean_gpte)(&pte_access, gpte);
 
 		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
 				   &nr_present))
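[Context: FNAME(gpte_access), moved in the diff above, derives KVM's ACC_*
permission word directly from guest PTE bits. A userspace sketch, assuming
KVM's encoding (exec = bit 0 of the access word, write and user kept at
their PTE positions) and the x86-64 NX bit at position 63; the kernel
version's unused vcpu parameter is dropped here:]

/* gpte_access_demo.c -- userspace sketch, not kernel code. */
#include <assert.h>
#include <stdint.h>

#define PT_WRITABLE_MASK (1ULL << 1)
#define PT_USER_MASK     (1ULL << 2)
#define PT64_NX_SHIFT    63
#define ACC_EXEC_MASK    1u

static unsigned gpte_access(uint64_t gpte)
{
        unsigned access;

        /* W and U pass through at the same bit positions; X is assumed... */
        access = (unsigned)(gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
        /* ...unless NX (bit 63) is set: shifting it down lands on the exec bit. */
        access &= ~(unsigned)(gpte >> PT64_NX_SHIFT);

        return access;
}

int main(void)
{
        /* Writable user page, NX clear: write, user and exec all granted. */
        assert(gpte_access(PT_WRITABLE_MASK | PT_USER_MASK) ==
               (unsigned)(PT_WRITABLE_MASK | PT_USER_MASK | ACC_EXEC_MASK));
        /* Same page with NX set: only the exec permission is stripped. */
        assert(gpte_access(PT_WRITABLE_MASK | PT_USER_MASK |
                           (1ULL << PT64_NX_SHIFT)) ==
               (unsigned)(PT_WRITABLE_MASK | PT_USER_MASK));
        return 0;
}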