diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -129,6 +129,9 @@ struct kvm_page_fault {
/* Derived from mmu. */
const bool is_tdp;
+
+ /* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map. */
+ u8 max_level;
};
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
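Taken together with the existing fields, struct kvm_page_fault now carries
everything the fault handlers below consume. A sketch of the struct after this
patch, limited to the fields this series actually references (the grouping
comments on write and user are an assumption):

	struct kvm_page_fault {
		/* Arguments to kvm_mmu_do_page_fault. */
		const gpa_t addr;
		const u32 error_code;
		const bool prefault;

		/* Derived from error_code. */
		const bool write;
		const bool user;

		/* Derived from mmu. */
		const bool is_tdp;

		/* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map. */
		u8 max_level;
	};

Note that max_level is deliberately not const: kvm_tdp_page_fault() and
FNAME(page_fault) lower it after kvm_mmu_do_page_fault() has filled in the
struct.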
@@ -146,6 +149,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.user = err & PFERR_USER_MASK,
.prefault = prefault,
.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
+
+ .max_level = KVM_MAX_HUGEPAGE_LEVEL,
};
#ifdef CONFIG_RETPOLINE
if (fault.is_tdp)
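The hunk's context window ends mid-statement; for orientation, the tail of
kvm_mmu_do_page_fault() presumably continues as sketched below (a
reconstruction, not part of this patch):

	#ifdef CONFIG_RETPOLINE
		/* Direct call avoids a retpoline for the common TDP case. */
		if (fault.is_tdp)
			return kvm_tdp_page_fault(vcpu, &fault);
	#endif
		return vcpu->arch.mmu->page_fault(vcpu, &fault);

This is also why .is_tdp is initialized with likely(): the same comparison
doubles as the dispatch condition above.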
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3878,11 +3878,11 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
return false;
}
-static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
- bool prefault, int max_level, bool is_tdp)
+static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
+ gpa_t gpa = fault->addr;
+ u32 error_code = fault->error_code;
bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
- bool write = error_code & PFERR_WRITE_MASK;
bool map_writable;
gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -3905,11 +3905,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
- if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
- write, &map_writable))
+ if (try_async_pf(vcpu, fault->prefault, gfn, gpa, &pfn, &hva,
+ fault->write, &map_writable))
return RET_PF_RETRY;
- if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
+ if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
return r;
r = RET_PF_RETRY;
@@ -3926,11 +3926,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
goto out_unlock;
if (is_tdp_mmu_fault)
- r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
- pfn, prefault);
+ r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, fault->max_level,
+ pfn, fault->prefault);
else
- r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
- prefault, is_tdp);
+ r = __direct_map(vcpu, gpa, error_code, map_writable, fault->max_level, pfn,
+ fault->prefault, fault->is_tdp);
out_unlock:
if (is_tdp_mmu_fault)
@@ -3944,12 +3944,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
- pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
+ pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
- return direct_page_fault(vcpu, fault->addr,
- fault->error_code, fault->prefault,
- PG_LEVEL_2M, false);
+ fault->max_level = PG_LEVEL_2M;
+ return direct_page_fault(vcpu, fault);
}
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
@@ -3987,21 +3986,17 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
- int max_level;
- gpa_t gpa = fault->addr;
-
- for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
- max_level > PG_LEVEL_4K;
- max_level--) {
- int page_num = KVM_PAGES_PER_HPAGE(max_level);
- gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
+ while (fault->max_level > PG_LEVEL_4K) {
+ int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
+ gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
break;
+
+ --fault->max_level;
}
- return direct_page_fault(vcpu, gpa, fault->error_code,
- fault->prefault, max_level, true);
+ return direct_page_fault(vcpu, fault);
}
static void nonpaging_init_context(struct kvm_mmu *context)
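A short worked example of the alignment arithmetic in the loop above; the
address is hypothetical, and the constants assume 4K base pages
(PAGE_SHIFT == 12), PG_LEVEL_2M == 2 and
KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512:

	gpa_t gpa = 0x40235000;                      /* hypothetical fault address */
	int page_num = 512;                          /* KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) */
	gfn_t base = (gpa >> 12) & ~(page_num - 1);  /* 0x40235 & ~0x1ff == 0x40200 */
	/*
	 * kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num) then
	 * checks gfns 0x40200..0x403ff; if their MTRR types disagree, the
	 * loop retries with --fault->max_level, i.e. a 4K mapping here.
	 */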
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -832,7 +832,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
hva_t hva;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
- int max_level;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
WARN_ON_ONCE(fault->is_tdp);
@@ -874,9 +873,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
&walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable);
if (is_self_change_mapping)
- max_level = PG_LEVEL_4K;
+ fault->max_level = PG_LEVEL_4K;
else
- max_level = walker.level;
+ fault->max_level = walker.level;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
@@ -916,7 +915,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;
- r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
+ r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, pfn,
map_writable, fault->prefault);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
Add fields to struct kvm_page_fault corresponding to the arguments
of direct_page_fault().  The fields are initialized in the callers,
and direct_page_fault() receives a struct kvm_page_fault instead of
the callers having to extract the individual arguments out of it.

Also adjust FNAME(page_fault) to store the max_level in struct
kvm_page_fault, to keep it similar to the direct map path.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu.h             |  5 ++++
 arch/x86/kvm/mmu/mmu.c         | 43 +++++++++++++++-------------------
 arch/x86/kvm/mmu/paging_tmpl.h |  7 +++---
 3 files changed, 27 insertions(+), 28 deletions(-)
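The caller-side effect, condensed from the nonpaging_page_fault() hunk above
into a before/after sketch:

	/* Before: each argument extracted from the fault and passed by hand. */
	return direct_page_fault(vcpu, fault->addr, fault->error_code,
				 fault->prefault, PG_LEVEL_2M, false);

	/* After: set the one field that varies, pass the whole struct. */
	fault->max_level = PG_LEVEL_2M;
	return direct_page_fault(vcpu, fault);

Concentrating the fault state in one struct means future fields can be added
without churning every signature on the fault path.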