--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3131,6 +3131,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 	unsigned int access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
+	u32 *wp_bitmap;
 
 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
@@ -3144,6 +3145,17 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 	for (i = 0; i < ret; i++, gfn++, start++) {
 		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
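+		/*
+		 * Prefetch bypasses the normal fault path, so reapply
+		 * any subpage write-protection bitmap set for this gfn.
+		 */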
+		if (vcpu->kvm->arch.spp_active) {
+			wp_bitmap = gfn_to_subpage_wp_info(slot, gfn);
+			if (wp_bitmap && *wp_bitmap != FULL_SPP_ACCESS)
+				kvm_spp_mark_protection(vcpu->kvm,
+							gfn,
+							*wp_bitmap);
+		}
 		put_page(pages[i]);
 	}
@@ -3336,6 +3348,19 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
			   map_writable);
 	direct_pte_prefetch(vcpu, it.sptep);
 	++vcpu->stat.pf_fixed;
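+	/*
+	 * SPP hardware only acts on 4KB mappings, so the subpage
+	 * write-protection bitmap is applied here only at 4KB level.
+	 */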
+	if (level == PT_PAGE_TABLE_LEVEL) {
+		int npages;
+		u32 access;
+
+		npages = kvm_spp_get_permission(vcpu->kvm, gfn, 1, &access);
+		if (npages == 1 && access != FULL_SPP_ACCESS)
+			kvm_spp_mark_protection(vcpu->kvm, gfn, access);
+	}
+
 	return ret;
 }
@@ -4125,6 +4150,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (lpage_disallowed)
		max_level = PT_PAGE_TABLE_LEVEL;
 
+	check_spp_protection(vcpu, gfn, &max_level);
+
 	r = fast_page_fault(vcpu, gpa, error_code);
 	if (r != RET_PF_INVALID)
--- a/arch/x86/kvm/mmu/spp.c
+++ b/arch/x86/kvm/mmu/spp.c
@@ -433,6 +433,56 @@ int kvm_spp_mark_protection(struct kvm *kvm, u64 gfn, u32 access)
 	return ret;
 }
 
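+/*
+ * Check whether any 4KB subpage of the hugepage frame at @level that
+ * covers @gfn carries a write-permission bitmap other than full access.
+ */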
+static bool is_spp_protected(struct kvm_memory_slot *slot, gfn_t gfn, int level)
+{
+	int page_num = KVM_PAGES_PER_HPAGE(level);
+	u32 *access;
+	gfn_t gfn_max;
+
+	gfn &= ~(page_num - 1);
+	gfn_max = gfn + page_num - 1;
+	for (; gfn <= gfn_max; gfn++) {
+		access = gfn_to_subpage_wp_info(slot, gfn);
+		if (access && *access != FULL_SPP_ACCESS)
+			return true;
+	}
+	return false;
+}
+
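+/*
+ * Lower *max_level so that no SPP-protected 4KB subpage falls inside
+ * the hugepage frame backing the mapping: a protected 2MB frame falls
+ * back to 4KB, a protected 1GB frame falls back to 2MB.
+ */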
+void check_spp_protection(struct kvm_vcpu *vcpu, gfn_t gfn,
+			  int *max_level)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *slot;
+	bool protected;
+
+	if (!kvm->arch.spp_active)
+		return;
+
+	slot = gfn_to_memslot(kvm, gfn);
+
+	if (!slot)
+		return;
+
+	protected = is_spp_protected(slot, gfn, PT_DIRECTORY_LEVEL);
+
+	if (protected) {
+		*max_level = PT_PAGE_TABLE_LEVEL;
+	} else if (*max_level == PT_PDPE_LEVEL &&
+		   is_spp_protected(slot, gfn, PT_PDPE_LEVEL)) {
+		*max_level = PT_DIRECTORY_LEVEL;
+	}
+}
+
 int kvm_vm_ioctl_get_subpages(struct kvm *kvm,
			      u64 gfn,
			      u32 npages,
--- a/arch/x86/kvm/mmu/spp.h
+++ b/arch/x86/kvm/mmu/spp.h
@@ -11,6 +11,8 @@ int kvm_spp_get_permission(struct kvm *kvm, u64 gfn, u32 npages,
 int kvm_spp_set_permission(struct kvm *kvm, u64 gfn, u32 npages,
			   u32 *access_map);
 int kvm_spp_mark_protection(struct kvm *kvm, u64 gfn, u32 access);
+void check_spp_protection(struct kvm_vcpu *vcpu, gfn_t gfn,
+			  int *max_level);
 int kvm_vm_ioctl_get_subpages(struct kvm *kvm,
			      u64 gfn,
			      u32 npages,
When SPP protection is set but the gfn->pfn mapping isn't established
yet, SPP protection must be checked and marked in the EPT while the
gfn->pfn mapping is being built; the setup of the SPPT itself is
deferred to the handle_spp() handler.

Per the hardware's capability, SPP only works on 4KB mappings, so to
apply SPP protection to hugepage (2MB, 1GB) ranges, the hugepage
entries need to be zapped before SPP is set up. In direct_page_fault(),
SPP protection is checked before the 4KB, 2MB or 1GB mapping is set;
the goal is to introduce the least impact on hugepage setup, i.e., to
fall back to the largest hugepage mapping still possible.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/mmu/spp.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu/spp.h |  2 ++
 3 files changed, 79 insertions(+)
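As an aside for reviewers, below is a minimal userspace sketch of the
level-fallback rule described above. It is not KVM code: spp_bitmap,
frame_protected and clamp_level are made-up names, FULL_ACCESS stands
in for FULL_SPP_ACCESS, and the while loop is an equivalent
reformulation of the two-step check in check_spp_protection().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FULL_ACCESS 0xffffffffu        /* stand-in for FULL_SPP_ACCESS */
#define NPAGES      (512 * 512)        /* 4KB pages in one 1GB frame */

static uint32_t spp_bitmap[NPAGES];    /* per-4KB-page WP bitmaps */

/* True if the frame at 'level' covering gfn holds a protected subpage. */
static bool frame_protected(uint64_t gfn, int level)
{
	uint64_t pages = 1ULL << (9 * (level - 1)); /* 1, 512, 262144 */
	uint64_t base = gfn & ~(pages - 1);
	uint64_t g;

	for (g = base; g < base + pages; g++)
		if (spp_bitmap[g] != FULL_ACCESS)
			return true;
	return false;
}

/* Fall back from the requested level to the largest unprotected one. */
static int clamp_level(uint64_t gfn, int max_level)
{
	while (max_level > 1 && frame_protected(gfn, max_level))
		max_level--;
	return max_level;
}

int main(void)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		spp_bitmap[i] = FULL_ACCESS;
	spp_bitmap[700] = 0;   /* write-protect the subpages of one 4KB page */

	/* Shares a 2MB frame with the protected page: falls back to 4KB (1). */
	printf("gfn 700:  level %d\n", clamp_level(700, 3));
	/* Clean 2MB frame inside the same 1GB frame: falls back to 2MB (2). */
	printf("gfn 4096: level %d\n", clamp_level(4096, 3));
	return 0;
}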