@@ -4400,6 +4400,32 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
 	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
 }
 
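+/*
+ * Re-apply sub-page write-protection for @gfn: if its SPP access map
+ * grants anything less than full write access, write the permissions
+ * back so the sub-page protection stays in effect.
+ */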
+static int kvm_enable_spp_protection(struct kvm *kvm, u64 gfn)
+{
+	struct kvm_subpage spp_info = {0};
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot)
+		return -EFAULT;
+
+	spp_info.base_gfn = gfn;
+	spp_info.npages = 1;
+
+	if (kvm_mmu_get_subpages(kvm, &spp_info, true) < 0)
+		return -EFAULT;
+
+	if (spp_info.access_map[0] != FULL_SPP_ACCESS)
+		kvm_mmu_set_subpages(kvm, &spp_info, true);
+
+	return 0;
+}
+
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 			  bool prefault)
 {
@@ -4451,6 +4477,14 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
+
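+	/*
+	 * Sub-page protection only applies to 4KB mappings, so re-apply
+	 * it once a 4KB SPTE has been installed for this gfn.
+	 */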
+	if (vcpu->kvm->arch.spp_active && level == PT_PAGE_TABLE_LEVEL)
+		kvm_enable_spp_protection(vcpu->kvm, gfn);
+
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;