@@ -2491,6 +2491,26 @@ static void clear_sp_write_flooding_count(u64 *spte)
__clear_sp_write_flooding_count(sp);
}
+/*
+ * Filter @acc down to the permissions that page tracking does not
+ * intercept for @gfn: PREREAD tracking clears ACC_USER_MASK,
+ * PREWRITE/WRITE tracking clears ACC_WRITE_MASK and PREEXEC tracking
+ * clears ACC_EXEC_MASK, so shadow pages for tracked gfns are built
+ * without the tracked permission.
+ */
+static unsigned int kvm_mmu_page_track_acc(struct kvm_vcpu *vcpu, gfn_t gfn,
+ unsigned int acc)
+{
+ if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREREAD))
+ acc &= ~ACC_USER_MASK;
+ if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+ kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+ acc &= ~ACC_WRITE_MASK;
+ if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREEXEC))
+ acc &= ~ACC_EXEC_MASK;
+
+ return acc;
+}
+
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
@@ -2511,7 +2525,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role.direct = direct;
if (role.direct)
role.cr4_pae = 0;
- role.access = access;
+ role.access = kvm_mmu_page_track_acc(vcpu, gfn, access);
if (!vcpu->arch.mmu->direct_map
&& vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -3234,7 +3248,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
- emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
+ unsigned int acc = kvm_mmu_page_track_acc(vcpu, gfn,
+ ACC_ALL);
+
+ emulate = mmu_set_spte(vcpu, iterator.sptep, acc,
write, level, gfn, pfn, prefault,
map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);