@@ -436,7 +436,6 @@ struct kvm_mmu {
union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
- u8 ept_ad;
bool direct_map;
struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
@@ -4915,7 +4915,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->shadow_root_level = level;
- context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
@@ -64,7 +64,7 @@
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#define PT_GUEST_DIRTY_SHIFT 9
#define PT_GUEST_ACCESSED_SHIFT 8
- #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
+ #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
The similar field in the CPU role, ad_disabled, is initialized to the
opposite value for shadow EPT, and left zero for "normal" EPT because
guest page tables always have A/D bits.  So, read it from the CPU role,
like other page-format fields; it just has to be inverted to account for
the different polarity.

In the MMU role, instead, the ad_disabled bit is set according to
shadow_accessed_mask, so it would have been incorrect to replace
PT_HAVE_ACCESSED_DIRTY with just !mmu->mmu_role.base.ad_disabled.

However, with the separation of CPU and MMU roles, we might even get rid
of the PT_HAVE_ACCESSED_DIRTY macro altogether.  I am not doing this
because the macro has a small effect in terms of dead code elimination:

   text    data     bss     dec     hex
 103544   16665     112  120321   1d601    # as of this patch
 103746   16665     112  120523   1d6cb    # without PT_HAVE_ACCESSED_DIRTY

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/mmu/mmu.c          | 1 -
 arch/x86/kvm/mmu/paging_tmpl.h  | 2 +-
 3 files changed, 1 insertion(+), 3 deletions(-)