@@ -2180,7 +2180,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			    struct list_head *invalid_list)
 {
-	if (sp->role.cr4_pae != !!is_pae(vcpu)
+	if (sp->role.cr4_pae != vcpu->arch.mmu.base_role.cr4_pae
 	    || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return false;
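
The substitution matters because sp->role.cr4_pae records the gpte width the shadow page was built for, while !!is_pae(vcpu) re-derives it from the guest's live CR4; with shadow EPT roles (see the next hunk) the two can legitimately disagree, and the old check would zap perfectly valid pages. Below is a minimal userspace sketch of that mismatch; the union, structs and is_pae() here are toy stand-ins for the real kvm_mmu_page_role machinery, not KVM's API:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: just the one role bit this patch cares about. */
union page_role {
	unsigned word;
	struct {
		unsigned cr4_pae : 1;	/* gptes are 8 bytes wide */
	};
};

struct mmu  { union page_role base_role; };
struct vcpu { unsigned long cr4; struct mmu mmu; };

#define X86_CR4_PAE (1ul << 5)

static bool is_pae(struct vcpu *v) { return v->cr4 & X86_CR4_PAE; }

int main(void)
{
	struct vcpu v = { .cr4 = 0 };		/* L2 runs without PAE... */

	v.mmu.base_role.cr4_pae = 1;		/* ...but the shadow EPT MMU
						 * always uses 8-byte gptes. */

	union page_role sp = v.mmu.base_role;	/* role of a synced sp */

	/* Old check: compares the sp against the guest's live CR4, so it
	 * reports a spurious mismatch and the page would be zapped. */
	printf("old: mismatch=%d\n", sp.cr4_pae != !!is_pae(&v));

	/* New check: compares against the mode the MMU was built for. */
	printf("new: mismatch=%d\n", sp.cr4_pae != v.mmu.base_role.cr4_pae);
	return 0;
}
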
@@ -4838,6 +4838,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 	role.direct = false;
 	role.ad_disabled = !accessed_dirty;
 	role.guest_mode = true;
+	role.cr4_pae = true;
 	role.access = ACC_ALL;
 
 	return role;
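
This is the other half of the fix: EPT entries are 8 bytes regardless of the L2 guest's CR4, so the shadow EPT root role pins the bit explicitly instead of inheriting whatever base_role happened to contain. A userspace-compilable sketch of the resulting role computation, with the union reduced to the fields this hunk touches (a hypothetical mirror of kvm_calc_shadow_ept_root_page_role, not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

#define ACC_ALL 7	/* ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK */

/* Toy mirror of kvm_mmu_page_role, reduced to this hunk's fields. */
union page_role {
	unsigned word;
	struct {
		unsigned direct      : 1;
		unsigned ad_disabled : 1;
		unsigned guest_mode  : 1;
		unsigned cr4_pae     : 1;
		unsigned access      : 3;
	};
};

static union page_role calc_shadow_ept_root_role(bool accessed_dirty)
{
	union page_role role = { .word = 0 };

	role.direct = 0;
	role.ad_disabled = !accessed_dirty;
	role.guest_mode = 1;
	role.cr4_pae = 1;	/* EPT gptes are always 64-bit */
	role.access = ACC_ALL;
	return role;
}

int main(void)
{
	/* With the bit pinned, __kvm_sync_page()'s new comparison can
	 * never see a spurious cr4_pae mismatch on shadow EPT pages. */
	printf("cr4_pae=%u\n", calc_shadow_ept_root_role(true).cr4_pae);
	return 0;
}
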
@@ -5023,7 +5024,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 	 * as the current vcpu paging mode since we update the sptes only
 	 * when they have the same mode.
 	 */
-	if (is_pae(vcpu) && *bytes == 4) {
+	if (vcpu->arch.mmu.base_role.cr4_pae && *bytes == 4) {
 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
 		*gpa &= ~(gpa_t)7;
 		*bytes = 8;
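
Same substitution in the write-emulation path: whether a 4-byte write is half of a 64-bit gpte is a property of the paging mode the shadow pages were built for, so it too should come from the role rather than live CR4. A standalone sketch of the widening; widen_to_gpte() is a hypothetical helper name for illustration:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gpa_t;

/* Hypothetical helper mirroring the hunk above: a 4-byte write into
 * either half of an 8-byte gpte is rounded to the containing 8 bytes,
 * keyed off the MMU role rather than the guest's live CR4. */
static void widen_to_gpte(gpa_t *gpa, int *bytes, int role_cr4_pae)
{
	if (role_cr4_pae && *bytes == 4) {
		*gpa &= ~(gpa_t)7;	/* align down to the gpte */
		*bytes = 8;		/* fetch the whole 64-bit entry */
	}
}

int main(void)
{
	gpa_t gpa = 0x1004;	/* guest writes the high half of a gpte */
	int bytes = 4;

	widen_to_gpte(&gpa, &bytes, 1);
	printf("gpa=%#llx bytes=%d\n", (unsigned long long)gpa, bytes);
	/* prints: gpa=0x1000 bytes=8 */
	return 0;
}
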