@@ -427,11 +427,11 @@ struct kvm_mmu {
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gpa_t gva_or_gpa, u32 access,
+ gpa_t gva_or_l2pa, u32 access,
struct x86_exception *exception);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
- void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+ void (*invlpg)(struct kvm_vcpu *vcpu, gpa_t gva_or_l2pa, hpa_t root_hpa);
hpa_t root_hpa;
gpa_t root_pgd;
union kvm_mmu_role mmu_role;
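
For context, not part of the patch: under nested TDP, L0 shadows L1's EPT tables through these same hooks, which is why the address they receive can be an L2 GPA rather than a GVA. A rough sketch of where the EPT-flavored hooks are installed, based on kvm_init_shadow_ept_mmu() in mmu.c of this kernel era; treat the exact assignments as an assumption:

/*
 * Sketch: kvm_init_shadow_ept_mmu() installs the paging_tmpl.h
 * instantiations built with FNAME(name) == ept_##name, so for
 * vcpu->arch.guest_mmu both gva_to_gpa() and invlpg() operate on
 * L2 GPAs, not GVAs.
 */
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;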
@@ -1785,7 +1785,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gva_t gva, hpa_t root_hpa);
+ gpa_t gva_or_l2pa, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
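
For reference: the emulated-INVLPG caller, unchanged by this patch and reproduced here from memory of this kernel era, passes the current walk_mmu and INVALID_PAGE so that the active root and every cached prev_root are invalidated:

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	/* INVALID_PAGE means "all roots": current root_hpa plus prev_roots. */
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);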
@@ -5313,24 +5313,24 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- gva_t gva, hpa_t root_hpa)
+ gpa_t gva_or_l2pa, hpa_t root_hpa)
{
int i;
- /* It's actually a GPA for vcpu->arch.guest_mmu. */
+ /* It's actually an L2 GPA for vcpu->arch.guest_mmu. */
if (mmu != &vcpu->arch.guest_mmu) {
/* INVLPG on a non-canonical address is a NOP according to the SDM. */
- if (is_noncanonical_address(gva, vcpu))
+ if (is_noncanonical_address(gva_or_l2pa, vcpu))
return;
- static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
+ static_call(kvm_x86_tlb_flush_gva)(vcpu, gva_or_l2pa);
}
if (!mmu->invlpg)
return;
if (root_hpa == INVALID_PAGE) {
- mmu->invlpg(vcpu, gva, mmu->root_hpa);
+ mmu->invlpg(vcpu, gva_or_l2pa, mmu->root_hpa);
/*
* INVLPG is required to invalidate any global mappings for the VA,
@@ -5345,9 +5345,9 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
*/
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if (VALID_PAGE(mmu->prev_roots[i].hpa))
- mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+ mmu->invlpg(vcpu, gva_or_l2pa, mmu->prev_roots[i].hpa);
} else {
- mmu->invlpg(vcpu, gva, root_hpa);
+ mmu->invlpg(vcpu, gva_or_l2pa, root_hpa);
}
}
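
The rename exists for callers like nVMX's INVEPT emulation, where the address passed to mmu->invlpg() is an L2 GPA matched against cached shadow-EPT roots rather than a GVA. A hedged sketch of such a caller, with nested_ept_invalidate_addr() and nested_ept_root_matches() recalled from the companion nVMX patches of this series; treat the names and details as assumptions:

/* Sketch: invalidate one L2 GPA in every cached root created for @eptp. */
static int nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				      gpa_t addr)
{
	struct kvm_mmu_root_info *cached_root;
	uint i;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			/* @addr is an L2 GPA, hence the gpa_t parameter. */
			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
	}
	return 0;
}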
@@ -928,7 +928,8 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
+/* Note, @gva_or_l2pa is an L2 GPA when invlpg() invalidates an L2 GPA translation. */
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gpa_t gva_or_l2pa, hpa_t root_hpa)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
@@ -936,7 +937,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
int level;
u64 *sptep;
- vcpu_clear_mmio_info(vcpu, gva);
+ vcpu_clear_mmio_info(vcpu, gva_or_l2pa);
/*
* No need to check return value here, rmap_can_add() can
@@ -950,7 +951,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
}
write_lock(&vcpu->kvm->mmu_lock);
- for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
+ for_each_shadow_entry_using_root(vcpu, root_hpa, gva_or_l2pa, iterator) {
level = iterator.level;
sptep = iterator.sptep;