
[V2,4/8] kvm: x86/mmu: Set mmu->sync_page as NULL for direct paging

Message ID 20230207155735.2845-5-jiangshanlai@gmail.com (mailing list archive)
State New, archived
Series [V2,1/8] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()

Commit Message

Lai Jiangshan Feb. 7, 2023, 3:57 p.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

mmu->sync_page is never called for direct paging.

Both mmu->sync_page and mmu->invlpg only make sense for shadow paging.
Setting mmu->sync_page to NULL for direct paging makes it consistent
with mmu->invlpg, which is already set to NULL for that case.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)
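
For orientation (an editorial sketch, not part of the patch): once the
hook can be NULL, a call site has to test the pointer before invoking
it, exactly as is already done for mmu->invlpg. The helper name
kvm_sync_page_check() below is hypothetical and used only for
illustration:

	/* Direct (TDP/nonpaging) MMUs have no shadow pages to sync. */
	static int kvm_sync_page_check(struct kvm_vcpu *vcpu,
				       struct kvm_mmu_page *sp)
	{
		if (!vcpu->arch.mmu->sync_page)
			return -1;

		return vcpu->arch.mmu->sync_page(vcpu, sp);
	}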

Comments

Paolo Bonzini Feb. 7, 2023, 4:17 p.m. UTC | #1
On 2/7/23 16:57, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> mmu->sync_page is never called for direct paging.
> 
> Both mmu->sync_page and mmu->invlpg only make sense for shadow paging.
> Setting mmu->sync_page to NULL for direct paging makes it consistent
> with mmu->invlpg, which is already set to NULL for that case.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>

I'd rather have a WARN_ON_ONCE in kvm_sync_page(), otherwise looks good.

Paolo
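
As a concrete sketch of the suggested check (illustrative only; the
exact form inside kvm_sync_page() would be settled in a later
revision), the warning could simply guard the now-nullable hook:

	if (WARN_ON_ONCE(!vcpu->arch.mmu->sync_page))
		return -1;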
Sean Christopherson Feb. 10, 2023, 12:57 a.m. UTC | #2
On Tue, Feb 07, 2023, Paolo Bonzini wrote:
> On 2/7/23 16:57, Lai Jiangshan wrote:
> > From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> > 
> > mmu->sync_page is never called for direct paging.
> > 
> > Both mmu->sync_page and mmu->invlpg only make sense for shadow paging.
> > Setting mmu->sync_page to NULL for direct paging makes it consistent
> > with mmu->invlpg, which is already set to NULL for that case.
> > 
> > Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> I'd rather have a WARN_ON_ONCE in kvm_sync_page(), otherwise looks good.

Agreed, there's even a WARN_ON_ONCE() to piggyback:

	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
		return -1;

Of course, the direct + match checks on the role effectively do the same, but I
would rather be explicit.

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e03cf5558773..e30ca652f6ff 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1789,12 +1789,6 @@ static void mark_unsync(u64 *spte)
 	kvm_mmu_mark_parents_unsync(sp);
 }
 
-static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp)
-{
-	return -1;
-}
-
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -4469,7 +4463,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
-	context->sync_page = nonpaging_sync_page;
+	context->sync_page = NULL;
 	context->invlpg = NULL;
 }
 
@@ -5157,7 +5151,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->cpu_role.as_u64 = cpu_role.as_u64;
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
-	context->sync_page = nonpaging_sync_page;
+	context->sync_page = NULL;
 	context->invlpg = NULL;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;