
[1/7] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()

Message ID 20230105095848.6061-2-jiangshanlai@gmail.com (mailing list archive)
State New, archived

Commit Message

Lai Jiangshan Jan. 5, 2023, 9:58 a.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

kvm_mmu_invalidate_gva() is called with @root_hpa set to either
@mmu->root.hpa or INVALID_PAGE.

Replace them with KVM_MMU_ROOT_XXX.

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu/mmu.c          | 39 ++++++++++++++++-----------------
 arch/x86/kvm/x86.c              |  2 +-
 3 files changed, 21 insertions(+), 22 deletions(-)

Comments

Sean Christopherson Feb. 2, 2023, 1:21 a.m. UTC | #1
On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> kvm_mmu_invalidate_gva() is called with @root_hpa set to either
> @mmu->root.hpa or INVALID_PAGE.
> 
> Replace them with KVM_MMU_ROOT_XXX.

Please explain _why_.  I can (and did) figure it out on my own, but doing that
takes time and slows down reviews.

> No functional change intended.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/mmu/mmu.c          | 39 ++++++++++++++++-----------------
>  arch/x86/kvm/x86.c              |  2 +-
>  3 files changed, 21 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2f5bf581d00a..dbea616bccce 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -2026,7 +2026,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
>  		       void *insn, int insn_len);
>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
>  void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -			    gva_t gva, hpa_t root_hpa);
> +			    gva_t gva, ulong roots_to_invalidate);
>  void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
>  void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
>  
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 5407649de547..90339b71bd56 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5693,8 +5693,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>  
> +/* roots_to_invalidte must be some combination of the KVM_MMU_ROOT_* flags */

Typo, though I would just drop this comment.  If we want some form of sanity check,
it should be totally doable to add a WARN_ON_ONCE() that verifies the parameter
is a subset of all possible root flags.
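Something like this, just a sketch, assuming KVM_MMU_ROOTS_ALL is (or remains)
the mask of the current root plus all previous roots:

	WARN_ON_ONCE(roots_to_invalidate & ~KVM_MMU_ROOTS_ALL);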

>  void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -			    gva_t gva, hpa_t root_hpa)
> +			    gva_t gva, ulong roots_to_invalidate)

s/ulong/unsigned long

And I got confused by "roots_to_invalidate"; I thought it meant "invalidate these
entire trees" as opposed to "invalidate the gva in these trees".  Best I can come
up with is simply "roots".
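I.e., combined with the s/ulong/unsigned long fix, the prototype would end up
looking something like:

	void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t gva, unsigned long roots);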

>  {
>  	int i;
>  
> @@ -5710,31 +5711,29 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  	if (!mmu->invlpg)
>  		return;
>  
> -	if (root_hpa == INVALID_PAGE) {
> +	if ((roots_to_invalidate & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa))
>  		mmu->invlpg(vcpu, gva, mmu->root.hpa);
>  
> -		/*
> -		 * INVLPG is required to invalidate any global mappings for the VA,
> -		 * irrespective of PCID. Since it would take us roughly similar amount
> -		 * of work to determine whether any of the prev_root mappings of the VA
> -		 * is marked global, or to just sync it blindly, so we might as well
> -		 * just always sync it.
> -		 *
> -		 * Mappings not reachable via the current cr3 or the prev_roots will be
> -		 * synced when switching to that cr3, so nothing needs to be done here
> -		 * for them.
> -		 */
> -		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
> -			if (VALID_PAGE(mmu->prev_roots[i].hpa))
> -				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
> -	} else {
> -		mmu->invlpg(vcpu, gva, root_hpa);
> -	}
> +	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)

for-loop needs curly braces.

> +		if ((roots_to_invalidate & KVM_MMU_ROOT_PREVIOUS(i)) &&
> +		    VALID_PAGE(mmu->prev_roots[i].hpa))
> +			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);

I think it has to go at the end of this series, but please add a patch somewhere
to move the VALID_PAGE() check into __kvm_mmu_invalidate_gva(), e.g. end up with

	if (roots & KVM_MMU_ROOT_CURRENT)
		__kvm_mmu_invalidate_gva(vcpu, mmu, gva, mmu->root.hpa);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
			__kvm_mmu_invalidate_gva(vcpu, mmu, gva,
						 mmu->prev_roots[i].hpa);
	}

> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c936f8d28a53..4696cbb40545 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -799,7 +799,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>  	if ((fault->error_code & PFERR_PRESENT_MASK) &&
>  	    !(fault->error_code & PFERR_RSVD_MASK))
>  		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
> -				       fault_mmu->root.hpa);
> +				       KVM_MMU_ROOT_CURRENT);

This is logically correct, but there's a potential (weird) functional change here.
If this is called with an invalid root, then KVM will invalidate the GVA in all
roots prior to this patch, but in no roots after this patch.

I _think_ it should be impossible to get here with an invalid root.  Can you try
adding a prep patch to assert that the root is valid so that this patch can
reasonably assert that there's no functional change?


diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 508074e47bc0..fffd9b610196 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                               vcpu->arch.walk_mmu;
 
+       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
+
        /*
         * Invalidate the TLB entry for the faulting address, if it exists,
         * else the access will fault indefinitely (and to emulate hardware).
Lai Jiangshan Feb. 3, 2023, 2:51 p.m. UTC | #2
On Thu, Feb 2, 2023 at 9:21 AM Sean Christopherson <seanjc@google.com> wrote:

>
> This is logically correct, but there's a potential (weird) functional change here.
> If this is called with an invalid root, then KVM will invalidate the GVA in all
> roots prior to this patch, but in no roots after this patch.
>
> I _think_ it should be impossible to get here with an invalid root.  Can you try
> adding a prep patch to assert that the root is valid so that this patch can
> reasonably assert that there's no functional change?
>
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 508074e47bc0..fffd9b610196 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>         fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
>                                                vcpu->arch.walk_mmu;
>
> +       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
> +

I've been updating the patches as per your suggestions.

And I suddenly realized that when fault->nested_page_fault=false
with nested EPT, fault_mmu->root.hpa is always unset.

fault_mmu->root.hpa is just meaningless when fault_mmu is not
vcpu->arch.mmu.

I will add it as one of the reasons for replacing the argument
with KVM_MMU_ROOT_XXX.

>         /*
>          * Invalidate the TLB entry for the faulting address, if it exists,
>          * else the access will fault indefinitely (and to emulate hardware).
>
Sean Christopherson Feb. 3, 2023, 8:54 p.m. UTC | #3
On Fri, Feb 03, 2023, Lai Jiangshan wrote:
> On Thu, Feb 2, 2023 at 9:21 AM Sean Christopherson <seanjc@google.com> wrote:
> 
> >
> > This is logically correct, but there's a potential (weird) functional change here.
> > If this is called with an invalid root, then KVM will invalidate the GVA in all
> > roots prior to this patch, but in no roots after this patch.
> >
> > I _think_ it should be impossible to get here with an invalid root.  Can you try
> > adding a prep patch to assert that the root is valid so that this patch can
> > reasonably assert that there's no functional change?
> >
> >
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 508074e47bc0..fffd9b610196 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
> >         fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
> >                                                vcpu->arch.walk_mmu;
> >
> > +       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
> > +
> 
> I've been updating the patches as per your suggestions.
> 
> And I suddenly realized that when fault->nested_page_fault=false
> with nested EPT, fault_mmu->root.hpa is always unset.
> 
> fault_mmu->root.hpa is just meaningless when fault_mmu is not
> vcpu->arch.mmu.

Right, because there's no KVM-managed MMU. 

> I will add it as one of the reasons for replacing the argument
> with KVM_MMU_ROOT_XXX.

And maybe call out that when using walk_mmu, the ->invlpg() implementation is
NULL, i.e. using CURRENT root is a nop.
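For reference, the nop falls out of the early return that already exists in
kvm_mmu_invalidate_gva(), since walk_mmu never provides the hook:

	/* walk_mmu has no KVM-managed shadow pages, so ->invlpg is NULL */
	if (!mmu->invlpg)
		return;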

Thanks!

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2f5bf581d00a..dbea616bccce 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2026,7 +2026,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa);
+			    gva_t gva, ulong roots_to_invalidate);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5407649de547..90339b71bd56 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5693,8 +5693,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+/* roots_to_invalidte must be some combination of the KVM_MMU_ROOT_* flags */
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa)
+			    gva_t gva, ulong roots_to_invalidate)
 {
 	int i;
 
@@ -5710,31 +5711,29 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (!mmu->invlpg)
 		return;
 
-	if (root_hpa == INVALID_PAGE) {
+	if ((roots_to_invalidate & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa))
 		mmu->invlpg(vcpu, gva, mmu->root.hpa);
 
-		/*
-		 * INVLPG is required to invalidate any global mappings for the VA,
-		 * irrespective of PCID. Since it would take us roughly similar amount
-		 * of work to determine whether any of the prev_root mappings of the VA
-		 * is marked global, or to just sync it blindly, so we might as well
-		 * just always sync it.
-		 *
-		 * Mappings not reachable via the current cr3 or the prev_roots will be
-		 * synced when switching to that cr3, so nothing needs to be done here
-		 * for them.
-		 */
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (VALID_PAGE(mmu->prev_roots[i].hpa))
-				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
-	} else {
-		mmu->invlpg(vcpu, gva, root_hpa);
-	}
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+		if ((roots_to_invalidate & KVM_MMU_ROOT_PREVIOUS(i)) &&
+		    VALID_PAGE(mmu->prev_roots[i].hpa))
+			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 }
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+	/*
+	 * INVLPG is required to invalidate any global mappings for the VA,
+	 * irrespective of PCID. Since it would take us roughly similar amount
+	 * of work to determine whether any of the prev_root mappings of the VA
+	 * is marked global, or to just sync it blindly, so we might as well
+	 * just always sync it.
+	 *
+	 * Mappings not reachable via the current cr3 or the prev_roots will be
+	 * synced when switching to that cr3, so nothing needs to be done here
+	 * for them.
+	 */
+	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c936f8d28a53..4696cbb40545 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -799,7 +799,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	if ((fault->error_code & PFERR_PRESENT_MASK) &&
 	    !(fault->error_code & PFERR_RSVD_MASK))
 		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
-				       fault_mmu->root.hpa);
+				       KVM_MMU_ROOT_CURRENT);
 
 	fault_mmu->inject_page_fault(vcpu, fault);
 }