[1/8] KVM: x86/mmu: Refactor is_tdp_mmu_root()

Message ID 20210611235701.3941724-2-dmatlack@google.com
State New, archived
Series KVM: x86/mmu: Fast page fault support for the TDP MMU

Commit Message

David Matlack June 11, 2021, 11:56 p.m. UTC
Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
duplicated code at call sites and make the code more readable.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
 arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
 3 files changed, 11 insertions(+), 9 deletions(-)

Comments

Ben Gardon June 14, 2021, 5:56 p.m. UTC | #1
On Fri, Jun 11, 2021 at 4:57 PM David Matlack <dmatlack@google.com> wrote:
>
> Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
> duplicated code at call sites and make the code more readable.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Nice cleanup!

Reviewed-by: Ben Gardon <bgardon@google.com>


> ---
>  arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
>  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
>  arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
>  3 files changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0144c40d09c7..eccd889d20a5 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3545,7 +3545,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>                 return reserved;
>         }
>
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_vcpu_using_tdp_mmu(vcpu))
>                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
>         else
>                 leaf = get_walk(vcpu, addr, sptes, &root);
> @@ -3729,7 +3729,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         if (page_fault_handle_page_track(vcpu, error_code, gfn))
>                 return RET_PF_EMULATE;
>
> -       if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
> +       if (!is_vcpu_using_tdp_mmu(vcpu)) {
>                 r = fast_page_fault(vcpu, gpa, error_code);
>                 if (r != RET_PF_INVALID)
>                         return r;
> @@ -3751,7 +3751,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>
>         r = RET_PF_RETRY;
>
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_vcpu_using_tdp_mmu(vcpu))
>                 read_lock(&vcpu->kvm->mmu_lock);
>         else
>                 write_lock(&vcpu->kvm->mmu_lock);
> @@ -3762,7 +3762,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         if (r)
>                 goto out_unlock;
>
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_vcpu_using_tdp_mmu(vcpu))
>                 r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
>                                     pfn, prefault);
>         else
> @@ -3770,7 +3770,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>                                  prefault, is_tdp);
>
>  out_unlock:
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_vcpu_using_tdp_mmu(vcpu))
>                 read_unlock(&vcpu->kvm->mmu_lock);
>         else
>                 write_unlock(&vcpu->kvm->mmu_lock);
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 237317b1eddd..f4cc79dabeae 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -979,7 +979,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>
>         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
>                 return RET_PF_RETRY;
> -       if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
> +       if (WARN_ON(!is_vcpu_using_tdp_mmu(vcpu)))
>                 return RET_PF_RETRY;
>
>         level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 5fdf63090451..c8cf12809fcf 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -91,16 +91,18 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
>  static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
>  #endif
>
> -static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
> +static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)
>  {
> +       struct kvm *kvm = vcpu->kvm;
>         struct kvm_mmu_page *sp;
> +       hpa_t root_hpa = vcpu->arch.mmu->root_hpa;
>
>         if (!is_tdp_mmu_enabled(kvm))
>                 return false;
> -       if (WARN_ON(!VALID_PAGE(hpa)))
> +       if (WARN_ON(!VALID_PAGE(root_hpa)))
>                 return false;
>
> -       sp = to_shadow_page(hpa);
> +       sp = to_shadow_page(root_hpa);
>         if (WARN_ON(!sp))
>                 return false;
>
> --
> 2.32.0.272.g935e593368-goog
>
Sean Christopherson June 14, 2021, 7:07 p.m. UTC | #2
On Fri, Jun 11, 2021, David Matlack wrote:
> Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
> duplicated code at call sites and make the code more readable.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
>  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
>  arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
>  3 files changed, 11 insertions(+), 9 deletions(-)
> 

...

> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 5fdf63090451..c8cf12809fcf 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -91,16 +91,18 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
>  static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
>  #endif
>  
> -static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
> +static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)

I normally like code reduction, but I think in this case we should stay with
something closer to the existing code.

If interpreted literally as "is the vCPU using the TDP MMU", the helper is flat
out broken if arch.mmu points at guest_mmu, in which case the function will
evaluate "false" even if the _vCPU_ is using the TDP MMU for the non-nested MMU.

We could dance around that by doing something like:

	if (is_current_mmu_tdp_mmu(vcpu))
		....

but IMO that's a net negative versus:

	if (is_tdp_mmu(vcpu, vcpu->arch.mmu))
		...

because it's specifically the MMU that is of interest.
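
For reference, the relevant vCPU state looks roughly like this (heavily
simplified sketch of struct kvm_vcpu_arch, not the real layout):

	struct kvm_vcpu_arch {
		struct kvm_mmu *mmu;		/* points at root_mmu or guest_mmu */
		struct kvm_mmu root_mmu;	/* non-nested MMU, may be a TDP MMU */
		struct kvm_mmu guest_mmu;	/* used when shadowing L1's page tables */
	};

While arch.mmu points at guest_mmu, a vCPU-keyed helper reports "not using
the TDP MMU" even though root_mmu may still have a TDP MMU root.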

>  {
> +	struct kvm *kvm = vcpu->kvm;
>  	struct kvm_mmu_page *sp;
> +	hpa_t root_hpa = vcpu->arch.mmu->root_hpa;
>  
>  	if (!is_tdp_mmu_enabled(kvm))

If we want to eliminate the second param from the prototype, I think we'd be
better off evaluating whether or not this check buys us anything.  Or even better,
eliminate the redundant calls so that the "fast" check buys so little that it
becomes unnecessary.

The extra check in kvm_tdp_mmu_map() can simply be dropped; its sole caller
literally wraps it with an is_tdp_mmu... check.

>  		return false;
> -	if (WARN_ON(!VALID_PAGE(hpa)))
> +	if (WARN_ON(!VALID_PAGE(root_hpa)))

And hoist this check out of is_tdp_mmu().  I think it's entirely reasonable to
expect the caller to first test the validity of the root before caring about
whether it's a TDP MMU or legacy MMU.

The VALID_PAGE() checks here and in __direct_map() and FNAME(page_fault) are also
mostly worthless, but they're at least marginally not-awful sanity checks and
not fully redundant.

E.g. achieve this over 2-4 patches:

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f96fc3cd5c18..527305c8cede 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2899,9 +2899,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        gfn_t gfn = gpa >> PAGE_SHIFT;
        gfn_t base_gfn = gfn;

-       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
-               return RET_PF_RETRY;
-
        level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

@@ -3635,7 +3632,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
                return reserved;
        }

-       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+       if (is_tdp_mmu(vcpu->arch.mmu))
                leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
        else
                leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3801,6 +3798,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                             bool prefault, int max_level, bool is_tdp)
 {
+       bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
        bool write = error_code & PFERR_WRITE_MASK;
        bool map_writable;

@@ -3810,10 +3808,13 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        hva_t hva;
        int r;

+       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
+               return RET_PF_RETRY;
+
        if (page_fault_handle_page_track(vcpu, error_code, gfn))
                return RET_PF_EMULATE;

-       if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+       if (!is_tdp_mmu_fault) {
                r = fast_page_fault(vcpu, gpa, error_code);
                if (r != RET_PF_INVALID)
                        return r;
@@ -3835,7 +3836,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,

        r = RET_PF_RETRY;

-       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+       if (is_tdp_mmu_fault)
                read_lock(&vcpu->kvm->mmu_lock);
        else
                write_lock(&vcpu->kvm->mmu_lock);
@@ -3846,7 +3847,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        if (r)
                goto out_unlock;

-       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+       if (is_tdp_mmu_fault)
                r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
                                    pfn, prefault);
        else
@@ -3854,7 +3855,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                                 prefault, is_tdp);

 out_unlock:
-       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+       if (is_tdp_mmu_fault)
                read_unlock(&vcpu->kvm->mmu_lock);
        else
                write_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index cc13e001f3de..7ce90bc50774 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -979,11 +979,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        int level;
        int req_level;

-       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
-               return RET_PF_RETRY;
-       if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
-               return RET_PF_RETRY;
-
        level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index f7a7990da11d..6ebe2331a641 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -92,16 +92,11 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 #endif

-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 {
        struct kvm_mmu_page *sp;

-       if (!is_tdp_mmu_enabled(kvm))
-               return false;
-       if (WARN_ON(!VALID_PAGE(hpa)))
-               return false;
-
-       sp = to_shadow_page(hpa);
+       sp = to_shadow_page(mmu->root_hpa);
        if (WARN_ON(!sp))
                return false;
David Matlack June 14, 2021, 9:23 p.m. UTC | #3
On Mon, Jun 14, 2021 at 07:07:56PM +0000, Sean Christopherson wrote:
> On Fri, Jun 11, 2021, David Matlack wrote:
> > Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
> > duplicated code at call sites and make the code more readable.
> > 
> > Signed-off-by: David Matlack <dmatlack@google.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
> >  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
> >  arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
> >  3 files changed, 11 insertions(+), 9 deletions(-)
> > 
> 
> ...

I'm not sure how to interpret this.

> 
> > diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> > index 5fdf63090451..c8cf12809fcf 100644
> > --- a/arch/x86/kvm/mmu/tdp_mmu.h
> > +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> > @@ -91,16 +91,18 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
> >  static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
> >  #endif
> >  
> > -static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
> > +static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)
> 
> I normally like code reduction, but I think in this case we should stay with
> something closer to the existing code.
> 
> If interpreted literally as "is the vCPU using the TDP MMU", the helper is flat
> out broken if arch.mmu points at guest_mmu, in which case the function will
> evaluate "false" even if the _vCPU_ is using the TDP MMU for the non-nested MMU.

Good point. is_vcpu_using_tdp_mmu is ambiguous: It's not clear if it's
checking if the vcpu is using the TDP MMU "right now" versus "at all".

> 
> We could dance around that by doing something like:
> 
> 	if (is_current_mmu_tdp_mmu(vcpu))
> 		....
> 
> but IMO that's a net negative versus:
> 
> 	if (is_tdp_mmu(vcpu, vcpu->arch.mmu))
> 		...
> 
> because it's specifically the MMU that is of interest.

I agree this is more clear.

> 
> >  {
> > +	struct kvm *kvm = vcpu->kvm;
> >  	struct kvm_mmu_page *sp;
> > +	hpa_t root_hpa = vcpu->arch.mmu->root_hpa;
> >  
> >  	if (!is_tdp_mmu_enabled(kvm))
> 
> If we want to eliminate the second param from the prototype, I think we'd be
> better off evaluating whether or not this check buys us anything.  Or even better,
> eliminate the redundant calls so that the "fast" check buys so little that it
> becomes unnecessary.
> 
> The extra check in kvm_tdp_mmu_map() can simply be dropped; its sole caller
> literally wraps it with an is_tdp_mmu... check.
> 
> >  		return false;
> > -	if (WARN_ON(!VALID_PAGE(hpa)))
> > +	if (WARN_ON(!VALID_PAGE(root_hpa)))
> 
> And hoist this check out of is_tdp_mmu().  I think it's entirely reasonable to
> expect the caller to first test the validity of the root before caring about
> whether it's a TDP MMU or legacy MMU.
> 
> The VALID_PAGE() checks here and in __direct_map() and FNAME(page_fault) are also
> mostly worthless, but they're at least marginally not-awful sanity checks and
> not fully redundant.
> 
> E.g. achieve this over 2-4 patches:

Thanks for the suggestions, I'll take a look at cleaning that up. I am
thinking of making that a separate patch series (including removing this
patch from this series) as the interaction between the two is entirely
superficial. Let me know if that makes sense.

> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index f96fc3cd5c18..527305c8cede 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -2899,9 +2899,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         gfn_t gfn = gpa >> PAGE_SHIFT;
>         gfn_t base_gfn = gfn;
> 
> -       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
> -               return RET_PF_RETRY;
> -
>         level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
>                                         huge_page_disallowed, &req_level);
> 
> @@ -3635,7 +3632,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>                 return reserved;
>         }
> 
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_tdp_mmu(vcpu->arch.mmu))
>                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
>         else
>                 leaf = get_walk(vcpu, addr, sptes, &root);
> @@ -3801,6 +3798,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
>  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>                              bool prefault, int max_level, bool is_tdp)
>  {
> +       bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
>         bool write = error_code & PFERR_WRITE_MASK;
>         bool map_writable;
> 
> @@ -3810,10 +3808,13 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         hva_t hva;
>         int r;
> 
> +       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
> +               return RET_PF_RETRY;
> +
>         if (page_fault_handle_page_track(vcpu, error_code, gfn))
>                 return RET_PF_EMULATE;
> 
> -       if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
> +       if (!is_tdp_mmu_fault) {
>                 r = fast_page_fault(vcpu, gpa, error_code);
>                 if (r != RET_PF_INVALID)
>                         return r;
> @@ -3835,7 +3836,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
> 
>         r = RET_PF_RETRY;
> 
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_tdp_mmu_fault)
>                 read_lock(&vcpu->kvm->mmu_lock);
>         else
>                 write_lock(&vcpu->kvm->mmu_lock);
> @@ -3846,7 +3847,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         if (r)
>                 goto out_unlock;
> 
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_tdp_mmu_fault)
>                 r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
>                                     pfn, prefault);
>         else
> @@ -3854,7 +3855,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>                                  prefault, is_tdp);
> 
>  out_unlock:
> -       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
> +       if (is_tdp_mmu_fault)
>                 read_unlock(&vcpu->kvm->mmu_lock);
>         else
>                 write_unlock(&vcpu->kvm->mmu_lock);
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index cc13e001f3de..7ce90bc50774 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -979,11 +979,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>         int level;
>         int req_level;
> 
> -       if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
> -               return RET_PF_RETRY;
> -       if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
> -               return RET_PF_RETRY;
> -
>         level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
>                                         huge_page_disallowed, &req_level);
> 
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index f7a7990da11d..6ebe2331a641 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -92,16 +92,11 @@ static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
>  static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
>  #endif
> 
> -static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
> +static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
>  {
>         struct kvm_mmu_page *sp;
> 
> -       if (!is_tdp_mmu_enabled(kvm))
> -               return false;
> -       if (WARN_ON(!VALID_PAGE(hpa)))
> -               return false;
> -
> -       sp = to_shadow_page(hpa);
> +       sp = to_shadow_page(mmu->root_hpa);
>         if (WARN_ON(!sp))
>                 return false;
>
Sean Christopherson June 14, 2021, 9:39 p.m. UTC | #4
On Mon, Jun 14, 2021, David Matlack wrote:
> On Mon, Jun 14, 2021 at 07:07:56PM +0000, Sean Christopherson wrote:
> > On Fri, Jun 11, 2021, David Matlack wrote:
> > > Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
> > > duplicated code at call sites and make the code more readable.
> > > 
> > > Signed-off-by: David Matlack <dmatlack@google.com>
> > > ---
> > >  arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
> > >  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
> > >  arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
> > >  3 files changed, 11 insertions(+), 9 deletions(-)
> > > 
> > 
> > ...
> 
> I'm not sure how to interpret this.

Ha!  It took me a second to figure out what "this" was.  The ellipsis is just a
way of saying that I trimmed a chunk of text.  I throw it in specifically when
I'm trimming part of a patch to try to make it clear that my response is jumping
to a new context.

> > E.g. achieve this over 2-4 patches:
> 
> Thanks for the suggestions, I'll take a look at cleaning that up. I am
> thinking of making that a separate patch series (including removing this
> patch from this series) as the interaction between the two is entirely
> superficial. Let me know if that makes sense.

Hmm, make 'em a separate series unless there are semantic conflicts, e.g. if you
want to cache the result in get_mmio_spte() or something.
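
E.g. something like this in get_mmio_spte() (untested sketch, assuming the
is_tdp_mmu() helper from the earlier diff; variable name is illustrative):

	bool tdp_mmu_walk = is_tdp_mmu(vcpu->arch.mmu);	/* evaluate once */

	if (tdp_mmu_walk)
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);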
David Matlack June 14, 2021, 10:01 p.m. UTC | #5
On Mon, Jun 14, 2021 at 09:39:33PM +0000, Sean Christopherson wrote:
> On Mon, Jun 14, 2021, David Matlack wrote:
> > On Mon, Jun 14, 2021 at 07:07:56PM +0000, Sean Christopherson wrote:
> > > On Fri, Jun 11, 2021, David Matlack wrote:
> > > > Refactor is_tdp_mmu_root() into is_vcpu_using_tdp_mmu() to reduce
> > > > duplicated code at call sites and make the code more readable.
> > > > 
> > > > Signed-off-by: David Matlack <dmatlack@google.com>
> > > > ---
> > > >  arch/x86/kvm/mmu/mmu.c     | 10 +++++-----
> > > >  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
> > > >  arch/x86/kvm/mmu/tdp_mmu.h |  8 +++++---
> > > >  3 files changed, 11 insertions(+), 9 deletions(-)
> > > > 
> > > 
> > > ...
> > 
> > I'm not sure how to interpret this.
> 
> Ha!  It took me a second to figure out what "this" was.  The ellipsis is just a
> way of saying that I trimmed a chunk of text.  I throw it in specifically when
> I'm trimming part of a patch to try to make it clear that my response is jumping
> to a new context.

Ohhh, that makes sense. Thanks.

> 
> > > E.g. achieve this over 2-4 patches:
> > 
> > Thanks for the suggestions, I'll take a look at cleaning that up. I am
> > thinking of making that a separate patch series (including removing this
> > patch from this series) as the interaction between the two is entirely
> > superficial. Let me know if that makes sense.
> 
> Hmm, make 'em a separate series unless there are semantic conflicts, e.g. if you
> want to cache the result in get_mmio_spte() or something.

Ack, will do.

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..eccd889d20a5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3545,7 +3545,7 @@  static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		return reserved;
 	}
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 	else
 		leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3729,7 +3729,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
 
-	if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+	if (!is_vcpu_using_tdp_mmu(vcpu)) {
 		r = fast_page_fault(vcpu, gpa, error_code);
 		if (r != RET_PF_INVALID)
 			return r;
@@ -3751,7 +3751,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 	r = RET_PF_RETRY;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		read_lock(&vcpu->kvm->mmu_lock);
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
@@ -3762,7 +3762,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (r)
 		goto out_unlock;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
 				    pfn, prefault);
 	else
@@ -3770,7 +3770,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 				 prefault, is_tdp);
 
 out_unlock:
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_vcpu_using_tdp_mmu(vcpu))
 		read_unlock(&vcpu->kvm->mmu_lock);
 	else
 		write_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 237317b1eddd..f4cc79dabeae 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -979,7 +979,7 @@  int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
-	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
+	if (WARN_ON(!is_vcpu_using_tdp_mmu(vcpu)))
 		return RET_PF_RETRY;
 
 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..c8cf12809fcf 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -91,16 +91,18 @@  static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 #endif
 
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_vcpu_using_tdp_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *sp;
+	hpa_t root_hpa = vcpu->arch.mmu->root_hpa;
 
 	if (!is_tdp_mmu_enabled(kvm))
 		return false;
-	if (WARN_ON(!VALID_PAGE(hpa)))
+	if (WARN_ON(!VALID_PAGE(root_hpa)))
 		return false;
 
-	sp = to_shadow_page(hpa);
+	sp = to_shadow_page(root_hpa);
 	if (WARN_ON(!sp))
 		return false;