
KVM: MMU: Add wrapper to check whether MMU is in direct mode

Message ID: 20221206073951.172450-1-yu.c.zhang@linux.intel.com
State: New, archived
Series: KVM: MMU: Add wrapper to check whether MMU is in direct mode

Commit Message

Yu Zhang Dec. 6, 2022, 7:39 a.m. UTC
Simplify the code by introducing a wrapper, mmu_is_direct(),
instead of using vcpu->arch.mmu->root_role.direct everywhere.

Meanwhile, use a temporary variable 'direct' in routines such
as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
vcpu->arch.mmu->root_role.direct repeatedly.

No functional change intended.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 26 +++++++++++++-------------
 arch/x86/kvm/x86.c     |  9 +++++----
 arch/x86/kvm/x86.h     |  5 +++++
 3 files changed, 23 insertions(+), 17 deletions(-)
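
For reference, the wrapper being added (shown in full in the x86.h hunk at
the bottom of this page) is a one-line accessor around the root role bit:

	/* New helper from this patch (arch/x86/kvm/x86.h). */
	static inline bool mmu_is_direct(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.mmu->root_role.direct;
	}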

Comments

Sean Christopherson Jan. 20, 2023, 1:18 a.m. UTC | #1
+David and Ben

On Tue, Dec 06, 2022, Yu Zhang wrote:
> Simplify the code by introducing a wrapper, mmu_is_direct(),
> instead of using vcpu->arch.mmu->root_role.direct everywhere.
> 
> Meanwhile, use a temporary variable 'direct' in routines such
> as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
> vcpu->arch.mmu->root_role.direct repeatedly.

I've looked at this patch at least four times and still can't decide whether or
not I like the helper.  On one hand, it's shorter and easier to read.  On the other
hand, I don't love that mmu_is_nested() looks at a completely different MMU, which
is weird if not confusing.
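
For context, the existing helper being contrasted here checks a different
MMU pointer than the proposed mmu_is_direct(); the two would end up side by
side in x86.h (see the hunk at the bottom of this page):

	/* Existing helper: looks at walk_mmu, not vcpu->arch.mmu. */
	static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
	}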

Anyone else have an opinion?

> No functional change intended.
> 
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 26 +++++++++++++-------------
>  arch/x86/kvm/x86.c     |  9 +++++----
>  arch/x86/kvm/x86.h     |  5 +++++
>  3 files changed, 23 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 4736d7849c60..d2d0fabdb702 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -2280,7 +2280,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
>  
>  	if (iterator->level >= PT64_ROOT_4LEVEL &&
>  	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
> -	    !vcpu->arch.mmu->root_role.direct)
> +	    !mmu_is_direct(vcpu))
>  		iterator->level = PT32E_ROOT_LEVEL;
>  
>  	if (iterator->level == PT32E_ROOT_LEVEL) {
> @@ -2677,7 +2677,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
>  	gpa_t gpa;
>  	int r;
>  
> -	if (vcpu->arch.mmu->root_role.direct)
> +	if (mmu_is_direct(vcpu))
>  		return 0;
>  
>  	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
> @@ -3918,7 +3918,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
>  	int i;
>  	struct kvm_mmu_page *sp;
>  
> -	if (vcpu->arch.mmu->root_role.direct)
> +	if (mmu_is_direct(vcpu))
>  		return;
>  
>  	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
> @@ -4147,7 +4147,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>  
>  	arch.token = alloc_apf_token(vcpu);
>  	arch.gfn = gfn;
> -	arch.direct_map = vcpu->arch.mmu->root_role.direct;
> +	arch.direct_map = mmu_is_direct(vcpu);
>  	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
>  
>  	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
> @@ -4157,17 +4157,16 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>  void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
>  {
>  	int r;
> +	bool direct = mmu_is_direct(vcpu);

I would prefer to not add local bools and instead do a 1:1 replacement.  "direct"
loses too much context (direct what?), and performance-wise I doubt it will
influence the compiler.
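
A 1:1 replacement here means calling the helper at each use site rather
than caching the result in a local; a sketch of just the two checks in
kvm_arch_async_page_ready() under that approach (illustrative, not part
of the posted patch):

	/* Illustrative: the helper inlined at both use sites. */
	if ((mmu_is_direct(vcpu) != work->arch.direct_map) || work->wakeup_all)
		return;
	...
	if (!mmu_is_direct(vcpu) &&
	    work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
		return;
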
Yu Zhang Jan. 20, 2023, 7:38 a.m. UTC | #2
On Fri, Jan 20, 2023 at 01:18:45AM +0000, Sean Christopherson wrote:
> +David and Ben
> 
> On Tue, Dec 06, 2022, Yu Zhang wrote:
> > Simplify the code by introducing a wrapper, mmu_is_direct(),
> > instead of using vcpu->arch.mmu->root_role.direct everywhere.
> > 
> > Meanwhile, use a temporary variable 'direct' in routines such
> > as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
> > vcpu->arch.mmu->root_role.direct repeatedly.

Thanks, Sean. I had already forgotten this patch existed. :)
> 
> I've looked at this patch at least four times and still can't decide whether or
> not I like the helper.  On one hand, it's shorter and easier to read.  On the other
> hand, I don't love that mmu_is_nested() looks at a completely different MMU, which
> is weird if not confusing.

Do you mean mmu_is_direct()? Why is it about a different MMU?

> 
> Anyone else have an opinion?
> 
> > No functional change intended.
> > 
> > Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c | 26 +++++++++++++-------------
> >  arch/x86/kvm/x86.c     |  9 +++++----
> >  arch/x86/kvm/x86.h     |  5 +++++
> >  3 files changed, 23 insertions(+), 17 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 4736d7849c60..d2d0fabdb702 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -2280,7 +2280,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
> >  
> >  	if (iterator->level >= PT64_ROOT_4LEVEL &&
> >  	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
> > -	    !vcpu->arch.mmu->root_role.direct)
> > +	    !mmu_is_direct(vcpu))
> >  		iterator->level = PT32E_ROOT_LEVEL;
> >  
> >  	if (iterator->level == PT32E_ROOT_LEVEL) {
> > @@ -2677,7 +2677,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
> >  	gpa_t gpa;
> >  	int r;
> >  
> > -	if (vcpu->arch.mmu->root_role.direct)
> > +	if (mmu_is_direct(vcpu))
> >  		return 0;
> >  
> >  	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
> > @@ -3918,7 +3918,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
> >  	int i;
> >  	struct kvm_mmu_page *sp;
> >  
> > -	if (vcpu->arch.mmu->root_role.direct)
> > +	if (mmu_is_direct(vcpu))
> >  		return;
> >  
> >  	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
> > @@ -4147,7 +4147,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> >  
> >  	arch.token = alloc_apf_token(vcpu);
> >  	arch.gfn = gfn;
> > -	arch.direct_map = vcpu->arch.mmu->root_role.direct;
> > +	arch.direct_map = mmu_is_direct(vcpu);
> >  	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
> >  
> >  	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
> > @@ -4157,17 +4157,16 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> >  void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
> >  {
> >  	int r;
> > +	bool direct = mmu_is_direct(vcpu);
> 
> I would prefer to not add local bools and instead do a 1:1 replacement.  "direct"
> loses too much context (direct what?), and performance-wise I doubt it will
> influence the compiler.

If we want to use a new temp value, how about "mmu_direct_mode"?

But I am also open to simply using mmu_is_direct(), because I just realized the
benefit is too marginal: the second read of vcpu->arch.mmu->root_role.direct
should be a cache hit, so adding a local variable only saves one L1 cache read.

Below are disassembly dumps of the current kvm_arch_async_page_ready() and of
the version with the local bool (in case you are interested):

The current kvm_arch_async_page_ready():
        if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
   11243:       48 8b 97 88 02 00 00    mov    0x288(%rdi),%rdx
{
   1124a:       65 48 8b 04 25 28 00    mov    %gs:0x28,%rax
   11251:       00 00
   11253:       48 89 44 24 48          mov    %rax,0x48(%rsp)
   11258:       31 c0                   xor    %eax,%eax
        if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
   1125a:       0f b6 42 50             movzbl 0x50(%rdx),%eax
   						^
						+- here comes the "vcpu->arch.mmu->root_role.direct"

   1125e:       c0 e8 07                shr    $0x7,%al
   11261:       3a 46 78                cmp    0x78(%rsi),%al
   11264:       0f 85 de 00 00 00       jne    11348 <kvm_arch_async_page_ready+0x118>
              work->wakeup_all)
		...
        if (!vcpu->arch.mmu->root_role.direct &&
   1128c:       44 0f b6 42 50          movzbl 0x50(%rdx),%r8d
   						^
						+-  %rdx is reused, storing the "vcpu->arch.mmu"

   11291:       45 84 c0                test   %r8b,%r8b
   11294:       78 24                   js     112ba <kvm_arch_async_page_ready+0x8a>


The new kvm_arch_async_page_ready():

        bool direct = vcpu->arch.mmu->root_role.direct;
   11243:       48 8b 97 88 02 00 00    mov    0x288(%rdi),%rdx
{
   1124a:       65 48 8b 04 25 28 00    mov    %gs:0x28,%rax
   11251:       00 00  
   11253:       48 89 44 24 48          mov    %rax,0x48(%rsp)
   11258:       31 c0                   xor    %eax,%eax
        bool direct = vcpu->arch.mmu->root_role.direct;
   1125a:       44 0f b6 62 50          movzbl 0x50(%rdx),%r12d
   1125f:       41 c0 ec 07             shr    $0x7,%r12b
   						     ^
						     +- %r12 is the local variable "direct"
        if ((work->arch.direct_map != direct) ||
   11263:       44 3a 66 78             cmp    0x78(%rsi),%r12b
   11267:       0f 85 d5 00 00 00       jne    11342 <kvm_arch_async_page_ready+0x112>
              work->wakeup_all)
		...
        if (!direct && work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
   1128f:       45 84 e4                test   %r12b,%r12b
   						^
						+- one less read from 0x50(%rdx)
   11292:       75 1f                   jne    112b3 <kvm_arch_async_page_ready+0x83>


B.R.
Yu


Yu Zhang Jan. 20, 2023, 7:50 a.m. UTC | #3
On Fri, Jan 20, 2023 at 03:38:24PM +0800, Yu Zhang wrote:
> On Fri, Jan 20, 2023 at 01:18:45AM +0000, Sean Christopherson wrote:
> > +David and Ben
> > 
> > On Tue, Dec 06, 2022, Yu Zhang wrote:
> > > Simplify the code by introducing a wrapper, mmu_is_direct(),
> > > instead of using vcpu->arch.mmu->root_role.direct everywhere.
> > > 
> > > Meanwhile, use a temporary variable 'direct' in routines such
> > > as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
> > > vcpu->arch.mmu->root_role.direct repeatedly.
> 
> Thanks, Sean. I had already forgotten this patch existed. :)
> > 
> > I've looked at this patch at least four times and still can't decide whether or
> > not I like the helper.  On one hand, it's shorter and easier to read.  On the other
> > hand, I don't love that mmu_is_nested() looks at a completely different MMU, which
> > is weird if not confusing.
> 
> Do you mean mmu_is_direct()? Why is it about a different MMU?
> 
> > 
> > Anyone else have an opinion?
> > 
> > > No functional change intended.
> > > 
> > > Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> > > ---
> > >  arch/x86/kvm/mmu/mmu.c | 26 +++++++++++++-------------
> > >  arch/x86/kvm/x86.c     |  9 +++++----
> > >  arch/x86/kvm/x86.h     |  5 +++++
> > >  3 files changed, 23 insertions(+), 17 deletions(-)
> > > 
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index 4736d7849c60..d2d0fabdb702 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -2280,7 +2280,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
> > >  
> > >  	if (iterator->level >= PT64_ROOT_4LEVEL &&
> > >  	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
> > > -	    !vcpu->arch.mmu->root_role.direct)
> > > +	    !mmu_is_direct(vcpu))
> > >  		iterator->level = PT32E_ROOT_LEVEL;
> > >  
> > >  	if (iterator->level == PT32E_ROOT_LEVEL) {
> > > @@ -2677,7 +2677,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
> > >  	gpa_t gpa;
> > >  	int r;
> > >  
> > > -	if (vcpu->arch.mmu->root_role.direct)
> > > +	if (mmu_is_direct(vcpu))
> > >  		return 0;
> > >  
> > >  	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
> > > @@ -3918,7 +3918,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
> > >  	int i;
> > >  	struct kvm_mmu_page *sp;
> > >  
> > > -	if (vcpu->arch.mmu->root_role.direct)
> > > +	if (mmu_is_direct(vcpu))
> > >  		return;
> > >  
> > >  	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
> > > @@ -4147,7 +4147,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> > >  
> > >  	arch.token = alloc_apf_token(vcpu);
> > >  	arch.gfn = gfn;
> > > -	arch.direct_map = vcpu->arch.mmu->root_role.direct;
> > > +	arch.direct_map = mmu_is_direct(vcpu);
> > >  	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
> > >  
> > >  	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
> > > @@ -4157,17 +4157,16 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> > >  void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
> > >  {
> > >  	int r;
> > > +	bool direct = mmu_is_direct(vcpu);
> > 
> > I would prefer to not add local bools and instead do a 1:1 replacement.  "direct"
> > loses too much context (direct what?), and performance-wise I doubt it will
> > influence the compiler.
> 
> If we want to use a new temp value, how about "mmu_direct_mode"?
> 
> But I am also open to simply using mmu_is_direct(), because I just realized the
> benefit is too marginal: the second read of vcpu->arch.mmu->root_role.direct
> should be a cache hit, so adding a local variable only saves one L1 cache read.
Sorry, that should be one TLB and one cache access (I guess VIPT will also
bring some parallelism).

B.R.
Yu
Ben Gardon Jan. 21, 2023, 12:38 a.m. UTC | #4
On Thu, Jan 19, 2023 at 5:18 PM Sean Christopherson <seanjc@google.com> wrote:
>
> +David and Ben
>
> On Tue, Dec 06, 2022, Yu Zhang wrote:
> > Simplify the code by introducing a wrapper, mmu_is_direct(),
> > instead of using vcpu->arch.mmu->root_role.direct everywhere.
> >
> > Meanwhile, use a temporary variable 'direct' in routines such
> > as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
> > vcpu->arch.mmu->root_role.direct repeatedly.
>
> I've looked at this patch at least four times and still can't decide whether or
> not I like the helper.  On one hand, it's shorter and easier to read.  On the other
> hand, I don't love that mmu_is_nested() looks at a completely different MMU, which
> is weird if not confusing.
>
> Anyone else have an opinion?

The slightly shorter line length is kinda nice, but I don't think it
really makes the code any clearer because any reader is still going to
have to do the mental acrobatics to remember what exactly "direct"
means, and why it matters in the given context. If there were some
more useful function names we could wrap that check in, that might be
nice. I don't see a ton of value otherwise. I'm thinking of something
like "mmu_shadows_guest_mappings()" because that actually explains why
we, for example, need to sync child SPTEs.
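
A rough sketch of the naming Ben suggests, assuming it wraps the same role
bit (the name and inverted polarity are hypothetical; no such helper exists
in this patch):

	/* Hypothetical: true when KVM shadows the guest's own page tables,
	 * i.e. when the MMU is *not* direct. */
	static inline bool mmu_shadows_guest_mappings(struct kvm_vcpu *vcpu)
	{
		return !vcpu->arch.mmu->root_role.direct;
	}
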
Sean Christopherson Jan. 25, 2023, 12:25 a.m. UTC | #5
On Fri, Jan 20, 2023, Ben Gardon wrote:
> On Thu, Jan 19, 2023 at 5:18 PM Sean Christopherson <seanjc@google.com> wrote:
> >
> > +David and Ben
> >
> > On Tue, Dec 06, 2022, Yu Zhang wrote:
> > > Simplify the code by introducing a wrapper, mmu_is_direct(),
> > > instead of using vcpu->arch.mmu->root_role.direct everywhere.
> > >
> > > Meanwhile, use a temporary variable 'direct' in routines such
> > > as kvm_mmu_load() and kvm_mmu_page_fault(), instead of checking
> > > vcpu->arch.mmu->root_role.direct repeatedly.
> >
> > I've looked at this patch at least four times and still can't decide whether or
> > not I like the helper.  On one hand, it's shorter and easier to read.  On the other
> > hand, I don't love that mmu_is_nested() looks at a completely different MMU, which
> > is weird if not confusing.
> >
> > Anyone else have an opinion?
> 
> The slightly shorter line length is kinda nice, but I don't think it
> really makes the code any clearer because any reader is still going to
> have to do the mental acrobatics to remember what exactly "direct"
> means, and why it matters in the given context. If there were some
> more useful function names we could wrap that check in, that might be
> nice. I don't see a ton of value otherwise. I'm thinking of something
> like "mmu_shadows_guest_mappings()" because that actually explains why
> we, for example, need to sync child SPTEs.

Ugh, right, thanks to nonpaging mode, something like is_shadow_mmu() isn't correct
even if/when KVM drops support for 32-bit builds.

Yu,
Unless you feel strongly about this one, I'm inclined to leave things as-is.
Yu Zhang Jan. 25, 2023, 2:44 a.m. UTC | #6
On Wed, Jan 25, 2023 at 12:25:40AM +0000, Sean Christopherson wrote:
> Yu,
> Unless you feel strongly about this one, I'm inclined to leave things as-is.

No problem. And thanks!

B.R.
Yu

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4736d7849c60..d2d0fabdb702 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2280,7 +2280,7 @@  static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 
 	if (iterator->level >= PT64_ROOT_4LEVEL &&
 	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
-	    !vcpu->arch.mmu->root_role.direct)
+	    !mmu_is_direct(vcpu))
 		iterator->level = PT32E_ROOT_LEVEL;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2677,7 +2677,7 @@  static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	if (vcpu->arch.mmu->root_role.direct)
+	if (mmu_is_direct(vcpu))
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -3918,7 +3918,7 @@  void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (vcpu->arch.mmu->root_role.direct)
+	if (mmu_is_direct(vcpu))
 		return;
 
 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
@@ -4147,7 +4147,7 @@  static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	arch.token = alloc_apf_token(vcpu);
 	arch.gfn = gfn;
-	arch.direct_map = vcpu->arch.mmu->root_role.direct;
+	arch.direct_map = mmu_is_direct(vcpu);
 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 
 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
@@ -4157,17 +4157,16 @@  static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
 	int r;
+	bool direct = mmu_is_direct(vcpu);
 
-	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
-	      work->wakeup_all)
+	if ((work->arch.direct_map != direct) || work->wakeup_all)
 		return;
 
 	r = kvm_mmu_reload(vcpu);
 	if (unlikely(r))
 		return;
 
-	if (!vcpu->arch.mmu->root_role.direct &&
-	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+	if (!direct && work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
 		return;
 
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
@@ -5331,14 +5330,15 @@  EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
+	bool direct = mmu_is_direct(vcpu);
 
-	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
+	r = mmu_topup_memory_caches(vcpu, !direct);
 	if (r)
 		goto out;
 	r = mmu_alloc_special_roots(vcpu);
 	if (r)
 		goto out;
-	if (vcpu->arch.mmu->root_role.direct)
+	if (direct)
 		r = mmu_alloc_direct_roots(vcpu);
 	else
 		r = mmu_alloc_shadow_roots(vcpu);
@@ -5575,7 +5575,7 @@  int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_PF;
-	bool direct = vcpu->arch.mmu->root_role.direct;
+	bool direct = mmu_is_direct(vcpu);
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
 		return RET_PF_RETRY;
@@ -5606,8 +5606,8 @@  int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 	 * paging in both guests. If true, we simply unprotect the page
 	 * and resume the guest.
 	 */
-	if (vcpu->arch.mmu->root_role.direct &&
-	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
+	if (((error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) &&
+	    direct) {
 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
 		return 1;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 72ac6bf05c8b..b95984a49700 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8420,6 +8420,7 @@  static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 {
 	gpa_t gpa = cr2_or_gpa;
 	kvm_pfn_t pfn;
+	bool direct = mmu_is_direct(vcpu);
 
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
 		return false;
@@ -8428,7 +8429,7 @@  static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
 		return false;
 
-	if (!vcpu->arch.mmu->root_role.direct) {
+	if (!direct) {
 		/*
 		 * Write permission should be allowed since only
 		 * write access need to be emulated.
@@ -8461,7 +8462,7 @@  static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	kvm_release_pfn_clean(pfn);
 
 	/* The instructions are well-emulated on direct mmu. */
-	if (vcpu->arch.mmu->root_role.direct) {
+	if (direct) {
 		unsigned int indirect_shadow_pages;
 
 		write_lock(&vcpu->kvm->mmu_lock);
@@ -8529,7 +8530,7 @@  static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	vcpu->arch.last_retry_eip = ctxt->eip;
 	vcpu->arch.last_retry_addr = cr2_or_gpa;
 
-	if (!vcpu->arch.mmu->root_role.direct)
+	if (!mmu_is_direct(vcpu))
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -8830,7 +8831,7 @@  int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		ctxt->exception.address = cr2_or_gpa;
 
 		/* With shadow page tables, cr2 contains a GVA or nGPA. */
-		if (vcpu->arch.mmu->root_role.direct) {
+		if (mmu_is_direct(vcpu)) {
 			ctxt->gpa_available = true;
 			ctxt->gpa_val = cr2_or_gpa;
 		}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 9de72586f406..aca11db10a8f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -171,6 +171,11 @@  static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
+static inline bool mmu_is_direct(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mmu->root_role.direct;
+}
+
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);