Message ID | 20210305011101.3597423-8-seanjc@google.com (mailing list archive)
---|---
State | New, archived
Series | KVM: x86/mmu: Lots of bug fixes
On Fri, 5 Mar 2021 at 09:12, Sean Christopherson <seanjc@google.com> wrote:
>
> Check the validity of the PDPTRs before allocating any of the PAE roots,
> otherwise a bad PDPTR will cause KVM to leak any previously allocated
> roots.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 20 ++++++++++++++------
>  1 file changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 7ebfbc77b050..9fc2b46f8541 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3269,7 +3269,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
>  static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>  {
>         struct kvm_mmu *mmu = vcpu->arch.mmu;
> -       u64 pdptr, pm_mask;
> +       u64 pdptrs[4], pm_mask;
>         gfn_t root_gfn, root_pgd;
>         hpa_t root;
>         int i;
> @@ -3280,6 +3280,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>         if (mmu_check_root(vcpu, root_gfn))
>                 return 1;
>
> +       if (mmu->root_level == PT32E_ROOT_LEVEL) {
> +               for (i = 0; i < 4; ++i) {
> +                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
> +                       if (!(pdptrs[i] & PT_PRESENT_MASK))
> +                               continue;
> +
> +                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
> +                               return 1;
> +               }
> +       }
> +

I saw this splatting:

 BUG: sleeping function called from invalid context at arch/x86/kvm/kvm_cache_regs.h:115
 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 3090, name: qemu-system-x86
 3 locks held by qemu-system-x86/3090:
  #0: ffff8d499f8d00d0 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x8e/0x680 [kvm]
  #1: ffffb1b540f873e8 (&kvm->srcu){....}-{0:0}, at: vcpu_enter_guest+0xb30/0x1b10 [kvm]
  #2: ffffb1b540f7d018 (&(kvm)->mmu_lock){+.+.}-{2:2}, at: kvm_mmu_load+0xb5/0x540 [kvm]
 Preemption disabled at:
 [<ffffffffc0787365>] kvm_mmu_load+0xb5/0x540 [kvm]
 CPU: 2 PID: 3090 Comm: qemu-system-x86 Tainted: G W OE 5.12.0-rc3+ #3
 Call Trace:
  dump_stack+0x87/0xb7
  ___might_sleep+0x202/0x250
  __might_sleep+0x4a/0x80
  kvm_pdptr_read+0x20/0x60 [kvm]
  kvm_mmu_load+0x3bd/0x540 [kvm]
  vcpu_enter_guest+0x1297/0x1b10 [kvm]
  kvm_arch_vcpu_ioctl_run+0x372/0x690 [kvm]
  kvm_vcpu_ioctl+0x3ca/0x680 [kvm]
  __x64_sys_ioctl+0x27a/0x800
  do_syscall_64+0x38/0x50
  entry_SYSCALL_64_after_hwframe+0x44/0xae

There is a might_sleep() in kvm_pdptr_read(), however, the original
commit didn't explain more. I can send a formal one if the below fix
is acceptable.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index efb41f3..f33026f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3289,17 +3289,24 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        if (mmu_check_root(vcpu, root_gfn))
                return 1;

+       write_unlock(&vcpu->kvm->mmu_lock);
        if (mmu->root_level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
                        pdptrs[i] = mmu->get_pdptr(vcpu, i);
                        if (!(pdptrs[i] & PT_PRESENT_MASK))
                                continue;

-                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
+                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT)) {
+                               write_lock(&vcpu->kvm->mmu_lock);
                                return 1;
+                       }
                }
        }
+       write_lock(&vcpu->kvm->mmu_lock);
+
+       if (make_mmu_pages_available(vcpu))
+               return -ENOSPC;
+
        /*
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guests page table root.
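The splat above points at the PDPTR read path: kvm_pdptr_read() deliberately asserts that it may sleep, because the vendor hook behind it can touch guest memory, and on nested SVM the mmu->get_pdptr() hook really does read the PDPTEs out of the guest's page tables. The sketch below paraphrases the two v5.12-era helpers involved; it is simplified, not verbatim upstream code, and the exact vendor-callback plumbing differs across kernel versions.

/* Paraphrased sketch, not verbatim upstream code. */

/* arch/x86/kvm/kvm_cache_regs.h -- the might_sleep() from the splat. */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* the vendor callback below may touch guest memory */

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
                kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR); /* vendor hook */

        return vcpu->arch.walk_mmu->pdptrs[index];
}

/*
 * arch/x86/kvm/svm/nested.c -- mmu->get_pdptr() for nested NPT can sleep:
 * kvm_vcpu_read_guest_page() may fault in guest memory.
 */
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;

        if (kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                     offset_in_page(cr3) + index * 8, 8))
                return 0;
        return pdpte;
}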
On 08/04/21 13:15, Wanpeng Li wrote:
> I saw this splatting:
>
>  BUG: sleeping function called from invalid context at
> arch/x86/kvm/kvm_cache_regs.h:115
>  kvm_pdptr_read+0x20/0x60 [kvm]
>  kvm_mmu_load+0x3bd/0x540 [kvm]
>
> There is a might_sleep() in kvm_pdptr_read(), however, the original
> commit didn't explain more. I can send a formal one if the below fix
> is acceptable.

I think we can just push make_mmu_pages_available down into
kvm_mmu_load's callees.  This way it's not necessary to hold the lock
until after the PDPTR check:

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0d92a269c5fa..f92c3695bfeb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3244,6 +3244,12 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
        u8 shadow_root_level = mmu->shadow_root_level;
        hpa_t root;
        unsigned i;
+       int r;
+
+       write_lock(&vcpu->kvm->mmu_lock);
+       r = make_mmu_pages_available(vcpu);
+       if (r < 0)
+               goto out_unlock;

        if (is_tdp_mmu_enabled(vcpu->kvm)) {
                root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
@@ -3266,13 +3272,16 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                mmu->root_hpa = __pa(mmu->pae_root);
        } else {
                WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
-               return -EIO;
+               r = -EIO;
        }

+out_unlock:
+       write_unlock(&vcpu->kvm->mmu_lock);
+
        /* root_pgd is ignored for direct MMUs. */
        mmu->root_pgd = 0;

-       return 0;
+       return r;
 }

 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
@@ -3282,6 +3291,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        gfn_t root_gfn, root_pgd;
        hpa_t root;
        int i;
+       int r;

        root_pgd = mmu->get_guest_pgd(vcpu);
        root_gfn = root_pgd >> PAGE_SHIFT;
@@ -3300,6 +3310,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                }
        }

+       write_lock(&vcpu->kvm->mmu_lock);
+       r = make_mmu_pages_available(vcpu);
+       if (r < 0)
+               goto out_unlock;
+
        /*
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guests page table root.
@@ -3308,7 +3323,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                root = mmu_alloc_root(vcpu, root_gfn, 0,
                                      mmu->shadow_root_level, false);
                mmu->root_hpa = root;
-               goto set_root_pgd;
+               goto out_unlock;
        }

        if (WARN_ON_ONCE(!mmu->pae_root))
@@ -3350,7 +3365,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        else
                mmu->root_hpa = __pa(mmu->pae_root);

-set_root_pgd:
+out_unlock:
+       write_unlock(&vcpu->kvm->mmu_lock);
        mmu->root_pgd = root_pgd;

        return 0;
@@ -4852,14 +4868,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        r = mmu_alloc_special_roots(vcpu);
        if (r)
                goto out;
-       write_lock(&vcpu->kvm->mmu_lock);
-       if (make_mmu_pages_available(vcpu))
-               r = -ENOSPC;
-       else if (vcpu->arch.mmu->direct_map)
+       if (vcpu->arch.mmu->direct_map)
                r = mmu_alloc_direct_roots(vcpu);
        else
                r = mmu_alloc_shadow_roots(vcpu);
-       write_unlock(&vcpu->kvm->mmu_lock);
        if (r)
                goto out;

Paolo
On Thu, Apr 08, 2021, Paolo Bonzini wrote:
> On 08/04/21 13:15, Wanpeng Li wrote:
> > I saw this splatting:
> >
> >  BUG: sleeping function called from invalid context at
> > arch/x86/kvm/kvm_cache_regs.h:115
> >  kvm_pdptr_read+0x20/0x60 [kvm]
> >  kvm_mmu_load+0x3bd/0x540 [kvm]
> >
> > There is a might_sleep() in kvm_pdptr_read(), however, the original
> > commit didn't explain more. I can send a formal one if the below fix
> > is acceptable.

We don't want to drop mmu_lock, even temporarily.  The reason for holding
it across the entire sequence is to ensure kvm_mmu_available_pages() isn't
violated.

> I think we can just push make_mmu_pages_available down into
> kvm_mmu_load's callees.  This way it's not necessary to hold the lock
> until after the PDPTR check:

...

> @@ -4852,14 +4868,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
>         r = mmu_alloc_special_roots(vcpu);
>         if (r)
>                 goto out;
> -       write_lock(&vcpu->kvm->mmu_lock);
> -       if (make_mmu_pages_available(vcpu))
> -               r = -ENOSPC;
> -       else if (vcpu->arch.mmu->direct_map)
> +       if (vcpu->arch.mmu->direct_map)
>                 r = mmu_alloc_direct_roots(vcpu);
>         else
>                 r = mmu_alloc_shadow_roots(vcpu);
> -       write_unlock(&vcpu->kvm->mmu_lock);
>         if (r)
>                 goto out;

Freaking PDPTRs.  I was really hoping we could keep the lock and
pages_available() logic outside of the helpers.  What if kvm_mmu_load()
reads the PDPTRs and passes them into mmu_alloc_shadow_roots()?  Or is
that too ugly?

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index efb41f31e80a..e3c4938cd665 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3275,11 +3275,11 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
        return 0;
 }

-static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu, u64 pdptrs[4])
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       u64 pdptrs[4], pm_mask;
        gfn_t root_gfn, root_pgd;
+       u64 pm_mask;
        hpa_t root;
        int i;

@@ -3291,11 +3291,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)

        if (mmu->root_level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
-                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
-                       if (!(pdptrs[i] & PT_PRESENT_MASK))
-                               continue;
-
-                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
+                       if ((pdptrs[i] & PT_PRESENT_MASK) &&
+                           mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
                                return 1;
                }
        }
@@ -4844,21 +4841,33 @@ EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
-       int r;
+       struct kvm_mmu *mmu = vcpu->arch.mmu;
+       u64 pdptrs[4];
+       int r, i;

-       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
+       r = mmu_topup_memory_caches(vcpu, !mmu->direct_map);
        if (r)
                goto out;
        r = mmu_alloc_special_roots(vcpu);
        if (r)
                goto out;
+
+       /*
+        * On SVM, reading PDPTRs might access guest memory, which might fault
+        * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
+        */
+       if (!mmu->direct_map && mmu->root_level == PT32E_ROOT_LEVEL) {
+               for (i = 0; i < 4; ++i)
+                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
+       }
+
        write_lock(&vcpu->kvm->mmu_lock);
        if (make_mmu_pages_available(vcpu))
                r = -ENOSPC;
        else if (vcpu->arch.mmu->direct_map)
                r = mmu_alloc_direct_roots(vcpu);
        else
-               r = mmu_alloc_shadow_roots(vcpu);
+               r = mmu_alloc_shadow_roots(vcpu, pdptrs);
        write_unlock(&vcpu->kvm->mmu_lock);
        if (r)
                goto out;
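The guarantee Sean is referring to comes from make_mmu_pages_available(), which zaps old shadow pages to keep a minimum allocation budget free. The point is that the budget check and the subsequent root allocations must happen within one mmu_lock critical section; dropping the lock in between would let another vCPU consume the freed pages. A simplified sketch, paraphrased from the v5.12-era code rather than quoted verbatim:

/* Simplified sketch of make_mmu_pages_available(); not verbatim. */
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
        unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

        if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
                return 0;

        /*
         * Zap old shadow pages to refill the budget.  mmu_lock must be held
         * from here until the caller has allocated its roots, otherwise the
         * freed pages can be stolen by a concurrent vCPU.
         */
        kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

        if (!kvm_mmu_available_pages(vcpu->kvm))
                return -ENOSPC;

        return 0;
}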
On 08/04/21 17:48, Sean Christopherson wrote:
> Freaking PDPTRs.  I was really hoping we could keep the lock and
> pages_available() logic outside of the helpers.  What if kvm_mmu_load()
> reads the PDPTRs and passes them into mmu_alloc_shadow_roots()?  Or is
> that too ugly?

The patch I have posted (though untested) tries to do that in a slightly
less ugly way by pushing make_mmu_pages_available down to
mmu_alloc_*_roots.

Paolo

> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index efb41f31e80a..e3c4938cd665 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3275,11 +3275,11 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
>         return 0;
>  }
>
> -static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
> +static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu, u64 pdptrs[4])
>  {
>         struct kvm_mmu *mmu = vcpu->arch.mmu;
> -       u64 pdptrs[4], pm_mask;
>         gfn_t root_gfn, root_pgd;
> +       u64 pm_mask;
>         hpa_t root;
>         int i;
>
> @@ -3291,11 +3291,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>
>         if (mmu->root_level == PT32E_ROOT_LEVEL) {
>                 for (i = 0; i < 4; ++i) {
> -                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
> -                       if (!(pdptrs[i] & PT_PRESENT_MASK))
> -                               continue;
> -
> -                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
> +                       if ((pdptrs[i] & PT_PRESENT_MASK) &&
> +                           mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
>                                 return 1;
>                 }
>         }
> @@ -4844,21 +4841,33 @@ EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
>
>  int kvm_mmu_load(struct kvm_vcpu *vcpu)
>  {
> -       int r;
> +       struct kvm_mmu *mmu = vcpu->arch.mmu;
> +       u64 pdptrs[4];
> +       int r, i;
>
> -       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
> +       r = mmu_topup_memory_caches(vcpu, !mmu->direct_map);
>         if (r)
>                 goto out;
>         r = mmu_alloc_special_roots(vcpu);
>         if (r)
>                 goto out;
> +
> +       /*
> +        * On SVM, reading PDPTRs might access guest memory, which might fault
> +        * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
> +        */
> +       if (!mmu->direct_map && mmu->root_level == PT32E_ROOT_LEVEL) {
> +               for (i = 0; i < 4; ++i)
> +                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
> +       }
> +
>         write_lock(&vcpu->kvm->mmu_lock);
>         if (make_mmu_pages_available(vcpu))
>                 r = -ENOSPC;
>         else if (vcpu->arch.mmu->direct_map)
>                 r = mmu_alloc_direct_roots(vcpu);
>         else
> -               r = mmu_alloc_shadow_roots(vcpu);
> +               r = mmu_alloc_shadow_roots(vcpu, pdptrs);
>         write_unlock(&vcpu->kvm->mmu_lock);
>         if (r)
>                 goto out;
>
On Thu, Apr 08, 2021, Paolo Bonzini wrote:
> On 08/04/21 17:48, Sean Christopherson wrote:
> > Freaking PDPTRs.  I was really hoping we could keep the lock and
> > pages_available() logic outside of the helpers.  What if kvm_mmu_load()
> > reads the PDPTRs and passes them into mmu_alloc_shadow_roots()?  Or is
> > that too ugly?
>
> The patch I have posted (though untested) tries to do that in a slightly
> less ugly way by pushing make_mmu_pages_available down to
> mmu_alloc_*_roots.

Yeah, I agree it's less ugly.  It would be nice to not duplicate that
code, but it's probably not worth the ugliness.  :-/

For your approach, can we put the out label after the success path?
Setting mmu->root_pgd isn't wrong per se, but doing so might mislead
future readers into thinking that it's functionally necessary.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index efb41f31e80a..93f97d0a9e2e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3244,6 +3244,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
        u8 shadow_root_level = mmu->shadow_root_level;
        hpa_t root;
        unsigned i;
+       int r;
+
+       write_lock(&vcpu->kvm->mmu_lock);
+
+       r = make_mmu_pages_available(vcpu);
+       if (r)
+               goto out_unlock;

        if (is_tdp_mmu_enabled(vcpu->kvm)) {
                root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
@@ -3252,8 +3259,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
                mmu->root_hpa = root;
        } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
-               if (WARN_ON_ONCE(!mmu->pae_root))
-                       return -EIO;
+               if (WARN_ON_ONCE(!mmu->pae_root)) {
+                       r = -EIO;
+                       goto out_unlock;
+               }

                for (i = 0; i < 4; ++i) {
                        WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
@@ -3266,13 +3275,15 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                mmu->root_hpa = __pa(mmu->pae_root);
        } else {
                WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
-               return -EIO;
+               r = -EIO;
+               goto out_unlock;
        }

        /* root_pgd is ignored for direct MMUs. */
        mmu->root_pgd = 0;
-
-       return 0;
+out_unlock:
+       write_unlock(&vcpu->kvm->mmu_lock);
+       return r;
 }

 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
@@ -3281,7 +3292,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        u64 pdptrs[4], pm_mask;
        gfn_t root_gfn, root_pgd;
        hpa_t root;
-       int i;
+       int i, r;

        root_pgd = mmu->get_guest_pgd(vcpu);
        root_gfn = root_pgd >> PAGE_SHIFT;
@@ -3289,6 +3300,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        if (mmu_check_root(vcpu, root_gfn))
                return 1;

+       /*
+        * On SVM, reading PDPTRs might access guest memory, which might fault
+        * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
+        */
        if (mmu->root_level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
                        pdptrs[i] = mmu->get_pdptr(vcpu, i);
@@ -3300,6 +3315,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                }
        }

+       write_lock(&vcpu->kvm->mmu_lock);
+
+       r = make_mmu_pages_available(vcpu);
+       if (r)
+               goto out_unlock;
+
        /*
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guests page table root.
@@ -3311,8 +3332,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                goto set_root_pgd;
        }

-       if (WARN_ON_ONCE(!mmu->pae_root))
-               return -EIO;
+       if (WARN_ON_ONCE(!mmu->pae_root)) {
+               r = -EIO;
+               goto out_unlock;
+       }

        /*
         * We shadow a 32 bit page table.  This may be a legacy 2-level
@@ -3323,8 +3346,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

-               if (WARN_ON_ONCE(!mmu->lm_root))
-                       return -EIO;
+               if (WARN_ON_ONCE(!mmu->lm_root)) {
+                       r = -EIO;
+                       goto out_unlock;
+               }

                mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
        }
@@ -3352,8 +3377,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)

 set_root_pgd:
        mmu->root_pgd = root_pgd;
-
-       return 0;
+out_unlock:
+       write_unlock(&vcpu->kvm->mmu_lock);
+       return r;
 }

 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
@@ -4852,14 +4878,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        r = mmu_alloc_special_roots(vcpu);
        if (r)
                goto out;
-       write_lock(&vcpu->kvm->mmu_lock);
-       if (make_mmu_pages_available(vcpu))
-               r = -ENOSPC;
-       else if (vcpu->arch.mmu->direct_map)
+       if (vcpu->arch.mmu->direct_map)
                r = mmu_alloc_direct_roots(vcpu);
        else
                r = mmu_alloc_shadow_roots(vcpu);
-       write_unlock(&vcpu->kvm->mmu_lock);
        if (r)
                goto out;
On 08/04/21 18:27, Sean Christopherson wrote:
> For your approach, can we put the out label after the success path?
> Setting mmu->root_pgd isn't wrong per se, but doing so might mislead
> future readers into thinking that it's functionally necessary.

Indeed, thanks for the speedy review.  I'll get it queued tomorrow.

Paolo
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7ebfbc77b050..9fc2b46f8541 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3269,7 +3269,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       u64 pdptr, pm_mask;
+       u64 pdptrs[4], pm_mask;
        gfn_t root_gfn, root_pgd;
        hpa_t root;
        int i;
@@ -3280,6 +3280,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        if (mmu_check_root(vcpu, root_gfn))
                return 1;

+       if (mmu->root_level == PT32E_ROOT_LEVEL) {
+               for (i = 0; i < 4; ++i) {
+                       pdptrs[i] = mmu->get_pdptr(vcpu, i);
+                       if (!(pdptrs[i] & PT_PRESENT_MASK))
+                               continue;
+
+                       if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
+                               return 1;
+               }
+       }
+
        /*
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guests page table root.
@@ -3309,14 +3320,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                MMU_WARN_ON(VALID_PAGE(mmu->pae_root[i]));

                if (mmu->root_level == PT32E_ROOT_LEVEL) {
-                       pdptr = mmu->get_pdptr(vcpu, i);
-                       if (!(pdptr & PT_PRESENT_MASK)) {
+                       if (!(pdptrs[i] & PT_PRESENT_MASK)) {
                                mmu->pae_root[i] = 0;
                                continue;
                        }
-                       root_gfn = pdptr >> PAGE_SHIFT;
-                       if (mmu_check_root(vcpu, root_gfn))
-                               return 1;
+                       root_gfn = pdptrs[i] >> PAGE_SHIFT;
                }

                root = mmu_alloc_root(vcpu, root_gfn, i << 30,
Check the validity of the PDPTRs before allocating any of the PAE roots,
otherwise a bad PDPTR will cause KVM to leak any previously allocated
roots.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
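For reference, the "validity" check the patch hoists is mmu_check_root(), which verifies that the root GFN lies within a visible memslot and requests a triple fault otherwise. A paraphrased sketch follows; the helper names are approximate and this is not the verbatim upstream code:

/* Paraphrased sketch of mmu_check_root(); not verbatim upstream code. */
static bool mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
        /* A root GFN outside any visible memslot is fatal for the guest. */
        if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                return true;
        }

        return false;
}

Before this patch, a failed check on the second or third PDPTE returned 1 after earlier PAE roots had already been allocated, which is exactly the leak the hoisted loop avoids.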