Message ID | 20200605213853.14959-17-sean.j.christopherson@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: Cleanup and unify kvm_mmu_memory_cache usage | expand |
On Fri, Jun 5, 2020 at 2:39 PM Sean Christopherson <sean.j.christopherson@intel.com> wrote: > > Replace the @max param in mmu_topup_memory_cache() and instead use > ARRAY_SIZE() to terminate the loop to fill the cache. This removes a > BUG_ON() and sets the stage for moving arm64 to the common memory cache > implementation. > > No functional change intended. > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> > --- > arch/arm64/kvm/mmu.c | 12 ++++-------- > 1 file changed, 4 insertions(+), 8 deletions(-) > > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c > index a1f6bc70c4e4..9398b66f8a87 100644 > --- a/arch/arm64/kvm/mmu.c > +++ b/arch/arm64/kvm/mmu.c > @@ -124,15 +124,13 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) > put_page(virt_to_page(pudp)); > } > > -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, > - int min, int max) > +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min) > { > void *page; > > - BUG_ON(max > KVM_NR_MEM_OBJS); KVM_NR_MEM_OBJS should be undefined as of patch 14 in this series. I'd recommend changing this to use the new constant you defined in that patch. > if (cache->nobjs >= min) > return 0; > - while (cache->nobjs < max) { > + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { > page = (void *)__get_free_page(GFP_PGTABLE_USER); > if (!page) > return -ENOMEM; > @@ -1356,8 +1354,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, > pte = kvm_s2pte_mkwrite(pte); > > ret = mmu_topup_memory_cache(&cache, > - kvm_mmu_cache_min_pages(kvm), > - KVM_NR_MEM_OBJS); See above, KVM_NR_MEM_OBJS is undefined as of patch 14. 
> + kvm_mmu_cache_min_pages(kvm)); > if (ret) > goto out; > spin_lock(&kvm->mmu_lock); > @@ -1737,8 +1734,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, > up_read(&current->mm->mmap_sem); > > /* We need minimum second+third level pages */ > - ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm), > - KVM_NR_MEM_OBJS); See above, KVM_NR_MEM_OBJS is undefined as of patch 14. > + ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); > if (ret) > return ret; > > -- > 2.26.0 >
On Wed, Jun 10, 2020 at 03:00:47PM -0700, Ben Gardon wrote: > On Fri, Jun 5, 2020 at 2:39 PM Sean Christopherson > <sean.j.christopherson@intel.com> wrote: > > > > Replace the @max param in mmu_topup_memory_cache() and instead use > > ARRAY_SIZE() to terminate the loop to fill the cache. This removes a > > BUG_ON() and sets the stage for moving arm64 to the common memory cache > > implementation. > > > > No functional change intended. > > > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> > > --- > > arch/arm64/kvm/mmu.c | 12 ++++-------- > > 1 file changed, 4 insertions(+), 8 deletions(-) > > > > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c > > index a1f6bc70c4e4..9398b66f8a87 100644 > > --- a/arch/arm64/kvm/mmu.c > > +++ b/arch/arm64/kvm/mmu.c > > @@ -124,15 +124,13 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) > > put_page(virt_to_page(pudp)); > > } > > > > -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, > > - int min, int max) > > +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min) > > { > > void *page; > > > > - BUG_ON(max > KVM_NR_MEM_OBJS); > KVM_NR_MEM_OBJS should be undefined as of patch 14 in this series. I'd > recommend changing this to use the new constant you defined in that > patch. My intent was to leave KVM_NR_MEM_OBJS defined by arm64 and MIPS until they move to the common implementation, e.g. this should be defined in arch/arm64/include/asm/kvm_host.h until patch 18. I'll get cross-compiling setup so I can properly test bisection before sending v2. 
> > if (cache->nobjs >= min) > > return 0; > > - while (cache->nobjs < max) { > > + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { > > page = (void *)__get_free_page(GFP_PGTABLE_USER); > > if (!page) > > return -ENOMEM; > > @@ -1356,8 +1354,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, > > pte = kvm_s2pte_mkwrite(pte); > > > > ret = mmu_topup_memory_cache(&cache, > > - kvm_mmu_cache_min_pages(kvm), > > - KVM_NR_MEM_OBJS); > See above, KVM_NR_MEM_OBJS is undefined as of patch 14. > > + kvm_mmu_cache_min_pages(kvm)); > > if (ret) > > goto out; > > spin_lock(&kvm->mmu_lock); > > @@ -1737,8 +1734,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, > > up_read(&current->mm->mmap_sem); > > > > /* We need minimum second+third level pages */ > > - ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm), > > - KVM_NR_MEM_OBJS); > See above, KVM_NR_MEM_OBJS is undefined as of patch 14. > > + ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); > > if (ret) > > return ret; > > > > -- > > 2.26.0 > >
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index a1f6bc70c4e4..9398b66f8a87 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -124,15 +124,13 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) put_page(virt_to_page(pudp)); } -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, - int min, int max) +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min) { void *page; - BUG_ON(max > KVM_NR_MEM_OBJS); if (cache->nobjs >= min) return 0; - while (cache->nobjs < max) { + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { page = (void *)__get_free_page(GFP_PGTABLE_USER); if (!page) return -ENOMEM; @@ -1356,8 +1354,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, pte = kvm_s2pte_mkwrite(pte); ret = mmu_topup_memory_cache(&cache, - kvm_mmu_cache_min_pages(kvm), - KVM_NR_MEM_OBJS); + kvm_mmu_cache_min_pages(kvm)); if (ret) goto out; spin_lock(&kvm->mmu_lock); @@ -1737,8 +1734,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, up_read(&current->mm->mmap_sem); /* We need minimum second+third level pages */ - ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm), - KVM_NR_MEM_OBJS); + ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); if (ret) return ret;
Replace the @max param in mmu_topup_memory_cache() and instead use ARRAY_SIZE() to terminate the loop to fill the cache. This removes a BUG_ON() and sets the stage for moving arm64 to the common memory cache implementation. No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/arm64/kvm/mmu.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-)