Message ID | 1537524123-9578-19-git-send-email-paulus@ozlabs.org (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: PPC: Book3S HV: Nested HV virtualization |
On Fri, Sep 21, 2018 at 08:01:49PM +1000, Paul Mackerras wrote:
> kvmppc_unmap_pte() does a sequence of operations that are open-coded in
> kvm_unmap_radix().  This extends kvmppc_unmap_pte() a little so that it
> can be used by kvm_unmap_radix(), and makes kvm_unmap_radix() call it.
>
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
On Wed, Sep 26, 2018 at 02:08:37PM +1000, David Gibson wrote:
> On Fri, Sep 21, 2018 at 08:01:49PM +1000, Paul Mackerras wrote:
> > kvmppc_unmap_pte() does a sequence of operations that are open-coded in
> > kvm_unmap_radix().  This extends kvmppc_unmap_pte() a little so that it
> > can be used by kvm_unmap_radix(), and makes kvm_unmap_radix() call it.
> >
> > Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
>
> Reviewed-by: David Gibson <david@gibson.dropbear.id.au>

Although...

> > +		if (shift)
> > +			page_size = 1ul << shift;
> > +		if (!memslot)
> > +			memslot = gfn_to_memslot(kvm, gfn);

...it might be nicer to avoid the explicit test on memslot by making a
__kvmppc_unmap_pte() which must have memslot passed in, and a wrapper
which computes it from the gfn.
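Concretely, the shape David is suggesting would look something like the
sketch below. This is illustrative only, not code from the series:
__kvmppc_unmap_pte() is the hypothetical helper named in his comment, and
the actual unmap work is elided.

	/* Variant that requires the caller to supply the memslot. */
	static void __kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
				       unsigned long gpa, unsigned int shift,
				       struct kvm_memory_slot *memslot)
	{
		/* PTE clear, TLB invalidation and, if the old PTE was
		 * dirty, the dirty-map update against memslot (which
		 * can still be NULL if no slot covers the gfn). */
	}

	/* Wrapper for callers that don't already hold a memslot pointer. */
	static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
				     unsigned long gpa, unsigned int shift)
	{
		unsigned long gfn = gpa >> PAGE_SHIFT;

		__kvmppc_unmap_pte(kvm, pte, gpa, shift,
				   gfn_to_memslot(kvm, gfn));
	}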
On Wed, Sep 26, 2018 at 02:30:02PM +1000, David Gibson wrote:
> On Wed, Sep 26, 2018 at 02:08:37PM +1000, David Gibson wrote:
> > > +		if (shift)
> > > +			page_size = 1ul << shift;
> > > +		if (!memslot)
> > > +			memslot = gfn_to_memslot(kvm, gfn);
>
> ...it might be nicer to avoid the explicit test on memslot by making a
> __kvmppc_unmap_pte() which must have memslot passed in, and a wrapper
> which computes it from the gfn.

Then we would be looking up the memslot even when we aren't going to
use it.  In a subsequent patch this function gets used on shadow page
tables, and in that case we never need the memslot.

Paul.
On Wed, Sep 26, 2018 at 09:18:56PM +1000, Paul Mackerras wrote:
> On Wed, Sep 26, 2018 at 02:30:02PM +1000, David Gibson wrote:
> > ...it might be nicer to avoid the explicit test on memslot by making a
> > __kvmppc_unmap_pte() which must have memslot passed in, and a wrapper
> > which computes it from the gfn.
>
> Then we would be looking up the memslot even when we aren't going to
> use it.  In a subsequent patch this function gets used on shadow page
> tables, and in that case we never need the memslot.

Ah, good point.
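The cost Paul is pointing at: the wrapper form sketched above would call
gfn_to_memslot() unconditionally, whereas in the merged kvmppc_unmap_pte()
(full patch below) the lookup sits inside the dirty-tracking branch and
only runs when the caller didn't supply a memslot. Condensed from the
patch:

	if (old & _PAGE_DIRTY) {
		unsigned long gfn = gpa >> PAGE_SHIFT;

		/* The memslot is only needed to update the dirty map,
		 * so look it up lazily, and only when the caller did
		 * not pass one in (shadow page tables never need it). */
		if (!memslot)
			memslot = gfn_to_memslot(kvm, gfn);
		/* ... dirty-map update as in the patch ... */
	}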
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 47f2b18..d9357e0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -240,21 +240,25 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
 }
 
 static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
-			     unsigned long gpa, unsigned int shift)
+			     unsigned long gpa, unsigned int shift,
+			     struct kvm_memory_slot *memslot)
 
 {
-	unsigned long page_size = 1ul << shift;
 	unsigned long old;
 
 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
 	kvmppc_radix_tlbie_page(kvm, gpa, shift);
 	if (old & _PAGE_DIRTY) {
 		unsigned long gfn = gpa >> PAGE_SHIFT;
-		struct kvm_memory_slot *memslot;
+		unsigned long page_size = PAGE_SIZE;
 
-		memslot = gfn_to_memslot(kvm, gfn);
-		if (memslot && memslot->dirty_bitmap)
+		if (shift)
+			page_size = 1ul << shift;
+		if (!memslot)
+			memslot = gfn_to_memslot(kvm, gfn);
+		if (memslot && memslot->dirty_bitmap) {
 			kvmppc_update_dirty_map(memslot, gfn, page_size);
+		}
 	}
 }
 
@@ -282,7 +286,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
 			WARN_ON_ONCE(1);
 			kvmppc_unmap_pte(kvm, p,
 					 pte_pfn(*p) << PAGE_SHIFT,
-					 PAGE_SHIFT);
+					 PAGE_SHIFT, NULL);
 		}
 	}
 
@@ -304,7 +308,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
 				WARN_ON_ONCE(1);
 				kvmppc_unmap_pte(kvm, (pte_t *)p,
 					pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
-					PMD_SHIFT);
+					PMD_SHIFT, NULL);
 			}
 		} else {
 			pte_t *pte;
@@ -468,7 +472,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			goto out_unlock;
 		}
 		/* Valid 1GB page here already, remove it */
-		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
+		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL);
 	}
 	if (level == 2) {
 		if (!pud_none(*pud)) {
@@ -517,7 +521,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			goto out_unlock;
 		}
 		/* Valid 2MB page here already, remove it */
-		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
+		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL);
 	}
 	if (level == 1) {
 		if (!pmd_none(*pmd)) {
@@ -780,20 +784,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	pte_t *ptep;
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;
-	unsigned long old;
 
 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
-	if (ptep && pte_present(*ptep)) {
-		old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
-					      gpa, shift);
-		kvmppc_radix_tlbie_page(kvm, gpa, shift);
-		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-			unsigned long psize = PAGE_SIZE;
-			if (shift)
-				psize = 1ul << shift;
-			kvmppc_update_dirty_map(memslot, gfn, psize);
-		}
-	}
+	if (ptep && pte_present(*ptep))
+		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot);
 	return 0;
 }
kvmppc_unmap_pte() does a sequence of operations that are open-coded in
kvm_unmap_radix().  This extends kvmppc_unmap_pte() a little so that it
can be used by kvm_unmap_radix(), and makes kvm_unmap_radix() call it.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 36 ++++++++++++++--------------------
 1 file changed, 15 insertions(+), 21 deletions(-)