[06/14] KVM: arm64: Return next table from map callbacks

Message ID 20220830194132.962932-7-oliver.upton@linux.dev
State New, archived
Series KVM: arm64: Parallel stage-2 fault handling

Commit Message

Oliver Upton Aug. 30, 2022, 7:41 p.m. UTC
The map walkers install new page tables during their traversal. Return
the newly-installed table PTE from the map callbacks to point the walker
at the new table w/o rereading the ptep.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/kvm/hyp/pgtable.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
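
For orientation, here is a minimal, compile-able sketch of the contract
this patch establishes. All of the names below (the kvm_pte_t typedef,
the bit values, map_cb(), visit()) are simplified stand-ins for the real
definitions in arch/arm64/kvm/hyp/pgtable.c, not kernel code:

        #include <stdbool.h>
        #include <stdio.h>

        /* Simplified stand-ins for the real kernel types and helpers. */
        typedef unsigned long kvm_pte_t;

        #define PTE_VALID 1UL
        #define PTE_TABLE 2UL

        static bool pte_is_table(kvm_pte_t pte, unsigned int level)
        {
                (void)level; /* the real check also depends on the level */
                return (pte & (PTE_VALID | PTE_TABLE)) ==
                       (PTE_VALID | PTE_TABLE);
        }

        /* A map callback: installs a new table and reports it via @old. */
        static int map_cb(kvm_pte_t *ptep, kvm_pte_t *old)
        {
                kvm_pte_t new_table = PTE_VALID | PTE_TABLE;

                *ptep = new_table; /* smp_store_release() in the real code */
                *old = new_table;  /* point the walker at the new table */
                return 0;
        }

        static void visit(kvm_pte_t *ptep, unsigned int level)
        {
                kvm_pte_t pte = *ptep;

                map_cb(ptep, &pte); /* may overwrite @pte with the table PTE */

                if (pte_is_table(pte, level))
                        printf("descend without rereading *ptep\n");
        }

        int main(void)
        {
                kvm_pte_t entry = 0;

                visit(&entry, 2);
                return 0;
        }

The essential part is the last two lines of map_cb(): the callback both
installs the table PTE and hands it back through @old, so the walker can
decide to descend without a second read of *ptep.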

Comments

David Matlack Sept. 7, 2022, 9:32 p.m. UTC | #1
On Tue, Aug 30, 2022 at 07:41:24PM +0000, Oliver Upton wrote:
> The map walkers install new page tables during their traversal. Return
> the newly-installed table PTE from the map callbacks to point the walker
> at the new table w/o rereading the ptep.
> 
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
>  arch/arm64/kvm/hyp/pgtable.c | 9 +++++----
>  1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index 331f6e3b2c20..f911509e6512 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -202,13 +202,12 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
>  	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
>  		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, &pte,
>  					     KVM_PGTABLE_WALK_LEAF);
> -		pte = *ptep;
> -		table = kvm_pte_table(pte, level);
>  	}
>  
>  	if (ret)
>  		goto out;

Rather than passing a pointer to the local variable pte and requiring
all downstream code to update it (and deal with dereferencing to read
the old pte), wouldn't it be simpler to just re-read the PTE here? e.g.

        /*
         * Explicitly re-read the PTE since it may have been modified
         * during the TABLE_PRE or LEAF callback.
         */
        pte = kvm_pte_read(ptep);

This should also result in better behavior once parallelization is
introduced, because it will prevent the walker from traversing down and
doing a bunch of work on page tables that are in the process of being
freed by some other thread.
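
For illustration, a compile-able sketch of this re-read variant;
pte_is_table() and the typedef are simplified stand-ins again, and the
__atomic_load_n() builtin stands in for the kernel's READ_ONCE():

        #include <stdbool.h>

        typedef unsigned long kvm_pte_t;

        static bool pte_is_table(kvm_pte_t pte, unsigned int level)
        {
                (void)level;
                return pte & 2UL; /* stand-in for the real table check */
        }

        static void visit_reread(kvm_pte_t *ptep, unsigned int level)
        {
                kvm_pte_t pte;

                /* ... the LEAF callback ran here and may have changed *ptep ... */

                /*
                 * Re-read rather than trust an out-parameter: if another
                 * thread has meanwhile unlinked a just-installed table,
                 * the walker sees the current entry and skips the dead
                 * subtree instead of walking page tables being freed.
                 */
                pte = __atomic_load_n(ptep, __ATOMIC_RELAXED);

                if (!pte_is_table(pte, level))
                        return; /* nothing to descend into */
        }

        int main(void)
        {
                kvm_pte_t entry = 0;

                visit_reread(&entry, 2);
                return 0;
        }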

>  
> +	table = kvm_pte_table(pte, level);
>  	if (!table) {

nit: Technically there's no reason to set @table again. e.g. This could
just be:

        if (!kvm_pte_table(pte, level)) {

>  		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
>  		data->addr += kvm_granule_size(level);
> @@ -427,6 +426,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, kvm_pte
>  	new = kvm_init_table_pte(childp, mm_ops);
>  	mm_ops->get_page(ptep);
>  	smp_store_release(ptep, new);
> +	*old = new;
>  
>  	return 0;
>  }
> @@ -768,7 +768,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
>  }
>  
>  static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
> -				struct stage2_map_data *data);
> +				kvm_pte_t *old, struct stage2_map_data *data);
>  
>  static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
>  				     kvm_pte_t *ptep, kvm_pte_t *old,
> @@ -791,7 +791,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
>  	 */
>  	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
>  
> -	ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
> +	ret = stage2_map_walk_leaf(addr, end, level, ptep, old, data);
>  
>  	mm_ops->put_page(ptep);
>  	mm_ops->free_removed_table(childp, level + 1, pgt);
> @@ -832,6 +832,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
>  	new = kvm_init_table_pte(childp, mm_ops);
>  	mm_ops->get_page(ptep);
>  	smp_store_release(ptep, new);
> +	*old = new;
>  
>  	return 0;
>  }
> -- 
> 2.37.2.672.g94769d06f0-goog
>
Oliver Upton Sept. 9, 2022, 9:38 a.m. UTC | #2
Hi David,

On Wed, Sep 07, 2022 at 02:32:29PM -0700, David Matlack wrote:
> On Tue, Aug 30, 2022 at 07:41:24PM +0000, Oliver Upton wrote:
> > The map walkers install new page tables during their traversal. Return
> > the newly-installed table PTE from the map callbacks to point the walker
> > at the new table w/o rereading the ptep.
> > 
> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> > ---
> >  arch/arm64/kvm/hyp/pgtable.c | 9 +++++----
> >  1 file changed, 5 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> > index 331f6e3b2c20..f911509e6512 100644
> > --- a/arch/arm64/kvm/hyp/pgtable.c
> > +++ b/arch/arm64/kvm/hyp/pgtable.c
> > @@ -202,13 +202,12 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
> >  	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
> >  		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, &pte,
> >  					     KVM_PGTABLE_WALK_LEAF);
> > -		pte = *ptep;
> > -		table = kvm_pte_table(pte, level);
> >  	}
> >  
> >  	if (ret)
> >  		goto out;
> 
> Rather than passing a pointer to the local variable pte and requiring
> all downstream code to update it (and deal with dereferencing to read
> the old pte), wouldn't it be simpler to just re-read the PTE here?

Yeah, you're right. I had some odd rationalization about this, but
there's no need to force a walker to descend into the new table level:
that descent is wasted work if another thread unlinks the table.

[...]

> >  
> > +	table = kvm_pte_table(pte, level);
> >  	if (!table) {
> 
> nit: Technically there's no reason to set @table again. e.g. This could
> just be:
> 
>         if (!kvm_pte_table(pte, level)) {

Sure, I'll squish these lines together.

--
Thanks,
Oliver

Patch

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 331f6e3b2c20..f911509e6512 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -202,13 +202,12 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
 		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, &pte,
 					     KVM_PGTABLE_WALK_LEAF);
-		pte = *ptep;
-		table = kvm_pte_table(pte, level);
 	}
 
 	if (ret)
 		goto out;
 
+	table = kvm_pte_table(pte, level);
 	if (!table) {
 		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
 		data->addr += kvm_granule_size(level);
@@ -427,6 +426,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, kvm_pte
 	new = kvm_init_table_pte(childp, mm_ops);
 	mm_ops->get_page(ptep);
 	smp_store_release(ptep, new);
+	*old = new;
 
 	return 0;
 }
@@ -768,7 +768,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 }
 
 static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
-				struct stage2_map_data *data);
+				kvm_pte_t *old, struct stage2_map_data *data);
 
 static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 				     kvm_pte_t *ptep, kvm_pte_t *old,
@@ -791,7 +791,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	 */
 	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
 
-	ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+	ret = stage2_map_walk_leaf(addr, end, level, ptep, old, data);
 
 	mm_ops->put_page(ptep);
 	mm_ops->free_removed_table(childp, level + 1, pgt);
@@ -832,6 +832,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	new = kvm_init_table_pte(childp, mm_ops);
 	mm_ops->get_page(ptep);
 	smp_store_release(ptep, new);
+	*old = new;
 
 	return 0;
 }