diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -328,6 +328,8 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * @prot:	Permissions and attributes for the mapping.
  * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
  *		page-table pages.
+ * @shared:	true if multiple software walkers could be traversing the tables
+ *		in parallel
  *
  * The offset of @addr within a page is ignored, @size is rounded-up to
  * the next page boundary and @phys is rounded-down to the previous page
@@ -349,7 +351,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  */
 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                            u64 phys, enum kvm_pgtable_prot prot,
-                           void *mc);
+                           void *mc, bool shared);
 
 /**
  * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -251,7 +251,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
                                       enum kvm_pgtable_prot prot)
 {
 	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
-				      prot, &host_s2_pool);
+				      prot, &host_s2_pool, false);
 }
 
 /*
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1119,7 +1119,7 @@ static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, kvm_
 
 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                            u64 phys, enum kvm_pgtable_prot prot,
-                           void *mc)
+                           void *mc, bool shared)
 {
 	int ret;
 	struct stage2_map_data map_data = {
@@ -1144,7 +1144,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	if (ret)
 		return ret;
 
-	ret = kvm_pgtable_walk(pgt, addr, size, &walker, false);
+	ret = kvm_pgtable_walk(pgt, addr, size, &walker, shared);
 	dsb(ishst);
 	return ret;
 }
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -832,7 +832,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
 		write_lock(&kvm->mmu_lock);
 		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
-					     &cache);
+					     &cache, false);
 		write_unlock(&kvm->mmu_lock);
 		if (ret)
 			break;
@@ -1326,7 +1326,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
 					     __pfn_to_phys(pfn), prot,
-					     mmu_caches);
+					     mmu_caches, true);
 	}
 
 	/* Mark the page dirty only if the fault is handled successfully */
@@ -1526,7 +1526,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	 */
 	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
 			       PAGE_SIZE, __pfn_to_phys(pfn),
-			       KVM_PGTABLE_PROT_R, NULL);
+			       KVM_PGTABLE_PROT_R, NULL, false);
 
 	return false;
 }
The map walker is now apprised of how to walk the tables in parallel
with another table walker. Take a parameter indicating whether or not
a walk is done in parallel, so as to relax the atomicity/locking
requirements on ptes. Defer actually using parallel walks to a later
change.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h  | 4 +++-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 +-
 arch/arm64/kvm/hyp/pgtable.c          | 4 ++--
 arch/arm64/kvm/mmu.c                  | 6 +++---
 4 files changed, 9 insertions(+), 7 deletions(-)
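
A note for readers following along: the reason @shared has to reach
kvm_pgtable_walk() is that a walker which may race with other software
walkers can no longer update ptes with plain stores; it must install them
atomically and tolerate losing the race. The relaxed-vs-atomic pte update
itself is deferred to a later change in this series, so the standalone C11
sketch below only models the idea; the helper name stage2_try_set_pte()
and the retry policy are illustrative, not the series' actual code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t kvm_pte_t;

    /*
     * Model of a pte update under the two walker modes:
     *  - shared == false: the caller serializes all walkers (e.g. it
     *    holds the MMU lock for write), so a plain store suffices.
     *  - shared == true:  other walkers may touch the same pte, so the
     *    update must be a compare-exchange against the value we read,
     *    and the walker must cope with the cmpxchg failing.
     */
    static bool stage2_try_set_pte(_Atomic kvm_pte_t *ptep, kvm_pte_t old,
                                   kvm_pte_t new, bool shared)
    {
            if (!shared) {
                    atomic_store_explicit(ptep, new, memory_order_relaxed);
                    return true;
            }
            return atomic_compare_exchange_strong(ptep, &old, new);
    }

    int main(void)
    {
            _Atomic kvm_pte_t pte = 0;

            /* Exclusive walk: unconditional install. */
            stage2_try_set_pte(&pte, 0, 0x1234, false);

            /* Shared walk: fails because the pte is no longer 0. */
            if (!stage2_try_set_pte(&pte, 0, 0x5678, true))
                    printf("lost the race; retry the walk\n");
            return 0;
    }

This also matches which caller passes shared = true above: user_mem_abort()
is the stage-2 fault handler expected to run concurrently once parallel
walks are switched on, while the setup paths (kvm_phys_addr_ioremap(), the
pKVM host stage-2 idmap and kvm_set_spte_gfn()) keep exclusive semantics.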