Pass enum kvm_pgtable_walk_flags to kvm_pgtable_stage2_split() from its
callers. This allows users of the split walker to specify whether the
split logic should run via a shared or a non-shared walker.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h | 4 +++-
 arch/arm64/kvm/hyp/pgtable.c         | 5 +++--
 arch/arm64/kvm/mmu.c                 | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -684,6 +684,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size,
* @size: Size of the range.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
+ * @flags: Page-table walker flags.
*
* The function tries to split any level 1 or 2 entry that overlaps
* with the input range (given by @addr and @size).
@@ -693,7 +694,8 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size,
* blocks in the input range as allowed by @mc_capacity.
*/
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
-			     struct kvm_mmu_memory_cache *mc);
+			     struct kvm_mmu_memory_cache *mc,
+			     enum kvm_pgtable_walk_flags flags);

/**
* kvm_pgtable_walk() - Walk a page-table.
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1408,11 +1408,12 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
}

int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
-			     struct kvm_mmu_memory_cache *mc)
+			     struct kvm_mmu_memory_cache *mc,
+			     enum kvm_pgtable_walk_flags flags)
{
	struct kvm_pgtable_walker walker = {
		.cb = stage2_split_walker,
-		.flags = KVM_PGTABLE_WALK_LEAF,
+		.flags = flags | KVM_PGTABLE_WALK_LEAF,
		.arg = mc,
	};
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -153,7 +153,7 @@ static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
			return -EINVAL;

		next = __stage2_range_addr_end(addr, end, chunk_size);
-		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
+		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache, 0);
		if (ret)
			break;
	} while (addr = next, addr != end);
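
As an illustration of the new parameter, a caller that performs the split
while holding the MMU lock for read could request a shared walk instead of
passing 0. The helper below is a hypothetical sketch, not part of this
patch; it assumes only the existing KVM_PGTABLE_WALK_SHARED flag and the
signature introduced above:

/*
 * Hypothetical example (not part of this patch): run the split logic
 * under a shared (RCU-protected) walk, e.g. for a caller that holds
 * the MMU lock for read. kvm_pgtable_stage2_split() still ORs in
 * KVM_PGTABLE_WALK_LEAF internally, so callers only pass the
 * sharing-related flags.
 */
static int stage2_split_shared(struct kvm_pgtable *pgt, u64 addr, u64 size,
			       struct kvm_mmu_memory_cache *mc)
{
	return kvm_pgtable_stage2_split(pgt, addr, size, mc,
					KVM_PGTABLE_WALK_SHARED);
}

Existing callers keep the old behaviour by passing 0, as
kvm_mmu_split_huge_pages() does above, since they run with the MMU lock
held for write.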