@@ -95,6 +95,8 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_is_map_memory(pfn);
}
+static atomic_t stage2_pages = ATOMIC_INIT(0);
+
static void *stage2_memcache_zalloc_page(void *arg)
{
struct kvm_mmu_caches *mmu_caches = arg;
@@ -112,6 +114,8 @@ static void *stage2_memcache_zalloc_page(void *arg)
return NULL;
}
+ atomic_inc(&stage2_pages);
+
hdr->page = virt_to_page(addr);
set_page_private(hdr->page, (unsigned long)hdr);
return addr;
@@ -121,6 +125,8 @@ static void stage2_free_page_now(struct stage2_page_header *hdr)
{
WARN_ON(page_ref_count(hdr->page) != 1);
+ atomic_dec(&stage2_pages);
+
__free_page(hdr->page);
kmem_cache_free(stage2_page_header_cache, hdr);
}
@@ -662,6 +668,8 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.icache_inval_pou = invalidate_icache_guest_page,
};
+static atomic_t stage2_mmus = ATOMIC_INIT(0);
+
/**
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
* @kvm: The pointer to the KVM structure
@@ -699,6 +707,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
+ atomic_inc(&stage2_mmus);
+
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
return 0;
@@ -796,6 +806,9 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
kvm_pgtable_stage2_destroy(pgt);
kfree(pgt);
}
+
+ if (atomic_dec_and_test(&stage2_mmus))
+ WARN_ON(atomic_read(&stage2_pages));
}
/**
Don't use this please. I was just being lazy but wanted to make sure tables
are all accounted for. There's a race here too, do you see it? :)

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/kvm/mmu.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)