@@ -173,6 +173,13 @@ static inline unsigned int kvm_get_hwpgd_size(void)
return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *pgd)
+{
+ return pgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd) {}
+
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
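
The arm stubs above are deliberately trivial: on a 32-bit host the
first-level stage-2 table handed to the hardware is directly usable by
the kernel's page table macros, so "setting up" the fake PGD is an
identity operation and freeing it is a no-op. What both architectures
must honour is the error-pointer contract, since the common caller only
checks IS_ERR(). A minimal user-space sketch of that contract (the
err.h helpers are re-implemented here purely for illustration; this is
not kernel code):

#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(ptr))

/* Mirrors the helpers' contract: return the table itself on success,
 * an encoded -errno on failure, never NULL. */
static void *setup_fake(void *hwpgd, int out_of_memory)
{
        return out_of_memory ? ERR_PTR(-12 /* -ENOMEM */) : hwpgd;
}

int main(void)
{
        long table;
        void *pgd = setup_fake(&table, 1);

        if (IS_ERR(pgd))
                printf("setup failed: %ld\n", PTR_ERR(pgd));
        return 0;
}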
@@ -677,43 +677,11 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
* guest, we allocate a fake PGD and pre-populate it to point
* to the next-level page table, which will be the real
* initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
- * the PMD and the kernel will use folded pud.
- * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
- * pages.
*/
- if (KVM_PREALLOC_LEVEL > 0) {
- int i;
-
- /*
- * Allocate fake pgd for the page table manipulation macros to
- * work. This is not used by the hardware and we have no
- * alignment requirement for this allocation.
- */
- pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
- GFP_KERNEL | __GFP_ZERO);
-
- if (!pgd) {
- kvm_free_hwpgd(hwpgd);
- return -ENOMEM;
- }
-
- /* Plug the HW PGD into the fake one. */
- for (i = 0; i < PTRS_PER_S2_PGD; i++) {
- if (KVM_PREALLOC_LEVEL == 1)
- pgd_populate(NULL, pgd + i,
- (pud_t *)hwpgd + i * PTRS_PER_PUD);
- else if (KVM_PREALLOC_LEVEL == 2)
- pud_populate(NULL, pud_offset(pgd, 0) + i,
- (pmd_t *)hwpgd + i * PTRS_PER_PMD);
- }
- } else {
- /*
- * Allocate actual first-level Stage-2 page table used by the
- * hardware for Stage-2 page table walks.
- */
- pgd = (pgd_t *)hwpgd;
+ pgd = kvm_setup_fake_pgd(hwpgd);
+ if (IS_ERR(pgd)) {
+ kvm_free_hwpgd(hwpgd);
+ return PTR_ERR(pgd);
}
kvm_clean_pgd(pgd);
@@ -820,9 +788,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
-
+ kvm_free_fake_pgd(kvm->arch.pgd);
kvm->arch.pgd = NULL;
}
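
With the KVM_PREALLOC_LEVEL branching moved behind the helpers, the
allocation path in kvm_alloc_stage2_pgd() becomes architecture-neutral.
A condensed paraphrase of how the function reads once this hunk is
applied (reconstructed from the context lines above; the parts not shown
in the hunk, such as kvm_alloc_hwpgd() and the final assignment, are
assumptions, not a verbatim copy):

int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;
        void *hwpgd = kvm_alloc_hwpgd();        /* real first-level S2 table */

        if (!hwpgd)
                return -ENOMEM;

        /* Identity on arm; on arm64 this may allocate a fake PGD. */
        pgd = kvm_setup_fake_pgd(hwpgd);
        if (IS_ERR(pgd)) {
                kvm_free_hwpgd(hwpgd);          /* unwind before propagating */
                return PTR_ERR(pgd);
        }

        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;
        return 0;
}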
@@ -198,6 +198,49 @@ static inline unsigned int kvm_get_hwpgd_size(void)
return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
+/*
+ * Allocate fake pgd for the page table manipulation macros to
+ * work. This is not used by the hardware and we have no
+ * alignment requirement for this allocation.
+ */
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+ int i;
+ pgd_t *pgd;
+
+ if (!KVM_PREALLOC_LEVEL)
+ return hwpgd;
+ /*
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+ * the PMD and the kernel will use folded pud.
+ * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+ * pages.
+ */
+ pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!pgd)
+ return ERR_PTR(-ENOMEM);
+
+ /* Plug the HW PGD into the fake one. */
+ for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+ if (KVM_PREALLOC_LEVEL == 1)
+ pgd_populate(NULL, pgd + i,
+ (pud_t *)hwpgd + i * PTRS_PER_PUD);
+ else if (KVM_PREALLOC_LEVEL == 2)
+ pud_populate(NULL, pud_offset(pgd, 0) + i,
+ (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+ }
+
+ return pgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+ if (KVM_PREALLOC_LEVEL > 0)
+ kfree(pgd);
+}
+
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
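
The loop in kvm_setup_fake_pgd() is plain pointer arithmetic over the
preallocated hardware table: fake entry i must reference the i-th
next-level page inside hwpgd. A small user-space sketch of that offset
math for the KVM_PREALLOC_LEVEL == 1 case (the table sizes below are
hypothetical, picked to mimic a host whose stage-2 PGD has 2 entries):

#include <stdio.h>

/* Hypothetical sizes: a 2-entry stage-2 PGD, 512 entries per PUD page. */
#define PTRS_PER_S2_PGD 2
#define PTRS_PER_PUD    512

int main(void)
{
        /* Stand-in for the 2 consecutive PUD pages kvm_alloc_hwpgd()
         * hands back when KVM_PREALLOC_LEVEL == 1. */
        static unsigned long hwpgd[PTRS_PER_S2_PGD * PTRS_PER_PUD];
        unsigned long *fake[PTRS_PER_S2_PGD];
        int i;

        /* Same arithmetic as the pgd_populate() call in the patch:
         * fake slot i points at the start of the i-th PUD page. */
        for (i = 0; i < PTRS_PER_S2_PGD; i++)
                fake[i] = hwpgd + i * PTRS_PER_PUD;

        for (i = 0; i < PTRS_PER_S2_PGD; i++)
                printf("fake[%d] -> hwpgd entry %ld\n", i,
                       (long)(fake[i] - hwpgd));
        return 0;
}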