@@ -1616,6 +1616,56 @@ static void kvm_send_hwpoison_signal(unsigned long address,
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}
+static pud_t stage2_build_pud(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+ bool needs_exec)
+{
+ pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
+
+ new_pud = kvm_pud_mkhuge(new_pud);
+ if (writable)
+ new_pud = kvm_s2pud_mkwrite(new_pud);
+
+ if (needs_exec)
+ new_pud = kvm_s2pud_mkexec(new_pud);
+
+ return new_pud;
+}
+
+static pmd_t stage2_build_pmd(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+ bool needs_exec, bool contiguous)
+{
+ pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
+
+ new_pmd = kvm_pmd_mkhuge(new_pmd);
+ if (writable)
+ new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+
+ if (needs_exec)
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+
+ if (contiguous)
+ new_pmd = kvm_s2pmd_mkcont(new_pmd);
+
+ return new_pmd;
+}
+
+static pte_t stage2_build_pte(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+ bool needs_exec, bool contiguous)
+{
+ pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
+
+ if (writable)
+ new_pte = kvm_s2pte_mkwrite(new_pte);
+
+ if (needs_exec)
+ new_pte = kvm_s2pte_mkexec(new_pte);
+
+ if (contiguous)
+ new_pte = kvm_s2pte_mkcont(new_pte);
+
+ return new_pte;
+}
+
static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
unsigned long hva,
unsigned long map_size)
@@ -1807,38 +1857,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
if (vma_pagesize == PUD_SIZE) {
- pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
-
- new_pud = kvm_pud_mkhuge(new_pud);
- if (writable)
- new_pud = kvm_s2pud_mkwrite(new_pud);
-
- if (needs_exec)
- new_pud = kvm_s2pud_mkexec(new_pud);
+ pud_t new_pud = stage2_build_pud(pfn, mem_type, writable,
+ needs_exec);
ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
} else if (vma_pagesize == PMD_SIZE) {
- pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
-
- new_pmd = kvm_pmd_mkhuge(new_pmd);
-
- if (writable)
- new_pmd = kvm_s2pmd_mkwrite(new_pmd);
-
- if (needs_exec)
- new_pmd = kvm_s2pmd_mkexec(new_pmd);
+ pmd_t new_pmd = stage2_build_pmd(pfn, mem_type, writable,
+ needs_exec, false);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
- pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
+ pte_t new_pte = stage2_build_pte(pfn, mem_type, writable,
+ needs_exec, false);
- if (writable) {
- new_pte = kvm_s2pte_mkwrite(new_pte);
+ if (writable)
mark_page_dirty(kvm, gfn);
- }
-
- if (needs_exec)
- new_pte = kvm_s2pte_mkexec(new_pte);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
}
As we're going to support creating CONT_{PTE,PMD}_SIZE huge mappings in
user_mem_abort(), the logic that checks vma_pagesize and builds the page
table entries will get longer, with branches that look almost the same
(but actually aren't). Refactor this part to make it a bit cleaner.

Add "contiguous" as a parameter of stage2_build_{pmd,pte}(), to indicate
whether we're creating contiguous huge mappings.

Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
---
 virt/kvm/arm/mmu.c | 81 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 57 insertions(+), 24 deletions(-)
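For illustration only, not part of this patch: once CONT_PMD_SIZE
mappings are supported, user_mem_abort() could grow a branch along the
lines of the sketch below, passing contiguous=true to the new helper.
stage2_set_cont_pmds() is a hypothetical name here, assumed to install
all the PMD entries covering the contiguous range:

	} else if (vma_pagesize == CONT_PMD_SIZE) {
		/* Build a block entry with the contiguous hint set. */
		pmd_t new_pmd = stage2_build_pmd(pfn, mem_type, writable,
						 needs_exec, true);

		/* Assumed helper: install the whole contiguous range. */
		ret = stage2_set_cont_pmds(kvm, memcache, fault_ipa,
					   &new_pmd);
	}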