| Message ID | 20230801020206.1957986-3-zhaotianrui@loongson.cn (mailing list archive) |
|---|---|
| State | New, archived |
| Series | selftests: kvm: Add LoongArch support |
On Tue, Aug 01, 2023, Tianrui Zhao wrote:
> Add processor tests for LoongArch KVM, including vcpu initialize

Nit, AFAICT these aren't tests, this is simply the core KVM selftests support
for LoongArch.

> and tlb refill exception handler.
>
> Based-on: <20230720062813.4126751-1-zhaotianrui@loongson.cn>
> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> ---
>  .../selftests/kvm/lib/loongarch/exception.S |  27 ++
>  .../selftests/kvm/lib/loongarch/processor.c | 367 ++++++++++++++++++
>  2 files changed, 394 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/lib/loongarch/exception.S
>  create mode 100644 tools/testing/selftests/kvm/lib/loongarch/processor.c
>
> diff --git a/tools/testing/selftests/kvm/lib/loongarch/exception.S b/tools/testing/selftests/kvm/lib/loongarch/exception.S
> new file mode 100644
> index 000000000000..19dc50993da4
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/lib/loongarch/exception.S
> @@ -0,0 +1,27 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#include "sysreg.h"
> +
> +/* address of refill exception should be 4K aligned */
> +.align 12

.align works on bytes, not on shifts. I.e. this will make handle_tlb_refill
12-byte aligned, not 4096-byte aligned.

> +.global handle_tlb_refill
> +handle_tlb_refill:
> +	csrwr	t0, LOONGARCH_CSR_TLBRSAVE
> +	csrrd	t0, LOONGARCH_CSR_PGD
> +	lddir	t0, t0, 3
> +	lddir	t0, t0, 1
> +	ldpte	t0, 0
> +	ldpte	t0, 1
> +	tlbfill
> +	csrrd	t0, LOONGARCH_CSR_TLBRSAVE
> +	ertn
> +
> +/* address of general exception should be 4K aligned */
> +.align 12

Same thing here.

> +.global handle_exception
> +handle_exception:
> +1:
> +	nop
> +	b	1b
> +	nop
> +	ertn
> diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
> new file mode 100644
> index 000000000000..2e50b6e2c556
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
> @@ -0,0 +1,367 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * KVM selftest LoongArch library code, including CPU-related functions.
> + *

Again, unnecessary IMO. If you do keep the comment, the extra line with a bare
asterisk should be dropped.

> + */
> +
> +#include <assert.h>
> +#include <linux/bitfield.h>
> +#include <linux/compiler.h>
> +
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "sysreg.h"
> +
> +#define DEFAULT_LOONGARCH_GUEST_STACK_VADDR_MIN	0xac0000

Why diverge from the common?

	#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000

AFAIK, the common value is also mostly arbitrary, but that just makes it even
more confusing as to why LoongArch needs to bump the min by 0xa000.

> +uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
> +{
> +	uint64_t *ptep;
> +
> +	if (!vm->pgd_created)
> +		goto unmapped_gva;
> +
> +	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
> +	if (!ptep)
> +		goto unmapped_gva;
> +
> +	switch (vm->pgtable_levels) {
> +	case 4:
> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
> +		if (!ptep)
> +			goto unmapped_gva;

This wants a "fallthrough" annotation.

> +	case 3:
> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
> +		if (!ptep)
> +			goto unmapped_gva;
> +	case 2:
> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
> +		if (!ptep)
> +			goto unmapped_gva;
> +		break;
> +	default:
> +		TEST_FAIL("Page table levels must be 2, 3, or 4");

Obviously it shouldn't come up, but print the actual pgtable_levels to make debug
a wee bit easier e.g.

		TEST_FAIL("Got %u page table levels, expected 2, 3, or 4",
			  vm->pgtable_levels);

Mostly out of curiosity, but also because it looks like this was heavily copy+pasted
from ARM: does LoongArch actually support 2-level page tables?

> +static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
> +{
> +	uint64_t csrid;
> +
> +	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
> +	vcpu_set_reg(vcpu, csrid, val);
> +}
> +
> +static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
> +{
> +	unsigned long val;
> +	int width;
> +	struct kvm_vm *vm = vcpu->vm;
> +
> +	switch (vm->mode) {
> +	case VM_MODE_P48V48_16K:
> +	case VM_MODE_P40V48_16K:
> +	case VM_MODE_P36V48_16K:
> +	case VM_MODE_P36V47_16K:
> +		break;
> +
> +	default:
> +		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
> +	}
> +
> +	/* user mode and page enable mode */
> +	val = PLV_USER | CSR_CRMD_PG;
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);
> +
> +	width = vm->page_shift - 3;
> +	val = 0;
> +	switch (vm->pgtable_levels) {
> +	case 4:
> +		/* pud page shift and width */
> +		val = (vm->page_shift + width * 2) << 20 | (width << 25);
> +	case 3:
> +		/* pmd page shift and width */
> +		val |= (vm->page_shift + width) << 10 | (width << 15);
> +	case 2:
> +		/* pte page shift and width */
> +		val |= vm->page_shift | width << 5;
> +		break;
> +	default:
> +		TEST_FAIL("Page table levels must be 2, 3, or 4");
> +	}
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);
> +
> +	/* pgd page shift and width */
> +	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
> +
> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);
> +
> +	extern void handle_tlb_refill(void);
> +	extern void handle_exception(void);

Eww. I get that it's probably undesirable to expose these via processor.h, but
at least declare them outside of the function.

> +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
> +				  void *guest_code)
> +{
> +	return loongarch_vcpu_add(vm, vcpu_id, guest_code);

Please drop the single-line passthrough, i.e. drop loongarch_vcpu_add(). I'm
guessing you copy+pasted much of this from ARM. ARM's passthrough isn't a pure
passthrough, which is directly related to why the "passthrough" is ok: there are
other callers to aarch64_vcpu_add() that pass a non-NULL @source.
On 2023/8/3 2:07 AM, Sean Christopherson wrote:
> On Tue, Aug 01, 2023, Tianrui Zhao wrote:
>> Add processor tests for LoongArch KVM, including vcpu initialize
> Nit, AFAICT these aren't tests, this is simply the core KVM selftests support
> for LoongArch.

Thanks, I will fix this comment.

>
>> and tlb refill exception handler.
>>
>> Based-on: <20230720062813.4126751-1-zhaotianrui@loongson.cn>
>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
>> ---
>>   .../selftests/kvm/lib/loongarch/exception.S |  27 ++
>>   .../selftests/kvm/lib/loongarch/processor.c | 367 ++++++++++++++++++
>>   2 files changed, 394 insertions(+)
>>   create mode 100644 tools/testing/selftests/kvm/lib/loongarch/exception.S
>>   create mode 100644 tools/testing/selftests/kvm/lib/loongarch/processor.c
>>
>> diff --git a/tools/testing/selftests/kvm/lib/loongarch/exception.S b/tools/testing/selftests/kvm/lib/loongarch/exception.S
>> new file mode 100644
>> index 000000000000..19dc50993da4
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/lib/loongarch/exception.S
>> @@ -0,0 +1,27 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +
>> +#include "sysreg.h"
>> +
>> +/* address of refill exception should be 4K aligned */
>> +.align 12
> .align works on bytes, not on shifts. I.e. this will make handle_tlb_refill
> 12-byte aligned, not 4096-byte aligned.

Thanks, I will fix it to .balign 4096.

>
>> +.global handle_tlb_refill
>> +handle_tlb_refill:
>> +	csrwr	t0, LOONGARCH_CSR_TLBRSAVE
>> +	csrrd	t0, LOONGARCH_CSR_PGD
>> +	lddir	t0, t0, 3
>> +	lddir	t0, t0, 1
>> +	ldpte	t0, 0
>> +	ldpte	t0, 1
>> +	tlbfill
>> +	csrrd	t0, LOONGARCH_CSR_TLBRSAVE
>> +	ertn
>> +
>> +/* address of general exception should be 4K aligned */
>> +.align 12
> Same thing here.

I will fix it too.

>
>> +.global handle_exception
>> +handle_exception:
>> +1:
>> +	nop
>> +	b	1b
>> +	nop
>> +	ertn
>> diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
>> new file mode 100644
>> index 000000000000..2e50b6e2c556
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
>> @@ -0,0 +1,367 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * KVM selftest LoongArch library code, including CPU-related functions.
>> + *
> Again, unnecessary IMO. If you do keep the comment, the extra line with a bare
> asterisk should be dropped.

Thanks, I will remove this comment.

>
>> + */
>> +
>> +#include <assert.h>
>> +#include <linux/bitfield.h>
>> +#include <linux/compiler.h>
>> +
>> +#include "kvm_util.h"
>> +#include "processor.h"
>> +#include "sysreg.h"
>> +
>> +#define DEFAULT_LOONGARCH_GUEST_STACK_VADDR_MIN	0xac0000
> Why diverge from the common?
>
>	#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
>
> AFAIK, the common value is also mostly arbitrary, but that just makes it even
> more confusing as to why LoongArch needs to bump the min by 0xa000.

This was copied from ARM; I will fix it to use the common value.

>
>> +uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
>> +{
>> +	uint64_t *ptep;
>> +
>> +	if (!vm->pgd_created)
>> +		goto unmapped_gva;
>> +
>> +	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
>> +	if (!ptep)
>> +		goto unmapped_gva;
>> +
>> +	switch (vm->pgtable_levels) {
>> +	case 4:
>> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
>> +		if (!ptep)
>> +			goto unmapped_gva;
> This wants a "fallthrough" annotation.

Thanks, I will add the "fallthrough" annotation.
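A sketch of how the annotated walk in virt_get_pte_hva() could look, assuming
a "fallthrough" macro is available via the tools headers (processor.c already
includes <linux/compiler.h>); if it is not, a /* fall through */ comment serves
the same purpose:

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		fallthrough;
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		fallthrough;
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}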
>
>> +	case 3:
>> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
>> +		if (!ptep)
>> +			goto unmapped_gva;
>> +	case 2:
>> +		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
>> +		if (!ptep)
>> +			goto unmapped_gva;
>> +		break;
>> +	default:
>> +		TEST_FAIL("Page table levels must be 2, 3, or 4");
> Obviously it shouldn't come up, but print the actual pgtable_levels to make debug
> a wee bit easier e.g.
>
>		TEST_FAIL("Got %u page table levels, expected 2, 3, or 4",
>			  vm->pgtable_levels);

Thanks, I will also print the actual pgtable_levels in the failure message.

>
> Mostly out of curiosity, but also because it looks like this was heavily copy+pasted
> from ARM: does LoongArch actually support 2-level page tables?

Yes, this code is mostly copy+pasted from ARM, but LoongArch does not support
2-level page tables; it only supports 3-level and 4-level page tables. I will
fix it.

>> +static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
>> +{
>> +	uint64_t csrid;
>> +
>> +	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
>> +	vcpu_set_reg(vcpu, csrid, val);
>> +}
>> +
>> +static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
>> +{
>> +	unsigned long val;
>> +	int width;
>> +	struct kvm_vm *vm = vcpu->vm;
>> +
>> +	switch (vm->mode) {
>> +	case VM_MODE_P48V48_16K:
>> +	case VM_MODE_P40V48_16K:
>> +	case VM_MODE_P36V48_16K:
>> +	case VM_MODE_P36V47_16K:
>> +		break;
>> +
>> +	default:
>> +		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
>> +	}
>> +
>> +	/* user mode and page enable mode */
>> +	val = PLV_USER | CSR_CRMD_PG;
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);
>> +
>> +	width = vm->page_shift - 3;
>> +	val = 0;
>> +	switch (vm->pgtable_levels) {
>> +	case 4:
>> +		/* pud page shift and width */
>> +		val = (vm->page_shift + width * 2) << 20 | (width << 25);
>> +	case 3:
>> +		/* pmd page shift and width */
>> +		val |= (vm->page_shift + width) << 10 | (width << 15);
>> +	case 2:
>> +		/* pte page shift and width */
>> +		val |= vm->page_shift | width << 5;
>> +		break;
>> +	default:
>> +		TEST_FAIL("Page table levels must be 2, 3, or 4");
>> +	}
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);
>> +
>> +	/* pgd page shift and width */
>> +	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
>> +
>> +	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);
>> +
>> +	extern void handle_tlb_refill(void);
>> +	extern void handle_exception(void);
> Eww. I get that it's probably undesirable to expose these via processor.h, but
> at least declare them outside of the function.

Thanks, I will declare them outside of the function.

>
>> +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
>> +				  void *guest_code)
>> +{
>> +	return loongarch_vcpu_add(vm, vcpu_id, guest_code);
> Please drop the single-line passthrough, i.e. drop loongarch_vcpu_add(). I'm
> guessing you copy+pasted much of this from ARM. ARM's passthrough isn't a pure
> passthrough, which is directly related to why the "passthrough" is ok: there are
> other callers to aarch64_vcpu_add() that pass a non-NULL @source.
Yes, this is also copy+pasted from ARM. I will drop the loongarch_vcpu_add()
function and move its contents here.
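Dropping the passthrough might then look roughly like the sketch below; this is
an illustration rather than the final patch, and it also picks up the common
DEFAULT_GUEST_STACK_VADDR_MIN from kvm_util in place of the LoongArch-specific
value, per the earlier comment:

	struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					  void *guest_code)
	{
		size_t stack_size = vm->page_size;
		uint64_t stack_vaddr;
		struct kvm_regs regs;
		struct kvm_vcpu *vcpu;

		vcpu = __vm_vcpu_add(vm, vcpu_id);
		stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
					       DEFAULT_GUEST_STACK_VADDR_MIN,
					       MEM_REGION_DATA);

		loongarch_vcpu_setup(vcpu);

		/* Point sp (gpr[3]) at the stack top and pc at guest_code. */
		vcpu_regs_get(vcpu, &regs);
		regs.gpr[3] = stack_vaddr + stack_size - 8;
		regs.pc = (uint64_t)guest_code;
		vcpu_regs_set(vcpu, &regs);

		return vcpu;
	}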
diff --git a/tools/testing/selftests/kvm/lib/loongarch/exception.S b/tools/testing/selftests/kvm/lib/loongarch/exception.S
new file mode 100644
index 000000000000..19dc50993da4
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/loongarch/exception.S
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include "sysreg.h"
+
+/* address of refill exception should be 4K aligned */
+.align 12
+.global handle_tlb_refill
+handle_tlb_refill:
+	csrwr	t0, LOONGARCH_CSR_TLBRSAVE
+	csrrd	t0, LOONGARCH_CSR_PGD
+	lddir	t0, t0, 3
+	lddir	t0, t0, 1
+	ldpte	t0, 0
+	ldpte	t0, 1
+	tlbfill
+	csrrd	t0, LOONGARCH_CSR_TLBRSAVE
+	ertn
+
+/* address of general exception should be 4K aligned */
+.align 12
+.global handle_exception
+handle_exception:
+1:
+	nop
+	b	1b
+	nop
+	ertn
diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
new file mode 100644
index 000000000000..2e50b6e2c556
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM selftest LoongArch library code, including CPU-related functions.
+ *
+ */
+
+#include <assert.h>
+#include <linux/bitfield.h>
+#include <linux/compiler.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "sysreg.h"
+
+#define DEFAULT_LOONGARCH_GUEST_STACK_VADDR_MIN	0xac0000
+
+static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	unsigned int shift;
+	uint64_t mask;
+
+	shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+	mask = (1UL << (vm->va_bits - shift)) - 1;
+	return (gva >> shift) & mask;
+}
+
+static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	unsigned int shift;
+	uint64_t mask;
+
+	shift = 2 * (vm->page_shift - 3) + vm->page_shift;
+	mask = (1UL << (vm->page_shift - 3)) - 1;
+	TEST_ASSERT(vm->pgtable_levels == 4,
+		"Mode %d does not have 4 page table levels", vm->mode);
+
+	return (gva >> shift) & mask;
+}
+
+static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	unsigned int shift;
+	uint64_t mask;
+
+	shift = (vm->page_shift - 3) + vm->page_shift;
+	mask = (1UL << (vm->page_shift - 3)) - 1;
+	TEST_ASSERT(vm->pgtable_levels >= 3,
+		"Mode %d does not have >= 3 page table levels", vm->mode);
+
+	return (gva >> shift) & mask;
+}
+
+static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	uint64_t mask;
+
+	mask = (1UL << (vm->page_shift - 3)) - 1;
+	return (gva >> vm->page_shift) & mask;
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+{
+	uint64_t mask;
+
+	mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
+	return entry & mask;
+}
+
+static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+{
+	unsigned int shift;
+
+	shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+	return 1 << (vm->va_bits - shift);
+}
+
+static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
+{
+	return 1 << (vm->page_shift - 3);
+}
+
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
+{
+	if (vm->pgd_created)
+		return;
+
+	vm->pgd = vm_alloc_page_table(vm);
+	vm->pgd_created = true;
+}
+
+uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	uint64_t *ptep;
+
+	if (!vm->pgd_created)
+		goto unmapped_gva;
+
+	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
+	if (!ptep)
+		goto unmapped_gva;
+
+	switch (vm->pgtable_levels) {
+	case 4:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
+		if (!ptep)
+			goto unmapped_gva;
+	case 3:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
+		if (!ptep)
+			goto unmapped_gva;
+	case 2:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
+		if (!ptep)
+			goto unmapped_gva;
+		break;
+	default:
+		TEST_FAIL("Page table levels must be 2, 3, or 4");
+	}
+
+	return ptep;
+
+unmapped_gva:
+	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
+	exit(EXIT_FAILURE);
+}
+
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	uint64_t *ptep;
+
+	ptep = virt_get_pte_hva(vm, gva);
+	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
+}
+
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+	uint32_t prot_bits;
+	uint64_t *ptep;
+
+	TEST_ASSERT((vaddr % vm->page_size) == 0,
+		"Virtual address not on page boundary,\n"
+		"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
+		(vaddr >> vm->page_shift)),
+		"Invalid virtual address, vaddr: 0x%lx", vaddr);
+	TEST_ASSERT((paddr % vm->page_size) == 0,
+		"Physical address not on page boundary,\n"
+		"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
+	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		"Physical address beyond maximum supported,\n"
+		"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		paddr, vm->max_gfn, vm->page_size);
+
+	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
+	if (!*ptep)
+		*ptep = vm_alloc_page_table(vm);
+
+	switch (vm->pgtable_levels) {
+	case 4:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+		if (!*ptep)
+			*ptep = vm_alloc_page_table(vm);
+	case 3:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+		if (!*ptep)
+			*ptep = vm_alloc_page_table(vm);
+	case 2:
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+		break;
+	default:
+		TEST_FAIL("Page table levels must be 2, 3, or 4");
+	}
+
+	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC;
+	prot_bits |= _PAGE_USER;
+	*ptep = paddr | prot_bits;
+}
+
+static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+{
+#ifdef DEBUG
+	static const char * const type[] = { "", "pud", "pmd", "pte" };
+	uint64_t pte, *ptep;
+
+	if (level == 4)
+		return;
+
+	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
+		ptep = addr_gpa2hva(vm, pte);
+		if (!*ptep)
+			continue;
+		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
+			indent, "", type[level], pte, *ptep, ptep);
+		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
+	}
+#endif
+}
+
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+	int level;
+	uint64_t pgd, *ptep;
+
+	level = 4 - (vm->pgtable_levels - 1);
+	if (!vm->pgd_created)
+		return;
+
+	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
+		ptep = addr_gpa2hva(vm, pgd);
+		if (!*ptep)
+			continue;
+		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
+		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
+	}
+}
+
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+{
+}
+
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
+{
+}
+
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
+{
+	va_list ap;
+	struct kvm_regs regs;
+	int i;
+
+	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
+		    "num: %u\n", num);
+
+	vcpu_regs_get(vcpu, &regs);
+	va_start(ap, num);
+	for (i = 0; i < num; i++)
+		regs.gpr[i + 4] = va_arg(ap, uint64_t);
+	va_end(ap);
+	vcpu_regs_set(vcpu, &regs);
+}
+
+static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+{
+	uint64_t csrid;
+
+	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
+	vcpu_get_reg(vcpu, csrid, addr);
+}
+
+static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+{
+	uint64_t csrid;
+
+	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
+	vcpu_set_reg(vcpu, csrid, val);
+}
+
+static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+	int width;
+	struct kvm_vm *vm = vcpu->vm;
+
+	switch (vm->mode) {
+	case VM_MODE_P48V48_16K:
+	case VM_MODE_P40V48_16K:
+	case VM_MODE_P36V48_16K:
+	case VM_MODE_P36V47_16K:
+		break;
+
+	default:
+		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
+	}
+
+	/* user mode and page enable mode */
+	val = PLV_USER | CSR_CRMD_PG;
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);
+
+	width = vm->page_shift - 3;
+	val = 0;
+	switch (vm->pgtable_levels) {
+	case 4:
+		/* pud page shift and width */
+		val = (vm->page_shift + width * 2) << 20 | (width << 25);
+	case 3:
+		/* pmd page shift and width */
+		val |= (vm->page_shift + width) << 10 | (width << 15);
+	case 2:
+		/* pte page shift and width */
+		val |= vm->page_shift | width << 5;
+		break;
+	default:
+		TEST_FAIL("Page table levels must be 2, 3, or 4");
+	}
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);
+
+	/* pgd page shift and width */
+	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
+
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);
+
+	extern void handle_tlb_refill(void);
+	extern void handle_exception(void);
+	/*
+	 * refill exception runs on real mode, entry address should
+	 * be physical address
+	 */
+	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);
+
+	/*
+	 * general exception runs on page-enabled mode, entry address should
+	 * be virtual address
+	 */
+	val = (unsigned long)handle_exception;
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);
+
+	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
+	val &= ~CSR_TLBIDX_SIZEM;
+	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);
+
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);
+
+	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
+	val &= ~CSR_TLBREHI_PS;
+	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);
+
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
+	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
+}
+
+static struct kvm_vcpu *loongarch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, void *guest_code)
+{
+	size_t stack_size;
+	uint64_t stack_vaddr;
+	struct kvm_regs regs;
+	struct kvm_vcpu *vcpu;
+
+	vcpu = __vm_vcpu_add(vm, vcpu_id);
+	stack_size = vm->page_size;
+	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+				       DEFAULT_LOONGARCH_GUEST_STACK_VADDR_MIN,
+				       MEM_REGION_DATA);
+
+	loongarch_vcpu_setup(vcpu);
+	/* Setup guest general purpose registers */
+	vcpu_regs_get(vcpu, &regs);
+	regs.gpr[3] = stack_vaddr + stack_size - 8;
+	regs.pc = (uint64_t)guest_code;
+	vcpu_regs_set(vcpu, &regs);
+
+	return vcpu;
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+				  void *guest_code)
+{
+	return loongarch_vcpu_add(vm, vcpu_id, guest_code);
+}
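One way the extern-declaration cleanup requested in the review could look: a
sketch (not the final patch) hoisting the assembly entry points to file scope
in processor.c, where loongarch_vcpu_setup() can still reach them without
exposing them in processor.h:

	/* TLB refill and general exception handlers, defined in exception.S. */
	extern void handle_tlb_refill(void);
	extern void handle_exception(void);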
Add processor tests for LoongArch KVM, including vcpu initialize
and tlb refill exception handler.

Based-on: <20230720062813.4126751-1-zhaotianrui@loongson.cn>
Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
---
 .../selftests/kvm/lib/loongarch/exception.S |  27 ++
 .../selftests/kvm/lib/loongarch/processor.c | 367 ++++++++++++++++++
 2 files changed, 394 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/lib/loongarch/exception.S
 create mode 100644 tools/testing/selftests/kvm/lib/loongarch/processor.c