| Message ID | 20230317113538.10878-19-andy.chiu@sifive.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | riscv: Add vector ISA support |
On Fri, Mar 17, 2023 at 5:08 PM Andy Chiu <andy.chiu@sifive.com> wrote:
>
> From: Vincent Chen <vincent.chen@sifive.com>
>
> This patch adds vector context save/restore for guest VCPUs. To reduce the
> impact on KVM performance, the implementation imitates the FP context
> switch mechanism to lazily store and restore the vector context only when
> the kernel enters/exits the in-kernel run loop and not during the KVM
> world switch.
>
> Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
> Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
>  arch/riscv/include/asm/kvm_host.h        |   2 +
>  arch/riscv/include/asm/kvm_vcpu_vector.h |  77 ++++++++++
>  arch/riscv/include/uapi/asm/kvm.h        |   7 +
>  arch/riscv/kvm/Makefile                  |   1 +
>  arch/riscv/kvm/vcpu.c                    |  30 ++++
>  arch/riscv/kvm/vcpu_vector.c             | 177 +++++++++++++++++++++++
>  6 files changed, 294 insertions(+)
>  create mode 100644 arch/riscv/include/asm/kvm_vcpu_vector.h
>  create mode 100644 arch/riscv/kvm/vcpu_vector.c
>
> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> index cc7da66ee0c0..7e7e23272d32 100644
> --- a/arch/riscv/include/asm/kvm_host.h
> +++ b/arch/riscv/include/asm/kvm_host.h
> @@ -14,6 +14,7 @@
>  #include <linux/kvm_types.h>
>  #include <linux/spinlock.h>
>  #include <asm/hwcap.h>
> +#include <asm/ptrace.h>
>  #include <asm/kvm_vcpu_fp.h>
>  #include <asm/kvm_vcpu_insn.h>
>  #include <asm/kvm_vcpu_sbi.h>
> @@ -141,6 +142,7 @@ struct kvm_cpu_context {
>          unsigned long sstatus;
>          unsigned long hstatus;
>          union __riscv_fp_state fp;
> +        struct __riscv_v_ext_state vector;
>  };
>
>  struct kvm_vcpu_csr {
> diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
> new file mode 100644
> index 000000000000..a6dae7e2859d
> --- /dev/null
> +++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
> @@ -0,0 +1,77 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2021 Western Digital Corporation or its affiliates.
> + * Copyright (C) 2022 SiFive
> + *
> + * Authors:
> + *        Atish Patra <atish.patra@wdc.com>
> + *        Anup Patel <anup.patel@wdc.com>

Atish and I have not contributed, so please drop both names from here.
Also, drop the WDC copyright.
> + *        Vincent Chen <vincent.chen@sifive.com>
> + *        Greentime Hu <greentime.hu@sifive.com>
> + */
> +
> +#ifndef __KVM_VCPU_RISCV_VECTOR_H
> +#define __KVM_VCPU_RISCV_VECTOR_H
> +
> +#include <linux/types.h>
> +
> +#ifdef CONFIG_RISCV_ISA_V
> +#include <asm/vector.h>
> +#include <asm/kvm_host.h>
> +
> +static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
> +{
> +        __riscv_v_vstate_save(&context->vector, context->vector.datap);
> +}
> +
> +static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
> +{
> +        __riscv_v_vstate_restore(&context->vector, context->vector.datap);
> +}
> +
> +void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
> +void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
> +                                      unsigned long *isa);
> +void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
> +                                         unsigned long *isa);
> +void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
> +void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
> +void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
> +#else
> +
> +struct kvm_cpu_context;
> +
> +static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
> +{
> +}
> +
> +static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
> +                                                    unsigned long *isa)
> +{
> +}
> +
> +static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
> +                                                       unsigned long *isa)
> +{
> +}
> +
> +static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
> +{
> +}
> +
> +static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
> +{
> +}
> +
> +static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
> +{
> +}
> +#endif
> +
> +int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
> +                                  const struct kvm_one_reg *reg,
> +                                  unsigned long rtype);
> +int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
> +                                  const struct kvm_one_reg *reg,
> +                                  unsigned long rtype);
> +#endif
> diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
> index 3e3de7d486e1..b6d7f96d57ab 100644
> --- a/arch/riscv/include/uapi/asm/kvm.h
> +++ b/arch/riscv/include/uapi/asm/kvm.h
> @@ -153,6 +153,13 @@ enum KVM_RISCV_ISA_EXT_ID {
>  /* ISA Extension registers are mapped as type 7 */
>  #define KVM_REG_RISCV_ISA_EXT        (0x07 << KVM_REG_RISCV_TYPE_SHIFT)
>
> +/* V extension registers are mapped as type 8 */
> +#define KVM_REG_RISCV_VECTOR        (0x08 << KVM_REG_RISCV_TYPE_SHIFT)
> +#define KVM_REG_RISCV_VECTOR_CSR_REG(name)        \
> +                (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
> +#define KVM_REG_RISCV_VECTOR_REG(n)        \
> +                ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
> +
>  #endif
>
>  #endif /* __LINUX_KVM_RISCV_H */
> diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
> index 278e97c06e0a..f29854333cf2 100644
> --- a/arch/riscv/kvm/Makefile
> +++ b/arch/riscv/kvm/Makefile
> @@ -17,6 +17,7 @@ kvm-y += mmu.o
>  kvm-y += vcpu.o
>  kvm-y += vcpu_exit.o
>  kvm-y += vcpu_fp.o
> +kvm-y += vcpu_vector.o
>  kvm-y += vcpu_insn.o
>  kvm-y += vcpu_switch.o
>  kvm-y += vcpu_sbi.o
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index a7ddb7cf813e..ffce2b8eef9a 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -22,6 +22,8 @@
>  #include <asm/cacheflush.h>
>  #include <asm/hwcap.h>
>  #include <asm/sbi.h>
> +#include <asm/vector.h>
> +#include <asm/kvm_vcpu_vector.h>
>
>  const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
>          KVM_GENERIC_VCPU_STATS(),
> @@ -134,6 +136,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
>
>          kvm_riscv_vcpu_fp_reset(vcpu);
>
> +        kvm_riscv_vcpu_vector_reset(vcpu);
> +
>          kvm_riscv_vcpu_timer_reset(vcpu);
>
>          WRITE_ONCE(vcpu->arch.irqs_pending, 0);
> @@ -191,6 +195,15 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>          cntx->hstatus |= HSTATUS_SPVP;
>          cntx->hstatus |= HSTATUS_SPV;
>
> +        if (has_vector()) {
> +                cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
> +                if (!cntx->vector.datap)
> +                        return -ENOMEM;
> +                vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
> +                if (!vcpu->arch.host_context.vector.datap)
> +                        return -ENOMEM;
> +        }

Move this to kvm_riscv_vcpu_alloc_vector_context() in vcpu_vector.c

> +
>          /* By default, make CY, TM, and IR counters accessible in VU mode */
>          reset_csr->scounteren = 0x7;
>
> @@ -226,6 +239,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
>
>          /* Free unused pages pre-allocated for G-stage page table mappings */
>          kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
> +
> +        /* Free vector context space for host and guest kernel */
> +        kvm_riscv_vcpu_free_vector_context(vcpu);
>  }
>
>  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> @@ -602,6 +618,9 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
>                                                   KVM_REG_RISCV_FP_D);
>          case KVM_REG_RISCV_ISA_EXT:
>                  return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
> +        case KVM_REG_RISCV_VECTOR:
> +                return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
> +                                                     KVM_REG_RISCV_VECTOR);
>          default:
>                  break;
>          }
> @@ -629,6 +648,9 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
>                                                   KVM_REG_RISCV_FP_D);
>          case KVM_REG_RISCV_ISA_EXT:
>                  return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
> +        case KVM_REG_RISCV_VECTOR:
> +                return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
> +                                                     KVM_REG_RISCV_VECTOR);
>          default:
>                  break;
>          }
> @@ -895,6 +917,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>          kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
>          kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
>                                          vcpu->arch.isa);
> +        kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
> +        kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
> +                                            vcpu->arch.isa);
>
>          vcpu->cpu = cpu;
>  }
> @@ -910,6 +935,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>          kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
>
>          kvm_riscv_vcpu_timer_save(vcpu);
> +        kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
> +                                         vcpu->arch.isa);
> +        kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
> +
> +        csr_write(CSR_HGATP, 0);

Drop this csr_write().

>
>          csr->vsstatus = csr_read(CSR_VSSTATUS);
>          csr->vsie = csr_read(CSR_VSIE);
> diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
> new file mode 100644
> index 000000000000..68f194771794
> --- /dev/null
> +++ b/arch/riscv/kvm/vcpu_vector.c
> @@ -0,0 +1,177 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2021 Western Digital Corporation or its affiliates.
> + * Copyright (C) 2022 SiFive
> + *
> + * Authors:
> + *        Atish Patra <atish.patra@wdc.com>
> + *        Anup Patel <anup.patel@wdc.com>
> + *        Vincent Chen <vincent.chen@sifive.com>
> + *        Greentime Hu <greentime.hu@sifive.com>
> + */
> +
> +#include <linux/errno.h>
> +#include <linux/err.h>
> +#include <linux/kvm_host.h>
> +#include <linux/uaccess.h>
> +#include <asm/hwcap.h>
> +#include <asm/kvm_vcpu_vector.h>
> +
> +#ifdef CONFIG_RISCV_ISA_V
> +extern unsigned long riscv_v_vsize;

I am assuming riscv_v_vsize is externed in some other header? If yes
then drop the extern statement and include the header in this source
file.

> +void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
> +{
> +        unsigned long *isa = vcpu->arch.isa;
> +        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> +
> +        cntx->sstatus &= ~SR_VS;
> +        if (riscv_isa_extension_available(isa, v)) {
> +                cntx->sstatus |= SR_VS_INITIAL;
> +                WARN_ON(!cntx->vector.datap);
> +                memset(cntx->vector.datap, 0, riscv_v_vsize);
> +        } else {
> +                cntx->sstatus |= SR_VS_OFF;
> +        }
> +}
> +
> +static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
> +{
> +        cntx->sstatus &= ~SR_VS;
> +        cntx->sstatus |= SR_VS_CLEAN;
> +}
> +
> +void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
> +                                      unsigned long *isa)
> +{
> +        if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
> +                if (riscv_isa_extension_available(isa, v))
> +                        __kvm_riscv_vector_save(cntx);
> +                kvm_riscv_vcpu_vector_clean(cntx);
> +        }
> +}
> +
> +void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
> +                                         unsigned long *isa)
> +{
> +        if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
> +                if (riscv_isa_extension_available(isa, v))
> +                        __kvm_riscv_vector_restore(cntx);
> +                kvm_riscv_vcpu_vector_clean(cntx);
> +        }
> +}
> +
> +void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
> +{
> +        /* No need to check host sstatus as it can be modified outside */
> +        if (riscv_isa_extension_available(NULL, v))
> +                __kvm_riscv_vector_save(cntx);
> +}
> +
> +void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
> +{
> +        if (riscv_isa_extension_available(NULL, v))
> +                __kvm_riscv_vector_restore(cntx);
> +}
> +
> +void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
> +{
> +        kfree(vcpu->arch.guest_reset_context.vector.datap);
> +        kfree(vcpu->arch.host_context.vector.datap);
> +}
> +#else
> +#define riscv_v_vsize (0)
> +#endif
> +
> +static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
> +                                      unsigned long reg_num,
> +                                      size_t reg_size)
> +{
> +        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> +        void *reg_val;
> +        size_t vlenb = riscv_v_vsize / 32;
> +
> +        if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
> +                if (reg_size != sizeof(unsigned long))
> +                        return NULL;
> +                switch (reg_num) {
> +                case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
> +                        reg_val = &cntx->vector.vstart;
> +                        break;
> +                case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
> +                        reg_val = &cntx->vector.vl;
> +                        break;
> +                case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
> +                        reg_val = &cntx->vector.vtype;
> +                        break;
> +                case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
> +                        reg_val = &cntx->vector.vcsr;
> +                        break;
> +                case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
> +                default:
> +                        return NULL;
> +                }
> +        } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
> +                if (reg_size != vlenb)
> +                        return NULL;
> +                reg_val = cntx->vector.datap
> +                          + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
> +        } else {
> +                return NULL;
> +        }
> +
> +        return reg_val;
> +}
> +
> +int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
> +                                  const struct kvm_one_reg *reg,
> +                                  unsigned long rtype)
> +{
> +        unsigned long *isa = vcpu->arch.isa;
> +        unsigned long __user *uaddr =
> +                        (unsigned long __user *)(unsigned long)reg->addr;
> +        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> +                                            KVM_REG_SIZE_MASK |
> +                                            rtype);
> +        void *reg_val = NULL;
> +        size_t reg_size = KVM_REG_SIZE(reg->id);
> +
> +        if (rtype == KVM_REG_RISCV_VECTOR &&
> +            riscv_isa_extension_available(isa, v)) {
> +                reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
> +        }
> +
> +        if (!reg_val)
> +                return -EINVAL;
> +
> +        if (copy_to_user(uaddr, reg_val, reg_size))
> +                return -EFAULT;
> +
> +        return 0;
> +}
> +
> +int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
> +                                  const struct kvm_one_reg *reg,
> +                                  unsigned long rtype)
> +{
> +        unsigned long *isa = vcpu->arch.isa;
> +        unsigned long __user *uaddr =
> +                        (unsigned long __user *)(unsigned long)reg->addr;
> +        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> +                                            KVM_REG_SIZE_MASK |
> +                                            rtype);
> +        void *reg_val = NULL;
> +        size_t reg_size = KVM_REG_SIZE(reg->id);
> +
> +        if (rtype == KVM_REG_RISCV_VECTOR &&
> +            riscv_isa_extension_available(isa, v)) {
> +                reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
> +        }
> +
> +        if (!reg_val)
> +                return -EINVAL;
> +
> +        if (copy_from_user(reg_val, uaddr, reg_size))
> +                return -EFAULT;
> +
> +        return 0;
> +}
> --
> 2.17.1
>

Regards,
Anup
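For illustration, the helper Anup asks for could look roughly like this. It simply moves the allocation code already in the patch from kvm_arch_vcpu_create() into vcpu_vector.c; the exact signature, the has_vector() stub behavior, and the !CONFIG_RISCV_ISA_V handling are assumptions, not the final upstream implementation:

```c
/* arch/riscv/kvm/vcpu_vector.c (sketch) */
#include <linux/kvm_host.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>		/* assumed to declare riscv_v_vsize, has_vector() */

int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx)
{
	/* Nothing to allocate when the host has no vector unit. */
	if (!has_vector())
		return 0;

	/* Same two allocations as in kvm_arch_vcpu_create() above. */
	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
	if (!cntx->vector.datap)
		return -ENOMEM;

	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.host_context.vector.datap)
		return -ENOMEM;

	return 0;
}
```

The call site in kvm_arch_vcpu_create() would then shrink to something like `if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx)) return -ENOMEM;`, keeping all vector-specific knowledge inside vcpu_vector.c.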
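Likewise, the riscv_v_vsize extern that Anup flags would turn into an include, assuming the declaration lives in asm/vector.h (where the base vector-support patches in this series appear to export it); a sketch of the change:

```diff
 #ifdef CONFIG_RISCV_ISA_V
-extern unsigned long riscv_v_vsize;
+#include <asm/vector.h>	/* declares riscv_v_vsize */
```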
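As a usage sketch of the new uapi encoding: userspace addresses the vector state through KVM_GET_ONE_REG/KVM_SET_ONE_REG by OR-ing the arch, size, and type-8 fields into a register id. The example below reads the guest's vstart CSR and assumes a 64-bit host (so the fixed-size CSR registers are KVM_REG_SIZE_U64); the v0-v31 registers would instead use KVM_REG_RISCV_VECTOR_REG(n) with a size encoding equal to vlenb. The function name is hypothetical:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* struct kvm_one_reg, KVM_REG_RISCV, KVM_REG_SIZE_U64 */
#include <asm/ptrace.h>		/* struct __riscv_v_ext_state */
#include <asm/kvm.h>		/* KVM_REG_RISCV_VECTOR_* from this patch */

/* Read the guest's vstart vector CSR through the one-reg interface. */
static int riscv_vcpu_get_vstart(int vcpu_fd, uint64_t *vstart)
{
	struct kvm_one_reg reg = {
		/* arch | size | type 8 (vector) | register index */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_VECTOR |
		      KVM_REG_RISCV_VECTOR_CSR_REG(vstart),
		.addr = (uintptr_t)vstart,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
```

Per the kvm_riscv_vcpu_vreg_addr() checks in the patch, a mismatched size encoding (or an id past v31) makes the ioctl fail with -EINVAL, and the vector type is only accepted when the VCPU's ISA actually has the V extension.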