@@ -241,4 +241,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
+
#endif /* __RISCV_KVM_HOST_H__ */
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
kvm-objs := $(common-objs-y)
kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
obj-$(CONFIG_KVM) += kvm.o
@@ -534,6 +534,11 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
(vcpu->arch.guest_context.hstatus & HSTATUS_STL))
ret = stage2_page_fault(vcpu, run, scause, stval);
break;
+ case EXC_SUPERVISOR_SYSCALL:
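+		/* An ecall from VS-mode (SPV set) is an SBI call from the guest */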
+ if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+			ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
+		break;
default:
break;
};
new file mode 100644
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/sbi.h>
+
+#define SBI_VERSION_MAJOR 0
+#define SBI_VERSION_MINOR 1
+
+/* TODO: Handle traps due to unpriv loads and redirect them back to VS-mode */
+static unsigned long kvm_sbi_unpriv_load(const unsigned long *addr,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long flags, val;
+ unsigned long __hstatus, __sstatus;
+
+ local_irq_save(flags);
+ __hstatus = csr_read(CSR_HSTATUS);
+ __sstatus = csr_read(CSR_SSTATUS);
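+	/*
+	 * With the guest's HSTATUS (plus SPRV) and SSTATUS in place, the
+	 * load below is translated and permission-checked as if it were
+	 * executed from VS-mode, i.e. through the guest's page tables.
+	 */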
+ csr_write(CSR_HSTATUS, vcpu->arch.guest_context.hstatus | HSTATUS_SPRV);
+ csr_write(CSR_SSTATUS, vcpu->arch.guest_context.sstatus);
+ val = *addr;
+ csr_write(CSR_HSTATUS, __hstatus);
+ csr_write(CSR_SSTATUS, __sstatus);
+ local_irq_restore(flags);
+
+ return val;
+}
+
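+/*
+ * Power off all VCPUs, park them with a KVM_REQ_SLEEP request, and
+ * forward the event to userspace as a KVM_EXIT_SYSTEM_EVENT exit.
+ */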
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
+{
+ int i;
+ struct kvm_vcpu *tmp;
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+ tmp->arch.power_off = true;
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+ memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+ vcpu->run->system_event.type = type;
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
+{
+ int ret = 1;
+ u64 next_cycle;
+ int vcpuid;
+ struct kvm_vcpu *remote_vcpu;
+ ulong dhart_mask;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
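+	/*
+	 * Per the SBI v0.1 calling convention, a7 holds the SBI call
+	 * number and a0/a1 carry its arguments; any result is returned
+	 * in a0.
+	 */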
+ switch (cp->a7) {
+ case SBI_SET_TIMER:
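+		/* The 64-bit next event time is split across a0/a1 on RV32 */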
+#if __riscv_xlen == 32
+ next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+ next_cycle = (u64)cp->a0;
+#endif
+ kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+ break;
+	case SBI_CONSOLE_PUTCHAR:
+	case SBI_CONSOLE_GETCHAR:
+		/* Not implemented */
+		cp->a0 = -ENOTSUPP;
+		break;
+ case SBI_CLEAR_IPI:
+ kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
+ break;
+ case SBI_SEND_IPI:
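+		/*
+		 * a0 points to a hart mask in guest memory, so it must be
+		 * read through the guest's address space.
+		 */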
+ dhart_mask = kvm_sbi_unpriv_load((unsigned long *)cp->a0, vcpu);
+		for_each_set_bit(vcpuid, &dhart_mask, BITS_PER_LONG) {
+			remote_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, vcpuid);
+			if (remote_vcpu)
+				kvm_riscv_vcpu_set_interrupt(remote_vcpu,
+							     IRQ_S_SOFT);
+		}
+ break;
+ case SBI_SHUTDOWN:
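+		/* Exit to userspace (ret = 0) so it can act on the event */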
+ kvm_sbi_system_shutdown(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+ ret = 0;
+ break;
+ case SBI_REMOTE_FENCE_I:
+ sbi_remote_fence_i(NULL);
+ break;
+	/*
+	 * TODO: There should be a way to call a remote hfence.bvma.
+	 * The preferred method is an SBI call. Until then, just flush
+	 * all TLBs.
+	 */
+ case SBI_REMOTE_SFENCE_VMA:
+		/* TODO: Parse the VMA range */
+ sbi_remote_sfence_vma(NULL, 0, 0);
+ break;
+ case SBI_REMOTE_SFENCE_VMA_ASID:
+		/* TODO: Parse the VMA range for the given ASID */
+ sbi_remote_sfence_vma(NULL, 0, 0);
+ break;
+ default:
+		cp->a0 = -ENOTSUPP;
+ break;
+	}
+
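+	/* On success, advance sepc past the 4-byte ecall instruction */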
+ if (ret >= 0)
+ cp->sepc += 4;
+
+ return ret;
+}