@@ -88,6 +88,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pvlock;
#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
@@ -35,6 +35,7 @@ enum sbi_ext_id {
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_STA = 0x535441,
SBI_EXT_NACL = 0x4E41434C,
+ SBI_EXT_PVLOCK = 0xAB0401,
/* Experimentals extensions must lie within this range */
SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -401,6 +402,10 @@ enum sbi_ext_nacl_feature {
#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i))
#define SBI_NACL_SHMEM_SRET_X_LAST 31
+enum sbi_ext_pvlock_fid {
+ SBI_EXT_PVLOCK_KICK_CPU = 0,
+};
+
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
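For reference, the guest-side caller of this new function ID is not part of this diff. A minimal, illustrative sketch (assuming the usual pvqspinlock pv_kick() hook name, and that the target is identified by its hart id, which is what the host handler further below looks up) could be:

        #include <asm/sbi.h>
        #include <asm/smp.h>

        /* Hypothetical guest-side kick: sbi_ecall() places the extension id in
         * a7, SBI_EXT_PVLOCK_KICK_CPU in a6 and the target hart id in a0. */
        static void pv_kick(int cpu)
        {
                sbi_ecall(SBI_EXT_PVLOCK, SBI_EXT_PVLOCK_KICK_CPU,
                          cpuid_to_hartid_map(cpu), 0, 0, 0, 0, 0);
        }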
@@ -198,6 +198,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
+ KVM_RISCV_SBI_EXT_PVLOCK,
KVM_RISCV_SBI_EXT_MAX,
};
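On the userspace side nothing new is needed beyond the existing KVM_REG_RISCV_SBI_EXT one-reg interface; KVM_RISCV_SBI_EXT_PVLOCK simply becomes another per-vCPU toggle. A rough sketch of how a VMM could disable or re-enable the extension, assuming vcpu_fd is an already-created vCPU descriptor and a RISC-V userspace build where <linux/kvm.h> pulls in the arch definitions:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static int set_sbi_pvlock(int vcpu_fd, uint64_t enable)
        {
                /* Same register encoding the other single SBI extensions use. */
                struct kvm_one_reg reg = {
                        .id = KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
                              KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
                              KVM_RISCV_SBI_EXT_PVLOCK,
                        .addr = (uint64_t)&enable,      /* 1 = enabled, 0 = disabled */
                };

                return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
        }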
@@ -32,6 +32,7 @@ kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_sbi_sta.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_switch.o
+kvm-y += vcpu_sbi_pvlock.o
kvm-y += vcpu_timer.o
kvm-y += vcpu_vector.o
kvm-y += vm.o
@@ -82,6 +82,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_ptr = &vcpu_sbi_ext_vendor,
},
+ {
+ .ext_idx = KVM_RISCV_SBI_EXT_PVLOCK,
+ .ext_ptr = &vcpu_sbi_ext_pvlock,
+ },
};
static const struct kvm_riscv_sbi_extension_entry *
new file mode 100644
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Alibaba Cloud
+ *
+ * Authors:
+ * Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_ext_pvlock_kick_cpu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *target;
+
+ /* a0 carries the hart (vCPU) id the guest wants to kick. */
+ target = kvm_get_vcpu_by_id(kvm, cp->a0);
+ if (!target)
+ return SBI_ERR_INVALID_PARAM;
+
+ /* Wake the target in case it is halted (e.g. blocked in WFI). */
+ kvm_vcpu_kick(target);
+
+ /* If the target was woken or preempted, donate our timeslice to it. */
+ if (READ_ONCE(target->ready))
+ kvm_vcpu_yield_to(target);
+
+ return SBI_SUCCESS;
+}
+
+static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long funcid = cp->a6;
+
+ switch (funcid) {
+ case SBI_EXT_PVLOCK_KICK_CPU:
+ ret = kvm_sbi_ext_pvlock_kick_cpu(vcpu);
+ break;
+ default:
+ ret = SBI_ERR_NOT_SUPPORTED;
+ }
+
+ retdata->err_val = ret;
+
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pvlock = {
+ .extid_start = SBI_EXT_PVLOCK,
+ .extid_end = SBI_EXT_PVLOCK,
+ .handler = kvm_sbi_ext_pvlock_handler,
+};
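For completeness, a guest would normally probe for the extension before switching to a paravirtual slow path; a minimal, hypothetical guest-side check using the existing sbi_probe_extension() helper:

        #include <linux/types.h>
        #include <asm/sbi.h>

        static bool pvlock_available(void)
        {
                /* Non-positive result: the host does not implement SBI_EXT_PVLOCK. */
                return sbi_probe_extension(SBI_EXT_PVLOCK) > 0;
        }

On the host side, the handler pairs kvm_vcpu_kick() with a conditional kvm_vcpu_yield_to(): the kick wakes a waiter halted in WFI, while the directed yield hands the caller's timeslice to a waiter that is runnable but not currently scheduled, which is the usual way to shorten lock handover under vCPU overcommit.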