@@ -9,6 +9,11 @@
#include <asm/kvm_para.h>
#endif
+#ifdef CONFIG_KVM_INTEL_TDX
+#include <linux/kvm_types.h>
+#include "../kvm/vmx/tdx_arch.h"
+#endif
+
int main(void)
{
#ifdef CONFIG_PARAVIRT
@@ -25,6 +30,16 @@ int main(void)
BLANK();
#endif
+#ifdef CONFIG_KVM_INTEL_TDX
+ OFFSET(TDX_SEAM_rcx, tdx_ex_ret, rcx);
+ OFFSET(TDX_SEAM_rdx, tdx_ex_ret, rdx);
+ OFFSET(TDX_SEAM_r8, tdx_ex_ret, r8);
+ OFFSET(TDX_SEAM_r9, tdx_ex_ret, r9);
+ OFFSET(TDX_SEAM_r10, tdx_ex_ret, r10);
+ OFFSET(TDX_SEAM_r11, tdx_ex_ret, r11);
+ BLANK();
+#endif
+
#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
ENTRY(bx);
ENTRY(cx);
@@ -24,6 +24,7 @@ kvm-$(CONFIG_KVM_XEN) += xen.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
+kvm-intel-$(CONFIG_KVM_INTEL_TDX) += vmx/seamcall.o
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
new file mode 100644
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* ASM helper to call SEAMCALL for P-SEAMLDR, TDX module */
+
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.h>
+#include <asm/asm.h>
+
+#include "seamcall.h"
+
+/*
+ * __seamcall - invoke a SEAMCALL to request a service of the P-SEAMLDR
+ * or the TDX module on behalf of KVM.
+ *
+ * @op  (RDI) SEAMCALL leaf ID, moved into RAX before issuing SEAMCALL
+ * @rcx (RSI) input 1 (optional based on leaf ID)
+ * @rdx (RDX) input 2 (optional based on leaf ID)
+ * @r8  (RCX) input 3 (optional based on leaf ID)
+ * @r9  (R8)  input 4 (optional based on leaf ID)
+ * @r10 (R9)  input 5 (optional based on leaf ID)
+ * @ex  (stack) pointer to struct tdx_ex_ret or NULL; when non-NULL, the
+ *      extra outputs (RCX, RDX, R8-R11 after SEAMCALL) are stored there.
+ * @return RAX: completion code of the P-SEAMLDR or the TDX module;
+ *         0 on success, non-0 on failure.  A faulting SEAMCALL does not
+ *         return here: the extable entry below routes it to kvm_spurious_fault().
+ */
+SYM_FUNC_START(__seamcall)
+	FRAME_BEGIN
+
+	/* Shuffle function-call ABI regs to SEAMCALL ABI regs, highest first so no input is clobbered. */
+	movq %r9, %r10
+	movq %r8, %r9
+	movq %rcx, %r8
+	/* %rdx doesn't need shuffle. */
+	movq %rsi, %rcx
+	movq %rdi, %rax	/* leaf ID is passed to SEAMCALL in RAX */
+
+.Lseamcall:
+	seamcall	/* macro from seamcall.h (raw opcode bytes) */
+	jmp .Lseamcall_ret
+.Lspurious_fault:
+	call kvm_spurious_fault	/* reached via the extable when SEAMCALL faults */
+.Lseamcall_ret:
+
+	movq (FRAME_OFFSET + 8)(%rsp), %rdi	/* reload 7th C argument "ex" from the stack */
+	testq %rdi, %rdi
+	jz 1f	/* ex == NULL: caller doesn't want the extra outputs */
+
+	/* If ex is non-NULL, store extra return values into it. */
+	movq %rcx, TDX_SEAM_rcx(%rdi)
+	movq %rdx, TDX_SEAM_rdx(%rdi)
+	movq %r8, TDX_SEAM_r8(%rdi)
+	movq %r9, TDX_SEAM_r9(%rdi)
+	movq %r10, TDX_SEAM_r10(%rdi)
+	movq %r11, TDX_SEAM_r11(%rdi)
+
+1:
+	FRAME_END
+	ret
+
+	_ASM_EXTABLE(.Lseamcall, .Lspurious_fault)	/* a fault at .Lseamcall jumps to .Lspurious_fault */
+SYM_FUNC_END(__seamcall)
new file mode 100644
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_VMX_SEAMCALL_H
+#define __KVM_VMX_SEAMCALL_H
+
+#ifdef __ASSEMBLY__
+
+#define seamcall .byte 0x66, 0x0f, 0x01, 0xcf	/* raw opcode bytes of the SEAMCALL instruction */
+
+#else
+
+#ifndef seamcall	/* NOTE(review): only the __ASSEMBLY__ branch defines "seamcall"; presumably this guard lets an includer override — confirm */
+struct tdx_ex_ret;
+asmlinkage u64 __seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
+			  struct tdx_ex_ret *ex);	/* asm helper; see seamcall.S */
+
+#define seamcall(op, rcx, rdx, r8, r9, r10, ex) /* invoke leaf SEAMCALL_##op; returns completion code, 0 on success */ \
+	__seamcall(SEAMCALL_##op, (rcx), (rdx), (r8), (r9), (r10), (ex))
+#endif
+
+/* Ratelimited report of a failed SEAMCALL.  NOTE(review): @op is unused here; only @op_str is printed. */
+static inline void __pr_seamcall_error(u64 op, const char *op_str,
+				       u64 err, struct tdx_ex_ret *ex)
+{
+	pr_err_ratelimited("SEAMCALL[%s] failed on cpu %d: 0x%llx\n",
+			   op_str, smp_processor_id(), (err));
+	if (ex)	/* dump the extra output registers captured by __seamcall() */
+		pr_err_ratelimited(
+			"RCX 0x%llx, RDX 0x%llx, R8 0x%llx, R9 0x%llx, R10 0x%llx, R11 0x%llx\n",
+			(ex)->rcx, (ex)->rdx, (ex)->r8, (ex)->r9, (ex)->r10,
+			(ex)->r11);
+}
+
+#define pr_seamcall_error(op, err, ex) \
+	__pr_seamcall_error(SEAMCALL_##op, #op, (err), (ex))
+
+/* WARN once and log on SEAMCALL failure; ex is a pointer to struct tdx_ex_ret or NULL.  NOTE(review): evaluates to WARN_ON_ONCE's 0/1 result, not err itself. */
+#define TDX_ERR(err, op, ex)			\
+({						\
+	u64 __ret_warn_on = WARN_ON_ONCE(err);	\
+						\
+	if (unlikely(__ret_warn_on))		\
+		pr_seamcall_error(op, err, ex);	\
+	__ret_warn_on;				\
+})
+
+#endif
+
+#endif /* __KVM_VMX_SEAMCALL_H */