
[RFC,v2,08/69] KVM: TDX: add trace point before/after TDX SEAMCALLs

Message ID: 28a0ae6b767260fcb410c6ddff7de84f4e13062c.1625186503.git.isaku.yamahata@intel.com
State: New, archived
Series: KVM: X86: TDX support

Commit Message

Isaku Yamahata July 2, 2021, 10:04 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/kvm/trace.h         | 80 ++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/seamcall.h  | 22 ++++++++-
 arch/x86/kvm/vmx/tdx_arch.h  | 47 ++++++++++++++++++
 arch/x86/kvm/vmx/tdx_errno.h | 96 ++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c           |  2 +
 5 files changed, 246 insertions(+), 1 deletion(-)

Comments

Paolo Bonzini July 6, 2021, 1:23 p.m. UTC | #1
On 03/07/21 00:04, isaku.yamahata@intel.com wrote:
> +	trace_kvm_tdx_seamcall_enter(smp_processor_id(), op,
> +				     rcx, rdx, r8, r9, r10);
> +	err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);
> +	if (ex)
> +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex->rcx,
> +					    ex->rdx, ex->r8, ex->r9, ex->r10,
> +					    ex->r11);
> +	else
> +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err,
> +					    0, 0, 0, 0, 0, 0);

Would it make sense to do the zeroing of ex directly in __seamcall in 
case there is an error?
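
Something like this, as a rough sketch in a C wrapper (the helper name and
the memset are mine, not the actual __seamcall implementation; doing it in
the asm stub itself would avoid the extra wrapper):

  static inline u64 __seamcall_zero_ex(u64 op, u64 rcx, u64 rdx, u64 r8,
				       u64 r9, u64 r10, struct tdx_ex_ret *ex)
  {
	u64 err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);

	/* Zero the extended return registers on failure so callers and
	 * tracepoints never consume stale values. */
	if (err && ex)
		memset(ex, 0, sizeof(*ex));

	return err;
  }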

Otherwise looks good.

Paolo
Sean Christopherson July 13, 2021, 7:33 p.m. UTC | #2
On Fri, Jul 02, 2021, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
> 
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
>  arch/x86/kvm/trace.h         | 80 ++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/seamcall.h  | 22 ++++++++-
>  arch/x86/kvm/vmx/tdx_arch.h  | 47 ++++++++++++++++++
>  arch/x86/kvm/vmx/tdx_errno.h | 96 ++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c           |  2 +
>  5 files changed, 246 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
> index 4f839148948b..c3398d0de9a7 100644
> --- a/arch/x86/kvm/trace.h
> +++ b/arch/x86/kvm/trace.h
> @@ -8,6 +8,9 @@
>  #include <asm/clocksource.h>
>  #include <asm/pvclock-abi.h>
>  
> +#include "vmx/tdx_arch.h"
> +#include "vmx/tdx_errno.h"
> +
>  #undef TRACE_SYSTEM
>  #define TRACE_SYSTEM kvm
>  
> @@ -659,6 +662,83 @@ TRACE_EVENT(kvm_nested_vmexit_inject,
>  		  __entry->exit_int_info, __entry->exit_int_info_err)
>  );
>  
> +/*
> + * Tracepoint for the start of TDX SEAMCALLs.
> + */
> +TRACE_EVENT(kvm_tdx_seamcall_enter,

To avoid confusion, I think it makes sense to avoid "enter" and "exit".  E.g.
my first reaction was that the tracepoint was specific to TDENTER.  And under
the hood, SEAMCALL is technically an exit :-)

What about kvm_tdx_seamcall and kvm_tdx_seamret?  If the seamret usage is too
much of a stretch, kvm_tdx_seamcall_begin/end?

> +	TP_PROTO(int cpuid, __u64 op, __u64 rcx, __u64 rdx, __u64 r8,
> +		 __u64 r9, __u64 r10),
> +	TP_ARGS(cpuid, op, rcx, rdx, r8, r9, r10),

"cpuid" is potentially confusing without looking at the caller.  pcpu or pcpu_id
would be preferable.
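
E.g., with just the parameter renamed:

	TP_PROTO(int pcpu_id, __u64 op, __u64 rcx, __u64 rdx, __u64 r8,
		 __u64 r9, __u64 r10),
	TP_ARGS(pcpu_id, op, rcx, rdx, r8, r9, r10),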

> diff --git a/arch/x86/kvm/vmx/seamcall.h b/arch/x86/kvm/vmx/seamcall.h
> index a318940f62ed..2c83ab46eeac 100644
> --- a/arch/x86/kvm/vmx/seamcall.h
> +++ b/arch/x86/kvm/vmx/seamcall.h
> @@ -9,12 +9,32 @@
>  #else
>  
>  #ifndef seamcall
> +#include "trace.h"
> +
>  struct tdx_ex_ret;
>  asmlinkage u64 __seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
>  			  struct tdx_ex_ret *ex);
>  
> +static inline u64 _seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
> +			    struct tdx_ex_ret *ex)
> +{
> +	u64 err;
> +
> +	trace_kvm_tdx_seamcall_enter(smp_processor_id(), op,
> +				     rcx, rdx, r8, r9, r10);
> +	err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);

What was the motivation behind switching from the macro magic[*] to a dedicated
asm subroutine?  The macros are gross, but IMO they yielded much more readable
code for the upper level helpers, which is what people will look at the vast
majority of the time.  E.g.

  static inline u64 tdh_sys_lp_shutdown(void)
  {
  	return seamcall(TDH_SYS_LP_SHUTDOWN, 0, 0, 0, 0, 0, NULL);
  }

  static inline u64 tdh_mem_track(hpa_t tdr)
  {
  	return seamcall(TDH_MEM_TRACK, tdr, 0, 0, 0, 0, NULL);
  }

versus

  static inline u64 tdsysshutdownlp(void)
  {
  	return seamcall_0(TDSYSSHUTDOWNLP);
  }

  static inline u64 tdtrack(hpa_t tdr)
  {
  	return seamcall_1(TDTRACK, tdr);
  }


The new approach also generates very suboptimal code due to the need to shuffle
registers everywhere, e.g. gcc doesn't inline _seamcall because it's a whopping
200+ bytes.

[*] https://patchwork.kernel.org/project/kvm/patch/25f0d2c2f73c20309a1b578cc5fc15f4fd6b9a13.1605232743.git.isaku.yamahata@intel.com/
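
For reference, a rough sketch of the call-site shape of those arity-specific
macros (hypothetical; this only illustrates the helpers quoted above, not the
literal v1 implementation, which per the above avoided funneling everything
through a single seven-argument function):

  #define seamcall_0(op)						\
	__seamcall(SEAMCALL_##op, 0, 0, 0, 0, 0, NULL)
  #define seamcall_1(op, rcx)						\
	__seamcall(SEAMCALL_##op, (rcx), 0, 0, 0, 0, NULL)
  #define seamcall_2(op, rcx, rdx)					\
	__seamcall(SEAMCALL_##op, (rcx), (rdx), 0, 0, 0, NULL)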

> +	if (ex)
> +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex->rcx,

smp_processor_id() is not stable since this code runs with IRQs and preemption
enabled, e.g. if the task is preempted between the tracepoint and the actual
SEAMCALL then the tracepoint may be wrong.  There could also be weirdly "nested"
tracepoints since migrating the task will generate TDH_VP_FLUSH.
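
One way to at least make the enter/exit pair agree with each other is to
sample the CPU once up front (a sketch; raw_smp_processor_id() silences the
preemption check, but the task can still migrate before the SEAMCALL
actually runs, so the value remains best-effort):

	int cpu = raw_smp_processor_id();
	u64 err;

	trace_kvm_tdx_seamcall_enter(cpu, op, rcx, rdx, r8, r9, r10);
	err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);
	if (ex)
		trace_kvm_tdx_seamcall_exit(cpu, op, err, ex->rcx, ex->rdx,
					    ex->r8, ex->r9, ex->r10, ex->r11);
	else
		trace_kvm_tdx_seamcall_exit(cpu, op, err, 0, 0, 0, 0, 0, 0);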

> +					    ex->rdx, ex->r8, ex->r9, ex->r10,
> +					    ex->r11);
> +	else
> +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err,
> +					    0, 0, 0, 0, 0, 0);
> +	return err;
> +}
> +
>  #define seamcall(op, rcx, rdx, r8, r9, r10, ex)				\
> -	__seamcall(SEAMCALL_##op, (rcx), (rdx), (r8), (r9), (r10), (ex))
> +	_seamcall(SEAMCALL_##op, (rcx), (rdx), (r8), (r9), (r10), (ex))
>  #endif
Sean Christopherson July 13, 2021, 7:53 p.m. UTC | #3
On Tue, Jul 06, 2021, Paolo Bonzini wrote:
> On 03/07/21 00:04, isaku.yamahata@intel.com wrote:
> > +	trace_kvm_tdx_seamcall_enter(smp_processor_id(), op,
> > +				     rcx, rdx, r8, r9, r10);
> > +	err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);
> > +	if (ex)
> > +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex->rcx,
> > +					    ex->rdx, ex->r8, ex->r9, ex->r10,
> > +					    ex->r11);
> > +	else
> > +		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err,
> > +					    0, 0, 0, 0, 0, 0);
> 
> Would it make sense to do the zeroing of ex directly in __seamcall in case
> there is an error?

A better option would be to pass "ex" into the tracepoint.  tdx_arch.h is already
included by trace.h (though I'm not sure that's a good thing), and the cost of
checking ex against NULL over and over is a non-issue because it's buried in the
tracepoint, i.e. hidden behind a patched nop.  The below reduces the footprint of
_seamcall by 100+ bytes of code, presumably due to avoiding even more register
shuffling (I didn't look too closely).

That said, I'm not sure adding generic tracepoints is a good idea.  The flows
that truly benefit from tracepoints will likely want to provide more/different
information, e.g. the entry/exit flow already uses kvm_trace_entry/exit, and the
SEPT flows have dedicated tracepoints.  For flows like tdh_vp_flush(), which
might benefit from a tracepoint, they'll only provide the host PA of the TDVPR,
which is rather useless on its own.  It's probably possible to cross-reference
everything to understand what's going on, but it certainly won't be easy.
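
As an illustration, a dedicated tracepoint for the flush path could carry the
context that makes the event self-describing (hypothetical sketch; the
vcpu_id/tdvpr fields are guesses at what would be worth logging, not an
existing tracepoint):

  TRACE_EVENT(kvm_tdx_vp_flush,
	TP_PROTO(unsigned int vcpu_id, u64 tdvpr_pa, u64 err),
	TP_ARGS(vcpu_id, tdvpr_pa, err),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id	)
		__field(	u64,		tdvpr_pa)
		__field(	u64,		err	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->tdvpr_pa	= tdvpr_pa;
		__entry->err		= err;
	),

	TP_printk("vcpu %u tdvpr 0x%llx err 0x%llx",
		  __entry->vcpu_id, __entry->tdvpr_pa, __entry->err)
  );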

I can see the generic tracepoint being somewhat useful for debugging early
development and/or a new TDX module, but otherwise I think it will be mostly
overhead.  E.g. if a TDX failure pops up in production, enabling the tracepoint
might not even be viable.  And even for the cases where the tracepoint is useful,
I would be quite surprised if additional instrumentation wasn't needed to debug
non-trivial issues.


diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 58631124f08d..e2868f6d84f8 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -701,9 +701,8 @@ TRACE_EVENT(kvm_tdx_seamcall_enter,
  * Tracepoint for the end of TDX SEAMCALLs.
  */
 TRACE_EVENT(kvm_tdx_seamcall_exit,
-       TP_PROTO(int cpuid, __u64 op, __u64 err, __u64 rcx, __u64 rdx, __u64 r8,
-                __u64 r9, __u64 r10, __u64 r11),
-       TP_ARGS(cpuid, op, err, rcx, rdx, r8, r9, r10, r11),
+       TP_PROTO(int cpuid, __u64 op, __u64 err, struct tdx_ex_ret *ex),
+       TP_ARGS(cpuid, op, err, ex),

        TP_STRUCT__entry(
                __field(        int,            cpuid   )
@@ -721,12 +720,12 @@ TRACE_EVENT(kvm_tdx_seamcall_exit,
                __entry->cpuid                  = cpuid;
                __entry->op                     = op;
                __entry->err                    = err;
-               __entry->rcx                    = rcx;
-               __entry->rdx                    = rdx;
-               __entry->r8                     = r8;
-               __entry->r9                     = r9;
-               __entry->r10                    = r10;
-               __entry->r11                    = r11;
+               __entry->rcx                    = ex ? ex->rcx : 0;
+               __entry->rdx                    = ex ? ex->rdx : 0;
+               __entry->r8                     = ex ? ex->r8  : 0;
+               __entry->r9                     = ex ? ex->r9  : 0;
+               __entry->r10                    = ex ? ex->r10 : 0;
+               __entry->r11                    = ex ? ex->r11 : 0;
        ),

        TP_printk("cpu: %d op: %s err %s 0x%llx rcx: 0x%llx rdx: 0x%llx r8: 0x%llx r9: 0x%llx r10: 0x%llx r11: 0x%llx",
diff --git a/arch/x86/kvm/vmx/seamcall.h b/arch/x86/kvm/vmx/seamcall.h
index 85eeedc06a4f..b2067f7e6a9d 100644
--- a/arch/x86/kvm/vmx/seamcall.h
+++ b/arch/x86/kvm/vmx/seamcall.h
@@ -23,13 +23,8 @@ static inline u64 _seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
        trace_kvm_tdx_seamcall_enter(smp_processor_id(), op,
                                     rcx, rdx, r8, r9, r10);
        err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);
-       if (ex)
-               trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex->rcx,
-                                           ex->rdx, ex->r8, ex->r9, ex->r10,
-                                           ex->r11);
-       else
-               trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err,
-                                           0, 0, 0, 0, 0, 0);
+       trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex);
+
        return err;
 }

Patch

diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 4f839148948b..c3398d0de9a7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -8,6 +8,9 @@ 
 #include <asm/clocksource.h>
 #include <asm/pvclock-abi.h>
 
+#include "vmx/tdx_arch.h"
+#include "vmx/tdx_errno.h"
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
@@ -659,6 +662,83 @@ TRACE_EVENT(kvm_nested_vmexit_inject,
 		  __entry->exit_int_info, __entry->exit_int_info_err)
 );
 
+/*
+ * Tracepoint for the start of TDX SEAMCALLs.
+ */
+TRACE_EVENT(kvm_tdx_seamcall_enter,
+	TP_PROTO(int cpuid, __u64 op, __u64 rcx, __u64 rdx, __u64 r8,
+		 __u64 r9, __u64 r10),
+	TP_ARGS(cpuid, op, rcx, rdx, r8, r9, r10),
+
+	TP_STRUCT__entry(
+		__field(	int,		cpuid	)
+		__field(	__u64,		op	)
+		__field(	__u64,		rcx	)
+		__field(	__u64,		rdx	)
+		__field(	__u64,		r8	)
+		__field(	__u64,		r9	)
+		__field(	__u64,		r10	)
+	),
+
+	TP_fast_assign(
+		__entry->cpuid			= cpuid;
+		__entry->op			= op;
+		__entry->rcx			= rcx;
+		__entry->rdx			= rdx;
+		__entry->r8			= r8;
+		__entry->r9			= r9;
+		__entry->r10			= r10;
+	),
+
+	TP_printk("cpu: %d op: %s rcx: 0x%llx rdx: 0x%llx r8: 0x%llx r9: 0x%llx r10: 0x%llx",
+		  __entry->cpuid,
+		  __print_symbolic(__entry->op, TDX_SEAMCALL_OP_CODES),
+		  __entry->rcx, __entry->rdx, __entry->r8,
+		  __entry->r9, __entry->r10)
+);
+
+/*
+ * Tracepoint for the end of TDX SEAMCALLs.
+ */
+TRACE_EVENT(kvm_tdx_seamcall_exit,
+	TP_PROTO(int cpuid, __u64 op, __u64 err, __u64 rcx, __u64 rdx, __u64 r8,
+		 __u64 r9, __u64 r10, __u64 r11),
+	TP_ARGS(cpuid, op, err, rcx, rdx, r8, r9, r10, r11),
+
+	TP_STRUCT__entry(
+		__field(	int,		cpuid	)
+		__field(	__u64,		op	)
+		__field(	__u64,		err	)
+		__field(	__u64,		rcx	)
+		__field(	__u64,		rdx	)
+		__field(	__u64,		r8	)
+		__field(	__u64,		r9	)
+		__field(	__u64,		r10	)
+		__field(	__u64,		r11	)
+	),
+
+	TP_fast_assign(
+		__entry->cpuid			= cpuid;
+		__entry->op			= op;
+		__entry->err			= err;
+		__entry->rcx			= rcx;
+		__entry->rdx			= rdx;
+		__entry->r8			= r8;
+		__entry->r9			= r9;
+		__entry->r10			= r10;
+		__entry->r11			= r11;
+	),
+
+	TP_printk("cpu: %d op: %s err %s 0x%llx rcx: 0x%llx rdx: 0x%llx r8: 0x%llx r9: 0x%llx r10: 0x%llx r11: 0x%llx",
+		  __entry->cpuid,
+		  __print_symbolic(__entry->op, TDX_SEAMCALL_OP_CODES),
+		  __print_symbolic(__entry->err & TDX_SEAMCALL_STATUS_MASK,
+				   TDX_SEAMCALL_STATUS_CODES),
+		  __entry->err,
+		  __entry->rcx, __entry->rdx, __entry->r8,
+		  __entry->r9, __entry->r10, __entry->r11)
+);
+
 /*
  * Tracepoint for nested #vmexit because of interrupt pending
  */
diff --git a/arch/x86/kvm/vmx/seamcall.h b/arch/x86/kvm/vmx/seamcall.h
index a318940f62ed..2c83ab46eeac 100644
--- a/arch/x86/kvm/vmx/seamcall.h
+++ b/arch/x86/kvm/vmx/seamcall.h
@@ -9,12 +9,32 @@ 
 #else
 
 #ifndef seamcall
+#include "trace.h"
+
 struct tdx_ex_ret;
 asmlinkage u64 __seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
 			  struct tdx_ex_ret *ex);
 
+static inline u64 _seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9, u64 r10,
+			    struct tdx_ex_ret *ex)
+{
+	u64 err;
+
+	trace_kvm_tdx_seamcall_enter(smp_processor_id(), op,
+				     rcx, rdx, r8, r9, r10);
+	err = __seamcall(op, rcx, rdx, r8, r9, r10, ex);
+	if (ex)
+		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err, ex->rcx,
+					    ex->rdx, ex->r8, ex->r9, ex->r10,
+					    ex->r11);
+	else
+		trace_kvm_tdx_seamcall_exit(smp_processor_id(), op, err,
+					    0, 0, 0, 0, 0, 0);
+	return err;
+}
+
 #define seamcall(op, rcx, rdx, r8, r9, r10, ex)				\
-	__seamcall(SEAMCALL_##op, (rcx), (rdx), (r8), (r9), (r10), (ex))
+	_seamcall(SEAMCALL_##op, (rcx), (rdx), (r8), (r9), (r10), (ex))
 #endif
 
 static inline void __pr_seamcall_error(u64 op, const char *op_str,
diff --git a/arch/x86/kvm/vmx/tdx_arch.h b/arch/x86/kvm/vmx/tdx_arch.h
index 57e9ea4a7fad..559a63290c4d 100644
--- a/arch/x86/kvm/vmx/tdx_arch.h
+++ b/arch/x86/kvm/vmx/tdx_arch.h
@@ -51,6 +51,53 @@ 
 #define SEAMCALL_TDH_SYS_LP_SHUTDOWN		44
 #define SEAMCALL_TDH_SYS_CONFIG			45
 
+#define TDX_BUILD_OP_CODE(name)	{ SEAMCALL_ ## name, #name }
+
+#define TDX_SEAMCALL_OP_CODES				\
+	TDX_BUILD_OP_CODE(TDH_VP_ENTER),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_ADDCX),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_PAGE_ADD),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_SEPT_ADD),		\
+	TDX_BUILD_OP_CODE(TDH_VP_ADDCX),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_PAGE_AUG),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_RANGE_BLOCK),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_KEY_CONFIG),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_CREATE),		\
+	TDX_BUILD_OP_CODE(TDH_VP_CREATE),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_RD),			\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_PAGE_RD),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_WR),			\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_PAGE_WR),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_PAGE_DEMOTE),		\
+	TDX_BUILD_OP_CODE(TDH_MR_EXTEND),		\
+	TDX_BUILD_OP_CODE(TDH_MR_FINALIZE),		\
+	TDX_BUILD_OP_CODE(TDH_VP_FLUSH),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_VPFLUSHDONE),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_KEY_FREEID),		\
+	TDX_BUILD_OP_CODE(TDH_MNG_INIT),		\
+	TDX_BUILD_OP_CODE(TDH_VP_INIT),			\
+	TDX_BUILD_OP_CODE(TDH_MEM_PAGE_PROMOTE),	\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_PAGE_RDMD),	\
+	TDX_BUILD_OP_CODE(TDH_MEM_SEPT_RD),		\
+	TDX_BUILD_OP_CODE(TDH_VP_RD),			\
+	TDX_BUILD_OP_CODE(TDH_MNG_KEY_RECLAIMID),	\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_PAGE_RECLAIM),	\
+	TDX_BUILD_OP_CODE(TDH_MEM_PAGE_REMOVE),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_SEPT_REMOVE),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_KEY_CONFIG),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_INFO),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_INIT),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_LP_INIT),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_TDMR_INIT),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_TRACK),		\
+	TDX_BUILD_OP_CODE(TDH_MEM_RANGE_UNBLOCK),	\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_CACHE_WB),		\
+	TDX_BUILD_OP_CODE(TDH_PHYMEM_PAGE_WBINVD),	\
+	TDX_BUILD_OP_CODE(TDH_MEM_SEPT_WR),		\
+	TDX_BUILD_OP_CODE(TDH_VP_WR),			\
+	TDX_BUILD_OP_CODE(TDH_SYS_LP_SHUTDOWN),		\
+	TDX_BUILD_OP_CODE(TDH_SYS_CONFIG)
+
 #define TDG_VP_VMCALL_GET_TD_VM_CALL_INFO		0x10000
 #define TDG_VP_VMCALL_MAP_GPA				0x10001
 #define TDG_VP_VMCALL_GET_QUOTE				0x10002
diff --git a/arch/x86/kvm/vmx/tdx_errno.h b/arch/x86/kvm/vmx/tdx_errno.h
index 675acea412c9..90ee2b5364d6 100644
--- a/arch/x86/kvm/vmx/tdx_errno.h
+++ b/arch/x86/kvm/vmx/tdx_errno.h
@@ -2,6 +2,8 @@ 
 #ifndef __KVM_X86_TDX_ERRNO_H
 #define __KVM_X86_TDX_ERRNO_H
 
+#define TDX_SEAMCALL_STATUS_MASK		0xFFFFFFFF00000000
+
 /*
  * TDX SEAMCALL Status Codes (returned in RAX)
  */
@@ -96,6 +98,100 @@ 
 #define TDX_PAGE_ALREADY_ACCEPTED		0x00000B0A00000000
 #define TDX_PAGE_SIZE_MISMATCH			0xC0000B0B00000000
 
+#define TDX_BUILD_STATUS_CODE(name)	{ name, #name }
+
+#define TDX_SEAMCALL_STATUS_CODES					\
+	TDX_BUILD_STATUS_CODE(TDX_SUCCESS),				\
+	TDX_BUILD_STATUS_CODE(TDX_NON_RECOVERABLE_VCPU),		\
+	TDX_BUILD_STATUS_CODE(TDX_NON_RECOVERABLE_TD),			\
+	TDX_BUILD_STATUS_CODE(TDX_INTERRUPTED_RESUMABLE),		\
+	TDX_BUILD_STATUS_CODE(TDX_INTERRUPTED_RESTARTABLE),		\
+	TDX_BUILD_STATUS_CODE(TDX_NON_RECOVERABLE_TD_FATAL),		\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_RESUMPTION),			\
+	TDX_BUILD_STATUS_CODE(TDX_NON_RECOVERABLE_TD_NO_APIC),		\
+	TDX_BUILD_STATUS_CODE(TDX_OPERAND_INVALID),			\
+	TDX_BUILD_STATUS_CODE(TDX_OPERAND_ADDR_RANGE_ERROR),		\
+	TDX_BUILD_STATUS_CODE(TDX_OPERAND_BUSY),			\
+	TDX_BUILD_STATUS_CODE(TDX_PREVIOUS_TLB_EPOCH_BUSY),		\
+	TDX_BUILD_STATUS_CODE(TDX_SYS_BUSY),				\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_METADATA_INCORRECT),		\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_ALREADY_FREE),			\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_NOT_OWNED_BY_TD),		\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_NOT_FREE),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_ASSOCIATED_PAGES_EXIST),		\
+	TDX_BUILD_STATUS_CODE(TDX_SYSINIT_NOT_PENDING),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYSINIT_NOT_DONE),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYSINITLP_NOT_DONE),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYSINITLP_DONE),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYS_NOT_READY),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYS_SHUTDOWN),			\
+	TDX_BUILD_STATUS_CODE(TDX_SYSCONFIG_NOT_DONE),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_NOT_INITIALIZED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_INITIALIZED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_NOT_FINALIZED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_FINALIZED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_FATAL),				\
+	TDX_BUILD_STATUS_CODE(TDX_TD_NON_DEBUG),			\
+	TDX_BUILD_STATUS_CODE(TDX_TDCX_NUM_INCORRECT),			\
+	TDX_BUILD_STATUS_CODE(TDX_VCPU_STATE_INCORRECT),		\
+	TDX_BUILD_STATUS_CODE(TDX_VCPU_ASSOCIATED),			\
+	TDX_BUILD_STATUS_CODE(TDX_VCPU_NOT_ASSOCIATED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TDVPX_NUM_INCORRECT),			\
+	TDX_BUILD_STATUS_CODE(TDX_NO_VALID_VE_INFO),			\
+	TDX_BUILD_STATUS_CODE(TDX_MAX_VCPUS_EXCEEDED),			\
+	TDX_BUILD_STATUS_CODE(TDX_TSC_ROLLBACK),			\
+	TDX_BUILD_STATUS_CODE(TDX_FIELD_NOT_WRITABLE),			\
+	TDX_BUILD_STATUS_CODE(TDX_FIELD_NOT_READABLE),			\
+	TDX_BUILD_STATUS_CODE(TDX_TD_VMCS_FIELD_NOT_INITIALIZED),	\
+	TDX_BUILD_STATUS_CODE(TDX_KEY_GENERATION_FAILED),		\
+	TDX_BUILD_STATUS_CODE(TDX_TD_KEYS_NOT_CONFIGURED),		\
+	TDX_BUILD_STATUS_CODE(TDX_KEY_STATE_INCORRECT),			\
+	TDX_BUILD_STATUS_CODE(TDX_KEY_CONFIGURED),			\
+	TDX_BUILD_STATUS_CODE(TDX_WBCACHE_NOT_COMPLETE),		\
+	TDX_BUILD_STATUS_CODE(TDX_HKID_NOT_FREE),			\
+	TDX_BUILD_STATUS_CODE(TDX_NO_HKID_READY_TO_WBCACHE),		\
+	TDX_BUILD_STATUS_CODE(TDX_WBCACHE_RESUME_ERROR),		\
+	TDX_BUILD_STATUS_CODE(TDX_FLUSHVP_NOT_DONE),			\
+	TDX_BUILD_STATUS_CODE(TDX_NUM_ACTIVATED_HKIDS_NOT_SUPPORTED),	\
+	TDX_BUILD_STATUS_CODE(TDX_INCORRECT_CPUID_VALUE),		\
+	TDX_BUILD_STATUS_CODE(TDX_BOOT_NT4_SET),			\
+	TDX_BUILD_STATUS_CODE(TDX_INCONSISTENT_CPUID_FIELD),		\
+	TDX_BUILD_STATUS_CODE(TDX_CPUID_LEAF_1F_FORMAT_UNRECOGNIZED),	\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_WBINVD_SCOPE),		\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_PKG_ID),			\
+	TDX_BUILD_STATUS_CODE(TDX_CPUID_LEAF_NOT_SUPPORTED),		\
+	TDX_BUILD_STATUS_CODE(TDX_SMRR_NOT_LOCKED),			\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_SMRR_CONFIGURATION),		\
+	TDX_BUILD_STATUS_CODE(TDX_SMRR_OVERLAPS_CMR),			\
+	TDX_BUILD_STATUS_CODE(TDX_SMRR_LOCK_NOT_SUPPORTED),		\
+	TDX_BUILD_STATUS_CODE(TDX_SMRR_NOT_SUPPORTED),			\
+	TDX_BUILD_STATUS_CODE(TDX_INCONSISTENT_MSR),			\
+	TDX_BUILD_STATUS_CODE(TDX_INCORRECT_MSR_VALUE),			\
+	TDX_BUILD_STATUS_CODE(TDX_SEAMREPORT_NOT_AVAILABLE),		\
+	TDX_BUILD_STATUS_CODE(TDX_PERF_COUNTERS_ARE_PEBS_ENABLED),	\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_TDMR),			\
+	TDX_BUILD_STATUS_CODE(TDX_NON_ORDERED_TDMR),			\
+	TDX_BUILD_STATUS_CODE(TDX_TDMR_OUTSIDE_CMRS),			\
+	TDX_BUILD_STATUS_CODE(TDX_TDMR_ALREADY_INITIALIZED),		\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_PAMT),			\
+	TDX_BUILD_STATUS_CODE(TDX_PAMT_OUTSIDE_CMRS),			\
+	TDX_BUILD_STATUS_CODE(TDX_PAMT_OVERLAP),			\
+	TDX_BUILD_STATUS_CODE(TDX_INVALID_RESERVED_IN_TDMR),		\
+	TDX_BUILD_STATUS_CODE(TDX_NON_ORDERED_RESERVED_IN_TDMR),	\
+	TDX_BUILD_STATUS_CODE(TDX_CMR_LIST_INVALID),			\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_WALK_FAILED),			\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_ENTRY_FREE),			\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_ENTRY_NOT_FREE),			\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_ENTRY_NOT_PRESENT),		\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_ENTRY_NOT_LEAF),			\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_ENTRY_LEAF),			\
+	TDX_BUILD_STATUS_CODE(TDX_GPA_RANGE_NOT_BLOCKED),		\
+	TDX_BUILD_STATUS_CODE(TDX_GPA_RANGE_ALREADY_BLOCKED),		\
+	TDX_BUILD_STATUS_CODE(TDX_TLB_TRACKING_NOT_DONE),		\
+	TDX_BUILD_STATUS_CODE(TDX_EPT_INVALID_PROMOTE_CONDITIONS),	\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_ALREADY_ACCEPTED),		\
+	TDX_BUILD_STATUS_CODE(TDX_PAGE_SIZE_MISMATCH)
+
 /*
  * TDG.VP.VMCALL Status Codes (returned in R10)
  */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e0f4a46649d7..d11cf87674f3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11970,6 +11970,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_tdx_seamcall_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_tdx_seamcall_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);