@@ -6,6 +6,7 @@
#include "capabilities.h"
#include "x86_ops.h"
#include "tdx.h"
+#include "x86.h"
#undef pr_fmt
#define pr_fmt(fmt) "tdx: " fmt
@@ -61,6 +62,11 @@ static __always_inline hpa_t set_hkid_to_hpa(hpa_t pa, u16 hkid)
return pa;
}
+/*
+ * True once the vCPU exists in the TDX module, i.e. once TDH.VP.CREATE
+ * has succeeded and the TDVPR page was marked added (tdx_vcpu_reset()).
+ */
+static inline bool is_td_vcpu_created(struct vcpu_tdx *tdx)
+{
+	return tdx->tdvpr.added;
+}
+
static inline bool is_td_created(struct kvm_tdx *kvm_tdx)
{
return kvm_tdx->tdr.added;
@@ -392,6 +398,135 @@ int tdx_vm_init(struct kvm *kvm)
return ret;
}
+/*
+ * Allocate the TDX-private pages for a vCPU: one TDVPR page plus an
+ * array of tdx_caps.tdvpx_nr_pages TDVPX pages.  The pages are only
+ * allocated here; they are donated to the TDX module later, from
+ * tdx_vcpu_reset() via TDH.VP.CREATE / TDH.VP.ADDCX.
+ *
+ * Returns 0 on success, -EINVAL if there is no in-kernel local APIC,
+ * -ENOMEM (or a tdx_alloc_td_page() error) on allocation failure.  On
+ * failure everything allocated so far is freed again.
+ */
+int tdx_vcpu_create(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_tdx *tdx = to_tdx(vcpu);
+	int ret, i;
+
+	/* TDX only supports x2APIC, which requires an in-kernel local APIC. */
+	if (!vcpu->arch.apic)
+		return -EINVAL;
+
+	/* Mark the guest FPU state confidential, i.e. not visible to KVM. */
+	fpstate_set_confidential(&vcpu->arch.guest_fpu);
+
+	ret = tdx_alloc_td_page(&tdx->tdvpr);
+	if (ret)
+		return ret;
+
+	tdx->tdvpx = kcalloc(tdx_caps.tdvpx_nr_pages, sizeof(*tdx->tdvpx),
+			     GFP_KERNEL_ACCOUNT);
+	if (!tdx->tdvpx) {
+		ret = -ENOMEM;
+		goto free_tdvpr;
+	}
+	for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++) {
+		ret = tdx_alloc_td_page(&tdx->tdvpx[i]);
+		if (ret)
+			goto free_tdvpx;
+	}
+
+	/*
+	 * NOTE(review): fixed 64-bit initial state; presumably mirrors the
+	 * entry state the TDX module enforces for TD vCPUs -- confirm
+	 * against the TDX module spec.
+	 */
+	vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
+
+	/* Treat every CR0/CR4 bit as guest-owned; KVM can't intercept them. */
+	vcpu->arch.cr0_guest_owned_bits = -1ul;
+	vcpu->arch.cr4_guest_owned_bits = -1ul;
+
+	/* Inherit the TD-wide TSC offset established at TD init. */
+	vcpu->arch.tsc_offset = to_kvm_tdx(vcpu->kvm)->tsc_offset;
+	vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
+	/* Guest state is inaccessible to KVM unless the TD is debuggable. */
+	vcpu->arch.guest_state_protected =
+		!(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTRIBUTE_DEBUG);
+
+	return 0;
+
+free_tdvpx:
+	/* @i points at the TDVPX page that failed allocation. */
+	for (--i; i >= 0; i--)
+		free_page(tdx->tdvpx[i].va);
+	kfree(tdx->tdvpx);
+free_tdvpr:
+	free_page(tdx->tdvpr.va);
+
+	return ret;
+}
+
+/*
+ * Release the vCPU's TDX pages allocated in tdx_vcpu_create().  Pages
+ * that were handed to the TDX module must go through
+ * tdx_reclaim_td_page() rather than being freed directly, and that is
+ * only possible once the TD's HKID has been released.
+ */
+void tdx_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_tdx *tdx = to_tdx(vcpu);
+	int i;
+
+	/* Can't reclaim or free pages if teardown failed. */
+	if (is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
+		return;
+
+	/*
+	 * tdvpx may be NULL if vCPU creation failed before (or while) the
+	 * array was allocated; don't dereference it in that case, and
+	 * clear it after freeing so a repeated call can't double-free.
+	 */
+	if (tdx->tdvpx) {
+		for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++)
+			tdx_reclaim_td_page(&tdx->tdvpx[i]);
+		kfree(tdx->tdvpx);
+		tdx->tdvpx = NULL;
+	}
+	tdx_reclaim_td_page(&tdx->tdvpr);
+}
+
+/*
+ * Create the vCPU in the TDX module -- TDH.VP.CREATE for the TDVPR page,
+ * then TDH.VP.ADDCX for each TDVPX page -- and force the APIC into
+ * x2APIC mode.  TDX doesn't support the INIT event, so this must only
+ * run for the initial RESET of a vCPU that hasn't been created yet.
+ * There is no error return; any failure marks the whole VM as bugged.
+ */
+void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+	struct vcpu_tdx *tdx = to_tdx(vcpu);
+	struct msr_data apic_base_msr;
+	u64 err;
+	int i;
+
+	/* TDX doesn't support INIT event. */
+	if (WARN_ON(init_event))
+		goto td_bugged;
+	if (WARN_ON(is_td_vcpu_created(tdx)))
+		goto td_bugged;
+
+	err = tdh_vp_create(kvm_tdx->tdr.pa, tdx->tdvpr.pa);
+	if (WARN_ON_ONCE(err)) {
+		pr_tdx_error(TDH_VP_CREATE, err, NULL);
+		goto td_bugged;
+	}
+	tdx_mark_td_page_added(&tdx->tdvpr);
+
+	for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++) {
+		err = tdh_vp_addcx(tdx->tdvpr.pa, tdx->tdvpx[i].pa);
+		if (WARN_ON_ONCE(err)) {
+			pr_tdx_error(TDH_VP_ADDCX, err, NULL);
+			goto td_bugged;
+		}
+		tdx_mark_td_page_added(&tdx->tdvpx[i]);
+	}
+
+	if (!vcpu->arch.cpuid_entries) {
+		/*
+		 * On cpu creation, cpuid entry is blank. Forcibly enable
+		 * X2APIC feature to allow X2APIC.
+		 */
+		struct kvm_cpuid_entry2 *e;
+
+		e = kvmalloc_array(1, sizeof(*e), GFP_KERNEL_ACCOUNT);
+		/*
+		 * kvmalloc_array() can fail; dereferencing a NULL @e would
+		 * oops the host.  Treat allocation failure like any other
+		 * unrecoverable error in this path.
+		 */
+		if (!e)
+			goto td_bugged;
+		*e = (struct kvm_cpuid_entry2) {
+			.function = 1, /* Features for X2APIC */
+			.index = 0,
+			.eax = 0,
+			.ebx = 0,
+			.ecx = 1ULL << 21, /* X2APIC */
+			.edx = 0,
+		};
+		vcpu->arch.cpuid_entries = e;
+		vcpu->arch.cpuid_nent = 1;
+	}
+	/* TDX only supports x2APIC; force the mode (see tdx_vcpu_create()). */
+	apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | LAPIC_MODE_X2APIC;
+	if (kvm_vcpu_is_reset_bsp(vcpu))
+		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+	apic_base_msr.host_initiated = true;
+	if (WARN_ON(kvm_set_apic_base(vcpu, &apic_base_msr)))
+		goto td_bugged;
+
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+	return;
+
+td_bugged:
+	vcpu->kvm->vm_bugged = true;
+}
+
int tdx_dev_ioctl(void __user *argp)
{
struct kvm_tdx_capabilities __user *user_caps;