@@ -32,11 +32,30 @@ struct tdx_sys_info_cmr {
u64 cmr_size[32];
};
+/*
+ * Class "TD Control Structures": base sizes of the TDX guest per-VM
+ * (TDR, TDCS) and per-vCPU (TDVPS) control structures.  KVM uses these
+ * to allocate enough space for those structures.
+ * NOTE(review): units (bytes vs. pages) are defined by the TDX module
+ * ABI spec -- confirm against the "Intel TDX Module ABI" JSON/spec.
+ */
+struct tdx_sys_info_td_ctrl {
+ u16 tdr_base_size;
+ u16 tdcs_base_size;
+ u16 tdvps_base_size;
+};
+
+/*
+ * Class "TD Configurability": reports which features are configurable
+ * for TDX guests.  KVM uses these to properly configure TDX guests.
+ * NOTE(review): FIXED0/FIXED1 polarity (which bits must be 0 vs. 1 in
+ * TD ATTRIBUTES/XFAM) is per the TDX module spec -- confirm there.
+ */
+struct tdx_sys_info_td_conf {
+ u64 attributes_fixed0;
+ u64 attributes_fixed1;
+ u64 xfam_fixed0;
+ u64 xfam_fixed1;
+ u16 num_cpuid_config;
+ u16 max_vcpus_per_td;
+ /*
+ * Fixed-size 128-entry arrays: the TDX module team agreed not to
+ * grow NUM_CPUID_CONFIG beyond 128 without an opt in (see
+ * changelog); num_cpuid_config is range-checked at read time.
+ */
+ u64 cpuid_config_leaves[128];
+ u64 cpuid_config_values[128][2];
+};
+
struct tdx_sys_info {
struct tdx_sys_info_version version;
struct tdx_sys_info_features features;
struct tdx_sys_info_tdmr tdmr;
struct tdx_sys_info_cmr cmr;
+ struct tdx_sys_info_td_ctrl td_ctrl; /* TD control structure sizes */
+ struct tdx_sys_info_td_conf td_conf; /* TD configurability info */
};
#endif
@@ -76,6 +76,54 @@ static int get_tdx_sys_info_cmr(struct tdx_sys_info_cmr *sysinfo_cmr)
return ret;
}
+/*
+ * Read the "TD Control Structures" class of global metadata: base sizes
+ * of the TDR, TDCS and TDVPS control structures.
+ *
+ * Returns 0 on success or the first read_sys_metadata_field() error.
+ * Code is auto-generated (see changelog); the 0x98.. constants are TDX
+ * global metadata field IDs.
+ */
+static int get_tdx_sys_info_td_ctrl(struct tdx_sys_info_td_ctrl *sysinfo_td_ctrl)
+{
+ int ret = 0;
+ u64 val;
+
+ /* "!ret &&" skips the remaining reads after the first failure */
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000000, &val)))
+ sysinfo_td_ctrl->tdr_base_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000100, &val)))
+ sysinfo_td_ctrl->tdcs_base_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9800000100000200, &val)))
+ sysinfo_td_ctrl->tdvps_base_size = val;
+
+ return ret;
+}
+
+/*
+ * Read the "TD Configurability" class of global metadata: fixed bits of
+ * TD ATTRIBUTES and XFAM, max vCPUs per TD, and the directly
+ * configurable CPUID leaves/values.
+ *
+ * Returns 0 on success, a read_sys_metadata_field() error on failure,
+ * or -EINVAL if the module reports more CPUID configs than the
+ * fixed-size (128) arrays can hold -- a paranoid check against TDX
+ * module ABI breakage (see changelog).  Code is auto-generated; the
+ * 0x19../0x99.. constants are TDX global metadata field IDs.
+ */
+static int get_tdx_sys_info_td_conf(struct tdx_sys_info_td_conf *sysinfo_td_conf)
+{
+ int ret = 0;
+ u64 val;
+ int i, j;
+
+ /* "!ret &&" skips the remaining reads after the first failure */
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000000, &val)))
+ sysinfo_td_conf->attributes_fixed0 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000001, &val)))
+ sysinfo_td_conf->attributes_fixed1 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000002, &val)))
+ sysinfo_td_conf->xfam_fixed0 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x1900000300000003, &val)))
+ sysinfo_td_conf->xfam_fixed1 = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000100000004, &val)))
+ sysinfo_td_conf->num_cpuid_config = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000100000008, &val)))
+ sysinfo_td_conf->max_vcpus_per_td = val;
+ /*
+ * Bound-check NUM_CPUID_CONFIG before using it as a loop limit, to
+ * protect the kernel from module ABI breakage (see changelog).
+ * NOTE(review): if the read above failed, this tests a field this
+ * function never wrote -- assumes the caller zero-initialized the
+ * struct, and -EINVAL may mask the real error; confirm callers.
+ */
+ if (sysinfo_td_conf->num_cpuid_config > ARRAY_SIZE(sysinfo_td_conf->cpuid_config_leaves))
+ return -EINVAL;
+ /* after a failure, "!ret" turns the remaining iterations into no-ops */
+ for (i = 0; i < sysinfo_td_conf->num_cpuid_config; i++)
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000300000400 + i, &val)))
+ sysinfo_td_conf->cpuid_config_leaves[i] = val;
+ if (sysinfo_td_conf->num_cpuid_config > ARRAY_SIZE(sysinfo_td_conf->cpuid_config_values))
+ return -EINVAL;
+ /* two u64 values per CPUID config, hence the "i * 2 + j" field ID */
+ for (i = 0; i < sysinfo_td_conf->num_cpuid_config; i++)
+ for (j = 0; j < 2; j++)
+ if (!ret && !(ret = read_sys_metadata_field(0x9900000300000500 + i * 2 + j, &val)))
+ sysinfo_td_conf->cpuid_config_values[i][j] = val;
+
+ return ret;
+}
+
static int get_tdx_sys_info(struct tdx_sys_info *sysinfo)
{
int ret = 0;
@@ -84,6 +132,8 @@ static int get_tdx_sys_info(struct tdx_sys_info *sysinfo)
ret = ret ?: get_tdx_sys_info_features(&sysinfo->features);
ret = ret ?: get_tdx_sys_info_tdmr(&sysinfo->tdmr);
ret = ret ?: get_tdx_sys_info_cmr(&sysinfo->cmr);
+ ret = ret ?: get_tdx_sys_info_td_ctrl(&sysinfo->td_ctrl); /* TD control structure sizes */
+ ret = ret ?: get_tdx_sys_info_td_conf(&sysinfo->td_conf); /* TD configurability */
return ret;
}
KVM needs two classes of global metadata to create and run TDX guests: - "TD Control Structures" - "TD Configurability" The first class contains the sizes of TDX guest per-VM and per-vCPU control structures. KVM will need to use them to allocate enough space for those control structures. The second class contains info which reports things like which features are configurable to TDX guests. KVM will need to use them to properly configure TDX guests. Read them for KVM TDX to use. Basically, the code change is auto-generated by adding below to the script in [1]: "td_ctrl": [ "TDR_BASE_SIZE", "TDCS_BASE_SIZE", "TDVPS_BASE_SIZE", ], "td_conf": [ "ATTRIBUTES_FIXED0", "ATTRIBUTES_FIXED1", "XFAM_FIXED0", "XFAM_FIXED1", "NUM_CPUID_CONFIG", "MAX_VCPUS_PER_TD", "CPUID_CONFIG_LEAVES", "CPUID_CONFIG_VALUES", ], .. and re-running the script: #python tdx_global_metadata.py global_metadata.json \ tdx_global_metadata.h tdx_global_metadata.c .. but unfortunately with some tweaks: The "Intel TDX Module v1.5.09 ABI Definitions" JSON files[2], which describe the TDX module ABI to the kernel, were expected to maintain backward compatibility. However, it turns out there are plans to change the JSON per module release. Specifically, the maximum number of CPUID_CONFIGs, i.e., CPUID_CONFIG_{LEAVES|VALUES} is one of the fields expected to change. This is obviously problematic for the kernel, and needs to be addressed by the TDX Module team. Negotiations on clarifying ABI boundary in the spec for future models are ongoing. In the meantime, the TDX module team has agreed to not increase this specific field beyond 128 entries without an opt in. So for now just tweak the JSON to change "Num Fields" from 32 to 128 and generate a fixed-size (128) array for CPUID_CONFIG_{LEAVES|VALUES}. 
Also, due to all those ABI breakages (and module bugs), be paranoid by
generating additional checks to make sure NUM_CPUID_CONFIG will never
exceed the array size of CPUID_CONFIG_{LEAVES|VALUES} to protect the
kernel from the module breakages.  With those checks, detecting a
breakage will just result in module initialization failure.

Link: https://lore.kernel.org/762a50133300710771337398284567b299a86f67.camel@intel.com/ [1]
Link: https://cdrdv2.intel.com/v1/dl/getContent/795381 [2]
Signed-off-by: Kai Huang <kai.huang@intel.com>
---
v2 -> v2.1
 - Bump array size for CPUID_CONFIGs to 128
 - Add paranoid checks to protect against incorrect NUM_CPUID_CONFIG.
 - Update changelog accordingly.

Note: this is based on kvm-coco-queue which has v7 of TDX host metadata
series which has patches to read TDX module version and CMRs.  It will
have conflicts to resolve when rebasing to the v9 patches currently
queued in tip/x86/tdx.

uAPI breakout v2:
 - New patch
---
 arch/x86/include/asm/tdx_global_metadata.h  | 19 ++++++++
 arch/x86/virt/vmx/tdx/tdx_global_metadata.c | 50 +++++++++++++++++++++
 2 files changed, 69 insertions(+)