[v14,08/13] KVM: VMX: Add a synthetic MSR to allow userspace VMM to access GUEST_SSP

Message ID 20201106011637.14289-9-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Introduce support for guest CET feature

Commit Message

Yang, Weijiang Nov. 6, 2020, 1:16 a.m. UTC
Introduce a host-only synthetic MSR, MSR_KVM_GUEST_SSP so that the VMM
can read/write the guest's SSP, e.g. to migrate CET state.  Use a
synthetic MSR, e.g. as opposed to a VCPU_REG_, as GUEST_SSP is subject
to the same consistency checks as the PL*_SSP MSRs, i.e. can share code.

Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/include/uapi/asm/kvm_para.h |  1 +
 arch/x86/kvm/vmx/vmx.c               | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)
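
As an illustration of the intended host-side usage (this sketch is not part of the patch), a userspace VMM could save and restore the new synthetic MSR through the existing KVM_GET_MSRS/KVM_SET_MSRS ioctls during migration. The helper names and error handling below are placeholders, vcpu_fd is assumed to be an already created KVM vCPU file descriptor, and MSR_KVM_GUEST_SSP is defined locally in case the installed kvm_para.h does not yet carry this series:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef MSR_KVM_GUEST_SSP
#define MSR_KVM_GUEST_SSP	0x4b564d08
#endif

/* Source side: read the guest's SSP with KVM_GET_MSRS. */
static uint64_t save_guest_ssp(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = MSR_KVM_GUEST_SSP;

	/* KVM_GET_MSRS returns the number of MSRs actually read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		return 0;	/* error handling elided for brevity */

	return msrs.entry.data;
}

/* Destination side: restore the guest's SSP with KVM_SET_MSRS. */
static int restore_guest_ssp(int vcpu_fd, uint64_t ssp)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = MSR_KVM_GUEST_SSP;
	msrs.entry.data = ssp;

	return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
}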

Comments

Paolo Bonzini Jan. 28, 2021, 5:41 p.m. UTC | #1
On 06/11/20 02:16, Yang Weijiang wrote:
> Introduce a host-only synthetic MSR, MSR_KVM_GUEST_SSP so that the VMM
> can read/write the guest's SSP, e.g. to migrate CET state.  Use a
> synthetic MSR, e.g. as opposed to a VCPU_REG_, as GUEST_SSP is subject
> to the same consistency checks as the PL*_SSP MSRs, i.e. can share code.
> 
> Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>   arch/x86/include/uapi/asm/kvm_para.h |  1 +
>   arch/x86/kvm/vmx/vmx.c               | 14 ++++++++++++--
>   2 files changed, 13 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
> index 812e9b4c1114..5203dc084125 100644
> --- a/arch/x86/include/uapi/asm/kvm_para.h
> +++ b/arch/x86/include/uapi/asm/kvm_para.h
> @@ -53,6 +53,7 @@
>   #define MSR_KVM_POLL_CONTROL	0x4b564d05
>   #define MSR_KVM_ASYNC_PF_INT	0x4b564d06
>   #define MSR_KVM_ASYNC_PF_ACK	0x4b564d07
> +#define MSR_KVM_GUEST_SSP	0x4b564d08
>   
>   struct kvm_steal_time {
>   	__u64 steal;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index dd78d3a79e79..28ba8414a7a3 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1817,7 +1817,8 @@ static bool cet_is_ssp_msr_accessible(struct kvm_vcpu *vcpu,
>   	if (msr->host_initiated)
>   		return true;
>   
> -	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
> +	    msr->index == MSR_KVM_GUEST_SSP)
>   		return false;
>   
>   	if (msr->index == MSR_IA32_INT_SSP_TAB)
> @@ -1995,6 +1996,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   			return 1;
>   		msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
>   		break;
> +	case MSR_KVM_GUEST_SSP:
> +		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
> +			return 1;
> +		msr_info->data = vmcs_readl(GUEST_SSP);
> +		break;
>   	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>   		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
>   			return 1;
> @@ -2287,12 +2293,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   			return 1;
>   		vmcs_writel(GUEST_INTR_SSP_TABLE, data);
>   		break;
> +	case MSR_KVM_GUEST_SSP:
>   	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
>   		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
>   			return 1;
>   		if ((data & GENMASK(2, 0)) || is_noncanonical_address(data, vcpu))
>   			return 1;
> -		vmx_set_xsave_msr(msr_info);
> +		if (msr_index == MSR_KVM_GUEST_SSP)
> +			vmcs_writel(GUEST_SSP, data);
> +		else
> +			vmx_set_xsave_msr(msr_info);
>   		break;
>   	case MSR_TSC_AUX:
>   		if (!msr_info->host_initiated &&
> 

Better make this fail if !msr_info->host_initiated.

Paolo
Paolo Bonzini Jan. 28, 2021, 5:42 p.m. UTC | #2
On 06/11/20 02:16, Yang Weijiang wrote:
> [...]
> @@ -1817,7 +1817,8 @@ static bool cet_is_ssp_msr_accessible(struct kvm_vcpu *vcpu,
>   	if (msr->host_initiated)
>   		return true;
>   
> -	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
> +	    msr->index == MSR_KVM_GUEST_SSP)
>   		return false;
> [...]

Doh, I misread the change in cet_is_ssp_msr_accessible, sorry.
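
In other words, the hunk quoted above already makes the synthetic MSR host-only: cet_is_ssp_msr_accessible() returns true for host-initiated accesses and now rejects MSR_KVM_GUEST_SSP otherwise, so a guest RDMSR/WRMSR of it faults. A self-contained restatement of that effective check (illustrative name and signature, not KVM code):

#include <stdbool.h>

/*
 * How the patched cet_is_ssp_msr_accessible() behaves for
 * MSR_KVM_GUEST_SSP, reduced to the one input that matters.
 */
static bool guest_ssp_msr_accessible(bool host_initiated)
{
	/* Accesses from the userspace VMM are always allowed. */
	if (host_initiated)
		return true;

	/* Guest RDMSR/WRMSR of the synthetic MSR always faults. */
	return false;
}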

Patch

diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 812e9b4c1114..5203dc084125 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -53,6 +53,7 @@ 
 #define MSR_KVM_POLL_CONTROL	0x4b564d05
 #define MSR_KVM_ASYNC_PF_INT	0x4b564d06
 #define MSR_KVM_ASYNC_PF_ACK	0x4b564d07
+#define MSR_KVM_GUEST_SSP	0x4b564d08
 
 struct kvm_steal_time {
 	__u64 steal;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index dd78d3a79e79..28ba8414a7a3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1817,7 +1817,8 @@ static bool cet_is_ssp_msr_accessible(struct kvm_vcpu *vcpu,
 	if (msr->host_initiated)
 		return true;
 
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
+	    msr->index == MSR_KVM_GUEST_SSP)
 		return false;
 
 	if (msr->index == MSR_IA32_INT_SSP_TAB)
@@ -1995,6 +1996,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
 		break;
+	case MSR_KVM_GUEST_SSP:
+		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = vmcs_readl(GUEST_SSP);
+		break;
 	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
 		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
 			return 1;
@@ -2287,12 +2293,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		vmcs_writel(GUEST_INTR_SSP_TABLE, data);
 		break;
+	case MSR_KVM_GUEST_SSP:
 	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
 		if (!cet_is_ssp_msr_accessible(vcpu, msr_info))
 			return 1;
 		if ((data & GENMASK(2, 0)) || is_noncanonical_address(data, vcpu))
 			return 1;
-		vmx_set_xsave_msr(msr_info);
+		if (msr_index == MSR_KVM_GUEST_SSP)
+			vmcs_writel(GUEST_SSP, data);
+		else
+			vmx_set_xsave_msr(msr_info);
 		break;
 	case MSR_TSC_AUX:
 		if (!msr_info->host_initiated &&
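
For reference, the write-time sanity check that vmx_set_msr() shares between MSR_KVM_GUEST_SSP and the PL*_SSP MSRs, (data & GENMASK(2, 0)) || is_noncanonical_address(data, vcpu), can be restated as a small standalone helper. The helper name and the fixed 48-bit virtual address width below are assumptions for the sketch; KVM derives the width from the vCPU when it evaluates is_noncanonical_address():

#include <stdbool.h>
#include <stdint.h>

/*
 * Standalone restatement of the check applied on writes: bits 2:0 of
 * the new SSP value must be clear and the address must be canonical.
 */
static bool ssp_value_valid(uint64_t data)
{
	int64_t sign_extended;

	/* Equivalent of (data & GENMASK(2, 0)) in the patch. */
	if (data & 0x7)
		return false;

	/* Canonical for a 48-bit VA: bits 63:47 must all equal bit 47. */
	sign_extended = (int64_t)(data << 16) >> 16;
	return (uint64_t)sign_extended == data;
}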