[v1,5/6] KVM: x86: LASS protection on KVM emulation

Message ID 20230601142309.6307-6-guang.zeng@intel.com (mailing list archive)
State New, archived
Series LASS KVM virtualization support

Commit Message

Zeng Guang June 1, 2023, 2:23 p.m. UTC
Perform the LASS violation check for instructions emulated by KVM. Note that
for instructions executed directly in the guest, hardware performs the check.

Not all instruction emulation leads to accesses to guest linear addresses,
because 1) some instructions, such as CPUID and RDMSR, don't take memory
operands, and 2) instruction fetch is, in most cases, already done inside
the guest.

Four cases in which KVM uses a linear address to access guest memory:
- KVM emulates instruction fetches or data accesses
- KVM emulates implicit data access to a system data structure
- VMX instruction emulation
- SGX ENCLS instruction emulation

The LASS violation check applies to these linear addresses so as to enforce
the same mode-based protections that hardware enforces.

As exceptions, the target memory addresses used when emulating invlpg,
branch, and call instructions don't require the LASS violation check.

Signed-off-by: Zeng Guang <guang.zeng@intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
---
 arch/x86/kvm/emulate.c    | 30 ++++++++++++++++++++++++++++--
 arch/x86/kvm/vmx/nested.c |  3 +++
 arch/x86/kvm/vmx/sgx.c    |  4 ++++
 3 files changed, 35 insertions(+), 2 deletions(-)
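
For context, vmx_check_lass(), called in the hunks below, is introduced by an
earlier patch in this series and is not part of this diff. The following is a
minimal sketch of what it could look like, based on the LASS architecture
(bit 63 of a 64-bit linear address selects the user (0) or supervisor (1)
address space); the exact logic, including the SMAP-style handling this
sketch omits, is an assumption rather than the series' actual code:

/*
 * Hypothetical sketch of the LASS check this patch relies on; the real
 * vmx_check_lass() lives in an earlier patch of this series.
 */
static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la,
			   u32 flags)
{
	bool user_mode, user_as;

	if ((flags & X86EMUL_F_SKIPLASS) ||
	    !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS) || !is_long_mode(vcpu))
		return false;

	/* Bit 63 of the linear address selects the address space. */
	user_as = !(la >> 63);

	/* Implicit accesses to system data structures are supervisor-mode. */
	user_mode = !(access & PFERR_IMPLICIT_ACCESS) &&
		    vmx_get_cpl(vcpu) == 3;

	/*
	 * User-mode accesses to the supervisor address space violate LASS,
	 * as do supervisor-mode instruction fetches from the user address
	 * space.  Supervisor-mode data accesses to the user address space
	 * follow CR4.SMAP/RFLAGS.AC rules, omitted in this sketch.
	 */
	if (user_mode)
		return !user_as;

	return user_as && (access & PFERR_FETCH_MASK);
}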

Comments

Binbin Wu June 6, 2023, 4:20 a.m. UTC | #1
On 6/1/2023 10:23 PM, Zeng Guang wrote:
> Perform the LASS violation check for instructions emulated by KVM. Note
> that for instructions executed directly in the guest, hardware performs
> the check.
>
> Not all instruction emulation leads to accesses to guest linear addresses,
> because 1) some instructions, such as CPUID and RDMSR, don't take memory
> operands, and 2) instruction fetch is, in most cases, already done inside
> the guest.
>
> Four cases in which KVM uses a linear address to access guest memory:
> - KVM emulates instruction fetches or data accesses
> - KVM emulates implicit data access to a system data structure
> - VMX instruction emulation
> - SGX ENCLS instruction emulation
>
> The LASS violation check applies to these linear addresses so as to
> enforce the same mode-based protections that hardware enforces.
>
> As exceptions, the target memory addresses used when emulating invlpg,
> branch, and call instructions don't require the LASS violation check.
I think LASS doesn't apply to the target addresses in the descriptors of
INVPCID and INVVPID. Although no code change is needed, IMHO it's better to
describe this in the changelog and/or comments.
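
To illustrate the point: the descriptor itself is read through an ordinary,
LASS-checked memory operand, but the address carried inside it only names a
TLB entry to invalidate and is never dereferenced. A sketch of the INVVPID
descriptor (layout per the SDM; the comments are illustrative, not from this
patch):

struct {
	u64 vpid;	/* VPID in bits 15:0; upper bits must be zero */
	u64 gla;	/* guest linear address to invalidate; only a hint
			 * for the TLB flush, never dereferenced, so no
			 * LASS check applies to it */
} operand;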

>
> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
> Tested-by: Xuelian Guo <xuelian.guo@intel.com>
> ---
>   arch/x86/kvm/emulate.c    | 30 ++++++++++++++++++++++++++++--
>   arch/x86/kvm/vmx/nested.c |  3 +++
>   arch/x86/kvm/vmx/sgx.c    |  4 ++++
>   3 files changed, 35 insertions(+), 2 deletions(-)
>
[...]
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index e35cf0bd0df9..bb1c3fa13c13 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4986,6 +4986,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
>   		 * destination for long mode!
>   		 */
>   		exn = is_noncanonical_address(*ret, vcpu);
> +
> +		if (!exn)
> +			exn = vmx_check_lass(vcpu, 0, *ret, 0);
This can be simplified by using a logical OR:

exn = is_noncanonical_address(*ret, vcpu) || vmx_check_lass(vcpu, 0, *ret, 0);

>   	} else {
>   		/*
>   		 * When not in long mode, the virtual/linear address is
> diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
> index 2261b684a7d4..3825275827eb 100644
> --- a/arch/x86/kvm/vmx/sgx.c
> +++ b/arch/x86/kvm/vmx/sgx.c
> @@ -46,6 +46,10 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
>   			((s.base != 0 || s.limit != 0xffffffff) &&
>   			(((u64)*gva + size - 1) > s.limit + 1));
>   	}
> +
> +	if (!fault && is_long_mode(vcpu))
> +		fault = vmx_check_lass(vcpu, 0, *gva, 0);
> +
>   	if (fault)
>   		kvm_inject_gp(vcpu, 0);
>   	return fault ? -EINVAL : 0;

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9508836e8a35..ed5191fa2079 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -698,6 +698,7 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	u8  va_bits;
 	bool fetch = !!(flags & X86EMUL_F_FETCH);
 	bool write = !!(flags & X86EMUL_F_WRITE);
+	u64 access = fetch ? PFERR_FETCH_MASK : 0;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
@@ -743,6 +744,10 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
+
+	if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
+		goto bad;
+
 	if (la & (insn_alignment(ctxt, size) - 1))
 		return emulate_gp(ctxt, 0);
 	return X86EMUL_CONTINUE;
@@ -774,7 +779,11 @@  static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 	unsigned max_size;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = dst };
-	u32 flags = X86EMUL_F_FETCH;
+	/*
+	 * LASS doesn't apply to addresses that specify the targets of jump and
+	 * call instructions.
+	 */
+	u32 flags = X86EMUL_F_FETCH | X86EMUL_F_SKIPLASS;
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
@@ -853,6 +862,13 @@  static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 			      void *data, unsigned size)
 {
+	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+		ctxt->exception.vector = GP_VECTOR;
+		ctxt->exception.error_code = 0;
+		ctxt->exception.error_code_valid = true;
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
@@ -860,6 +876,13 @@  static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 			       ulong linear, void *data,
 			       unsigned int size)
 {
+	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+		ctxt->exception.vector = GP_VECTOR;
+		ctxt->exception.error_code = 0;
+		ctxt->exception.error_code_valid = true;
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
@@ -3448,8 +3471,11 @@  static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 	ulong linear;
+	unsigned max_size;
 
-	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+	/* LASS doesn't apply to the memory address for invlpg */
+	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1,
+			 X86EMUL_F_SKIPLASS, ctxt->mode, &linear);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->ops->invlpg(ctxt, linear);
 	/* Disable writeback. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e35cf0bd0df9..bb1c3fa13c13 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4986,6 +4986,9 @@  int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 		 * destination for long mode!
 		 */
 		exn = is_noncanonical_address(*ret, vcpu);
+
+		if (!exn)
+			exn = vmx_check_lass(vcpu, 0, *ret, 0);
 	} else {
 		/*
 		 * When not in long mode, the virtual/linear address is
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 2261b684a7d4..3825275827eb 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -46,6 +46,10 @@  static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
 			((s.base != 0 || s.limit != 0xffffffff) &&
 			(((u64)*gva + size - 1) > s.limit + 1));
 	}
+
+	if (!fault && is_long_mode(vcpu))
+		fault = vmx_check_lass(vcpu, 0, *gva, 0);
+
 	if (fault)
 		kvm_inject_gp(vcpu, 0);
 	return fault ? -EINVAL : 0;
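
For context, the ctxt->ops->check_lass() hook invoked from the emulate.c
hunks above is wired up outside this diff, by an earlier patch in the series.
A minimal sketch of what that glue in arch/x86/kvm/x86.c could look like,
assuming it simply forwards to the vendor implementation (the names below are
assumptions, not part of this patch):

/*
 * Hypothetical emulator hook forwarding the LASS check to the vendor
 * callback (vmx_check_lass() on VMX); added elsewhere in this series.
 */
static bool emulator_check_lass(struct x86_emulate_ctxt *ctxt, u64 access,
				u64 la, u32 flags)
{
	return static_call(kvm_x86_check_lass)(emul_to_vcpu(ctxt), access,
					       la, flags);
}

static const struct x86_emulate_ops emulate_ops = {
	/* ... existing callbacks elided ... */
	.check_lass = emulator_check_lass,
};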