
[v9,5/6] KVM: x86: Untag address when LAM applicable

Message ID 20230606091842.13123-6-binbin.wu@linux.intel.com (mailing list archive)
State New, archived
Series Linear Address Masking (LAM) KVM Enabling

Commit Message

Binbin Wu June 6, 2023, 9:18 a.m. UTC
Untag addresses for 64-bit memory/MMIO operands in instruction emulation
and VMExit handlers when LAM is applicable.

For instruction emulation, untag the address in __linearize() before the
canonical check. LAM doesn't apply to addresses used for instruction
fetches or to those that specify the targets of jump and call instructions,
so use X86EMUL_F_SKIPLAM to skip LAM untagging for them.

For VMExit handlers related to 64-bit linear addresses:
- Cases that need address untagging:
  Operand(s) of VMX instructions and INVPCID.
  Operand(s) of SGX ENCLS.
- Cases LAM doesn't apply to:
  Operand of INVLPG.
  Linear address in the INVPCID descriptor (no change needed).
  Linear address in the INVVPID descriptor (confirmed, although not called
  out in the LAM spec; no change needed).
  BASEADDR specified in the SECS of ECREATE (no change needed).

Note:
LAM doesn't apply to writes to control registers or MSRs.
LAM masking applies before paging, so the faulting linear address in CR2
doesn't contain the metadata.
The guest linear address saved in the VMCS doesn't contain metadata.
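
For reference, the untag operation sign-extends the topmost non-metadata bit
into the metadata bits while preserving bit 63, so untagging never turns a
user access into a supervisor access or vice versa. A minimal sketch of that
math (an illustrative helper, not the series' vmx_untag_addr()):

	/*
	 * Illustrative only: untag a 64-bit linear address for LAM.
	 * lam_bit is 47 for LAM_U48 (or LAM_SUP without LA57), metadata in
	 * bits 62:48, and 56 for LAM_U57 (or LAM_SUP with LA57), metadata
	 * in bits 62:57.
	 */
	static u64 lam_untag(u64 la, int lam_bit)
	{
		u64 meta = (BIT_ULL(63) - 1) & ~(BIT_ULL(lam_bit + 1) - 1);

		/* Sign-extend lam_bit into bits 62:(lam_bit + 1); keep bit 63. */
		if (la & BIT_ULL(lam_bit))
			la |= meta;
		else
			la &= ~meta;
		return la;
	}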

Co-developed-by: Robert Hoo <robert.hu@linux.intel.com>
Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
---
 arch/x86/kvm/emulate.c     | 16 +++++++++++++---
 arch/x86/kvm/kvm_emulate.h |  2 ++
 arch/x86/kvm/vmx/nested.c  |  2 ++
 arch/x86/kvm/vmx/sgx.c     |  1 +
 arch/x86/kvm/x86.c         |  7 +++++++
 5 files changed, 25 insertions(+), 3 deletions(-)

Comments

Sean Christopherson June 28, 2023, 12:19 a.m. UTC | #1
On Tue, Jun 06, 2023, Binbin Wu wrote:
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index e89afc39e56f..c135adb26f1e 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -701,6 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>  	*max_size = 0;
>  	switch (mode) {
>  	case X86EMUL_MODE_PROT64:
> +		ctxt->ops->untag_addr(ctxt, &la, flags);
>  		*linear = la;

Ha!  Returning the untagged address does help:

		*linear = ctxt->ops->get_untagged_address(ctxt, la, flags);
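
Roughly, that would make the PROT64 case look like the below (sketch only,
with la also updated so the canonical check runs on the untagged address):

	case X86EMUL_MODE_PROT64:
		la = ctxt->ops->get_untagged_address(ctxt, la, flags);
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (!__is_canonical_address(la, va_bits))
			goto bad;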

>  		va_bits = ctxt_virt_addr_bits(ctxt);
>  		if (!__is_canonical_address(la, va_bits))
> @@ -771,8 +772,12 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>  
>  	if (ctxt->op_bytes != sizeof(unsigned long))
>  		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
> +	/*
> +	 * LAM doesn't apply to addresses that specify the targets of jump and
> +	 * call instructions.
> +	 */
>  	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
> -			 X86EMUL_F_FETCH);
> +			 X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM);

No need for anything LAM-specific here, just skip untagging for all FETCH accesses
(unlike LASS, which skips checks only for branch targets).
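
I.e. something like the below in the vendor implementation — a sketch only,
following the value-returning style suggested above, with the CR3/CR4-based
LAM-bit selection elided behind an illustrative helper:

	gva_t vmx_get_untagged_address(struct kvm_vcpu *vcpu, gva_t gva, u32 flags)
	{
		/* Skip untagging for all FETCH accesses, no LAM-specific flag needed. */
		if (flags & X86EMUL_F_FETCH)
			return gva;

		if (!is_64_bit_mode(vcpu))
			return gva;

		/* Pick the LAM bit from CR3.LAM_U48/U57 or CR4.LAM_SUP, then untag. */
		return lam_untag_from_ctrl_regs(vcpu, gva);	/* illustrative helper */
	}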

> -	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
> +	/* LAM doesn't apply to invlpg */

Comment unneeded if X86EMUL_F_INVLPG is added.
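
With such a flag (hypothetical here, it isn't defined in this version of the
series), em_invlpg() would simply pass it and the untag hook would bail out on
it the same way it does for fetches:

	/* em_invlpg(): */
	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
			 &linear, X86EMUL_F_INVLPG);

	/* ...and in the untag hook: */
	if (flags & (X86EMUL_F_FETCH | X86EMUL_F_INVLPG))
		return gva;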

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e89afc39e56f..c135adb26f1e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -701,6 +701,7 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		ctxt->ops->untag_addr(ctxt, &la, flags);
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
 		if (!__is_canonical_address(la, va_bits))
@@ -771,8 +772,12 @@  static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+	/*
+	 * LAM doesn't apply to addresses that specify the targets of jump and
+	 * call instructions.
+	 */
 	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
-			 X86EMUL_F_FETCH);
+			 X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -907,9 +912,11 @@  static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * __linearize is called with size 0 so that it does not do any
 	 * boundary check itself.  Instead, we use max_size to check
 	 * against op_size.
+	 *
+	 * LAM doesn't apply to addresses used for instruction fetches.
 	 */
 	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
-			 X86EMUL_F_FETCH);
+			 X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
@@ -3442,8 +3449,11 @@  static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 	ulong linear;
+	unsigned max_size;
 
-	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+	/* LAM doesn't apply to invlpg */
+	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
+		&linear, X86EMUL_F_SKIPLAM);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->ops->invlpg(ctxt, linear);
 	/* Disable writeback. */
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index c2091e24a6b9..3875ab175cd2 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -230,6 +230,8 @@  struct x86_emulate_ops {
 	int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
 	void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
 	int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
+	void (*untag_addr)(struct x86_emulate_ctxt *ctxt, gva_t *addr, u32 flags);
 };
 
 /* Type, address-of, and value of an instruction's operand. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 11b12a75ca91..6c8dab9999f2 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4981,6 +4981,7 @@  int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 		else
 			*ret = off;
 
+		vmx_untag_addr(vcpu, ret, 0);
 		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
 		 * non-canonical form. This is the only check on the memory
 		 * destination for long mode!
@@ -5798,6 +5799,7 @@  static int handle_invvpid(struct kvm_vcpu *vcpu)
 	vpid02 = nested_get_vpid02(vcpu);
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+		/* LAM doesn't apply to the address in descriptor of invvpid */
 		if (!operand.vpid ||
 		    is_noncanonical_address(operand.gla, vcpu))
 			return nested_vmx_fail(vcpu,
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 2261b684a7d4..b4faa94bace7 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -37,6 +37,7 @@  static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
 	if (!IS_ALIGNED(*gva, alignment)) {
 		fault = true;
 	} else if (likely(is_64_bit_mode(vcpu))) {
+		vmx_untag_addr(vcpu, gva, 0);
 		fault = is_noncanonical_address(*gva, vcpu);
 	} else {
 		*gva &= 0xffffffff;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 709fc920f378..ed2dca55573b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8296,6 +8296,11 @@  static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
 		kvm_vm_bugged(kvm);
 }
 
+static void emulator_untag_addr(struct x86_emulate_ctxt *ctxt, gva_t *addr, u32 flags)
+{
+	static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr, flags);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.vm_bugged           = emulator_vm_bugged,
 	.read_gpr            = emulator_read_gpr,
@@ -8341,6 +8346,7 @@  static const struct x86_emulate_ops emulate_ops = {
 	.leave_smm           = emulator_leave_smm,
 	.triple_fault        = emulator_triple_fault,
 	.set_xcr             = emulator_set_xcr,
+	.untag_addr          = emulator_untag_addr,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -13367,6 +13373,7 @@  int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 
 	switch (type) {
 	case INVPCID_TYPE_INDIV_ADDR:
+		/* LAM doesn't apply to the address in descriptor of invpcid */
 		if ((!pcid_enabled && (operand.pcid != 0)) ||
 		    is_noncanonical_address(operand.gla, vcpu)) {
 			kvm_inject_gp(vcpu, 0);