diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
@@ -701,6 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
+ ctxt->ops->untag_addr(ctxt, &la, flags);
*linear = la;
va_bits = ctxt_virt_addr_bits(ctxt);
if (!__is_canonical_address(la, va_bits))
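Note the ordering in __linearize(): the untag hook runs before the canonicality check, so a LAM-tagged pointer is first stripped to its canonical form and only then validated. Conceptually, untagging sign-extends the last significant bit (bit 47 for LAM48, bit 56 for LAM57) into the metadata bits while preserving bit 63. A minimal sketch under those assumptions; the real vendor hook additionally checks the CR3/CR4 enable bits and distinguishes user from supervisor pointers:

	/*
	 * Sketch only: recover the canonical form of a LAM-tagged pointer.
	 * Bit 63 is preserved so a supervisor pointer stays a supervisor
	 * pointer; the CR3.LAM_U48/LAM_U57 and CR4.LAM_SUP enable checks
	 * are omitted.
	 */
	static u64 lam_untag_sketch(u64 gva, bool lam57)
	{
		unsigned int sig_bit = lam57 ? 56 : 47;
		u64 canonical = (u64)((s64)(gva << (63 - sig_bit)) >> (63 - sig_bit));

		return (canonical & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
	}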
@@ -771,8 +772,12 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+ /*
+ * LAM doesn't apply to addresses that specify the targets of jump and
+ * call instructions.
+ */
rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
- X86EMUL_F_FETCH);
+ X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
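X86EMUL_F_SKIPLAM is consumed inside the vendor hook rather than at each call site, so callers only state that LAM must be bypassed, not how. The hook body is not part of this diff; a plausible shape, reusing the sketch above (illustrative only, not the actual vmx_untag_addr()):

	static void vmx_untag_addr(struct kvm_vcpu *vcpu, gva_t *gva, u32 flags)
	{
		/* Fetches, branch targets, the invlpg operand, etc. bypass LAM. */
		if (flags & X86EMUL_F_SKIPLAM)
			return;

		/* LAM only applies in 64-bit mode. */
		if (!is_64_bit_mode(vcpu))
			return;

		/* Illustration: handle only user pointers under CR3.LAM_U57. */
		if (kvm_read_cr3(vcpu) & BIT_ULL(62))
			*gva = lam_untag_sketch(*gva, true);
	}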
@@ -907,9 +912,11 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
+ *
+ * LAM doesn't apply to addresses used for instruction fetches.
*/
rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
- X86EMUL_F_FETCH);
+ X86EMUL_F_FETCH | X86EMUL_F_SKIPLAM);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@@ -3442,8 +3449,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
+ unsigned max_size;
- rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+ /* LAM doesn't apply to the address operand of invlpg */
+ rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
+ &linear, X86EMUL_F_SKIPLAM);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
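em_invlpg() has to switch from linearize() to __linearize() because only the latter takes a flags argument; presumably the wrapper looks roughly like the sketch below (based on the flags refactor earlier in this series), which is also why the otherwise unused max_size out-parameter now appears:

	static int linearize(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     unsigned size, bool write, ulong *linear)
	{
		unsigned max_size;

		return __linearize(ctxt, addr, &max_size, size, ctxt->mode,
				   linear, write ? X86EMUL_F_WRITE : 0);
	}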
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
@@ -230,6 +230,8 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
+ void (*untag_addr)(struct x86_emulate_ctxt *ctxt, gva_t *addr, u32 flags);
};
/* Type, address-of, and value of an instruction's operand. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
@@ -4981,6 +4981,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else
*ret = off;
+ vmx_untag_addr(vcpu, ret, 0);
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check on the memory
* destination for long mode!
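Again the untag precedes the non-canonical check: with LAM57 enabled, a tagged guest pointer such as 0x2a007fffdeadbeef (tag 0x15 in the metadata bits) untags back to 0x00007fffdeadbeef and passes the check, while a genuinely non-canonical address still faults. (Worked example, not taken from the patch itself.)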
@@ -5798,6 +5799,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+ /* LAM doesn't apply to the address in the descriptor of invvpid */
if (!operand.vpid ||
is_noncanonical_address(operand.gla, vcpu))
return nested_vmx_fail(vcpu,
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
@@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
if (!IS_ALIGNED(*gva, alignment)) {
fault = true;
} else if (likely(is_64_bit_mode(vcpu))) {
+ vmx_untag_addr(vcpu, gva, 0);
fault = is_noncanonical_address(*gva, vcpu);
} else {
*gva &= 0xffffffff;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -8296,6 +8296,11 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}
+static void emulator_untag_addr(struct x86_emulate_ctxt *ctxt, gva_t *addr, u32 flags)
+{
+ static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr, flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@@ -8341,6 +8346,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
+ .untag_addr = emulator_untag_addr,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -13367,6 +13373,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
switch (type) {
case INVPCID_TYPE_INDIV_ADDR:
+ /* LAM doesn't apply to the address in the descriptor of invpcid */
if ((!pcid_enabled && (operand.pcid != 0)) ||
is_noncanonical_address(operand.gla, vcpu)) {
kvm_inject_gp(vcpu, 0);