
[v4,8/9] KVM: x86: emulation: Apply LAM when emulating data access

Message ID 20230209024022.3371768-9-robert.hu@linux.intel.com (mailing list archive)
State New, archived
Series Linear Address Masking (LAM) KVM Enabling

Commit Message

Robert Hoo Feb. 9, 2023, 2:40 a.m. UTC
When KVM emulation calculates a linear address (LA) for a data access,
apply LAM if the guest has LAM active at that moment, so that the
following canonical check can pass.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
---
 arch/x86/kvm/emulate.c |  6 ++++++
 arch/x86/kvm/x86.h     | 13 +++++++++++++
 2 files changed, 19 insertions(+)

Comments

Chao Gao Feb. 13, 2023, 3:53 a.m. UTC | #1
On Thu, Feb 09, 2023 at 10:40:21AM +0800, Robert Hoo wrote:
>When KVM emulation calculates a linear address (LA) for a data access,
>apply LAM if the guest has LAM active at that moment, so that the
>following canonical check can pass.

This sounds weird. Passing the canonical check isn't the goal. Emulating
the behavior of a LAM-capable processor on memory accesses is.
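
For the record, that behavior amounts to replacing the metadata bits with
a sign extension of bit 47 (LAM_U48) or bit 56 (LAM57/LAM_U57) before the
canonical check, leaving bit 63 alone. A rough, hypothetical sketch of the
untagging rule (not the series' actual kvm_untagged_addr()):

#include <linux/bits.h>
#include <linux/bitops.h>

/*
 * Hypothetical illustration only: strip LAM metadata by sign-extending
 * from the last significant address bit (47 or 56) while preserving
 * bit 63 of the original pointer.
 */
static inline u64 lam_strip_metadata(u64 la, unsigned int lam_bit)
{
	return (sign_extend64(la, lam_bit) & ~BIT_ULL(63)) |
	       (la & BIT_ULL(63));
}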

>
>Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
>---
> arch/x86/kvm/emulate.c |  6 ++++++
> arch/x86/kvm/x86.h     | 13 +++++++++++++
> 2 files changed, 19 insertions(+)
>
>diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
>index 5cc3efa0e21c..d52037151133 100644
>--- a/arch/x86/kvm/emulate.c
>+++ b/arch/x86/kvm/emulate.c
>@@ -700,6 +700,12 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
> 	*max_size = 0;
> 	switch (mode) {
> 	case X86EMUL_MODE_PROT64:
>+		/*
>+		 * LAM applies only on data access
>+		 */

one-line comments look like /* Bla bla bla */
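e.g., the hunk's comment condensed to one line:

	/* LAM applies only to data accesses. */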

>+		if (!fetch && is_lam_active(ctxt->vcpu))
>+			la = kvm_untagged_addr(la, ctxt->vcpu);
>+
> 		*linear = la;
> 		va_bits = ctxt_virt_addr_bits(ctxt);
> 		if (!__is_canonical_address(la, va_bits))
>diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
>index 7228895d4a6f..9397e9f4e061 100644
>--- a/arch/x86/kvm/x86.h
>+++ b/arch/x86/kvm/x86.h
>@@ -135,6 +135,19 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
> #endif
> }
> 
>+#ifdef CONFIG_X86_64
>+static inline bool is_lam_active(struct kvm_vcpu *vcpu)

Drop this function because kvm_untagged_addr() already does these checks
(and taking user/supervisor pointers into consideration).
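
Assuming kvm_untagged_addr() simply returns the address unchanged when
LAM is not active, the hunk could then shrink to roughly the following
(a sketch, not the actual respin):

	case X86EMUL_MODE_PROT64:
		/* LAM applies only to data accesses. */
		if (!fetch)
			la = kvm_untagged_addr(la, ctxt->vcpu);
		*linear = la;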

>+{
>+	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57) ||
>+	       kvm_read_cr4_bits(vcpu, X86_CR4_LAM_SUP);
>+}
>+#else
>+static inline bool is_lam_active(struct kvm_vcpu *vcpu)
>+{
>+	return false;
>+}
>+#endif
>+
> static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
> {
> 	int cs_db, cs_l;
>-- 
>2.31.1
>
Robert Hoo Feb. 14, 2023, 5:38 a.m. UTC | #2
On Mon, 2023-02-13 at 11:53 +0800, Chao Gao wrote:
> On Thu, Feb 09, 2023 at 10:40:21AM +0800, Robert Hoo wrote:
> > When KVM emulation calculates a linear address (LA) for a data
> > access, apply LAM if the guest has LAM active at that moment, so
> > that the following canonical check can pass.
> 
> This sounds weird. Passing the canonical check isn't the goal.
> Emulating the behavior of a LAM-capable processor on memory accesses
> is.
> 
Emm, how about describing it like this:
In KVM emulation, apply the LAM rule to the calculated linear address
(data access only), i.e. clear any metadata bits in the LA before doing
the canonical check.

> > +#ifdef CONFIG_X86_64
> > +static inline bool is_lam_active(struct kvm_vcpu *vcpu)
> 
> Drop this function because kvm_untagged_addr() already does these
> checks
> (and taking user/supervisor pointers into consideration).
> 
OK

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..d52037151133 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -700,6 +700,12 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
+		/*
+		 * LAM applies only on data access
+		 */
+		if (!fetch && is_lam_active(ctxt->vcpu))
+			la = kvm_untagged_addr(la, ctxt->vcpu);
+
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
 		if (!__is_canonical_address(la, va_bits))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7228895d4a6f..9397e9f4e061 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -135,6 +135,19 @@  static inline int is_long_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+static inline bool is_lam_active(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57) ||
+	       kvm_read_cr4_bits(vcpu, X86_CR4_LAM_SUP);
+}
+#else
+static inline bool is_lam_active(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+#endif
+
 static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
 {
 	int cs_db, cs_l;