x86/sev: Fix position dependent variable references in startup code

Message ID 20240203125306.1887331-2-ardb+git@google.com (mailing list archive)
State New, archived
Series x86/sev: Fix position dependent variable references in startup code

Commit Message

Ard Biesheuvel Feb. 3, 2024, 12:53 p.m. UTC
From: Ard Biesheuvel <ardb@kernel.org>

The early startup code executes from a 1:1 mapping of memory, which
differs from the mapping that the code was linked and/or relocated to
run at. The latter mapping is not active yet at this point, and so
symbol references that rely on it will fault. Given that the core kernel
is built without -fPIC, symbol references are typically emitted as
absolute, and so any such references occurring in the early startup code
will therefore crash the kernel.

While an attempt was made to work around this for the early SEV/SME
startup code, by forcing RIP-relative addressing for certain global
SEV/SME variables via inline assembly (see snp_cpuid_get_table() for
example), RIP-relative addressing must be pervasively enforced for
SEV/SME global variables when accessed prior to page table fixups.
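
For reference, the existing per-variable workaround looks roughly like
this (a sketch of snp_cpuid_get_table() in sev-shared.c, reproduced from
memory for illustration):

	static const struct snp_cpuid_table *snp_cpuid_get_table(void)
	{
		void *ptr;

		/* Force a RIP-relative LEA so the load works from the 1:1 map */
		asm ("lea cpuid_table_copy(%%rip), %0"
		     : "=r" (ptr)
		     : "p" (&cpuid_table_copy));

		return ptr;
	}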

__startup_64() already handles this issue for select non-SEV/SME global
variables using fixup_pointer(), which adjusts the pointer relative to a
`physaddr` argument. To avoid having to pass around this `physaddr`
argument across all functions needing to apply pointer fixups, this
patch introduces the macro RIP_RELATIVE_REF(), which generates a
RIP-relative reference to a given global variable. The macro is used
where necessary to force RIP-relative accesses to global variables.
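
For context, fixup_pointer() adjusts a pointer manually against the
physical load address; its shape in head64.c is roughly:

	/* Translate a link-time address to its location in the 1:1 map */
	static void *fixup_pointer(void *ptr, unsigned long physaddr)
	{
		return ptr - (void *)_text + (void *)physaddr;
	}

With the new macro, an access that might otherwise be emitted as an
absolute reference, e.g.:

	sme_me_mask = me_mask;

is written as:

	RIP_RELATIVE_REF(sme_me_mask) = me_mask;

which is guaranteed to compile to a RIP-relative addressing mode.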

For backporting purposes, this patch makes no attempt at cleaning up
other occurrences of this pattern, involving either inline asm or
fixup_pointer(). Those will be addressed by subsequent patches.

Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com/
Co-developed-by: Kevin Loughlin <kevinloughlin@google.com>
Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/coco/core.c               |  7 +----
 arch/x86/include/asm/asm.h         | 13 ++++++++++
 arch/x86/include/asm/coco.h        |  8 +++++-
 arch/x86/include/asm/mem_encrypt.h | 13 ++++++----
 arch/x86/kernel/sev-shared.c       | 12 ++++-----
 arch/x86/kernel/sev.c              |  4 +--
 arch/x86/mm/mem_encrypt_identity.c | 27 +++++++++-----------
 7 files changed, 49 insertions(+), 35 deletions(-)


base-commit: f9e6f00d93d34f60f90b42c24e80194b11a72bb2

Comments

Borislav Petkov Feb. 6, 2024, 11:07 a.m. UTC | #1
On Sat, Feb 03, 2024 at 01:53:06PM +0100, Ard Biesheuvel wrote:
>  arch/x86/coco/core.c               |  7 +----
>  arch/x86/include/asm/asm.h         | 13 ++++++++++
>  arch/x86/include/asm/coco.h        |  8 +++++-
>  arch/x86/include/asm/mem_encrypt.h | 13 ++++++----
>  arch/x86/kernel/sev-shared.c       | 12 ++++-----
>  arch/x86/kernel/sev.c              |  4 +--
>  arch/x86/mm/mem_encrypt_identity.c | 27 +++++++++-----------
>  7 files changed, 49 insertions(+), 35 deletions(-)

Not bad - some touchups on top, like calling it "rip_rel_ref" everywhere
(other code shortens "rIP-relative reference" the same way) and making
the asm wrapper __always_inline.

Thx.

---

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 41b408b3dfb6..ca8eed1d496a 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -115,14 +115,15 @@
 
 #ifndef __ASSEMBLY__
 #ifndef __pic__
-static inline __pure void *rip_relative_ptr(void *p)
+static __always_inline __pure void *rip_rel_ptr(void *p)
 {
 	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
+
 	return p;
 }
-#define RIP_RELATIVE_REF(var)	(*(typeof(&(var)))rip_relative_ptr(&(var)))
+#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))
 #else
-#define RIP_RELATIVE_REF(var)	(var)
+#define RIP_REL_REF(var)	(var)
 #endif
 #endif
 
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index d6865e0f7587..21940ef8d290 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -17,7 +17,7 @@ extern u64 cc_mask;
 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
 static inline void cc_set_mask(u64 mask)
 {
-	RIP_RELATIVE_REF(cc_mask) = mask;
+	RIP_REL_REF(cc_mask) = mask;
 }
 
 u64 cc_mkenc(u64 val);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 6fa2ba58ed3f..b31eb9fd5954 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -61,7 +61,7 @@ void __init sev_es_init_vc_handling(void);
 
 static inline u64 sme_get_me_mask(void)
 {
-	return RIP_RELATIVE_REF(sme_me_mask);
+	return RIP_REL_REF(sme_me_mask);
 }
 
 #define __bss_decrypted __section(".bss..decrypted")
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index a54711300d0b..a200bd72fadc 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -562,9 +562,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
 		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
 
 		/* Skip post-processing for out-of-range zero leafs. */
-		if (!(leaf->fn <= RIP_RELATIVE_REF(cpuid_std_range_max) ||
-		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_hyp_range_max)) ||
-		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_ext_range_max))))
+		if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
+		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
+		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
 			return 0;
 	}
 
@@ -1074,11 +1074,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
 
 		if (fn->eax_in == 0x0)
-			RIP_RELATIVE_REF(cpuid_std_range_max) = fn->eax;
+			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
 		else if (fn->eax_in == 0x40000000)
-			RIP_RELATIVE_REF(cpuid_hyp_range_max) = fn->eax;
+			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
 		else if (fn->eax_in == 0x80000000)
-			RIP_RELATIVE_REF(cpuid_ext_range_max) = fn->eax;
+			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
 	}
 }
 
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 1cf348e19556..1ef7ae806a01 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -748,7 +748,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	 /*
@@ -767,7 +767,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index d8d14133b654..0166ab1780cc 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -305,7 +305,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
 	 * function.
 	 */
 	if (!sme_get_me_mask() ||
-	    RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
 		return;
 
 	/*
@@ -542,7 +542,7 @@ void __init sme_enable(struct boot_params *bp)
 	me_mask = 1UL << (ebx & 0x3f);
 
 	/* Check the SEV MSR whether SEV or SME is enabled */
-	RIP_RELATIVE_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
 	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
 	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
@@ -595,7 +595,7 @@ void __init sme_enable(struct boot_params *bp)
 		return;
 
 out:
-	RIP_RELATIVE_REF(sme_me_mask) = me_mask;
+	RIP_REL_REF(sme_me_mask) = me_mask;
 	physical_mask &= ~me_mask;
 	cc_vendor = CC_VENDOR_AMD;
 	cc_set_mask(me_mask);
Ard Biesheuvel Feb. 6, 2024, 11:19 a.m. UTC | #2
On Tue, 6 Feb 2024 at 11:07, Borislav Petkov <bp@alien8.de> wrote:
>
> On Sat, Feb 03, 2024 at 01:53:06PM +0100, Ard Biesheuvel wrote:
> >  arch/x86/coco/core.c               |  7 +----
> >  arch/x86/include/asm/asm.h         | 13 ++++++++++
> >  arch/x86/include/asm/coco.h        |  8 +++++-
> >  arch/x86/include/asm/mem_encrypt.h | 13 ++++++----
> >  arch/x86/kernel/sev-shared.c       | 12 ++++-----
> >  arch/x86/kernel/sev.c              |  4 +--
> >  arch/x86/mm/mem_encrypt_identity.c | 27 +++++++++-----------
> >  7 files changed, 49 insertions(+), 35 deletions(-)
>
> Not bad - some touchups on top, like calling it "rip_rel_ref" everywhere
> (other code shortens "rIP-relative reference" the same way) and making
> the asm wrapper __always_inline.
>

Looks good to me - thanks.
Borislav Petkov Feb. 6, 2024, 2 p.m. UTC | #3
On Tue, Feb 06, 2024 at 11:19:06AM +0000, Ard Biesheuvel wrote:
> Looks good to me - thanks.

Thanks.

Now, next question: I'm presuming we want this in stable?

If so, which one?

And if there are conflicts during backporting over there, would you guys
address them and test the backports?

I'm presuming there's only a subset of stable kernels which you care
about. If so, we probably don't want to backport it all the way back to
5.19... but mark it for a later kernel only...

Thoughts?

Thx.
Ard Biesheuvel Feb. 6, 2024, 2:22 p.m. UTC | #4
On Tue, 6 Feb 2024 at 14:00, Borislav Petkov <bp@alien8.de> wrote:
>
> On Tue, Feb 06, 2024 at 11:19:06AM +0000, Ard Biesheuvel wrote:
> > Looks good to me - thanks.
>
> Thanks.
>
> Now, next question: I'm presuming we want this in stable?
>

Yes.

> If so, which one?
>

Ideally, any kernel that could reasonably be expected to work
correctly in a SEV guest should get this. And IMHO, the state of SEV
host support for any of its flavors is irrelevant there. But let's see
how painful it gets working our way back. I think Kevin mentioned
v5.10 but I haven't had a look yet how feasible that is.

> And if there are conflicts during backporting over there, would you guys
> address them and test the backports?
>

Yes - I cannot speak for Kevin outright but I'm sure that between him
and me, we will be able to allocate the time and effort to make sure
that those conflicts are resolved and the resolutions tested
sufficiently. (With Kevin's help, I have been able to test these
changes myself on our internal SEV infrastructure)

> I'm presuming there's only a subset of stable kernels which you care
> about. If so, we probably don't want to backport it all the way back to
> 5.19... but mark it for a later kernel only...
>
> Thoughts?
>

One slight complication is that the change currently relies on your
patch to remove CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT, so if we
don't want to backport that, I will have to tweak it. Personally,
I think it would be fine but others may feel differently.
Borislav Petkov Feb. 6, 2024, 2:37 p.m. UTC | #5
On Tue, Feb 06, 2024 at 02:22:25PM +0000, Ard Biesheuvel wrote:
> Ideally, any kernel that could reasonably be expected to work
> correctly in a SEV guest should get this. And IMHO, the state of SEV
> host support for any of its flavors is irrelevant there. But let's see
> how painful it gets working our way back. I think Kevin mentioned
> v5.10 but I haven't had a look yet how feasible that is.

Ok, I'll add Cc:stable and then you guys can sort it out when the stable
failed-to-apply messages come in.

> Yes - I cannot speak for Kevin outright but I'm sure that between him
> and me, we will be able to allocate the time and effort to make sure
> that those conflicts are resolved and the resolutions tested
> sufficiently. (With Kevin's help, I have been able to test these
> changes myself on our internal SEV infrastructure)

Sounds good.

> One slight complication is that the change currently relies on your
> patch to remove CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT, so if we
> don't want to backport that, I will have to tweak it. Personally,
> I think it would be fine but others may feel differently.

Well, tip:x86/sev is cast in stone because the KVM folks will eventually
use it as the base for the KVM SNP host changes, so I cannot rebase.

If there's trouble backporting just holler.

Thx.
Tom Lendacky Feb. 27, 2024, 7:05 p.m. UTC | #6
On 2/3/24 06:53, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
> 
> The early startup code executes from a 1:1 mapping of memory, which
> differs from the mapping that the code was linked and/or relocated to
> run at. The latter mapping is not active yet at this point, and so
> symbol references that rely on it will fault. Given that the core kernel
> is built without -fPIC, symbol references are typically emitted as
> absolute, and so any such references occurring in the early startup code
> will therefore crash the kernel.
> 
> While an attempt was made to work around this for the early SEV/SME
> startup code, by forcing RIP-relative addressing for certain global
> SEV/SME variables via inline assembly (see snp_cpuid_get_table() for
> example), RIP-relative addressing must be pervasively enforced for
> SEV/SME global variables when accessed prior to page table fixups.
> 
> __startup_64() already handles this issue for select non-SEV/SME global
> variables using fixup_pointer(), which adjusts the pointer relative to a
> `physaddr` argument. To avoid having to pass around this `physaddr`
> argument across all functions needing to apply pointer fixups, this
> patch introduces the macro RIP_RELATIVE_REF(), which generates a
> RIP-relative reference to a given global variable. The macro is used
> where necessary to force RIP-relative accesses to global variables.
> 
> For backporting purposes, this patch makes no attempt at cleaning up
> other occurrences of this pattern, involving either inline asm or
> fixup_pointer(). Those will be addressed by subsequent patches.
> 
> Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com/
> Co-developed-by: Kevin Loughlin <kevinloughlin@google.com>
> Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

>   
>   out:
> -	if (sme_me_mask) {
> -		physical_mask &= ~sme_me_mask;
> -		cc_vendor = CC_VENDOR_AMD;
> -		cc_set_mask(sme_me_mask);
> -	}
> +	RIP_RELATIVE_REF(sme_me_mask) = me_mask;
> +	physical_mask &= ~me_mask;
> +	cc_vendor = CC_VENDOR_AMD;

Sorry, just noticed this, but should physical_mask and cc_vendor also be 
RIP_RELATIVE_REF()?

Thanks,
Tom

> +	cc_set_mask(me_mask);
>   }
> 
> base-commit: f9e6f00d93d34f60f90b42c24e80194b11a72bb2
Ard Biesheuvel Feb. 29, 2024, 10:14 p.m. UTC | #7
On Tue, 27 Feb 2024 at 20:06, Tom Lendacky <thomas.lendacky@amd.com> wrote:
>
> On 2/3/24 06:53, Ard Biesheuvel wrote:
> > From: Ard Biesheuvel <ardb@kernel.org>
> >
> > The early startup code executes from a 1:1 mapping of memory, which
> > differs from the mapping that the code was linked and/or relocated to
> > run at. The latter mapping is not active yet at this point, and so
> > symbol references that rely on it will fault. Given that the core kernel
> > is built without -fPIC, symbol references are typically emitted as
> > absolute, and so any such references occurring in the early startup code
> > will therefore crash the kernel.
> >
> > While an attempt was made to work around this for the early SEV/SME
> > startup code, by forcing RIP-relative addressing for certain global
> > SEV/SME variables via inline assembly (see snp_cpuid_get_table() for
> > example), RIP-relative addressing must be pervasively enforced for
> > SEV/SME global variables when accessed prior to page table fixups.
> >
> > __startup_64() already handles this issue for select non-SEV/SME global
> > variables using fixup_pointer(), which adjusts the pointer relative to a
> > `physaddr` argument. To avoid having to pass around this `physaddr`
> > argument across all functions needing to apply pointer fixups, this
> > patch introduces the macro RIP_RELATIVE_REF(), which generates a
> > RIP-relative reference to a given global variable. The macro is used
> > where necessary to force RIP-relative accesses to global variables.
> >
> > For backporting purposes, this patch makes no attempt at cleaning up
> > other occurrences of this pattern, involving either inline asm or
> > fixup_pointer(). Those will be addressed by subsequent patches.
> >
> > Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com/
> > Co-developed-by: Kevin Loughlin <kevinloughlin@google.com>
> > Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
>
> >
> >   out:
> > -     if (sme_me_mask) {
> > -             physical_mask &= ~sme_me_mask;
> > -             cc_vendor = CC_VENDOR_AMD;
> > -             cc_set_mask(sme_me_mask);
> > -     }
> > +     RIP_RELATIVE_REF(sme_me_mask) = me_mask;
> > +     physical_mask &= ~me_mask;
> > +     cc_vendor = CC_VENDOR_AMD;
>
> Sorry, just noticed this, but should physical_mask and cc_vendor also be
> RIP_RELATIVE_REF()?
>

Yes, they should. And as a matter of fact, this kind of proves my
point that we should have instrumentation for this.

I have no insight into the heuristics that the compilers use when
choosing between absolute and RIP-relative references, but both
physical_mask and cc_vendor happen to be accessed via a RIP-relative
reference here, even when using non-PIC mode. I suspect that the main
factor here is whether or not the address of the variable is [also]
taken, but I suppose the encoding size could be relevant as well.
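
As a hypothetical illustration (not part of the patch), the same C-level
access of a global can be encoded either way in a non-PIC kernel build,
and only the RIP-relative form is safe from the 1:1 mapping:

	movq	sme_me_mask, %rax        # absolute; faults before the fixups
	movq	sme_me_mask(%rip), %rax  # RIP-relative; safe from the 1:1 map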

I'll follow up with an additional patch for these two, but the
instrumentation will have to wait for the next cycle.

Patch

diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index eeec9986570e..d07be9d05cd0 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -14,7 +14,7 @@ 
 #include <asm/processor.h>
 
 enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
-static u64 cc_mask __ro_after_init;
+u64 cc_mask __ro_after_init;
 
 static bool noinstr intel_cc_platform_has(enum cc_attr attr)
 {
@@ -148,8 +148,3 @@  u64 cc_mkdec(u64 val)
 	}
 }
 EXPORT_SYMBOL_GPL(cc_mkdec);
-
-__init void cc_set_mask(u64 mask)
-{
-	cc_mask = mask;
-}
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index fbcfec4dc4cc..41b408b3dfb6 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -113,6 +113,19 @@ 
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifndef __pic__
+static inline __pure void *rip_relative_ptr(void *p)
+{
+	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
+	return p;
+}
+#define RIP_RELATIVE_REF(var)	(*(typeof(&(var)))rip_relative_ptr(&(var)))
+#else
+#define RIP_RELATIVE_REF(var)	(var)
+#endif
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index 6ae2d16a7613..d6865e0f7587 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -2,6 +2,7 @@ 
 #ifndef _ASM_X86_COCO_H
 #define _ASM_X86_COCO_H
 
+#include <asm/asm.h>
 #include <asm/types.h>
 
 enum cc_vendor {
@@ -11,9 +12,14 @@  enum cc_vendor {
 };
 
 extern enum cc_vendor cc_vendor;
+extern u64 cc_mask;
 
 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
-void cc_set_mask(u64 mask);
+static inline void cc_set_mask(u64 mask)
+{
+	RIP_RELATIVE_REF(cc_mask) = mask;
+}
+
 u64 cc_mkenc(u64 val);
 u64 cc_mkdec(u64 val);
 #else
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 359ada486fa9..1427b5fc416c 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -15,6 +15,7 @@ 
 #include <linux/init.h>
 #include <linux/cc_platform.h>
 
+#include <asm/asm.h>
 #include <asm/bootparam.h>
 
 #ifdef CONFIG_X86_MEM_ENCRYPT
@@ -58,6 +59,11 @@  void __init mem_encrypt_free_decrypted_mem(void);
 
 void __init sev_es_init_vc_handling(void);
 
+static inline u64 sme_get_me_mask(void)
+{
+	return RIP_RELATIVE_REF(sme_me_mask);
+}
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
@@ -89,6 +95,8 @@  early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline u64 sme_get_me_mask(void) { return 0; }
+
 #define __bss_decrypted
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
@@ -106,11 +114,6 @@  void add_encrypt_protection_map(void);
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 5db24d0fc557..df3a89cbca46 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -560,9 +560,9 @@  static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
 		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
 
 		/* Skip post-processing for out-of-range zero leafs. */
-		if (!(leaf->fn <= cpuid_std_range_max ||
-		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
-		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
+		if (!(leaf->fn <= RIP_RELATIVE_REF(cpuid_std_range_max) ||
+		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_hyp_range_max)) ||
+		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_ext_range_max))))
 			return 0;
 	}
 
@@ -1072,11 +1072,11 @@  static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
 
 		if (fn->eax_in == 0x0)
-			cpuid_std_range_max = fn->eax;
+			RIP_RELATIVE_REF(cpuid_std_range_max) = fn->eax;
 		else if (fn->eax_in == 0x40000000)
-			cpuid_hyp_range_max = fn->eax;
+			RIP_RELATIVE_REF(cpuid_hyp_range_max) = fn->eax;
 		else if (fn->eax_in == 0x80000000)
-			cpuid_ext_range_max = fn->eax;
+			RIP_RELATIVE_REF(cpuid_ext_range_max) = fn->eax;
 	}
 }
 
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 002af6c30601..1cf348e19556 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -748,7 +748,7 @@  void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	 /*
@@ -767,7 +767,7 @@  void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index efe9f217fcf9..d8d14133b654 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -304,7 +304,8 @@  void __init sme_encrypt_kernel(struct boot_params *bp)
 	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
 	 * function.
 	 */
-	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
+	if (!sme_get_me_mask() ||
+	    RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
 		return;
 
 	/*
@@ -541,11 +542,11 @@  void __init sme_enable(struct boot_params *bp)
 	me_mask = 1UL << (ebx & 0x3f);
 
 	/* Check the SEV MSR whether SEV or SME is enabled */
-	sev_status   = __rdmsr(MSR_AMD64_SEV);
-	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+	RIP_RELATIVE_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
 	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
-	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
 		snp_abort();
 
 	/* Check if memory encryption is enabled */
@@ -571,7 +572,6 @@  void __init sme_enable(struct boot_params *bp)
 			return;
 	} else {
 		/* SEV state cannot be controlled by a command line option */
-		sme_me_mask = me_mask;
 		goto out;
 	}
 
@@ -590,16 +590,13 @@  void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
-		goto out;
-
-	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
-		sme_me_mask = me_mask;
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
+	    strncmp(buffer, cmdline_on, sizeof(buffer)))
+		return;
 
 out:
-	if (sme_me_mask) {
-		physical_mask &= ~sme_me_mask;
-		cc_vendor = CC_VENDOR_AMD;
-		cc_set_mask(sme_me_mask);
-	}
+	RIP_RELATIVE_REF(sme_me_mask) = me_mask;
+	physical_mask &= ~me_mask;
+	cc_vendor = CC_VENDOR_AMD;
+	cc_set_mask(me_mask);
 }