[v9a,10/14] x86/cpu/keylocker: Check Gather Data Sampling mitigation

Message ID: 20240407230432.912290-1-chang.seok.bae@intel.com (mailing list archive)
State: Not Applicable
Delegated to: Herbert Xu
Series: None

Commit Message

Chang S. Bae April 7, 2024, 11:04 p.m. UTC
Gather Data Sampling is a transient execution side-channel issue affecting
some CPU models. When this vulnerability is not addressed, stale data in
registers is not guaranteed to be secure.

When Key Locker performs AES transformations, the original key is
temporarily held in registers, which poses a risk: on some implementations
the key material can remain stale there, making the AES key susceptible to
leakage.

To mitigate this vulnerability, a qualified microcode image must be
applied. Add code to ensure that the mitigation is installed and securely
locked, and disable the feature otherwise.

Expand gds_ucode_mitigated() to examine the lock state.

Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
---
Changes from v9:
* Removed MSR reads and utilized the helper function. (Pawan Gupta)

Alternatively, 'gds_mitigation' could be exported and referenced directly;
checking 'gds_mitigation == GDS_MITIGATION_FULL_LOCKED' would also be
readable. However, gds_ucode_mitigated() was expanded instead, for
consistency with the already established helper.
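
For reference, a minimal sketch of that alternative, not what this patch
does, could look like the following. It assumes 'enum gds_mitigations' were
moved into a shared header and the variable made non-static in bugs.c:

  /* Hypothetical alternative: export the mitigation state itself. */
  extern enum gds_mitigations gds_mitigation;

  static bool __init secure_keylocker(void)
  {
          /* Require no GDS bug, or a mitigation that is enabled and locked. */
          return !boot_cpu_has_bug(X86_BUG_GDS) ||
                 gds_mitigation == GDS_MITIGATION_FULL_LOCKED;
  }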

Note that checking for the locked state aligns with Intel's guidance, as
the bugs.c code already tracks the following MSR bits:
  "Intel recommends that system software does not enable Key Locker (by
   setting CR4.KL) unless the GDS mitigation is enabled
   (IA32_MCU_OPT_CTRL[GDS_MITG_DIS] (bit 4) is 0) and locked
   (IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] (bit 5) is 1)."

For more information, refer to Intel's technical documentation on Gather
Data Sampling:
  https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/gather-data-sampling.html
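
For illustration only, the kind of raw MSR check that v9 dropped in favor
of the helper would look roughly like the sketch below (the function name
is illustrative), using the MSR_IA32_MCU_OPT_CTRL, GDS_MITG_DIS, and
GDS_MITG_LOCK definitions from <asm/msr-index.h>:

  /* Sketch: read IA32_MCU_OPT_CTRL and test the two bits directly. */
  static bool __init gds_mitigated_and_locked(void)
  {
          u64 mcu_ctrl;

          rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

          /* Mitigated (GDS_MITG_DIS clear) and locked (GDS_MITG_LOCK set). */
          return !(mcu_ctrl & GDS_MITG_DIS) && (mcu_ctrl & GDS_MITG_LOCK);
  }
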
---
 arch/x86/include/asm/processor.h |  7 ++++++-
 arch/x86/kernel/cpu/bugs.c       |  5 ++++-
 arch/x86/kernel/keylocker.c      | 12 ++++++++++++
 arch/x86/kvm/x86.c               |  2 +-
 4 files changed, 23 insertions(+), 3 deletions(-)

Comments

Pawan Gupta April 19, 2024, 12:01 a.m. UTC | #1
On Sun, Apr 07, 2024 at 04:04:32PM -0700, Chang S. Bae wrote:
> Gather Data Sampling is a transient execution side-channel issue affecting
> some CPU models. When this vulnerability is not addressed, stale data in
> registers is not guaranteed to be secure.
> 
> When Key Locker performs AES transformations, the original key is
> temporarily held in registers, which poses a risk: on some implementations
> the key material can remain stale there, making the AES key susceptible to
> leakage.
> 
> To mitigate this vulnerability, a qualified microcode image must be
> applied. Add code to ensure that the mitigation is installed and securely
> locked, and disable the feature otherwise.
> 
> Expand gds_ucode_mitigated() to examine the lock state.
> 
> Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
> ---
> Changes from v9:
> * Removed MSR reads and utilized the helper function. (Pawan Gupta)
> 
> Alternatively, 'gds_mitigation' could be exported and referenced directly;
> checking 'gds_mitigation == GDS_MITIGATION_FULL_LOCKED' would also be
> readable. However, gds_ucode_mitigated() was expanded instead, for
> consistency with the already established helper.
> 
> Note that checking for the locked state aligns with Intel's guidance, as
> the bugs.c code already tracks the following MSR bits:
>   "Intel recommends that system software does not enable Key Locker (by
>    setting CR4.KL) unless the GDS mitigation is enabled
>    (IA32_MCU_OPT_CTRL[GDS_MITG_DIS] (bit 4) is 0) and locked
>    (IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] (bit 5) is 1)."
> 
> For more information, refer to Intel's technical documentation on Gather
> Data Sampling:
>   https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/gather-data-sampling.html
> ---
>  arch/x86/include/asm/processor.h |  7 ++++++-
>  arch/x86/kernel/cpu/bugs.c       |  5 ++++-
>  arch/x86/kernel/keylocker.c      | 12 ++++++++++++
>  arch/x86/kvm/x86.c               |  2 +-
>  4 files changed, 23 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 811548f131f4..74eaa3a2b85b 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -721,7 +721,12 @@ enum mds_mitigations {
>  	MDS_MITIGATION_VMWERV,
>  };
>  
> -extern bool gds_ucode_mitigated(void);
> +enum mitigation_info {
> +	MITG_FULL,
> +	MITG_LOCKED,
> +};
> +
> +extern bool gds_ucode_mitigated(enum mitigation_info mitg);
>  
>  /*
>   * Make previous memory operations globally visible before
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index e7ba936d798b..80f6e70619cb 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -752,8 +752,11 @@ static const char * const gds_strings[] = {
>  	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
>  };
>  
> -bool gds_ucode_mitigated(void)
> +bool gds_ucode_mitigated(enum mitigation_info mitg)
>  {
> +	if (mitg == MITG_LOCKED)
> +		return gds_mitigation == GDS_MITIGATION_FULL_LOCKED;
> +
>  	return (gds_mitigation == GDS_MITIGATION_FULL ||
>  		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
>  }
> diff --git a/arch/x86/kernel/keylocker.c b/arch/x86/kernel/keylocker.c
> index 1e81d0704eea..23cf4a235f11 100644
> --- a/arch/x86/kernel/keylocker.c
> +++ b/arch/x86/kernel/keylocker.c
> @@ -113,6 +113,15 @@ void restore_keylocker(void)
>  	valid_wrapping_key = false;
>  }
>  
> +/* Check if Key Locker is secure enough to be used. */
> +static bool __init secure_keylocker(void)
> +{
> +	if (boot_cpu_has_bug(X86_BUG_GDS) && !gds_ucode_mitigated(MITG_LOCKED))
> +		return false;
> +
> +	return true;
> +}
> +
>  static int __init init_keylocker(void)
>  {
>  	u32 eax, ebx, ecx, edx;
> @@ -126,6 +135,9 @@ static int __init init_keylocker(void)
>  		goto clear_cap;
>  	}
>  
> +	if (!secure_keylocker())
> +		goto clear_cap;
> +
>  	cr4_set_bits(X86_CR4_KEYLOCKER);
>  
>  	/* AESKLE depends on CR4.KEYLOCKER */
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 47d9f03b7778..4ab50e95fdb5 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1675,7 +1675,7 @@ static u64 kvm_get_arch_capabilities(void)
>  		 */
>  	}
>  
> -	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
> +	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated(MITG_FULL))
>  		data |= ARCH_CAP_GDS_NO;
>  
>  	return data;

Repurposing gds_ucode_mitigated() to check for the locked state adds a bit
of churn. We could introduce gds_mitigation_locked() instead.

Does the below look okay?

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 811548f131f4..8ba96e8a8754 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -722,6 +722,7 @@ enum mds_mitigations {
 };
 
 extern bool gds_ucode_mitigated(void);
+extern bool gds_mitigation_locked(void);
 
 /*
  * Make previous memory operations globally visible before
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ca295b0c1eee..a7ec26988ddb 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -753,6 +753,11 @@ bool gds_ucode_mitigated(void)
 }
 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
 
+bool gds_mitigation_locked(void)
+{
+	return gds_mitigation == GDS_MITIGATION_FULL_LOCKED;
+}
+
 void update_gds_msr(void)
 {
 	u64 mcu_ctrl_after;
diff --git a/arch/x86/kernel/keylocker.c b/arch/x86/kernel/keylocker.c
index 1b57e11d93ad..c40e72f482b1 100644
--- a/arch/x86/kernel/keylocker.c
+++ b/arch/x86/kernel/keylocker.c
@@ -112,6 +112,15 @@ void restore_keylocker(void)
 	valid_wrapping_key = false;
 }
 
+/* Check if Key Locker is secure enough to be used. */
+static bool __init secure_keylocker(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_GDS) && !gds_mitigation_locked())
+		return false;
+
+	return true;
+}
+
 static int __init init_keylocker(void)
 {
 	u32 eax, ebx, ecx, edx;
@@ -125,6 +134,9 @@ static int __init init_keylocker(void)
 		goto clear_cap;
 	}
 
+	if (!secure_keylocker())
+		goto clear_cap;
+
 	cr4_set_bits(X86_CR4_KEYLOCKER);
 
 	/* AESKLE depends on CR4.KEYLOCKER */
Chang S. Bae April 22, 2024, 7:49 a.m. UTC | #2
On 4/18/2024 5:01 PM, Pawan Gupta wrote:
> 
> Repurposing gds_ucode_mitigated() to check for the locked state adds a
> bit of churn. We could introduce gds_mitigation_locked() instead.

I considered this option, but I was less convinced about adding a new
function for every new, slightly different check.
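
For example, with the enum-based interface a future, slightly different
query would only grow the enum and add one more branch inside the existing
gds_ucode_mitigated(), rather than exporting another symbol (the
MITG_EXAMPLE name below is made up):

  enum mitigation_info {
          MITG_FULL,
          MITG_LOCKED,
          MITG_EXAMPLE,   /* hypothetical: handled by one extra branch */
  };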

Thanks,
Chang

Patch

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 811548f131f4..74eaa3a2b85b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -721,7 +721,12 @@  enum mds_mitigations {
 	MDS_MITIGATION_VMWERV,
 };
 
-extern bool gds_ucode_mitigated(void);
+enum mitigation_info {
+	MITG_FULL,
+	MITG_LOCKED,
+};
+
+extern bool gds_ucode_mitigated(enum mitigation_info mitg);
 
 /*
  * Make previous memory operations globally visible before
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index e7ba936d798b..80f6e70619cb 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -752,8 +752,11 @@  static const char * const gds_strings[] = {
 	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
 };
 
-bool gds_ucode_mitigated(void)
+bool gds_ucode_mitigated(enum mitigation_info mitg)
 {
+	if (mitg == MITG_LOCKED)
+		return gds_mitigation == GDS_MITIGATION_FULL_LOCKED;
+
 	return (gds_mitigation == GDS_MITIGATION_FULL ||
 		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
 }
diff --git a/arch/x86/kernel/keylocker.c b/arch/x86/kernel/keylocker.c
index 1e81d0704eea..23cf4a235f11 100644
--- a/arch/x86/kernel/keylocker.c
+++ b/arch/x86/kernel/keylocker.c
@@ -113,6 +113,15 @@  void restore_keylocker(void)
 	valid_wrapping_key = false;
 }
 
+/* Check if Key Locker is secure enough to be used. */
+static bool __init secure_keylocker(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_GDS) && !gds_ucode_mitigated(MITG_LOCKED))
+		return false;
+
+	return true;
+}
+
 static int __init init_keylocker(void)
 {
 	u32 eax, ebx, ecx, edx;
@@ -126,6 +135,9 @@  static int __init init_keylocker(void)
 		goto clear_cap;
 	}
 
+	if (!secure_keylocker())
+		goto clear_cap;
+
 	cr4_set_bits(X86_CR4_KEYLOCKER);
 
 	/* AESKLE depends on CR4.KEYLOCKER */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47d9f03b7778..4ab50e95fdb5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1675,7 +1675,7 @@  static u64 kvm_get_arch_capabilities(void)
 		 */
 	}
 
-	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
+	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated(MITG_FULL))
 		data |= ARCH_CAP_GDS_NO;
 
 	return data;