@@ -2533,6 +2533,15 @@ config MITIGATION_ADDRESS_SPACE_ISOLATION
there are likely to be unhandled cases, in particular concerning TLB
flushes.
+
+config MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON
+ bool "Enable address space isolation by default"
+ default n
+ depends on MITIGATION_ADDRESS_SPACE_ISOLATION
+ help
+ If selected, ASI is enabled by default at boot when neither
+ asi=on nor asi=off is specified on the kernel command line.
+
config MITIGATION_RETPOLINE
bool "Avoid speculative indirect branches in kernel"
select OBJTOOL if HAVE_OBJTOOL
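The new option only supplies a default; the asi= boot parameter has the final say. A minimal sketch of the resulting precedence, mirroring asi_check_boottime_disable() below (asi_param_is() is an illustrative helper, not a real API):

    bool enabled = IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON);

    if (asi_param_is("off"))        /* explicit asi=off always wins */
            enabled = false;
    else if (asi_param_is("on"))    /* explicit asi=on always wins */
            enabled = true;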
@@ -8,6 +8,7 @@
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
@@ -66,6 +67,8 @@
* the N ASI classes.
*/
+#define static_asi_enabled() cpu_feature_enabled(X86_FEATURE_ASI)
+
/*
* ASI uses a per-CPU tainting model to track what mitigation actions are
* required on domain transitions. Taints exist along two dimensions:
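Because static_asi_enabled() expands to cpu_feature_enabled(), it is effectively a patched static branch at runtime and a compile-time constant false when the config is off. A hedged sketch of the intended hot-path usage (the wrapper name is hypothetical):

    static __always_inline void asi_exit_if_enabled(void)
    {
            /* Constant-false when CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION=n,
             * so the compiler discards the call entirely. */
            if (static_asi_enabled())
                    asi_exit();
    }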
@@ -131,6 +134,8 @@ struct asi {
DECLARE_PER_CPU_ALIGNED(struct asi *, curr_asi);
+void asi_check_boottime_disable(void);
+
void asi_init_mm_state(struct mm_struct *mm);
int asi_init_class(enum asi_class_id class_id, struct asi_taint_policy *taint_policy);
@@ -155,7 +160,9 @@ void asi_exit(void);
/* The target is the domain we'll enter when returning to process context. */
static __always_inline struct asi *asi_get_target(struct task_struct *p)
{
- return p->thread.asi_state.target;
+ return static_asi_enabled()
+ ? p->thread.asi_state.target
+ : NULL;
}
static __always_inline void asi_set_target(struct task_struct *p,
@@ -166,7 +173,9 @@ static __always_inline void asi_set_target(struct task_struct *p,
static __always_inline struct asi *asi_get_current(void)
{
- return this_cpu_read(curr_asi);
+ return static_asi_enabled()
+ ? this_cpu_read(curr_asi)
+ : NULL;
}
/* Are we currently in a restricted address space? */
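Having the accessors return NULL when ASI is disabled lets callers fold the "compiled out", "boot-disabled" and "currently unrestricted" cases into a single check, for example (handler name hypothetical):

    struct asi *asi = asi_get_current();

    /* NULL: ASI disabled, or this CPU is in the unrestricted space. */
    if (asi)
            handle_restricted(asi);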
@@ -175,7 +184,11 @@ static __always_inline bool asi_is_restricted(void)
return (bool)asi_get_current();
}
-/* If we exit/have exited, can we stay that way until the next asi_enter? */
+/*
+ * If we exit/have exited, can we stay that way until the next asi_enter?
+ *
+ * When ASI is disabled, this returns true.
+ */
static __always_inline bool asi_is_relaxed(void)
{
return !asi_get_target(current);
}
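For orientation, the enter/relax pairing these predicates describe, as a rough sketch (the work function is hypothetical):

    asi_enter(asi);        /* target set; switch to the restricted space */
    do_sensitive_work();   /* asi_exit() may happen here; the target says where to re-enter */
    asi_relax();           /* target cleared; asi_is_relaxed() now true */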
@@ -474,6 +474,7 @@
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
+#define X86_FEATURE_ASI (21*32+ 6) /* Kernel Address Space Isolation */
/*
* BUG word(s)
@@ -50,6 +50,12 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+# define DISABLE_ASI 0
+#else
+# define DISABLE_ASI (1 << (X86_FEATURE_ASI & 31))
+#endif
+
#ifdef CONFIG_MITIGATION_RETPOLINE
# define DISABLE_RETPOLINE 0
#else
@@ -154,7 +160,7 @@
#define DISABLED_MASK17 0
#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 (DISABLE_SEV_SNP)
-#define DISABLED_MASK20 0
+#define DISABLED_MASK20 (DISABLE_ASI)
#define DISABLED_MASK21 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
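Placing X86_FEATURE_ASI in DISABLED_MASK20 is what makes the cpu_feature_enabled() checks free when the mitigation is compiled out; conceptually:

    /* With CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION=n, X86_FEATURE_ASI is in
     * DISABLED_MASK20, this reduces to if (0), and the branch is compiled away. */
    if (cpu_feature_enabled(X86_FEATURE_ASI))
            asi_handle_switch_mm();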
@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/asi.h>
#include <asm/cmdline.h>
#include <asm/cpufeature.h>
@@ -29,6 +30,9 @@ static inline bool asi_class_id_valid(enum asi_class_id class_id)
static inline bool asi_class_initialized(enum asi_class_id class_id)
{
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return false;
+
if (WARN_ON(!asi_class_id_valid(class_id)))
return false;
@@ -51,6 +55,9 @@ EXPORT_SYMBOL_GPL(asi_init_class);
void asi_uninit_class(enum asi_class_id class_id)
{
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return;
+
if (!asi_class_initialized(class_id))
return;
@@ -66,10 +73,36 @@ const char *asi_class_name(enum asi_class_id class_id)
return asi_class_names[class_id];
}
+void __init asi_check_boottime_disable(void)
+{
+ bool enabled = IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON);
+ char arg[4];
+ int ret;
+
+ ret = cmdline_find_option(boot_command_line, "asi", arg, sizeof(arg));
+ if (ret == 3 && !strncmp(arg, "off", 3)) {
+ enabled = false;
+ pr_info("ASI disabled through kernel command line.\n");
+ } else if (ret == 2 && !strncmp(arg, "on", 2)) {
+ enabled = true;
+ pr_info("Ignoring asi=on param while ASI implementation is incomplete.\n");
+ } else {
+ pr_info("ASI %s by default.\n",
+ enabled ? "enabled" : "disabled");
+ }
+ /* Don't set X86_FEATURE_ASI yet: the implementation is incomplete. */
+ if (enabled)
+ pr_info("ASI enablement ignored due to incomplete implementation.\n");
+}
+
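For reference, cmdline_find_option() returns the length of the value it copied into arg (or a negative value when the parameter is absent), which is what the 3/2 comparisons above encode:

    char arg[4];
    int ret = cmdline_find_option(boot_command_line, "asi", arg, sizeof(arg));

    /* "asi=off" -> ret == 3, arg == "off"; "asi=on" -> ret == 2, arg == "on";
     * no asi= at all -> ret < 0, leaving the Kconfig default in force. */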
static void __asi_destroy(struct asi *asi)
{
- lockdep_assert_held(&asi->mm->asi_init_lock);
+ WARN_ON_ONCE(asi->ref_count <= 0);
+ if (--(asi->ref_count) > 0)
+ return;
+ free_pages((ulong)asi->pgd, PGD_ALLOCATION_ORDER);
+ memset(asi, 0, sizeof(struct asi));
}
int asi_init(struct mm_struct *mm, enum asi_class_id class_id, struct asi **out_asi)
@@ -79,6 +112,9 @@ int asi_init(struct mm_struct *mm, enum asi_class_id class_id, struct asi **out_
*out_asi = NULL;
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return 0;
+
if (WARN_ON(!asi_class_initialized(class_id)))
return -EINVAL;
@@ -122,7 +158,7 @@ void asi_destroy(struct asi *asi)
{
struct mm_struct *mm;
- if (!asi)
+ if (!boot_cpu_has(X86_FEATURE_ASI) || !asi)
return;
if (WARN_ON(!asi_class_initialized(asi->class_id)))
@@ -134,11 +170,7 @@ void asi_destroy(struct asi *asi)
* to block concurrent asi_init calls.
*/
mutex_lock(&mm->asi_init_lock);
- WARN_ON_ONCE(asi->ref_count <= 0);
- if (--(asi->ref_count) == 0) {
- free_pages((ulong)asi->pgd, PGD_ALLOCATION_ORDER);
- memset(asi, 0, sizeof(struct asi));
- }
+ __asi_destroy(asi);
mutex_unlock(&mm->asi_init_lock);
}
EXPORT_SYMBOL_GPL(asi_destroy);
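Since __asi_destroy() now drops a reference rather than unconditionally freeing, asi_init() and asi_destroy() calls must stay paired per (mm, class). A sketch of the implied lifetime rules, assuming asi_init() takes the corresponding reference (the class id is illustrative):

    struct asi *a, *b;

    asi_init(mm, ASI_CLASS_KVM, &a);
    asi_init(mm, ASI_CLASS_KVM, &b);   /* same object, ref_count == 2 */
    asi_destroy(b);                    /* ref_count -> 1, page tables kept */
    asi_destroy(a);                    /* ref_count -> 0, pgd freed, *a zeroed */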
@@ -255,6 +287,9 @@ static noinstr void __asi_enter(void)
noinstr void asi_enter(struct asi *asi)
{
+ if (!static_asi_enabled())
+ return;
+
VM_WARN_ON_ONCE(!asi);
/* Should not have an asi_enter() without a prior asi_relax(). */
@@ -269,8 +304,10 @@ EXPORT_SYMBOL_GPL(asi_enter);
noinstr void asi_relax(void)
{
- barrier();
- asi_set_target(current, NULL);
+ if (static_asi_enabled()) {
+ barrier();
+ asi_set_target(current, NULL);
+ }
}
EXPORT_SYMBOL_GPL(asi_relax);
@@ -279,6 +316,9 @@ noinstr void asi_exit(void)
u64 unrestricted_cr3;
struct asi *asi;
+ if (!static_asi_enabled())
+ return;
+
preempt_disable_notrace();
VM_BUG_ON(this_cpu_read(cpu_tlbstate.loaded_mm) ==
@@ -310,6 +350,9 @@ EXPORT_SYMBOL_GPL(asi_exit);
void asi_init_mm_state(struct mm_struct *mm)
{
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return;
+
memset(mm->asi, 0, sizeof(mm->asi));
mutex_init(&mm->asi_init_lock);
}
@@ -28,6 +28,7 @@
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>
+#include <asm/asi.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -251,7 +252,7 @@ static void __init probe_page_size_mask(void)
__default_kernel_pte_mask = __supported_pte_mask;
/* Except when with PTI where the kernel is mostly non-Global: */
if (cpu_feature_enabled(X86_FEATURE_PTI) ||
- IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION))
+ cpu_feature_enabled(X86_FEATURE_ASI))
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/* Enable 1 GB linear kernel mappings if available: */
@@ -754,6 +755,7 @@ void __init init_mem_mapping(void)
unsigned long end;
pti_check_boottime_disable();
+ asi_check_boottime_disable();
probe_page_size_mask();
setup_pcid();
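The ordering here is load-bearing: asi_check_boottime_disable() has to settle the ASI decision before probe_page_size_mask() runs, since the latter now consults X86_FEATURE_ASI (rather than the compile-time option) to decide whether kernel mappings may keep _PAGE_GLOBAL:

    pti_check_boottime_disable();
    asi_check_boottime_disable();   /* will set X86_FEATURE_ASI once enablement is no longer ignored */
    probe_page_size_mask();         /* reads it to drop _PAGE_GLOBAL when needed */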
@@ -65,6 +65,10 @@ static inline pgd_t *asi_pgd(struct asi *asi) { return NULL; }
static inline void asi_handle_switch_mm(void) { }
+#define static_asi_enabled() false
+
+static inline void asi_check_boottime_disable(void) { }
+
#endif /* !CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
#endif /* !_ASSEMBLY_ */
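Because the =n stubs mirror the real API and static_asi_enabled() folds to false, call sites need no #ifdefs; something like the following (a hypothetical caller) compiles either way, with the guarded branch vanishing when ASI is compiled out:

    void arch_foo_init(void)
    {
            asi_check_boottime_disable();     /* no-op stub when =n */

            if (static_asi_enabled())         /* constant false when =n */
                    setup_asi_dependent_state();  /* hypothetical */
    }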