@@ -4,3 +4,4 @@ generated-y += syscall_table.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
+generic-y += asi.h
@@ -4,3 +4,4 @@ generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
+generic-y += asi.h
@@ -6,3 +6,4 @@ generic-y += parport.h
generated-y += mach-types.h
generated-y += unistd-nr.h
+generic-y += asi.h
@@ -4,5 +4,6 @@ generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += user.h
+generic-y += asi.h
generated-y += cpucaps.h
@@ -6,3 +6,4 @@ generic-y += kvm_para.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
+generic-y += asi.h
@@ -6,3 +6,4 @@ generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h
+generic-y += asi.h
@@ -3,3 +3,4 @@ generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
+generic-y += asi.h
@@ -3,3 +3,4 @@ generated-y += syscall_table.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += vtime.h
+generic-y += asi.h
@@ -4,3 +4,4 @@ generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h
+generic-y += asi.h
@@ -8,3 +8,4 @@ generic-y += parport.h
generic-y += syscalls.h
generic-y += tlb.h
generic-y += user.h
+generic-y += asi.h
@@ -14,3 +14,4 @@ generic-y += parport.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += user.h
+generic-y += asi.h
@@ -6,3 +6,4 @@ generic-y += gpio.h
generic-y += kvm_para.h
generic-y += parport.h
generic-y += user.h
+generic-y += asi.h
@@ -5,3 +5,4 @@ generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h
generic-y += user.h
+generic-y += asi.h
@@ -7,3 +7,4 @@ generic-y += qspinlock.h
generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += user.h
+generic-y += asi.h
@@ -4,3 +4,4 @@ generated-y += syscall_table_64.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += user.h
+generic-y += asi.h
@@ -8,3 +8,4 @@ generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += vtime.h
generic-y += early_ioremap.h
+generic-y += asi.h
@@ -5,3 +5,4 @@ generic-y += flat.h
generic-y += kvm_para.h
generic-y += user.h
generic-y += vmlinux.lds.h
+generic-y += asi.h
@@ -8,3 +8,4 @@ generic-y += asm-offsets.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
+generic-y += asi.h
@@ -3,3 +3,4 @@ generated-y += syscall_table.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
+generic-y += asi.h
@@ -4,3 +4,4 @@ generated-y += syscall_table_64.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
+generic-y += asi.h
@@ -27,3 +27,4 @@ generic-y += word-at-a-time.h
generic-y += kprobes.h
generic-y += mm_hooks.h
generic-y += vga.h
+generic-y += asi.h
new file mode 100644
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ASI_H
+#define _ASM_X86_ASI_H
+
+#include <asm-generic/asi.h>
+
+#include <asm/pgtable_types.h>
+#include <asm/percpu.h>
+
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+
+/*
+ * The number of ASI classes is fixed for now. Class index 0 is reserved
+ * and is never handed out by asi_register_class().
+ */
+#define ASI_MAX_NUM_ORDER 2
+#define ASI_MAX_NUM (1 << ASI_MAX_NUM_ORDER)
+
+/* Per-CPU state tracking the active and the requested restricted space. */
+struct asi_state {
+	struct asi *curr_asi;	/* ASI currently loaded, NULL if none */
+	struct asi *target_asi;	/* ASI to run in, NULL for unrestricted */
+};
+
+struct asi_hooks {
+	/* Both of these functions MUST be idempotent and re-entrant. */
+
+	void (*post_asi_enter)(void);
+	void (*pre_asi_exit)(void);
+};
+
+/* A class of restricted address space (e.g. KPTI, KVM-PTI). */
+struct asi_class {
+	struct asi_hooks ops;
+	uint flags;
+	const char *name;	/* non-NULL iff the slot is registered */
+};
+
+/* One restricted address space instance; at most one per class per mm. */
+struct asi {
+	pgd_t *pgd;
+	struct asi_class *class;
+	struct mm_struct *mm;
+};
+
+DECLARE_PER_CPU_ALIGNED(struct asi_state, asi_cpu_state);
+
+void asi_init_mm_state(struct mm_struct *mm);
+
+int asi_register_class(const char *name, uint flags,
+		       const struct asi_hooks *ops);
+void asi_unregister_class(int index);
+
+int asi_init(struct mm_struct *mm, int asi_index);
+void asi_destroy(struct asi *asi);
+
+void asi_enter(struct asi *asi);
+void asi_exit(void);
+
+/* Request that this CPU return to the unrestricted address space. */
+static inline void asi_set_target_unrestricted(void)
+{
+	/* Compiler barrier: order prior accesses before clearing the target. */
+	barrier();
+	this_cpu_write(asi_cpu_state.target_asi, NULL);
+}
+
+static inline struct asi *asi_get_current(void)
+{
+	return this_cpu_read(asi_cpu_state.curr_asi);
+}
+
+static inline struct asi *asi_get_target(void)
+{
+	return this_cpu_read(asi_cpu_state.target_asi);
+}
+
+static inline bool is_asi_active(void)
+{
+	return (bool)asi_get_current();
+}
+
+static inline bool asi_is_target_unrestricted(void)
+{
+	return !asi_get_target();
+}
+
+#endif /* CONFIG_ADDRESS_SPACE_ISOLATION */
+
+#endif /* _ASM_X86_ASI_H */
@@ -259,6 +259,8 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+unsigned long build_cr3(pgd_t *pgd, u16 asid);
+
#endif /* !MODULE */
#endif /* _ASM_X86_TLBFLUSH_H */
@@ -51,6 +51,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o
+obj-$(CONFIG_ADDRESS_SPACE_ISOLATION) += asi.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
new file mode 100644
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Core infrastructure for Address Space Isolation (ASI): running kernel
+ * code with a reduced ("restricted") kernel address space.
+ */
+
+#include <asm/asi.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "ASI: " fmt
+
+/* Slot 0 is reserved for the unrestricted address space. */
+static struct asi_class asi_class[ASI_MAX_NUM];
+static DEFINE_SPINLOCK(asi_class_lock);
+
+DEFINE_PER_CPU_ALIGNED(struct asi_state, asi_cpu_state);
+EXPORT_PER_CPU_SYMBOL_GPL(asi_cpu_state);
+
+/*
+ * Claim a free ASI class slot and return its index, or -ENOSPC if all
+ * slots are in use. @name must remain valid until the class is
+ * unregistered with asi_unregister_class().
+ */
+int asi_register_class(const char *name, uint flags,
+		       const struct asi_hooks *ops)
+{
+	int i;
+
+	VM_BUG_ON(name == NULL);
+
+	spin_lock(&asi_class_lock);
+
+	/* A NULL name marks a free slot. */
+	for (i = 1; i < ASI_MAX_NUM; i++) {
+		if (asi_class[i].name == NULL) {
+			asi_class[i].name = name;
+			asi_class[i].flags = flags;
+			if (ops != NULL)
+				asi_class[i].ops = *ops;
+			break;
+		}
+	}
+
+	spin_unlock(&asi_class_lock);
+
+	if (i == ASI_MAX_NUM)
+		i = -ENOSPC;
+
+	return i;
+}
+EXPORT_SYMBOL_GPL(asi_register_class);
+
+void asi_unregister_class(int index)
+{
+	spin_lock(&asi_class_lock);
+
+	WARN_ON(asi_class[index].name == NULL);
+	memset(&asi_class[index], 0, sizeof(struct asi_class));
+
+	spin_unlock(&asi_class_lock);
+}
+EXPORT_SYMBOL_GPL(asi_unregister_class);
+
+/*
+ * Initialize the restricted address space of class @asi_index for @mm.
+ * Returns 0 on success, -EINVAL for an out-of-range index, or -ENOMEM
+ * if the PGD allocation fails.
+ */
+int asi_init(struct mm_struct *mm, int asi_index)
+{
+	struct asi *asi;
+
+	/* Index 0 is reserved for special purposes. */
+	if (WARN_ON(asi_index == 0 || asi_index >= ASI_MAX_NUM))
+		return -EINVAL;
+
+	asi = &mm->asi[asi_index];
+	WARN_ON(asi->pgd != NULL);
+
+	/*
+	 * For now, we allocate 2 pages to avoid any potential problems with
+	 * KPTI code. This won't be needed once KPTI is folded into the ASI
+	 * framework.
+	 */
+	asi->pgd = (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
+					     PGD_ALLOCATION_ORDER);
+	if (!asi->pgd)
+		return -ENOMEM;
+
+	asi->class = &asi_class[asi_index];
+	asi->mm = mm;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(asi_init);
+
+/* Free the restricted page tables and reset the struct asi. */
+void asi_destroy(struct asi *asi)
+{
+	free_pages((ulong)asi->pgd, PGD_ALLOCATION_ORDER);
+	memset(asi, 0, sizeof(struct asi));
+}
+EXPORT_SYMBOL_GPL(asi_destroy);
+
+static void __asi_enter(void)
+{
+	u64 asi_cr3;
+	struct asi *target = this_cpu_read(asi_cpu_state.target_asi);
+
+	VM_BUG_ON(preemptible());
+
+	if (!target || target == this_cpu_read(asi_cpu_state.curr_asi))
+		return;
+
+	VM_BUG_ON(this_cpu_read(cpu_tlbstate.loaded_mm) ==
+		  LOADED_MM_SWITCHING);
+
+	/*
+	 * Update curr_asi before writing CR3 so that an interrupt taken
+	 * during the switch already observes ASI as active.
+	 */
+	this_cpu_write(asi_cpu_state.curr_asi, target);
+
+	asi_cr3 = build_cr3(target->pgd,
+			    this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+	write_cr3(asi_cr3);
+
+	if (target->class->ops.post_asi_enter)
+		target->class->ops.post_asi_enter();
+}
+
+/* Enter the restricted address space @asi on this CPU. */
+void asi_enter(struct asi *asi)
+{
+	VM_WARN_ON_ONCE(!asi);
+
+	/* Publish the target before performing the actual switch. */
+	this_cpu_write(asi_cpu_state.target_asi, asi);
+	barrier();
+
+	__asi_enter();
+}
+EXPORT_SYMBOL_GPL(asi_enter);
+
+/* Leave any restricted address space and reload the full kernel map. */
+void asi_exit(void)
+{
+	u64 unrestricted_cr3;
+	struct asi *asi;
+
+	preempt_disable();
+
+	VM_BUG_ON(this_cpu_read(cpu_tlbstate.loaded_mm) ==
+		  LOADED_MM_SWITCHING);
+
+	asi = this_cpu_read(asi_cpu_state.curr_asi);
+
+	if (asi) {
+		if (asi->class->ops.pre_asi_exit)
+			asi->class->ops.pre_asi_exit();
+
+		unrestricted_cr3 =
+			build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
+				  this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+		write_cr3(unrestricted_cr3);
+		this_cpu_write(asi_cpu_state.curr_asi, NULL);
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(asi_exit);
+
+void asi_init_mm_state(struct mm_struct *mm)
+{
+	memset(mm->asi, 0, sizeof(mm->asi));
+}
@@ -238,8 +238,9 @@ static void __init probe_page_size_mask(void)
/* By the default is everything supported: */
__default_kernel_pte_mask = __supported_pte_mask;
- /* Except when with PTI where the kernel is mostly non-Global: */
- if (cpu_feature_enabled(X86_FEATURE_PTI))
+ /* Except when with PTI or ASI where the kernel is mostly non-Global: */
+ if (cpu_feature_enabled(X86_FEATURE_PTI) ||
+ IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/* Enable 1 GB linear kernel mappings if available: */
@@ -153,7 +153,7 @@ static inline u16 user_pcid(u16 asid)
	return ret;
}
-static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
+unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
@@ -7,3 +7,4 @@ generic-y += param.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += user.h
+generic-y += asi.h
new file mode 100644
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ASI_H
+#define __ASM_GENERIC_ASI_H
+
+/* ASI class flags */
+#define ASI_MAP_STANDARD_NONSENSITIVE 1
+
+#ifndef CONFIG_ADDRESS_SPACE_ISOLATION
+
+#define ASI_MAX_NUM_ORDER 0
+#define ASI_MAX_NUM 0
+
+#ifndef __ASSEMBLY__
+
+struct asi_hooks {};
+struct asi {};
+
+static inline
+int asi_register_class(const char *name, uint flags,
+		       const struct asi_hooks *ops)
+{
+	return 0;
+}
+
+static inline void asi_unregister_class(int asi_index) { }
+
+static inline void asi_init_mm_state(struct mm_struct *mm) { }
+
+static inline int asi_init(struct mm_struct *mm, int asi_index) { return 0; }
+
+static inline void asi_destroy(struct asi *asi) { }
+
+static inline void asi_enter(struct asi *asi) { }
+
+static inline void asi_set_target_unrestricted(void) { }
+
+static inline bool asi_is_target_unrestricted(void) { return true; }
+
+static inline void asi_exit(void) { }
+
+static inline bool is_asi_active(void) { return false; }
+
+static inline struct asi *asi_get_target(void) { return NULL; }
+
+static inline struct asi *asi_get_current(void) { return NULL; }
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* !CONFIG_ADDRESS_SPACE_ISOLATION */
+
+#endif /* __ASM_GENERIC_ASI_H */
@@ -18,6 +18,7 @@
#include <linux/seqlock.h>
#include <asm/mmu.h>
+#include <asm/asi.h>
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
@@ -495,6 +496,8 @@ struct mm_struct {
atomic_t membarrier_state;
#endif
+ struct asi asi[ASI_MAX_NUM];
+
/**
* @mm_users: The number of users including userspace.
*
@@ -102,6 +102,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm/asi.h>
#include <trace/events/sched.h>
@@ -1071,6 +1072,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->def_flags = 0;
}
+ asi_init_mm_state(mm);
+
if (mm_alloc_pgd(mm))
goto fail_nopgd;
@@ -65,6 +65,15 @@ config PAGE_TABLE_ISOLATION
	  See Documentation/x86/pti.rst for more details.
+config ADDRESS_SPACE_ISOLATION
+	bool "Allow code to run with a reduced kernel address space"
+	depends on X86_64 && !UML
+	depends on !PARAVIRT
+	help
+	  This feature provides the ability to run some kernel code
+	  with a reduced kernel address space. This can be used to
+	  mitigate some speculative execution attacks.
+
config SECURITY_INFINIBAND
	bool "Infiniband Security Hooks"
	depends on SECURITY && INFINIBAND
Introduce core API for Address Space Isolation (ASI). Kernel address
space isolation provides the ability to run some kernel code with a
reduced kernel address space.

There can be multiple classes of such restricted kernel address spaces
(e.g. KPTI, KVM-PTI etc.). Each ASI class is identified by an index.
The ASI class can register some hooks to be called when entering/exiting
the restricted address space.

Currently, there is a fixed maximum number of ASI classes supported. In
addition, each process can have at most one restricted address space from
each ASI class. Neither of these are inherent limitations and are merely
simplifying assumptions for the time being. (The Kconfig and the
high-level ASI API are derived from the original ASI RFC by Alexandre
Chartre).

Originally-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/alpha/include/asm/Kbuild      |   1 +
 arch/arc/include/asm/Kbuild        |   1 +
 arch/arm/include/asm/Kbuild        |   1 +
 arch/arm64/include/asm/Kbuild      |   1 +
 arch/csky/include/asm/Kbuild       |   1 +
 arch/h8300/include/asm/Kbuild      |   1 +
 arch/hexagon/include/asm/Kbuild    |   1 +
 arch/ia64/include/asm/Kbuild       |   1 +
 arch/m68k/include/asm/Kbuild       |   1 +
 arch/microblaze/include/asm/Kbuild |   1 +
 arch/mips/include/asm/Kbuild       |   1 +
 arch/nds32/include/asm/Kbuild      |   1 +
 arch/nios2/include/asm/Kbuild      |   1 +
 arch/openrisc/include/asm/Kbuild   |   1 +
 arch/parisc/include/asm/Kbuild     |   1 +
 arch/powerpc/include/asm/Kbuild    |   1 +
 arch/riscv/include/asm/Kbuild      |   1 +
 arch/s390/include/asm/Kbuild       |   1 +
 arch/sh/include/asm/Kbuild         |   1 +
 arch/sparc/include/asm/Kbuild      |   1 +
 arch/um/include/asm/Kbuild         |   1 +
 arch/x86/include/asm/asi.h         |  81 +++++++++++++++
 arch/x86/include/asm/tlbflush.h    |   2 +
 arch/x86/mm/Makefile               |   1 +
 arch/x86/mm/asi.c                  | 152 +++++++++++++++++++++++++++++
 arch/x86/mm/init.c                 |   5 +-
 arch/x86/mm/tlb.c                  |   2 +-
 arch/xtensa/include/asm/Kbuild     |   1 +
 include/asm-generic/asi.h          |  51 ++++++++++
 include/linux/mm_types.h           |   3 +
 kernel/fork.c                      |   3 +
 security/Kconfig                   |  10 ++
 32 files changed, 329 insertions(+), 3 deletions(-)
 create mode 100644 arch/x86/include/asm/asi.h
 create mode 100644 arch/x86/mm/asi.c
 create mode 100644 include/asm-generic/asi.h