--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -120,6 +120,12 @@
/* Slab deactivation flag */
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+#define SLAB_GLOBAL_NONSENSITIVE ((slab_flags_t __force)0x20000000U)
+#else
+#define SLAB_GLOBAL_NONSENSITIVE 0
+#endif
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -329,6 +335,11 @@ enum kmalloc_cache_type {
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+extern struct kmem_cache *
+nonsensitive_kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+#endif
+
/*
* Define gfp bits that should not be set for KMALLOC_NORMAL.
*/
@@ -361,6 +372,17 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
return KMALLOC_CGROUP;
}
+static __always_inline struct kmem_cache *get_kmalloc_cache(gfp_t flags,
+ uint index)
+{
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+
+ if (static_asi_enabled() && (flags & __GFP_GLOBAL_NONSENSITIVE))
+ return nonsensitive_kmalloc_caches[kmalloc_type(flags)][index];
+#endif
+ return kmalloc_caches[kmalloc_type(flags)][index];
+}
+
/*
* Figure out which kmalloc slab an allocation of a certain size
* belongs to.
@@ -587,9 +609,8 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
- flags, size);
+ return kmem_cache_alloc_trace(get_kmalloc_cache(flags, index),
+ flags, size);
#endif
}
return __kmalloc(size, flags);
@@ -605,9 +626,8 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
if (!i)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ return kmem_cache_alloc_node_trace(get_kmalloc_cache(flags, i),
+ flags, node, size);
}
#endif
return __kmalloc_node(size, flags, node);
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1956,6 +1956,9 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
size = ALIGN(size, REDZONE_ALIGN);
}
+ if (!static_asi_enabled())
+ flags &= ~SLAB_NONSENSITIVE;
+
/* 3) caller mandated alignment */
if (ralign < cachep->align) {
ralign = cachep->align;
@@ -2058,6 +2061,8 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
cachep->allocflags |= GFP_DMA32;
if (flags & SLAB_RECLAIM_ACCOUNT)
cachep->allocflags |= __GFP_RECLAIMABLE;
+ if (flags & SLAB_GLOBAL_NONSENSITIVE)
+ cachep->allocflags |= __GFP_GLOBAL_NONSENSITIVE;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -77,6 +77,10 @@ extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
const char *name[NR_KMALLOC_TYPES];
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+ const char *nonsensitive_name[NR_KMALLOC_TYPES];
+#endif
+ slab_flags_t flags[NR_KMALLOC_TYPES];
unsigned int size;
} kmalloc_info[];
@@ -124,11 +128,14 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
}
#endif
+/* This will also include SLAB_LOCAL_NONSENSITIVE in a later patch. */
+#define SLAB_NONSENSITIVE SLAB_GLOBAL_NONSENSITIVE
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \
- SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
+ SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
+ SLAB_NONSENSITIVE)
#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -491,6 +498,11 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
might_alloc(flags);
+ if (static_asi_enabled()) {
+ VM_BUG_ON(!(s->flags & SLAB_GLOBAL_NONSENSITIVE) &&
+ (flags & __GFP_GLOBAL_NONSENSITIVE));
+ }
+
if (should_failslab(s, flags))
return NULL;
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -50,7 +50,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
SLAB_FAILSLAB | kasan_never_merge())
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
- SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
+ SLAB_CACHE_DMA32 | SLAB_ACCOUNT | SLAB_NONSENSITIVE)
/*
* Merge control. If this is set then no merging of slab caches will occur.
@@ -681,6 +681,15 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+
+struct kmem_cache *
+nonsensitive_kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
+{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
+EXPORT_SYMBOL(nonsensitive_kmalloc_caches);
+
+#endif
+
/*
* Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -738,25 +747,34 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
index = fls(size - 1);
}
- return kmalloc_caches[kmalloc_type(flags)][index];
+ return get_kmalloc_cache(flags, index);
}
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+#define __KMALLOC_NAME(type, base_name, sz) \
+ .name[type] = base_name "-" #sz, \
+ .nonsensitive_name[type] = "ns-" base_name "-" #sz,
+#else
+#define __KMALLOC_NAME(type, base_name, sz) \
+ .name[type] = base_name "-" #sz,
+#endif
+
#ifdef CONFIG_ZONE_DMA
-#define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
+#define KMALLOC_DMA_NAME(sz) __KMALLOC_NAME(KMALLOC_DMA, "dma-kmalloc", sz)
#else
#define KMALLOC_DMA_NAME(sz)
#endif
#ifdef CONFIG_MEMCG_KMEM
-#define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
+#define KMALLOC_CGROUP_NAME(sz) __KMALLOC_NAME(KMALLOC_CGROUP, "kmalloc-cg", sz)
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif
#define INIT_KMALLOC_INFO(__size, __short_size) \
{ \
- .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
- .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
+ __KMALLOC_NAME(KMALLOC_NORMAL, "kmalloc", __short_size) \
+ __KMALLOC_NAME(KMALLOC_RECLAIM, "kmalloc-rcl", __short_size) \
KMALLOC_CGROUP_NAME(__short_size) \
KMALLOC_DMA_NAME(__short_size) \
.size = __size, \
@@ -846,18 +864,30 @@ void __init setup_kmalloc_cache_index_table(void)
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
+ struct kmem_cache *(*caches)[KMALLOC_SHIFT_HIGH + 1] = kmalloc_caches;
+ const char *name = kmalloc_info[idx].name[type];
+
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+
+ if (flags & SLAB_NONSENSITIVE) {
+ caches = nonsensitive_kmalloc_caches;
+ name = kmalloc_info[idx].nonsensitive_name[type];
+ }
+#endif
+
if (type == KMALLOC_RECLAIM) {
flags |= SLAB_RECLAIM_ACCOUNT;
} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
if (cgroup_memory_nokmem) {
- kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
+ caches[type][idx] = caches[KMALLOC_NORMAL][idx];
return;
}
flags |= SLAB_ACCOUNT;
+ } else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
+ flags |= SLAB_CACHE_DMA;
}
- kmalloc_caches[type][idx] = create_kmalloc_cache(
- kmalloc_info[idx].name[type],
+ caches[type][idx] = create_kmalloc_cache(name,
kmalloc_info[idx].size, flags, 0,
kmalloc_info[idx].size);
@@ -866,7 +896,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
* KMALLOC_NORMAL caches.
*/
if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
- kmalloc_caches[type][idx]->refcount = -1;
+ caches[type][idx]->refcount = -1;
}
/*
@@ -908,15 +938,24 @@ void __init create_kmalloc_caches(slab_flags_t flags)
for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
- if (s) {
- kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
- kmalloc_info[i].name[KMALLOC_DMA],
- kmalloc_info[i].size,
- SLAB_CACHE_DMA | flags, 0,
- kmalloc_info[i].size);
- }
+ if (s)
+ new_kmalloc_cache(i, KMALLOC_DMA, flags);
}
#endif
+ /*
+ * TODO: We may want to make slab allocations without exiting ASI.
+ * In that case, the cache metadata itself would need to be
+ * treated as non-sensitive and mapped as such, and we would need to
+ * do the bootstrap much more carefully. We can do that if we find
+ * that slab allocations while inside a restricted address space are
+ * frequent enough to warrant the additional complexity.
+ */
+ if (static_asi_enabled())
+ for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++)
+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++)
+ if (kmalloc_caches[type][i])
+ new_kmalloc_cache(i, type,
+ flags | SLAB_NONSENSITIVE);
}
#endif /* !CONFIG_SLOB */
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -68,7 +68,7 @@ config PAGE_TABLE_ISOLATION
config ADDRESS_SPACE_ISOLATION
bool "Allow code to run with a reduced kernel address space"
default n
- depends on X86_64 && !UML
+ depends on X86_64 && !UML && SLAB
depends on !PARAVIRT
help
This feature provides the ability to run some kernel code
A new flag, SLAB_GLOBAL_NONSENSITIVE, is added; it designates all objects
in a slab cache as globally non-sensitive. A second flag, SLAB_NONSENSITIVE,
is currently just an alias for SLAB_GLOBAL_NONSENSITIVE, but will eventually
be used to designate slab caches that can allocate either globally or locally
non-sensitive objects. In addition, a parallel set of kmalloc caches is added
for allocating non-sensitive objects.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 include/linux/slab.h | 32 +++++++++++++++----
 mm/slab.c            |  5 +++
 mm/slab.h            | 14 ++++++++-
 mm/slab_common.c     | 73 +++++++++++++++++++++++++++++++++-----------
 security/Kconfig     |  2 +-
 5 files changed, 101 insertions(+), 25 deletions(-)
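
For illustration only (not part of this patch): a rough sketch of how a caller
might use the new flags once the full ASI series is applied. The struct and
cache names below are made up for the example, and __GFP_GLOBAL_NONSENSITIVE
is introduced in an earlier patch of the series.

/*
 * Illustrative sketch only; "struct foo" and "foo_cache" are hypothetical.
 */
#include <linux/slab.h>

struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* All objects in this cache are globally non-sensitive. */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_GLOBAL_NONSENSITIVE, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *f;
	void *buf;

	/* Object comes from the non-sensitive cache created above. */
	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (!f)
		return;

	/*
	 * Routed to the new non-sensitive kmalloc caches via
	 * get_kmalloc_cache() when ASI is enabled; falls back to the
	 * regular kmalloc caches otherwise.
	 */
	buf = kmalloc(64, GFP_KERNEL | __GFP_GLOBAL_NONSENSITIVE);

	kfree(buf);			/* kfree(NULL) is a no-op */
	kmem_cache_free(foo_cache, f);
}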