Message ID | 20210315180229.1224655-3-keescook@chromium.org
---|---
State | New, archived
Series | Optionally randomize kernel stack offset each syscall
On Mon, Mar 15, 2021 at 11:02:25AM -0700, Kees Cook wrote:
> diff --git a/mm/slab.h b/mm/slab.h
> index 076582f58f68..b0977d525c06 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -601,7 +601,8 @@ static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
>
>  static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
>  {
> -	if (static_branch_unlikely(&init_on_alloc)) {
> +	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,

Gah, this should be CONFIG_INIT_ON_ALLOC_DEFAULT_ON. I'll see if there are
any more comments before sending a v7...

-Kees

> +				&init_on_alloc)) {
>  		if (c->ctor)
>  			return false;
>  		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
> @@ -613,7 +614,8 @@ static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
>
>  static inline bool slab_want_init_on_free(struct kmem_cache *c)
>  {
> -	if (static_branch_unlikely(&init_on_free))
> +	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
> +				&init_on_free))
>  		return !(c->ctor ||
>  			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
>  	return false;
> --
> 2.25.1
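[For reference, with the typo fixed the alloc-side check would presumably
read as below. This is a sketch of the expected v7 hunk based on Kees's
note above and the context lines visible in the diff, not the posted
patch itself:]

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	/* Key the alloc-side branch off the ALLOC config, not FREE. */
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}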
On 3/15/21 7:02 PM, Kees Cook wrote:
> The state of CONFIG_INIT_ON_ALLOC_DEFAULT_ON (and ...ON_FREE...) did not
> change the assembly ordering of the static branches: they were always out
> of line. Use the new jump_label macros to check the CONFIG settings to
> default to the "expected" state, which slightly optimizes the resulting
> assembly code.
>
> Reviewed-by: Alexander Potapenko <glider@google.com>
> Link: https://lore.kernel.org/lkml/CAG_fn=X0DVwqLaHJTO6Jw7TGcMSm77GKHinrd0m_6y0SzWOrFA@mail.gmail.com/
> Signed-off-by: Kees Cook <keescook@chromium.org>

For the fixed version

Acked-by: Vlastimil Babka <vbabka@suse.cz>
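[For context, the new jump_label helpers the commit message refers to look
roughly like this. This is a paraphrased sketch of include/linux/jump_label.h,
not the verbatim source: the CONFIG symbol selects the key's compile-time
default, so the configured-default path compiles as the inline "likely"
branch and the other path lands out of line.]

/* Paraphrased sketch of the helpers from include/linux/jump_label.h. */
#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

/*
 * Pick the branch layout from the CONFIG default: the expected
 * (default) path stays inline, the other goes out of line.
 */
#define static_branch_maybe(config, x)				\
	(IS_ENABLED(config) ? static_branch_likely(x)		\
			    : static_branch_unlikely(x))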
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 77e64e3eac80..2ccd856ac0d1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2871,18 +2871,20 @@ static inline void kernel_poison_pages(struct page *page, int numpages) { }
 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
 #endif
 
-DECLARE_STATIC_KEY_FALSE(init_on_alloc);
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
 static inline bool want_init_on_alloc(gfp_t flags)
 {
-	if (static_branch_unlikely(&init_on_alloc))
+	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+				&init_on_alloc))
 		return true;
 	return flags & __GFP_ZERO;
 }
 
-DECLARE_STATIC_KEY_FALSE(init_on_free);
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 static inline bool want_init_on_free(void)
 {
-	return static_branch_unlikely(&init_on_free);
+	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+				   &init_on_free);
 }
 
 extern bool _debug_pagealloc_enabled_early;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3e4b29ee2b1e..267c04b8911d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -167,10 +167,10 @@ unsigned long totalcma_pages __read_mostly;
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
-DEFINE_STATIC_KEY_FALSE(init_on_alloc);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
 EXPORT_SYMBOL(init_on_alloc);
 
-DEFINE_STATIC_KEY_FALSE(init_on_free);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
 static bool _init_on_alloc_enabled_early __read_mostly
diff --git a/mm/slab.h b/mm/slab.h
index 076582f58f68..b0977d525c06 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -601,7 +601,8 @@ static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
 
 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
 {
-	if (static_branch_unlikely(&init_on_alloc)) {
+	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+				&init_on_alloc)) {
 		if (c->ctor)
 			return false;
 		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
@@ -613,7 +614,8 @@ static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
 
 static inline bool slab_want_init_on_free(struct kmem_cache *c)
 {
-	if (static_branch_unlikely(&init_on_free))
+	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+				&init_on_free))
 		return !(c->ctor ||
 			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
 	return false;
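[Note that the CONFIG-driven default only changes the compile-time branch
layout; the init_on_alloc=/init_on_free= boot parameters still flip the
keys at runtime. A simplified sketch of the boot-time setup, paraphrased
from mm/page_alloc.c rather than quoted verbatim:]

/*
 * Simplified sketch of the boot-time setup in mm/page_alloc.c:
 * whichever default the CONFIG baked in, the early boot parameter
 * can still enable or disable the static key at runtime.
 */
if (_init_on_alloc_enabled_early)
	static_branch_enable(&init_on_alloc);
else
	static_branch_disable(&init_on_alloc);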