Message ID | 20230415173650.5458-1-david.keisarschm@mail.huji.ac.il (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Replace invocations of prandom_u32() with get_random_u32() | expand |
On Sat, Apr 15, 2023 at 08:36:49PM +0300, david.keisarschm@mail.huji.ac.il wrote: > From: David Keisar Schmidt <david.keisarschm@mail.huji.ac.il> > > The Slab allocator randomization inside slab_common.c > uses the prandom_u32 PRNG. That was added to prevent attackers from obtaining > information on the heap state. > > However, this PRNG turned out to be weak, as noted in commit c51f8f88d705 > To fix it, we have changed the invocation of prandom_u32_state to get_random_u32 > to ensure the PRNG is strong. > > Since a modulo operation is applied right after that, > in the Fisher-Yates shuffle, we used get_random_u32_below, to achieve uniformity. > > Signed-off-by: David Keisar Schmidt <david.keisarschm@mail.huji.ac.il> > --- same comment for the subject line. > > This fifth series changes only the arch/x86/mm/kaslr patch. > > Changes since v3: > * edited commit message. > > Changes since v2: > > * replaced instances of get_random_u32 with get_random_u32_below > in mm/slab_common.c. > > mm/slab_common.c | 11 +++-------- > 1 file changed, 3 insertions(+), 8 deletions(-) > > diff --git a/mm/slab_common.c b/mm/slab_common.c > index bf4e777cf..361da2191 100644 > --- a/mm/slab_common.c > +++ b/mm/slab_common.c > @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmalloc_large_node); > > #ifdef CONFIG_SLAB_FREELIST_RANDOM > /* Randomize a generic freelist */ > -static void freelist_randomize(struct rnd_state *state, unsigned int *list, > +static void freelist_randomize(unsigned int *list, > unsigned int count) > { > unsigned int rand; > @@ -1157,8 +1157,7 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, > > /* Fisher-Yates shuffle */ > for (i = count - 1; i > 0; i--) { > - rand = prandom_u32_state(state); > - rand %= (i + 1); > + rand = get_random_u32_below(i+1); same here. otherwise looks good to me. 
> swap(list[i], list[rand]); > } > } > @@ -1167,7 +1166,6 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, > int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, > gfp_t gfp) > { > - struct rnd_state state; > > if (count < 2 || cachep->random_seq) > return 0; > @@ -1176,10 +1174,7 @@ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, > if (!cachep->random_seq) > return -ENOMEM; > > - /* Get best entropy at this stage of boot */ > - prandom_seed_state(&state, get_random_long()); > - > - freelist_randomize(&state, cachep->random_seq, count); > + freelist_randomize(cachep->random_seq, count); > return 0; > } > > -- > 2.37.3 >
diff --git a/mm/slab_common.c b/mm/slab_common.c index bf4e777cf..361da2191 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmalloc_large_node); #ifdef CONFIG_SLAB_FREELIST_RANDOM /* Randomize a generic freelist */ -static void freelist_randomize(struct rnd_state *state, unsigned int *list, +static void freelist_randomize(unsigned int *list, unsigned int count) { unsigned int rand; @@ -1157,8 +1157,7 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, /* Fisher-Yates shuffle */ for (i = count - 1; i > 0; i--) { - rand = prandom_u32_state(state); - rand %= (i + 1); + rand = get_random_u32_below(i+1); swap(list[i], list[rand]); } } @@ -1167,7 +1166,6 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, gfp_t gfp) { - struct rnd_state state; if (count < 2 || cachep->random_seq) return 0; @@ -1176,10 +1174,7 @@ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, if (!cachep->random_seq) return -ENOMEM; - /* Get best entropy at this stage of boot */ - prandom_seed_state(&state, get_random_long()); - - freelist_randomize(&state, cachep->random_seq, count); + freelist_randomize(cachep->random_seq, count); return 0; }