@@ -230,7 +230,7 @@ unsigned long arch_align_stack(unsigned long sp)
static inline unsigned long brk_rnd(void)
{
- return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+ return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
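
The switch to get_random_u16() pulls two bytes from the batched entropy pool instead of four, and it is only a like-for-like change if BRK_RND_MASK fits in 16 bits, which appears to be the assumption on this architecture. A rough userspace sketch of that width argument (the mask value below is a made-up stand-in, not the architecture's real BRK_RND_MASK):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for an architecture's BRK_RND_MASK. */
#define EXAMPLE_BRK_RND_MASK 0x7ffUL

int main(void)
{
	/* Any mask bit above bit 15 could never be set by a 16-bit draw,
	 * so the conversion is only equivalent when the mask fits in 16 bits. */
	if (EXAMPLE_BRK_RND_MASK > UINT16_MAX)
		printf("mask needs more bits than get_random_u16() provides\n");
	else
		printf("16 random bits cover the whole mask\n");
	return 0;
}
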
@@ -80,7 +80,7 @@ static int random_size_align_alloc_test(void)
int i;

for (i = 0; i < test_loop_count; i++) {
- rnd = prandom_u32();
+ rnd = get_random_u8();

/*
* Maximum 1024 pages, if PAGE_SIZE is 4096.
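
The 32-bit draw was overkill here: the value is only reduced modulo small constants in the code that follows (an alignment exponent, per the comment above), so eight random bits are plenty for a test harness, at the cost of a negligible modulo bias. A userspace sketch of that arithmetic, assuming the 1 << (rnd % 23) alignment computation this comment describes and using rand() as a stand-in for get_random_u8():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* rand() stands in for the kernel's get_random_u8(). */
	uint8_t rnd = (uint8_t)rand();

	/* Exponent reduced mod 23, so the largest alignment is
	 * 1 << 22 bytes, i.e. 1024 pages when PAGE_SIZE is 4096. */
	unsigned long align = 1UL << (rnd % 23);

	printf("align = %lu\n", align);
	return 0;
}
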
@@ -172,7 +172,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
* Avoid using the hashed IP ident generator.
*/
if (sk->sk_protocol == IPPROTO_TCP)
- iph->id = (__force __be16)prandom_u32();
+ iph->id = (__force __be16)get_random_u16();
else
__ip_select_ident(net, iph, 1);
}
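
iph->id is a 16-bit on-wire field (hence the __be16 cast), so the old code drew 32 bits of randomness and discarded half of them; get_random_u16() matches the field width exactly. A small userspace illustration of that truncation, with rand() standing in for both kernel helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint32_t wide = (uint32_t)rand() | 0xdead0000u;	/* force some high bits */
	uint16_t id_from_wide = (uint16_t)wide;		/* old: high 16 bits discarded */
	uint16_t id_from_narrow = (uint16_t)rand();	/* new: exactly as many bits as the field */

	/* Both identifiers are 16 bits; the wider draw just consumed more entropy. */
	printf("%u %u\n", (unsigned)id_from_wide, (unsigned)id_from_narrow);
	return 0;
}
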
@@ -468,7 +468,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
else
- off = prandom_u32();
+ off = get_random_u16();

attempts = range_size;
if (attempts > max_attempts)
@@ -490,7 +490,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
if (attempts >= range_size || attempts < 16)
return;
attempts /= 2;
- off = prandom_u32();
+ off = get_random_u16();
goto another_round;
}
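
In both spots, off only seeds the search for a free 16-bit port or ICMP id: keyptr is read with ntohs(), and the candidates are reduced modulo range_size, which cannot exceed the 16-bit port space, so a 16-bit starting offset carries as much useful randomness as the old 32-bit one. A rough userspace sketch of that selection, assuming a min + (off % range_size) candidate pattern like the one this function uses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint16_t min = 1024, max = 65535;		/* example NAT port range */
	uint32_t range_size = (uint32_t)max - min + 1;
	uint16_t off = (uint16_t)rand();		/* stand-in for get_random_u16() */
	unsigned int i;

	/* Candidate ports walk the range starting at a random offset;
	 * everything is folded back into 16 bits, so wider randomness
	 * would not produce any extra candidates. */
	for (i = 0; i < 4; i++, off++) {
		unsigned int port = (uint16_t)(min + off % range_size);
		printf("candidate port: %u\n", port);
	}
	return 0;
}
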
@@ -104,7 +104,7 @@ static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr,
return -EINVAL;
last = rover;
} else {
- rover = max_t(u16, prandom_u32(), 2);
+ rover = max_t(u16, get_random_u16(), 2);
last = rover - 1;
}
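
max_t(u16, ...) already truncated the 32-bit value to its low 16 bits before clamping, so a direct 16-bit draw gives the same distribution over [2, 65535]; the clamp only keeps the starting rover at 2 or above. A userspace equivalent of the expression, with rand() standing in for get_random_u16():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace approximation of the kernel's max_t(u16, a, b). */
static uint16_t max_u16(uint16_t a, uint16_t b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Old: max_t(u16, prandom_u32(), 2) truncated a 32-bit draw to 16 bits.
	 * New: a 16-bit draw gives the same uniform value in [2, 65535]. */
	uint16_t rover = max_u16((uint16_t)rand(), 2);

	printf("starting rover: %u\n", (unsigned)rover);
	return 0;
}
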
@@ -379,7 +379,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto enqueue;
}

- r = prandom_u32() & SFB_MAX_PROB;
+ r = get_random_u16() & SFB_MAX_PROB;

if (unlikely(r < p_min)) {
if (unlikely(p_min > SFB_MAX_PROB / 2)) {
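
SFB_MAX_PROB is the 16-bit fixed-point probability scale used by this scheduler (0xFFFF in sch_sfb.c), so masking a 32-bit value with it threw away the upper half; a 16-bit draw already spans the whole range that r is compared against. A compact userspace sketch of the comparison, with an example p_min that is not taken from the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SFB_MAX_PROB 0xFFFF	/* 16-bit fixed-point probability scale, as in sch_sfb.c */

int main(void)
{
	/* A 16-bit draw already covers [0, SFB_MAX_PROB], so the mask only
	 * mattered for the old 32-bit value. */
	uint16_t r = (uint16_t)rand() & SFB_MAX_PROB;
	uint32_t p_min = SFB_MAX_PROB / 4;	/* example marking probability, not from the patch */

	printf("%s\n", r < p_min ? "mark/drop path" : "enqueue");
	return 0;
}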