--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -96,7 +96,7 @@ int raw_hash_sk(struct sock *sk)
hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
write_lock_bh(&h->lock);
- hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist);
+ __sk_nulls_add_node_rcu(sk, hlist);
sock_set_flag(sk, SOCK_RCU_FREE);
write_unlock_bh(&h->lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -172,7 +172,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
hlist = &raw_v4_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
continue;
@@ -275,7 +275,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
hlist = &raw_v4_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
iph = (const struct iphdr *)skb->data;
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
@@ -954,7 +954,7 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
hlist = &h->ht[state->bucket];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (sock_net(sk) == seq_file_net(seq))
return sk;
}
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -68,7 +68,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
rcu_read_lock();
for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
hlist = &hashinfo->ht[slot];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
if (raw_lookup(net, sk, r)) {
/*
* Grab it and keep until we fill
@@ -161,7 +161,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
num = 0;
hlist = &hashinfo->ht[slot];
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -155,7 +155,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
int filtered;
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
@@ -342,7 +342,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
hlist = &raw_v6_hashinfo.ht[hash];
rcu_read_lock();
- hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) {
+ sk_nulls_for_each(sk, hnode, hlist) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
hlist_nulls_add_head_rcu() and hlist_nulls_for_each_entry() have dedicated
wrappers that take a struct sock directly: __sk_nulls_add_node_rcu() and
sk_nulls_for_each(). Use them instead of spelling out sk_nulls_node at every
call site.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 net/ipv4/raw.c      | 8 ++++----
 net/ipv4/raw_diag.c | 4 ++--
 net/ipv6/raw.c      | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)
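
For readers unfamiliar with these helpers: both are thin wrappers around the
generic hlist_nulls API, hard-wired to the sk_nulls_node member, so the
converted call sites behave exactly as before. A rough sketch of what they
boil down to (paraphrased from include/net/sock.h; the in-tree definitions
may differ in minor details):

    /* Add sk at the head of a nulls-terminated chain; safe against
     * concurrent RCU readers walking the same chain.
     */
    static inline void __sk_nulls_add_node_rcu(struct sock *sk,
                                               struct hlist_nulls_head *list)
    {
            hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
    }

    /* Iterate every sock on a nulls-terminated chain via sk_nulls_node. */
    #define sk_nulls_for_each(__sk, node, list) \
            hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)

In other words, this is a pure cleanup: each site keeps the same locking
(write_lock_bh() for insertion, rcu_read_lock() for lookup) and the same
nulls-list semantics, it just no longer open-codes the sk_nulls_node member.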