@@ -48,6 +48,7 @@ enum cgroup_bpf_attach_type {
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
+ CGROUP_INET_LPORT_INUSE,
MAX_CGROUP_BPF_ATTACH_TYPE
};
@@ -81,6 +82,7 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ CGROUP_ATYPE(CGROUP_INET_LPORT_INUSE);
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
}
@@ -263,6 +265,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
+#define BPF_CGROUP_RUN_PROG_INET_LPORT_INUSE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_LPORT_INUSE)
+
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
u32 __unused_flags; \
@@ -298,6 +298,17 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
int inet_sk_rebuild_header(struct sock *sk);
+static inline int inet_bind_conflict(struct sock *sk, int port)
+{
+ int res;
+ int old = sk->sk_num;
+
+ sk->sk_num = port;
+ res = BPF_CGROUP_RUN_PROG_INET_LPORT_INUSE(sk);
+ sk->sk_num = old;
+ return res;
+}
+
/**
* inet_sk_state_load - read sk->sk_state for lockless contexts
* @sk: socket pointer
@@ -995,6 +995,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
+ BPF_CGROUP_INET_LPORT_INUSE,
__MAX_BPF_ATTACH_TYPE
};
@@ -2088,6 +2088,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
case BPF_CGROUP_INET_SOCK_RELEASE:
case BPF_CGROUP_INET4_POST_BIND:
case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET_LPORT_INUSE:
return 0;
default:
return -EINVAL;
@@ -3140,6 +3141,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
case BPF_CGROUP_INET_SOCK_RELEASE:
case BPF_CGROUP_INET4_POST_BIND:
case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET_LPORT_INUSE:
return BPF_PROG_TYPE_CGROUP_SOCK;
case BPF_CGROUP_INET4_BIND:
case BPF_CGROUP_INET6_BIND:
@@ -3311,6 +3313,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_SYSCTL:
case BPF_CGROUP_GETSOCKOPT:
case BPF_CGROUP_SETSOCKOPT:
+ case BPF_CGROUP_INET_LPORT_INUSE:
return cgroup_bpf_prog_query(attr, uattr);
case BPF_LIRC_MODE2:
return lirc_prog_query(attr, uattr);
@@ -130,10 +130,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
}
EXPORT_SYMBOL(inet_get_local_port_range);
-static int inet_csk_bind_conflict(const struct sock *sk,
+static int inet_csk_bind_conflict(struct sock *sk,
const struct inet_bind_bucket *tb,
bool relax, bool reuseport_ok)
{
+ int res;
struct sock *sk2;
bool reuseport_cb_ok;
bool reuse = sk->sk_reuse;
@@ -179,7 +180,10 @@ static int inet_csk_bind_conflict(const struct sock *sk,
}
}
}
- return sk2 != NULL;
+ res = !!sk2;
+ if (!res)
+ res = inet_bind_conflict(sk, tb->port);
+ return res;
}
/*
@@ -401,6 +405,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
goto success;
if (inet_csk_bind_conflict(sk, tb, true, true))
goto fail_unlock;
+ } else if (inet_bind_conflict(sk, port)) {
+ goto fail_unlock;
}
success:
inet_csk_update_fastreuse(tb, sk);
@@ -476,6 +476,9 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
}
}
+ if (inet_bind_conflict(sk, lport))
+ goto not_unique;
+
/* Must record num and sport now. Otherwise we will see
* in hash table socket with a funny identity.
*/
@@ -744,6 +747,10 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
+		if (inet_bind_conflict(sk, port)) {
+			spin_unlock_bh(&head->lock);
+			return -EPERM;
+		}
inet_ehash_nolisten(sk, NULL, NULL);
spin_unlock_bh(&head->lock);
return 0;
@@ -799,6 +804,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
}
}
+ if (inet_bind_conflict(sk, port))
+ goto next_port;
+
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
net, head, port, l3mdev);
if (!tb) {
@@ -134,6 +134,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
struct sock *sk, unsigned int log)
{
struct sock *sk2;
+ int res = 0;
kuid_t uid = sock_i_uid(sk);
sk_for_each(sk2, &hslot->head) {
@@ -148,16 +149,21 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
if (!bitmap)
- return 0;
+ break;
} else {
- if (!bitmap)
- return 1;
+ if (!bitmap) {
+ res = 1;
+ break;
+ }
__set_bit(udp_sk(sk2)->udp_port_hash >> log,
bitmap);
}
}
}
- return 0;
+
+	if (!res && !bitmap)
+		res = inet_bind_conflict(sk, num);
+ return res;
}
/*
@@ -192,6 +198,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
 		}
 	}
 	spin_unlock(&hslot2->lock);
+	if (!res)
+		res = inet_bind_conflict(sk, num);
return res;
}