@@ -123,6 +123,7 @@ struct mptcp_pm_ops {
struct mptcp_pm_addr_entry *skc);
bool (*get_priority)(struct mptcp_sock *msk,
struct mptcp_addr_info *skc);
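+ /* check whether a new incoming subflow can be accepted */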
+ bool (*accept_new_subflow)(struct mptcp_sock *msk, bool allow);
char name[MPTCP_PM_NAME_MAX];
struct module *owner;
@@ -460,38 +460,25 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}
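+/* Ask the in-use path manager whether a new subflow can be accepted:
+ * 'allow == true' is only a query, 'allow == false' also lets the PM
+ * update its internal state.
+ */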
+bool mptcp_pm_accept_new_subflow(struct mptcp_sock *msk, bool allow)
+{
+ return msk->pm.ops->accept_new_subflow(msk, allow);
+}
+
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- unsigned int subflows_max;
int ret = 0;
- if (mptcp_pm_is_userspace(msk)) {
- if (mptcp_userspace_pm_active(msk)) {
- spin_lock_bh(&pm->lock);
- pm->subflows++;
- spin_unlock_bh(&pm->lock);
- return true;
- }
- return false;
- }
-
- subflows_max = mptcp_pm_get_subflows_max(msk);
-
- pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
- subflows_max, READ_ONCE(pm->accept_subflow));
-
- /* try to avoid acquiring the lock below */
- if (!READ_ONCE(pm->accept_subflow))
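+ /* lockless check first, to avoid acquiring the PM lock below */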
+ if (!mptcp_pm_accept_new_subflow(msk, true))
return false;
- spin_lock_bh(&pm->lock);
- if (READ_ONCE(pm->accept_subflow)) {
- ret = pm->subflows < subflows_max;
- if (ret && ++pm->subflows == subflows_max)
- WRITE_ONCE(pm->accept_subflow, false);
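+ /* let the PM update its state, then account for the new subflow */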
+ ret = mptcp_pm_accept_new_subflow(msk, false);
+ if (ret) {
+ spin_lock_bh(&pm->lock);
+ pm->subflows++;
+ spin_unlock_bh(&pm->lock);
}
- spin_unlock_bh(&pm->lock);
return ret;
}
@@ -1060,7 +1047,8 @@ struct mptcp_pm_ops *mptcp_pm_find(const char *name)
int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops)
{
- if (!pm_ops->get_local_id || !pm_ops->get_priority) {
+ if (!pm_ops->get_local_id || !pm_ops->get_priority ||
+ !pm_ops->accept_new_subflow) {
pr_err("%s does not implement required ops\n", pm_ops->name);
return -EINVAL;
}
@@ -1399,6 +1399,21 @@ static struct pernet_operations mptcp_pm_pernet_ops = {
.size = sizeof(struct pm_nl_pernet),
};
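+/* In-kernel PM: 'allow == true' only checks pm->accept_subflow, while
+ * 'allow == false' also verifies the subflow limits via
+ * mptcp_pm_accept_subflow().
+ */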
+static bool mptcp_pm_kernel_accept_new_subflow(struct mptcp_sock *msk,
+ bool allow)
+{
+ bool ret = false;
+
+ if (READ_ONCE(msk->pm.accept_subflow)) {
+ if (allow)
+ return true;
+
+ ret = mptcp_pm_accept_subflow(msk);
+ }
+
+ return ret;
+}
+
static void mptcp_pm_kernel_init(struct mptcp_sock *msk)
{
bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);
@@ -1422,6 +1437,7 @@ static void mptcp_pm_kernel_init(struct mptcp_sock *msk)
struct mptcp_pm_ops mptcp_pm_kernel = {
.get_local_id = mptcp_pm_kernel_get_local_id,
.get_priority = mptcp_pm_kernel_get_priority,
+ .accept_new_subflow = mptcp_pm_kernel_accept_new_subflow,
.init = mptcp_pm_kernel_init,
.name = "kernel",
.owner = THIS_MODULE,
@@ -683,6 +683,12 @@ int mptcp_userspace_pm_get_addr(u8 id, struct mptcp_pm_addr_entry *addr,
return ret;
}
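+/* Userspace PM: no subflow limit is enforced here, only check that
+ * the userspace path manager is still active for this connection.
+ */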
+static bool mptcp_pm_userspace_accept_new_subflow(struct mptcp_sock *msk,
+ bool allow)
+{
+ return mptcp_userspace_pm_active(msk);
+}
+
static void mptcp_pm_userspace_release(struct mptcp_sock *msk)
{
mptcp_userspace_pm_free_local_addr_list(msk);
@@ -691,6 +697,7 @@ static void mptcp_pm_userspace_release(struct mptcp_sock *msk)
static struct mptcp_pm_ops mptcp_pm_userspace = {
.get_local_id = mptcp_pm_userspace_get_local_id,
.get_priority = mptcp_pm_userspace_get_priority,
+ .accept_new_subflow = mptcp_pm_userspace_accept_new_subflow,
.release = mptcp_pm_userspace_release,
.name = "userspace",
.owner = THIS_MODULE,
@@ -1008,6 +1008,7 @@ bool mptcp_pm_addr_families_match(const struct sock *sk,
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk);
+bool mptcp_pm_accept_new_subflow(struct mptcp_sock *msk, bool allow);
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
void mptcp_pm_connection_closed(struct mptcp_sock *msk);
void mptcp_pm_subflow_established(struct mptcp_sock *msk);
@@ -1184,6 +1185,30 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
spin_unlock_bh(&msk->pm.lock);
}
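+/* Called with the PM lock held: the caller is responsible for
+ * incrementing pm->subflows when this returns true.
+ */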
+static inline bool __mptcp_pm_accept_subflow(struct mptcp_sock *msk)
+{
+ unsigned int subflows_max = mptcp_pm_get_subflows_max(msk);
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool ret;
+
+ ret = pm->subflows < subflows_max;
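+ /* this subflow fills the last available slot: stop accepting more */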
+ if (ret && pm->subflows + 1 == subflows_max)
+ WRITE_ONCE(pm->accept_subflow, false);
+
+ return ret;
+}
+
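+/* Variant of __mptcp_pm_accept_subflow() acquiring the PM lock itself. */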
+static inline bool mptcp_pm_accept_subflow(struct mptcp_sock *msk)
+{
+ bool ret;
+
+ spin_lock_bh(&msk->pm.lock);
+ ret = __mptcp_pm_accept_subflow(msk);
+ spin_unlock_bh(&msk->pm.lock);
+
+ return ret;
+}
+
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
@@ -58,12 +58,10 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}
-static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
+static bool mptcp_can_accept_new_subflow(struct mptcp_sock *msk)
{
return mptcp_is_fully_established((void *)msk) &&
- ((mptcp_pm_is_userspace(msk) &&
- mptcp_userspace_pm_active(msk)) ||
- READ_ONCE(msk->pm.accept_subflow));
+ mptcp_pm_accept_new_subflow(msk, true);
}
/* validate received token and create truncated hmac and nonce for SYN-ACK */