
[net-next,v3,14/24] ovpn: implement multi-peer support

Message ID 20240506011637.27272-15-antonio@openvpn.net (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series Introducing OpenVPN Data Channel Offload

Checks

Context Check Description
netdev/series_format fail Series longer than 15 patches
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; GEN HAS DIFF 2 files changed, 2613 insertions(+);
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 926 this patch: 926
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: openvpn-devel@lists.sourceforge.net
netdev/build_clang success Errors and warnings before: 937 this patch: 937
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 937 this patch: 937
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 256 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc fail Errors and warnings before: 6 this patch: 7
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-05-07--03-00 (tests: 1000)

Commit Message

Antonio Quartulli May 6, 2024, 1:16 a.m. UTC
With this change, an ovpn instance is able to stay connected to
multiple remote endpoints.

This functionality is strictly required when running ovpn on an
OpenVPN server.

Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
---
 drivers/net/ovpn/io.c         |   1 +
 drivers/net/ovpn/main.c       |   8 +-
 drivers/net/ovpn/ovpnstruct.h |  10 +++
 drivers/net/ovpn/peer.c       | 149 ++++++++++++++++++++++++++++++++++
 drivers/net/ovpn/peer.h       |  14 ++++
 5 files changed, 181 insertions(+), 1 deletion(-)

Comments

Sabrina Dubroca May 28, 2024, 2:44 p.m. UTC | #1
Hi Antonio, I took a little break but I'm looking at your patches
again now.

2024-05-06, 03:16:27 +0200, Antonio Quartulli wrote:
> diff --git a/drivers/net/ovpn/ovpnstruct.h b/drivers/net/ovpn/ovpnstruct.h
> index 7414c2459fb9..58166fdeac63 100644
> --- a/drivers/net/ovpn/ovpnstruct.h
> +++ b/drivers/net/ovpn/ovpnstruct.h
> @@ -31,6 +35,12 @@ struct ovpn_struct {
>  	spinlock_t lock; /* protect writing to the ovpn_struct object */
>  	struct workqueue_struct *crypto_wq;
>  	struct workqueue_struct *events_wq;
> +	struct {
> +		DECLARE_HASHTABLE(by_id, 12);
> +		DECLARE_HASHTABLE(by_transp_addr, 12);
> +		DECLARE_HASHTABLE(by_vpn_addr, 12);

Those are really big. I guess for large servers they make sense, but
you're making clients hold 98kB in memory that they're not going to use.

Maybe they could be dynamically sized, but I think struct peers should
be allocated on demand (only for mode == MP) if you want this size.

> +		spinlock_t lock; /* protects writes to peers tables */
> +	} peers;
>  	struct ovpn_peer __rcu *peer;
>  	struct list_head dev_list;
>  };
> diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
> index 99a2ae42a332..38a89595dade 100644
> --- a/drivers/net/ovpn/peer.c
> +++ b/drivers/net/ovpn/peer.c
> @@ -361,6 +362,91 @@ struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
>  	return peer;
>  }
>  
> +/**
> + * ovpn_peer_add_mp - add per to related tables in a MP instance
                             ^
                             s/per/peer/

> + * @ovpn: the instance to add the peer to
> + * @peer: the peer to add
> + *
> + * Return: 0 on success or a negative error code otherwise
> + */
> +static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
> +{
[...]
> +	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
> +	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
> +
> +	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
> +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
> +					&peer->vpn_addrs.ipv4,
> +					sizeof(peer->vpn_addrs.ipv4));
> +		hlist_add_head_rcu(&peer->hash_entry_addr4,
> +				   &ovpn->peers.by_vpn_addr[index]);
> +	}
> +
> +	hlist_del_init_rcu(&peer->hash_entry_addr6);

Why are hash_entry_transp_addr and hash_entry_addr6 getting a
hlist_del_init_rcu() call, but not hash_entry_id and hash_entry_addr4?

> +	if (memcmp(&peer->vpn_addrs.ipv6, &in6addr_any,
> +		   sizeof(peer->vpn_addrs.ipv6))) {

!ipv6_addr_any(&peer->vpn_addrs.ipv6)

> +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
> +					&peer->vpn_addrs.ipv6,
> +					sizeof(peer->vpn_addrs.ipv6));
> +		hlist_add_head_rcu(&peer->hash_entry_addr6,
> +				   &ovpn->peers.by_vpn_addr[index]);
> +	}
> +
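
A minimal sketch of the on-demand allocation suggested above; the container type and helper names (ovpn_peer_tables, ovpn_mp_tables_alloc) are hypothetical and not code from this series:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical sketch: keep the peer tables out of struct ovpn_struct and
 * allocate them only when the instance is created in MP mode, so P2P
 * clients do not carry ~98kB of unused buckets.
 */
struct ovpn_peer_tables {
	DECLARE_HASHTABLE(by_id, 12);
	DECLARE_HASHTABLE(by_transp_addr, 12);
	DECLARE_HASHTABLE(by_vpn_addr, 12);
	spinlock_t lock; /* protects writes to the tables */
};

static struct ovpn_peer_tables *ovpn_mp_tables_alloc(void)
{
	struct ovpn_peer_tables *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;

	hash_init(t->by_id);
	hash_init(t->by_transp_addr);
	hash_init(t->by_vpn_addr);
	spin_lock_init(&t->lock);
	return t;
}
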
Antonio Quartulli May 28, 2024, 7:41 p.m. UTC | #2
On 28/05/2024 16:44, Sabrina Dubroca wrote:
> Hi Antonio, I took a little break but I'm looking at your patches
> again now.

Thanks Sabrina! Meanwhile I have been working on all your suggested changes.
Right now I am familiarizing myself with the strparser.

> 
> 2024-05-06, 03:16:27 +0200, Antonio Quartulli wrote:
>> diff --git a/drivers/net/ovpn/ovpnstruct.h b/drivers/net/ovpn/ovpnstruct.h
>> index 7414c2459fb9..58166fdeac63 100644
>> --- a/drivers/net/ovpn/ovpnstruct.h
>> +++ b/drivers/net/ovpn/ovpnstruct.h
>> @@ -31,6 +35,12 @@ struct ovpn_struct {
>>   	spinlock_t lock; /* protect writing to the ovpn_struct object */
>>   	struct workqueue_struct *crypto_wq;
>>   	struct workqueue_struct *events_wq;
>> +	struct {
>> +		DECLARE_HASHTABLE(by_id, 12);
>> +		DECLARE_HASHTABLE(by_transp_addr, 12);
>> +		DECLARE_HASHTABLE(by_vpn_addr, 12);
> 
> Those are really big. I guess for large servers they make sense, but
> you're making clients hold 98kB in memory that they're not going to use.

Right - for clients it doesn't make sense.

> 
> Maybe they could be dynamically sized, but I think struct peers should
> be allocated on demand (only for mode == MP) if you want this size.

Yeah, makes sense. I'll allocate it dynamically then.

> 
>> +		spinlock_t lock; /* protects writes to peers tables */
>> +	} peers;
>>   	struct ovpn_peer __rcu *peer;
>>   	struct list_head dev_list;
>>   };
>> diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
>> index 99a2ae42a332..38a89595dade 100644
>> --- a/drivers/net/ovpn/peer.c
>> +++ b/drivers/net/ovpn/peer.c
>> @@ -361,6 +362,91 @@ struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
>>   	return peer;
>>   }
>>   
>> +/**
>> + * ovpn_peer_add_mp - add per to related tables in a MP instance
>                               ^
>                               s/per/peer/

ACK

> 
>> + * @ovpn: the instance to add the peer to
>> + * @peer: the peer to add
>> + *
>> + * Return: 0 on success or a negative error code otherwise
>> + */
>> +static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
>> +{
> [...]
>> +	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
>> +	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
>> +
>> +	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
>> +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
>> +					&peer->vpn_addrs.ipv4,
>> +					sizeof(peer->vpn_addrs.ipv4));
>> +		hlist_add_head_rcu(&peer->hash_entry_addr4,
>> +				   &ovpn->peers.by_vpn_addr[index]);
>> +	}
>> +
>> +	hlist_del_init_rcu(&peer->hash_entry_addr6);
> 
> Why are hash_entry_transp_addr and hash_entry_addr6 getting a
> hlist_del_init_rcu() call, but not hash_entry_id and hash_entry_addr4?

I think not calling del_init_rcu on hash_entry_addr4 was a mistake.

Calling del_init_rcu on addr4, addr6 and transp_addr is needed to put 
them in a known state in case they are not hashed.

While hash_entry_id always goes through hlist_add_head_rcu, therefore 
del_init_rcu is useless (to my understanding).

> 
>> +	if (memcmp(&peer->vpn_addrs.ipv6, &in6addr_any,
>> +		   sizeof(peer->vpn_addrs.ipv6))) {
> 
> !ipv6_addr_any(&peer->vpn_addrs.ipv6)

ACK

> 
>> +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
>> +					&peer->vpn_addrs.ipv6,
>> +					sizeof(peer->vpn_addrs.ipv6));
>> +		hlist_add_head_rcu(&peer->hash_entry_addr6,
>> +				   &ovpn->peers.by_vpn_addr[index]);
>> +	}
>> +
>
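
The helper suggested above is the standard one from include/net/ipv6.h; slightly simplified (the real version also has a 64-bit fast path), it reads:

/* include/net/ipv6.h (simplified): true when the address is "::",
 * i.e. no IPv6 VPN address was assigned to the peer.
 */
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
}

so the open-coded memcmp() against in6addr_any becomes !ipv6_addr_any(&peer->vpn_addrs.ipv6).
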
Sabrina Dubroca May 29, 2024, 3:16 p.m. UTC | #3
2024-05-28, 21:41:15 +0200, Antonio Quartulli wrote:
> On 28/05/2024 16:44, Sabrina Dubroca wrote:
> > Hi Antonio, I took a little break but I'm looking at your patches
> > again now.
> 
> Thanks Sabrina! Meanwhile I have been working on all your suggested changes.
> Right now I am familiarizing myself with the strparser.

Cool :)

> > 2024-05-06, 03:16:27 +0200, Antonio Quartulli wrote:
> > > +	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
> > > +	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
> > > +
> > > +	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
> > > +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
> > > +					&peer->vpn_addrs.ipv4,
> > > +					sizeof(peer->vpn_addrs.ipv4));
> > > +		hlist_add_head_rcu(&peer->hash_entry_addr4,
> > > +				   &ovpn->peers.by_vpn_addr[index]);
> > > +	}
> > > +
> > > +	hlist_del_init_rcu(&peer->hash_entry_addr6);
> > 
> > Why are hash_entry_transp_addr and hash_entry_addr6 getting a
> > hlist_del_init_rcu() call, but not hash_entry_id and hash_entry_addr4?
> 
> I think not calling del_init_rcu on hash_entry_addr4 was a mistake.
> 
> Calling del_init_rcu on addr4, addr6 and transp_addr is needed to put them
> in a known state in case they are not hashed.

hlist_del_init_rcu does nothing if node is not already on a list.

> While hash_entry_id always goes through hlist_add_head_rcu, therefore
> del_init_rcu is useless (to my understanding).

I'm probably missing something about how this all fits together. In
patch 19, I see ovpn_nl_set_peer_doit can re-add a peer that is
already added (but I'm not sure why, since you don't allow changing
the addresses, so it won't actually be re-hashed).

I don't think doing a 2nd add of the same element to peers.by_id (or
any of the other hashtables) is correct, so I'd say you need
hlist_del_init_rcu for all of them.
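
The no-op behaviour mentioned above follows directly from the helper itself, simplified here from include/linux/rculist.h:

/* include/linux/rculist.h (simplified): the node is unlinked only if it
 * is currently hashed. For a freshly initialized hlist_node
 * (pprev == NULL) this is a no-op, so it cannot put anything "in a known
 * state" - it only matters when re-adding an already hashed node.
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		WRITE_ONCE(n->pprev, NULL);
	}
}
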
Antonio Quartulli May 29, 2024, 8:15 p.m. UTC | #4
On 29/05/2024 17:16, Sabrina Dubroca wrote:
> 2024-05-28, 21:41:15 +0200, Antonio Quartulli wrote:
>> On 28/05/2024 16:44, Sabrina Dubroca wrote:
>>> Hi Antonio, I took a little break but I'm looking at your patches
>>> again now.
>>
>> Thanks Sabrina! Meanwhile I have been working on all your suggested changes.
>> Right now I am familiarizing myself with the strparser.
> 
> Cool :)
> 
>>> 2024-05-06, 03:16:27 +0200, Antonio Quartulli wrote:
>>>> +	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
>>>> +	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
>>>> +
>>>> +	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
>>>> +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
>>>> +					&peer->vpn_addrs.ipv4,
>>>> +					sizeof(peer->vpn_addrs.ipv4));
>>>> +		hlist_add_head_rcu(&peer->hash_entry_addr4,
>>>> +				   &ovpn->peers.by_vpn_addr[index]);
>>>> +	}
>>>> +
>>>> +	hlist_del_init_rcu(&peer->hash_entry_addr6);
>>>
>>> Why are hash_entry_transp_addr and hash_entry_addr6 getting a
>>> hlist_del_init_rcu() call, but not hash_entry_id and hash_entry_addr4?
>>
>> I think not calling del_init_rcu on hash_entry_addr4 was a mistake.
>>
>> Calling del_init_rcu on addr4, addr6 and transp_addr is needed to put them
>> in a known state in case they are not hashed.
> 
> hlist_del_init_rcu does nothing if node is not already on a list.

Mh you're right. I must have got confused for some reason.
Those del_init_rcu can go then.

> 
>> While hash_entry_id always goes through hlist_add_head_rcu, therefore
>> del_init_rcu is useless (to my understanding).
> 
> I'm probably missing something about how this all fits together. In
> patch 19, I see ovpn_nl_set_peer_doit can re-add a peer that is
> already added (but I'm not sure why, since you don't allow changing
> the addresses, so it won't actually be re-hashed).

Actually it's not a "re-add", but the intent is to "update" a peer that 
already exists. However, some fields are forbidden from being updated, 
like the address.

[NOTE: I found some issue with the "peer update" logic in 
ovpn_nl_set_peer_doit and it's being changed a bit]

> 
> I don't think doing a 2nd add of the same element to peers.by_id (or
> any of the other hashtables) is correct, so I'd say you need
> hlist_del_init_rcu for all of them.

This is exactly the bug I mentioned above: we should not go through the 
add again. Ideally we should just update the fields and be done with it, 
without re-hashing the object.

I hope it makes sense.

Cheers,

>
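
A rough sketch of the "update instead of re-add" flow described here; this is not the actual logic from patch 19, and the updated field is only an illustrative example:

/* Hypothetical outline: a peer that already exists is updated in place
 * and its hash entries are never touched again; only a genuinely new
 * peer goes through ovpn_peer_add().
 */
static int ovpn_set_peer_sketch(struct ovpn_struct *ovpn,
				struct ovpn_peer *peer)
{
	struct ovpn_peer *old = ovpn_peer_get_by_id(ovpn, peer->id);

	if (!old)
		return ovpn_peer_add(ovpn, peer); /* new peer: hash it once */

	/* existing peer: update mutable fields only; VPN and transport
	 * addresses are forbidden from changing, so no re-hashing
	 */
	spin_lock_bh(&ovpn->lock);
	old->keepalive_interval = peer->keepalive_interval; /* illustrative */
	spin_unlock_bh(&ovpn->lock);

	ovpn_peer_put(old); /* drop the reference taken by the lookup */
	return 0;
}
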
Sabrina Dubroca May 29, 2024, 8:45 p.m. UTC | #5
2024-05-29, 22:15:27 +0200, Antonio Quartulli wrote:
> On 29/05/2024 17:16, Sabrina Dubroca wrote:
> > 2024-05-28, 21:41:15 +0200, Antonio Quartulli wrote:
> > > On 28/05/2024 16:44, Sabrina Dubroca wrote:
> > > > Hi Antonio, I took a little break but I'm looking at your patches
> > > > again now.
> > > 
> > > Thanks Sabrina! Meanwhile I have been working on all your suggested changes.
> > > Right now I am familiarizing myself with the strparser.
> > 
> > Cool :)
> > 
> > > > 2024-05-06, 03:16:27 +0200, Antonio Quartulli wrote:
> > > > > +	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
> > > > > +	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
> > > > > +
> > > > > +	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
> > > > > +		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
> > > > > +					&peer->vpn_addrs.ipv4,
> > > > > +					sizeof(peer->vpn_addrs.ipv4));
> > > > > +		hlist_add_head_rcu(&peer->hash_entry_addr4,
> > > > > +				   &ovpn->peers.by_vpn_addr[index]);
> > > > > +	}
> > > > > +
> > > > > +	hlist_del_init_rcu(&peer->hash_entry_addr6);
> > > > 
> > > > Why are hash_entry_transp_addr and hash_entry_addr6 getting a
> > > > hlist_del_init_rcu() call, but not hash_entry_id and hash_entry_addr4?
> > > 
> > > I think not calling del_init_rcu on hash_entry_addr4 was a mistake.
> > > 
> > > Calling del_init_rcu on addr4, addr6 and transp_addr is needed to put them
> > > in a known state in case they are not hashed.
> > 
> > hlist_del_init_rcu does nothing if node is not already on a list.
> 
> Mh you're right. I must have got confused for some reason.
> Those del_init_rcu can go then.
> 
> > 
> > > While hash_entry_id always goes through hlist_add_head_rcu, therefore
> > > del_init_rcu is useless (to my understanding).
> > 
> > I'm probably missing something about how this all fits together. In
> > patch 19, I see ovpn_nl_set_peer_doit can re-add a peer that is
> > already added (but I'm not sure why, since you don't allow changing
> > the addresses, so it won't actually be re-hashed).
> 
> Actually it's not a "re-add", but the intent is to "update" a peer that
> already exists. However, some fields are forbidden from being updated, like
> the address.
> 
> [NOTE: I found some issue with the "peer update" logic in
> ovpn_nl_set_peer_doit and it's being changed a bit]
> 
> > 
> > I don't think doing a 2nd add of the same element to peers.by_id (or
> > any of the other hashtables) is correct, so I'd say you need
> > hlist_del_init_rcu for all of them.
> 
> This is exactly the bug I mentioned above: we should not go through the add
> again. Ideally we should just update the fields and be done with it, without
> re-hashing the object.

Ok, if you only call ovpn_peer_add for new peers, this looks fine and
the hlist_del_init_rcu can all be removed as you said.

Thanks.
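
With the thread's conclusion applied (a peer is only ever added once, so the still-unhashed nodes need no hlist_del_init_rcu(), and the IPv6 check uses the dedicated helper), the table-insertion part of ovpn_peer_add_mp() would reduce to roughly the following sketch:

	/* newly created peer: all hlist_node fields are still unhashed,
	 * so no hlist_del_init_rcu() is needed before adding them
	 */
	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);

	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
					&peer->vpn_addrs.ipv4,
					sizeof(peer->vpn_addrs.ipv4));
		hlist_add_head_rcu(&peer->hash_entry_addr4,
				   &ovpn->peers.by_vpn_addr[index]);
	}

	if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
					&peer->vpn_addrs.ipv6,
					sizeof(peer->vpn_addrs.ipv6));
		hlist_add_head_rcu(&peer->hash_entry_addr6,
				   &ovpn->peers.by_vpn_addr[index]);
	}
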

Patch

diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index 49efcfff963c..8ccf2700a370 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -36,6 +36,7 @@  int ovpn_struct_init(struct net_device *dev)
 		return err;
 
 	spin_lock_init(&ovpn->lock);
+	spin_lock_init(&ovpn->peers.lock);
 
 	ovpn->crypto_wq = alloc_workqueue("ovpn-crypto-wq-%s",
 					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0,
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
index a04d6e55a473..d6ba91c6571f 100644
--- a/drivers/net/ovpn/main.c
+++ b/drivers/net/ovpn/main.c
@@ -176,8 +176,14 @@  void ovpn_iface_destruct(struct ovpn_struct *ovpn)
 
 	ovpn->registered = false;
 
-	if (ovpn->mode == OVPN_MODE_P2P)
+	switch (ovpn->mode) {
+	case OVPN_MODE_P2P:
 		ovpn_peer_release_p2p(ovpn);
+		break;
+	default:
+		ovpn_peers_free(ovpn);
+		break;
+	}
 
 	unregister_netdevice(ovpn->dev);
 	synchronize_net();
diff --git a/drivers/net/ovpn/ovpnstruct.h b/drivers/net/ovpn/ovpnstruct.h
index 7414c2459fb9..58166fdeac63 100644
--- a/drivers/net/ovpn/ovpnstruct.h
+++ b/drivers/net/ovpn/ovpnstruct.h
@@ -21,6 +21,10 @@ 
  * @crypto_wq: used to schedule crypto work that may sleep during TX/RX
  * @event_wq: used to schedule generic events that may sleep and that need to be
  *            performed outside of softirq context
+ * @peers.by_id: table of peers index by ID
+ * @peers.by_transp_addr: table of peers indexed by transport address
+ * @peers.by_vpn_addr: table of peers indexed by VPN IP address
+ * @peers.lock: protects writes to peers tables
  * @peer: in P2P mode, this is the only remote peer
  * @dev_list: entry for the module wide device list
  */
@@ -31,6 +35,12 @@  struct ovpn_struct {
 	spinlock_t lock; /* protect writing to the ovpn_struct object */
 	struct workqueue_struct *crypto_wq;
 	struct workqueue_struct *events_wq;
+	struct {
+		DECLARE_HASHTABLE(by_id, 12);
+		DECLARE_HASHTABLE(by_transp_addr, 12);
+		DECLARE_HASHTABLE(by_vpn_addr, 12);
+		spinlock_t lock; /* protects writes to peers tables */
+	} peers;
 	struct ovpn_peer __rcu *peer;
 	struct list_head dev_list;
 };
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
index 99a2ae42a332..38a89595dade 100644
--- a/drivers/net/ovpn/peer.c
+++ b/drivers/net/ovpn/peer.c
@@ -9,6 +9,7 @@ 
 
 #include <linux/skbuff.h>
 #include <linux/list.h>
+#include <linux/hashtable.h>
 #include <linux/workqueue.h>
 
 #include "ovpnstruct.h"
@@ -361,6 +362,91 @@  struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
 	return peer;
 }
 
+/**
+ * ovpn_peer_add_mp - add per to related tables in a MP instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
+{
+	struct sockaddr_storage sa = { 0 };
+	struct sockaddr_in6 *sa6;
+	struct sockaddr_in *sa4;
+	struct ovpn_bind *bind;
+	struct ovpn_peer *tmp;
+	size_t salen;
+	int ret = 0;
+	u32 index;
+
+	spin_lock_bh(&ovpn->peers.lock);
+	/* do not add duplicates */
+	tmp = ovpn_peer_get_by_id(ovpn, peer->id);
+	if (tmp) {
+		ovpn_peer_put(tmp);
+		ret = -EEXIST;
+		goto unlock;
+	}
+
+	hlist_del_init_rcu(&peer->hash_entry_transp_addr);
+	bind = rcu_dereference_protected(peer->bind, true);
+	/* peers connected via TCP have bind == NULL */
+	if (bind) {
+		switch (bind->sa.in4.sin_family) {
+		case AF_INET:
+			sa4 = (struct sockaddr_in *)&sa;
+
+			sa4->sin_family = AF_INET;
+			sa4->sin_addr.s_addr = bind->sa.in4.sin_addr.s_addr;
+			sa4->sin_port = bind->sa.in4.sin_port;
+			salen = sizeof(*sa4);
+			break;
+		case AF_INET6:
+			sa6 = (struct sockaddr_in6 *)&sa;
+
+			sa6->sin6_family = AF_INET6;
+			sa6->sin6_addr = bind->sa.in6.sin6_addr;
+			sa6->sin6_port = bind->sa.in6.sin6_port;
+			salen = sizeof(*sa6);
+			break;
+		default:
+			ret = -EPROTONOSUPPORT;
+			goto unlock;
+		}
+
+		index = ovpn_peer_index(ovpn->peers.by_transp_addr, &sa, salen);
+		hlist_add_head_rcu(&peer->hash_entry_transp_addr,
+				   &ovpn->peers.by_transp_addr[index]);
+	}
+
+	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
+	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);
+
+	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
+		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
+					&peer->vpn_addrs.ipv4,
+					sizeof(peer->vpn_addrs.ipv4));
+		hlist_add_head_rcu(&peer->hash_entry_addr4,
+				   &ovpn->peers.by_vpn_addr[index]);
+	}
+
+	hlist_del_init_rcu(&peer->hash_entry_addr6);
+	if (memcmp(&peer->vpn_addrs.ipv6, &in6addr_any,
+		   sizeof(peer->vpn_addrs.ipv6))) {
+		index = ovpn_peer_index(ovpn->peers.by_vpn_addr,
+					&peer->vpn_addrs.ipv6,
+					sizeof(peer->vpn_addrs.ipv6));
+		hlist_add_head_rcu(&peer->hash_entry_addr6,
+				   &ovpn->peers.by_vpn_addr[index]);
+	}
+
+unlock:
+	spin_unlock_bh(&ovpn->peers.lock);
+
+	return ret;
+}
+
 /**
  * ovpn_peer_add_p2p - add per to related tables in a P2P instance
  * @ovpn: the instance to add the peer to
@@ -391,6 +477,8 @@  static int ovpn_peer_add_p2p(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
 int ovpn_peer_add(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
 {
 	switch (ovpn->mode) {
+	case OVPN_MODE_MP:
+		return ovpn_peer_add_mp(ovpn, peer);
 	case OVPN_MODE_P2P:
 		return ovpn_peer_add_p2p(ovpn, peer);
 	default:
@@ -398,6 +486,53 @@  int ovpn_peer_add(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
 	}
 }
 
+/**
+ * ovpn_peer_unhash - remove peer reference from all hashtables
+ * @peer: the peer to remove
+ * @reason: the delete reason to attach to the peer
+ */
+static void ovpn_peer_unhash(struct ovpn_peer *peer,
+			     enum ovpn_del_peer_reason reason)
+{
+	hlist_del_init_rcu(&peer->hash_entry_id);
+	hlist_del_init_rcu(&peer->hash_entry_addr4);
+	hlist_del_init_rcu(&peer->hash_entry_addr6);
+	hlist_del_init_rcu(&peer->hash_entry_transp_addr);
+
+	ovpn_peer_put(peer);
+	peer->delete_reason = reason;
+}
+
+/**
+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_mp(struct ovpn_peer *peer,
+			    enum ovpn_del_peer_reason reason)
+{
+	struct ovpn_peer *tmp;
+	int ret = 0;
+
+	spin_lock_bh(&peer->ovpn->peers.lock);
+	tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
+	if (tmp != peer) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+	ovpn_peer_unhash(peer, reason);
+
+unlock:
+	spin_unlock_bh(&peer->ovpn->peers.lock);
+
+	if (tmp)
+		ovpn_peer_put(tmp);
+
+	return ret;
+}
+
 /**
  * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
  * @peer: the peer to delete
@@ -444,9 +579,23 @@  void ovpn_peer_release_p2p(struct ovpn_struct *ovpn)
 int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
 {
 	switch (peer->ovpn->mode) {
+	case OVPN_MODE_MP:
+		return ovpn_peer_del_mp(peer, reason);
 	case OVPN_MODE_P2P:
 		return ovpn_peer_del_p2p(peer, reason);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
+
+void ovpn_peers_free(struct ovpn_struct *ovpn)
+{
+	struct hlist_node *tmp;
+	struct ovpn_peer *peer;
+	int bkt;
+
+	spin_lock_bh(&ovpn->peers.lock);
+	hash_for_each_safe(ovpn->peers.by_id, bkt, tmp, peer, hash_entry_id)
+		ovpn_peer_unhash(peer, OVPN_DEL_PEER_REASON_TEARDOWN);
+	spin_unlock_bh(&ovpn->peers.lock);
+}
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
index ac4907705d98..10f4153f7c8f 100644
--- a/drivers/net/ovpn/peer.h
+++ b/drivers/net/ovpn/peer.h
@@ -26,6 +26,10 @@ 
  * @id: unique identifier
  * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel
  * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel
+ * @hash_entry_id: entry in the peer ID hashtable
+ * @hash_entry_addr4: entry in the peer IPv4 hashtable
+ * @hash_entry_addr6: entry in the peer IPv6 hashtable
+ * @hash_entry_transp_addr: entry in the peer transport address hashtable
  * @encrypt_work: work used to process outgoing packets
  * @decrypt_work: work used to process incoming packets
  * @tx_ring: queue of outgoing poackets to this peer
@@ -62,6 +66,10 @@  struct ovpn_peer {
 		struct in_addr ipv4;
 		struct in6_addr ipv6;
 	} vpn_addrs;
+	struct hlist_node hash_entry_id;
+	struct hlist_node hash_entry_addr4;
+	struct hlist_node hash_entry_addr6;
+	struct hlist_node hash_entry_transp_addr;
 	struct work_struct encrypt_work;
 	struct work_struct decrypt_work;
 	struct ptr_ring tx_ring;
@@ -208,4 +216,10 @@  struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_struct *ovpn,
 struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
 				       struct sk_buff *skb);
 
+/**
+ * ovpn_peers_free - free all peers in the instance
+ * @ovpn: the instance whose peers should be released
+ */
+void ovpn_peers_free(struct ovpn_struct *ovpn);
+
 #endif /* _NET_OVPN_OVPNPEER_H_ */