
[wpan-next,2/2] 6lowpan: nhc: drop EEXIST limitation

Message ID 20220613032922.1030739-2-aahringo@redhat.com (mailing list archive)
State New, archived
Series [wpan-next,1/2] 6lowpan: nhc: more constify api

Commit Message

Alexander Aring June 13, 2022, 3:29 a.m. UTC
In nhc we have compression() and uncompression(). Currently we have a
limitation that we return -EEXIST when an nhc is already registered for
the given nexthdr. On the receive side, however, lookup is done by
nhcid, so multiple nhcs for the same nexthdr can indeed be supported at
the same time. We remove the current static array implementation and
replace it with dynamic list handling to get rid of this limitation.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 net/6lowpan/nhc.c | 69 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 44 insertions(+), 25 deletions(-)

Comments

Stefan Schmidt June 16, 2022, 7:57 a.m. UTC | #1
Hello Alex.

On 13.06.22 05:29, Alexander Aring wrote:
> In nhc we have compression() and uncompression(). Currently we have a
> limitation that we return -EEXIST when it's the nhc is already
> registered according the nexthdr. But on receiving handling and the
> nhcid we can indeed support both at the same time. 

The sentence above is not really clear to me. Do you want to say that on 
rx we can support more than one nhcid? I am a bit confused why you write 
both here. Where does the limit to two come from?

We remove the current
> static array implementation and replace it by a dynamic list handling to
> get rid of this limitation.
> 
> Signed-off-by: Alexander Aring <aahringo@redhat.com>
> ---
>   net/6lowpan/nhc.c | 69 ++++++++++++++++++++++++++++++-----------------
>   1 file changed, 44 insertions(+), 25 deletions(-)
> 
> diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
> index 7b374595328d..3d7c50139142 100644
> --- a/net/6lowpan/nhc.c
> +++ b/net/6lowpan/nhc.c
> @@ -12,13 +12,30 @@
>   
>   #include "nhc.h"
>   
> -static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
> +struct lowpan_nhc_entry {
> +	const struct lowpan_nhc *nhc;
> +	struct list_head list;
> +};
> +
>   static DEFINE_SPINLOCK(lowpan_nhc_lock);
> +static LIST_HEAD(lowpan_nexthdr_nhcs);
> +
> +const struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr)
> +{
> +	const struct lowpan_nhc_entry *e;
> +
> +	list_for_each_entry(e, &lowpan_nexthdr_nhcs, list) {
> +		if (e->nhc->nexthdr == nexthdr &&
> +		    e->nhc->compress)
> +			return e->nhc;

We will always go with the first one we find? Am I missing something, or 
does that mean that any nhc registered second or later will never be 
taken into account?

regards
Stefan Schmidt
Alexander Aring June 16, 2022, 12:57 p.m. UTC | #2
Hi,

On Thu, Jun 16, 2022 at 3:57 AM Stefan Schmidt
<stefan@datenfreihafen.org> wrote:
>
>
> Hello Alex.
>
> On 13.06.22 05:29, Alexander Aring wrote:
> > In nhc we have compression() and uncompression(). Currently we have a
> > limitation that we return -EEXIST when it's the nhc is already
> > registered according the nexthdr. But on receiving handling and the
> > nhcid we can indeed support both at the same time.
>
> The sentence above is not really clear to me. Do you want to say that on
> rx we can support more than one nhcid? I am a bit confused why you write
> both here. Where does the limit to two come from?
>

It's simple when you look at how it works. On rx we look handlers up by
nhcid and on tx we look them up by nexthdr. These are two different
registration numbers, and there can be multiple compression formats for
one nexthdr, i.e. an N:1 relationship.

The limit was only ever there because we did not support multiple
registrations for the same nexthdr.
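
To illustrate the N:1 point, here is a hypothetical sketch. Only the
nexthdr, id, idmask and compress members appear in the patch below; the
uncompress member and all function names are assumptions made for
illustration. Two nhcs could handle the same nexthdr while claiming
different NHC ID patterns on the wire, so rx can dispatch on the ID even
though tx only ever needs one compressor:

/* Hypothetical example: two nhc definitions sharing nexthdr = 17
 * (NEXTHDR_UDP) but claiming different NHC ID patterns.  The rx path
 * matches on (id & idmask) == id, so both can coexist; the tx path
 * looks up by nexthdr and only needs ->compress on one of them.
 */
static const struct lowpan_nhc nhc_udp_default = {
	.nexthdr	= 17,			/* NEXTHDR_UDP */
	.id		= 0xf0,			/* 11110xxx on the wire */
	.idmask		= 0xf8,
	.compress	= udp_default_compress,	/* picked up by tx lookup */
	.uncompress	= udp_default_uncompress,
};

static const struct lowpan_nhc nhc_udp_alt = {
	.nexthdr	= 17,			/* same nexthdr ... */
	.id		= 0xe0,			/* ... different, made-up NHC ID */
	.idmask		= 0xf8,
	.uncompress	= udp_alt_uncompress,	/* rx-only handler */
};

With the old array, lowpan_nhc_add(&nhc_udp_alt) failed with -EEXIST
because slot lowpan_nexthdr_nhcs[17] was already taken; with the list,
both registrations succeed and lowpan_nhc_by_nhcid() can still tell
them apart on rx.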

> We remove the current
> > static array implementation and replace it by a dynamic list handling to
> > get rid of this limitation.
> >
> > Signed-off-by: Alexander Aring <aahringo@redhat.com>
> > ---
> >   net/6lowpan/nhc.c | 69 ++++++++++++++++++++++++++++++-----------------
> >   1 file changed, 44 insertions(+), 25 deletions(-)
> >
> > diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
> > index 7b374595328d..3d7c50139142 100644
> > --- a/net/6lowpan/nhc.c
> > +++ b/net/6lowpan/nhc.c
> > @@ -12,13 +12,30 @@
> >
> >   #include "nhc.h"
> >
> > -static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
> > +struct lowpan_nhc_entry {
> > +     const struct lowpan_nhc *nhc;
> > +     struct list_head list;
> > +};
> > +
> >   static DEFINE_SPINLOCK(lowpan_nhc_lock);
> > +static LIST_HEAD(lowpan_nexthdr_nhcs);
> > +
> > +const struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr)
> > +{
> > +     const struct lowpan_nhc_entry *e;
> > +
> > +     list_for_each_entry(e, &lowpan_nexthdr_nhcs, list) {
> > +             if (e->nhc->nexthdr == nexthdr &&
> > +                 e->nhc->compress)
> > +                     return e->nhc;
>
> We will always go with the first one we find? Am I missing something, or
> does that mean that any nhc registered second or later will never be
> taken into account?

That is currently true for the tx side. This just allows more than we
currently support without breaking existing behaviour.

- Alex
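
A small illustration of that first-match behaviour, reusing the
hypothetical nhcs sketched earlier in the thread: list_add() inserts at
the head of lowpan_nexthdr_nhcs, so the tx lookup walks the most
recently registered entries first and returns the first one that
provides ->compress.

/* Illustrative only: registration order decides which compressor the
 * tx path picks when several nhcs share a nexthdr.
 */
lowpan_nhc_add(&nhc_udp_default);	/* list: default        */
lowpan_nhc_add(&nhc_udp_alt);		/* list: alt -> default */

/* lowpan_nhc_by_nexthdr(17) starts at the head (nhc_udp_alt) and
 * returns the first entry with ->compress set.  Since nhc_udp_alt is
 * rx-only in the sketch, it is skipped and nhc_udp_default keeps
 * handling tx, which preserves today's behaviour.
 */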

Patch

diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
index 7b374595328d..3d7c50139142 100644
--- a/net/6lowpan/nhc.c
+++ b/net/6lowpan/nhc.c
@@ -12,13 +12,30 @@ 
 
 #include "nhc.h"
 
-static const struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
+struct lowpan_nhc_entry {
+	const struct lowpan_nhc *nhc;
+	struct list_head list;
+};
+
 static DEFINE_SPINLOCK(lowpan_nhc_lock);
+static LIST_HEAD(lowpan_nexthdr_nhcs);
+
+const struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr)
+{
+	const struct lowpan_nhc_entry *e;
+
+	list_for_each_entry(e, &lowpan_nexthdr_nhcs, list) {
+		if (e->nhc->nexthdr == nexthdr &&
+		    e->nhc->compress)
+			return e->nhc;
+	}
+
+	return NULL;
+}
 
 static const struct lowpan_nhc *lowpan_nhc_by_nhcid(struct sk_buff *skb)
 {
-	const struct lowpan_nhc *nhc;
-	int i;
+	const struct lowpan_nhc_entry *e;
 	u8 id;
 
 	if (!pskb_may_pull(skb, 1))
@@ -26,13 +43,9 @@  static const struct lowpan_nhc *lowpan_nhc_by_nhcid(struct sk_buff *skb)
 
 	id = *skb->data;
 
-	for (i = 0; i < NEXTHDR_MAX + 1; i++) {
-		nhc = lowpan_nexthdr_nhcs[i];
-		if (!nhc)
-			continue;
-
-		if ((id & nhc->idmask) == nhc->id)
-			return nhc;
+	list_for_each_entry(e, &lowpan_nexthdr_nhcs, list) {
+		if ((id & e->nhc->idmask) == e->nhc->id)
+			return e->nhc;
 	}
 
 	return NULL;
@@ -46,8 +59,8 @@  int lowpan_nhc_check_compression(struct sk_buff *skb,
 
 	spin_lock_bh(&lowpan_nhc_lock);
 
-	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
-	if (!(nhc && nhc->compress))
+	nhc = lowpan_nhc_by_nexthdr(hdr->nexthdr);
+	if (!nhc)
 		ret = -ENOENT;
 
 	spin_unlock_bh(&lowpan_nhc_lock);
@@ -63,7 +76,7 @@  int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
 
 	spin_lock_bh(&lowpan_nhc_lock);
 
-	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
+	nhc = lowpan_nhc_by_nexthdr(hdr->nexthdr);
 	/* check if the nhc module was removed in unlocked part.
 	 * TODO: this is a workaround we should prevent unloading
 	 * of nhc modules while unlocked part, this will always drop
@@ -140,28 +153,34 @@  int lowpan_nhc_do_uncompression(struct sk_buff *skb,
 
 int lowpan_nhc_add(const struct lowpan_nhc *nhc)
 {
-	int ret = 0;
+	struct lowpan_nhc_entry *e;
 
-	spin_lock_bh(&lowpan_nhc_lock);
+	e = kmalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
 
-	if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
-		ret = -EEXIST;
-		goto out;
-	}
+	e->nhc = nhc;
 
-	lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
-out:
+	spin_lock_bh(&lowpan_nhc_lock);
+	list_add(&e->list, &lowpan_nexthdr_nhcs);
 	spin_unlock_bh(&lowpan_nhc_lock);
-	return ret;
+
+	return 0;
 }
 EXPORT_SYMBOL(lowpan_nhc_add);
 
 void lowpan_nhc_del(const struct lowpan_nhc *nhc)
 {
-	spin_lock_bh(&lowpan_nhc_lock);
-
-	lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;
+	struct lowpan_nhc_entry *e, *tmp;
 
+	spin_lock_bh(&lowpan_nhc_lock);
+	list_for_each_entry_safe(e, tmp, &lowpan_nexthdr_nhcs, list) {
+		if (e->nhc == nhc) {
+			list_del(&e->list);
+			kfree(e);
+			break;
+		}
+	}
 	spin_unlock_bh(&lowpan_nhc_lock);
 
 	synchronize_net();