[RFC,net-next,19/19] net: dsa: tag_qca: add GRO callbacks

Message ID: 20191230143028.27313-20-alobakin@dlink.ru
State: New, archived
Series: net: dsa: add GRO support

Commit Message

Alexander Lobakin Dec. 30, 2019, 2:30 p.m. UTC
...so that frames tagged with this CPU tag type can be correctly
processed by the GRO layer.
Misc: fix qca_netdev_ops structure indentation.

Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
---
 net/dsa/tag_qca.c | 88 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 82 insertions(+), 6 deletions(-)
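
The GRO callbacks below rely on three small helpers introduced earlier in
this series: qca_tag_encap_proto(), qca_tag_source_port() and
qca_tag_sanity_check(). Their definitions are not part of this patch; the
following is only a plausible sketch, assuming the helpers build on the
existing QCA_HDR_* field layout in net/dsa/tag_qca.c (a 2-byte tag carrying
version, priority, type and source-port fields, followed by the encapsulated
EtherType), and may differ from the series' actual code.

/* Hypothetical helper bodies, inferred from the QCA_HDR_* defines
 * already present in net/dsa/tag_qca.c; an earlier patch in this
 * series introduces the real ones.
 */
static inline u16 qca_tag_hdr_get(const u8 *data)
{
	return ntohs(*(const __be16 *)data);
}

static inline __be16 qca_tag_encap_proto(const u8 *data)
{
	/* The encapsulated EtherType follows the 2-byte CPU tag */
	return *(const __be16 *)(data + QCA_HDR_LEN);
}

static inline int qca_tag_source_port(const u8 *data)
{
	/* The source port lives in the lowest 3 bits of the tag */
	return qca_tag_hdr_get(data) & QCA_HDR_RECV_SOURCE_PORT_MASK;
}

static inline bool qca_tag_sanity_check(const u8 *data)
{
	u16 hdr = qca_tag_hdr_get(data);

	/* Only accept frames carrying the expected tag version */
	return ((hdr & QCA_HDR_RECV_VERSION_MASK) >>
		QCA_HDR_RECV_VERSION_S) == QCA_HDR_VERSION;
}

With helpers of this shape, the XOR of the two source ports in gro_receive
below is simply a cheap inequality test on a 3-bit field: any non-zero
result marks the candidate skb as belonging to a different flow.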

Patch

diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index bee2788e034d..d0cb2299cbe9 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -106,13 +106,89 @@  static void qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
 	*proto = qca_tag_encap_proto(skb->data);
 }
 
+static struct sk_buff *qca_tag_gro_receive(struct list_head *head,
+					   struct sk_buff *skb)
+{
+	const struct packet_offload *ptype;
+	struct sk_buff *p, *pp = NULL;
+	u32 data_off, data_end;
+	const u8 *data;
+	int flush = 1;
+
+	data_off = skb_gro_offset(skb);
+	data_end = data_off + QCA_HDR_LEN;
+
+	data = skb_gro_header_fast(skb, data_off);
+	if (skb_gro_header_hard(skb, data_end)) {
+		data = skb_gro_header_slow(skb, data_end, data_off);
+		if (unlikely(!data))
+			goto out;
+	}
+
+	/* Data to the left of the current offset has already been
+	 * pulled into the linear head, so skb->data can be read here
+	 */
+	if (unlikely(!qca_tag_sanity_check(skb->data + data_off)))
+		goto out;
+
+	rcu_read_lock();
+
+	ptype = gro_find_receive_by_type(qca_tag_encap_proto(data));
+	if (!ptype)
+		goto out_unlock;
+
+	flush = 0;
+
+	list_for_each_entry(p, head, list) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		if (qca_tag_source_port(skb->data + data_off) ^
+		    qca_tag_source_port(p->data + data_off))
+			NAPI_GRO_CB(p)->same_flow = 0;
+	}
+
+	skb_gro_pull(skb, QCA_HDR_LEN);
+	skb_gro_postpull_rcsum(skb, data, QCA_HDR_LEN);
+
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	skb_gro_flush_final(skb, pp, flush);
+
+	return pp;
+}
+
+static int qca_tag_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	const struct packet_offload *ptype;
+	int err = -ENOENT;
+	__be16 proto;
+
+	proto = qca_tag_encap_proto(skb->data + nhoff);
+
+	rcu_read_lock();
+
+	ptype = gro_find_complete_by_type(proto);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + QCA_HDR_LEN);
+
+	rcu_read_unlock();
+
+	return err;
+}
+
 static const struct dsa_device_ops qca_netdev_ops = {
-	.name	= "qca",
-	.proto	= DSA_TAG_PROTO_QCA,
-	.xmit	= qca_tag_xmit,
-	.rcv	= qca_tag_rcv,
-	.flow_dissect = qca_tag_flow_dissect,
-	.overhead = QCA_HDR_LEN,
+	.name		= "qca",
+	.proto		= DSA_TAG_PROTO_QCA,
+	.xmit		= qca_tag_xmit,
+	.rcv		= qca_tag_rcv,
+	.flow_dissect	= qca_tag_flow_dissect,
+	.gro_receive	= qca_tag_gro_receive,
+	.gro_complete	= qca_tag_gro_complete,
+	.overhead	= QCA_HDR_LEN,
 };
 
 MODULE_LICENSE("GPL");
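
For context: the core GRO engine never calls these tagger callbacks
directly. An earlier patch in this series wires them up through the new
dsa_device_ops hooks, presumably from a packet_offload registered for
ETH_P_XDSA frames. A minimal sketch of what that dispatch could look like;
the dsa_gro_receive()/dsa_gro_complete() names below are hypothetical and
the tag_ops lookup is an assumption, not the series' actual code:

/* Hypothetical DSA-level dispatch to the per-tagger GRO callbacks.
 * skb->dev is the CPU (master) interface here, whose dsa_ptr points
 * at the CPU port and thus at the tagger's dsa_device_ops.
 */
static struct sk_buff *dsa_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;

	if (!ops->gro_receive) {
		/* Tagger cannot aggregate: flush this skb right away */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return ops->gro_receive(head, skb);
}

static int dsa_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;

	return ops->gro_complete ? ops->gro_complete(skb, nhoff) : -ENOENT;
}

static struct packet_offload dsa_packet_offload __read_mostly = {
	.type		= cpu_to_be16(ETH_P_XDSA),
	.callbacks	= {
		.gro_receive	= dsa_gro_receive,
		.gro_complete	= dsa_gro_complete,
	},
};

/* dev_add_offload(&dsa_packet_offload) at module init would then make
 * GRO consult the tagger for every ETH_P_XDSA frame.
 */

Keeping the dispatch behind dsa_device_ops means each tagging protocol only
has to describe how to peek past its own header, which is exactly what the
qca_tag_gro_receive()/qca_tag_gro_complete() pair above does for the 2-byte
QCA tag.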