@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0
*
* ulp_ddp.h
* Author: Boris Pismenny <borisp@mellanox.com>
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <net/inet_connection_sock.h>
#include <net/sock.h>
+#include <net/tcp.h>
/* limits returned by the offload driver, zero means don't care */
struct ulp_ddp_limits {
@@ -67,6 +68,26 @@ struct ulp_ddp_io {
struct scatterlist first_sgl[SG_CHUNK_SIZE];
};
+/**
+ * struct ulp_ddp_pdu_info - pdu info for tcp ddp crc Tx offload.
+ *
+ * @list: link in the per-socket list of mapped pdu_infos (ulp_ddp_ctx.info_list).
+ * @end_seq: tcp seq one past the last byte of the pdu (start_seq + header +
+ *	data + data digest).
+ * @start_seq: tcp seq of the first byte in the pdu.
+ * @data_len: pdu data size (in bytes).
+ * @hdr_len: the size (in bytes) of the pdu header.
+ * @hdr: pdu header.
+ * @req: the ulp request for the original pdu.
+ */
+struct ulp_ddp_pdu_info {
+	struct list_head list;
+	u32 end_seq;
+	u32 start_seq;
+	u32 data_len;
+	u32 hdr_len;
+	void *hdr;
+	struct request *req;
+};
+
/* struct ulp_ddp_dev_ops - operations used by an upper layer protocol to configure ddp offload
*
* @ulp_ddp_limits: limit the number of scatter gather entries per IO.
@@ -113,10 +134,25 @@ struct ulp_ddp_ulp_ops {
/**
* struct ulp_ddp_ctx - Generic ulp ddp context: device driver per queue contexts must
* use this as the first member.
+ *
+ * @netdev: the corresponding netdev for this tcp ddp.
+ * @ddgst_len: data digest len in bytes.
+ * @expected_seq: the next expected tcp seq.
+ * @open_info: the currently open (not yet closed) pdu_info.
+ * @pdu_hint: hint for ulp_ddp_get_pdu_info.
+ * @info_list: list of the mapped pdu_infos.
+ * @info_lock: lock for info_list.
*/
struct ulp_ddp_ctx {
- enum ulp_ddp_type type;
- unsigned char buf[];
+ enum ulp_ddp_type type;
+ struct net_device *netdev;
+ int ddgst_len;
+ u32 expected_seq;
+ struct ulp_ddp_pdu_info *open_info;
+ struct ulp_ddp_pdu_info *pdu_hint;
+ struct list_head info_list;
+ spinlock_t info_lock;
+ unsigned char buf[];
};
static inline struct ulp_ddp_ctx *ulp_ddp_get_ctx(const struct sock *sk)
@@ -133,4 +169,17 @@ static inline void ulp_ddp_set_ctx(struct sock *sk, void *ctx)
rcu_assign_pointer(icsk->icsk_ulp_ddp_data, ctx);
}
+/* free a pdu_info obtained from ulp_ddp_map_pdu_info() */
+static inline void ulp_ddp_destroy_info(struct ulp_ddp_pdu_info *info)
+{
+	kfree(info);
+}
+
+/* Tx offload pdu tracking API, implemented in net/core/ulp_ddp.c */
+void ulp_ddp_ack_handle(struct sock *sk, u32 acked_seq);
+int ulp_ddp_init_tx_offload(struct sock *sk);
+void ulp_ddp_release_tx_offload(struct sock *sk);
+int ulp_ddp_map_pdu_info(struct sock *sk, u32 start_seq, void *hdr,
+			 u32 hdr_len, u32 data_len, struct request *req);
+void ulp_ddp_close_pdu_info(struct sock *sk);
+bool ulp_ddp_need_map(struct sock *sk);
+struct ulp_ddp_pdu_info *ulp_ddp_get_pdu_info(struct sock *sk, u32 seq);
#endif //_ULP_DDP_H
@@ -14,6 +14,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
fib_notifier.o xdp.o flow_offload.o
obj-y += net-sysfs.o
+obj-$(CONFIG_ULP_DDP) += ulp_ddp.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
new file mode 100644
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ulp_ddp.c
+ * Author: Yoray Zack <yorayz@mellanox.com>
+ * Copyright (C) 2020 Mellanox Technologies.
+ */
+#include <net/ulp_ddp.h>
+
+/* tcp clean-acked callback: free every pdu_info that is fully acked.
+ * @acked_seq is the new snd_una, i.e. the first byte not yet acked.
+ */
+void ulp_ddp_ack_handle(struct sock *sk, u32 acked_seq)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *info, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->info_lock, flags);
+	info = ctx->pdu_hint;
+	if (info && !before(acked_seq, info->end_seq))
+		ctx->pdu_hint = NULL;
+
+	list_for_each_entry_safe(info, temp, &ctx->info_list, list) {
+		/* end_seq is one past the pdu's last byte, so the pdu is
+		 * fully acked only once acked_seq >= end_seq.  This must
+		 * match the pdu_hint invalidation above: freeing at
+		 * end_seq - 1 would leave ctx->pdu_hint dangling.
+		 */
+		if (before(acked_seq, info->end_seq))
+			break;
+
+		list_del(&info->list);
+		ulp_ddp_destroy_info(info);
+	}
+
+	spin_unlock_irqrestore(&ctx->info_lock, flags);
+}
+
+/* drop every mapped pdu_info; used on the teardown path */
+static void ulp_ddp_delete_all_info(struct sock *sk)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *info, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->info_lock, flags);
+	list_for_each_entry_safe(info, temp, &ctx->info_list, list) {
+		list_del(&info->list);
+		ulp_ddp_destroy_info(info);
+	}
+
+	/* clear the hint while still holding the lock so a concurrent
+	 * reader can never observe a hint pointing at freed memory.
+	 */
+	ctx->pdu_hint = NULL;
+	spin_unlock_irqrestore(&ctx->info_lock, flags);
+}
+
+/* start Tx offload pdu tracking on @sk: seed the list with a
+ * zero-length marker at the current write_seq and register the
+ * clean-acked callback.  Returns 0 on success or -ENOMEM.
+ */
+int ulp_ddp_init_tx_offload(struct sock *sk)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *start_marker_info;
+	unsigned long flags;
+
+	/* zero-length marker: keeps info_list non-empty and gives seq
+	 * lookups a lower bound.
+	 */
+	start_marker_info = kzalloc(sizeof(*start_marker_info), GFP_KERNEL);
+	if (!start_marker_info)
+		return -ENOMEM;
+
+	start_marker_info->end_seq = tcp_sk(sk)->write_seq;
+	start_marker_info->start_seq = tcp_sk(sk)->write_seq;
+	spin_lock_init(&ctx->info_lock);
+	INIT_LIST_HEAD(&ctx->info_list);
+	spin_lock_irqsave(&ctx->info_lock, flags);
+	list_add_tail(&start_marker_info->list, &ctx->info_list);
+	spin_unlock_irqrestore(&ctx->info_lock, flags);
+	ctx->pdu_hint = NULL;
+	ctx->open_info = NULL;
+	clean_acked_data_enable(inet_csk(sk),
+				&ulp_ddp_ack_handle);
+	return 0;
+}
+EXPORT_SYMBOL(ulp_ddp_init_tx_offload);
+
+/* stop Tx offload tracking and free all mapped pdu_infos */
+void ulp_ddp_release_tx_offload(struct sock *sk)
+{
+	clean_acked_data_disable(inet_csk(sk));
+	ulp_ddp_delete_all_info(sk);
+}
+EXPORT_SYMBOL(ulp_ddp_release_tx_offload);
+
+/* allocate and fill a pdu_info for the pdu starting at @start_seq.
+ * The info stays in ctx->open_info until ulp_ddp_close_pdu_info()
+ * stamps its final end_seq and publishes it on info_list.
+ * Returns 0 on success or -ENOMEM.
+ */
+int ulp_ddp_map_pdu_info(struct sock *sk, u32 start_seq, void *hdr,
+			 u32 hdr_len, u32 data_len, struct request *req)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *pdu_info;
+	u32 ddgst_len;
+
+	pdu_info = kmalloc(sizeof(*pdu_info), GFP_KERNEL);
+	if (!pdu_info)
+		return -ENOMEM;
+
+	/* pdus without data carry no data digest */
+	ddgst_len = data_len ? ctx->ddgst_len : 0;
+
+	pdu_info->end_seq = start_seq + hdr_len + data_len + ddgst_len;
+	pdu_info->start_seq = start_seq;
+	pdu_info->data_len = data_len;
+	pdu_info->hdr_len = hdr_len;
+	pdu_info->hdr = hdr;
+	pdu_info->req = req;
+
+	ctx->open_info = pdu_info;
+	return 0;
+}
+EXPORT_SYMBOL(ulp_ddp_map_pdu_info);
+
+/* finalize the currently open pdu_info: stamp its real end_seq from
+ * the socket's write_seq and publish it on info_list.
+ */
+void ulp_ddp_close_pdu_info(struct sock *sk)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *pdu_info = ctx->open_info;
+	unsigned long flags;
+
+	if (!pdu_info)
+		return;
+
+	/* everything queued so far belongs to this pdu */
+	pdu_info->end_seq = tcp_sk(sk)->write_seq;
+
+	spin_lock_irqsave(&ctx->info_lock, flags);
+	list_add_tail_rcu(&pdu_info->list, &ctx->info_list);
+	spin_unlock_irqrestore(&ctx->info_lock, flags);
+
+	ctx->open_info = NULL;
+}
+EXPORT_SYMBOL(ulp_ddp_close_pdu_info);
+
+/* true when no pdu_info is open, i.e. the caller must map a new one */
+bool ulp_ddp_need_map(struct sock *sk)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+
+	return !ctx->open_info;
+}
+EXPORT_SYMBOL(ulp_ddp_need_map);
+
+/* find the pdu_info covering tcp seq @seq, or NULL.
+ *
+ * The still-open pdu is checked first; otherwise the mapped list is
+ * walked from the cached hint (or from the head when the hint is
+ * already past @seq).
+ */
+struct ulp_ddp_pdu_info *ulp_ddp_get_pdu_info(struct sock *sk, u32 seq)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+	struct ulp_ddp_pdu_info *info;
+
+	if (!ctx)
+		return NULL;
+
+	/* the open pdu covers everything from its start_seq onwards */
+	if (ctx->open_info && !before(seq, ctx->open_info->start_seq))
+		return ctx->open_info;
+
+	info = ctx->pdu_hint;
+	if (!info || before(seq, info->start_seq))
+		info = list_first_entry_or_null(&ctx->info_list,
+						struct ulp_ddp_pdu_info, list);
+
+	if (!info)
+		return NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_from_rcu(info, &ctx->info_list, list) {
+		/* end_seq is exclusive, hence the inclusive end_seq - 1 */
+		if (between(seq, info->start_seq, info->end_seq - 1)) {
+			/* NOTE(review): pdu_hint is updated without taking
+			 * info_lock; confirm all readers/writers of the
+			 * hint are serialized on this socket.
+			 */
+			ctx->pdu_hint = info;
+			goto out;
+		}
+	}
+
+	info = NULL;
+out:
+	rcu_read_unlock();
+	return info;
+}
+EXPORT_SYMBOL(ulp_ddp_get_pdu_info);
+