@@ -77,6 +77,7 @@ struct ulp_ddp_io {
* @hdr_len: the size (in bytes) of the pdu header.
* @hdr: pdu header.
* @req: the ulp request for the original pdu.
+ * @ddgst: pdu data digest.
*/
struct ulp_ddp_pdu_info {
struct list_head list;
@@ -86,6 +87,7 @@ struct ulp_ddp_pdu_info {
u32 hdr_len;
void *hdr;
struct request *req;
+ __le32 ddgst;
};
/* struct ulp_ddp_dev_ops - operations used by an upper layer protocol to configure ddp offload
@@ -129,6 +131,8 @@ struct ulp_ddp_ulp_ops {
bool (*resync_request)(struct sock *sk, u32 seq, u32 flags);
/* NIC driver informs the ulp that ddp teardown is done - used for async completions*/
void (*ddp_teardown_done)(void *ddp_ctx);
+	/* NIC driver requests the ulp to calculate the ddgst and store it in pdu_info->ddgst */
+ void (*ddp_ddgst_fallback)(struct ulp_ddp_pdu_info *pdu_info);
};
/**
@@ -182,4 +186,7 @@ int ulp_ddp_map_pdu_info(struct sock *sk, u32 start_seq, void *hdr,
void ulp_ddp_close_pdu_info(struct sock *sk);
bool ulp_ddp_need_map(struct sock *sk);
struct ulp_ddp_pdu_info *ulp_ddp_get_pdu_info(struct sock *sk, u32 seq);
+struct sk_buff *ulp_ddp_validate_xmit_skb(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
#endif //_ULP_DDP_H
@@ -164,3 +164,72 @@ struct ulp_ddp_pdu_info *ulp_ddp_get_pdu_info(struct sock *sk, u32 seq)
return info;
} EXPORT_SYMBOL(ulp_ddp_get_pdu_info);
+/* Ensure pdu_info->ddgst holds the pdu data digest, invoking the ulp's
+ * software fallback callback to compute it when it is not already cached.
+ * NOTE(review): a zero value is treated as "not yet computed", so a digest
+ * that is legitimately all-zero will be recomputed on every call — confirm
+ * this is acceptable or track validity with a separate flag.
+ */
+static void ulp_ddp_ddgst_recalc(const struct ulp_ddp_ulp_ops *ulp_ops,
+				 struct ulp_ddp_pdu_info *pdu_info)
+{
+	/* non-zero digest already present: nothing to do */
+	if (pdu_info->ddgst)
+		return;
+
+	ulp_ops->ddp_ddgst_fallback(pdu_info);
+}
+
+/* Software fallback for pdu data digest insertion on the transmit path.
+ * Walks every pdu that overlaps this skb's TCP payload and, for each pdu
+ * whose ddgst field begins inside the skb, makes sure the digest has been
+ * computed (ulp fallback) and writes it over the placeholder bytes with
+ * skb_store_bits().  Returns the (possibly modified) skb; never frees it.
+ */
+static struct sk_buff *ulp_ddp_fallback_skb(struct ulp_ddp_ctx *ctx,
+					    struct sk_buff *skb,
+					    struct sock *sk)
+{
+	const struct ulp_ddp_ulp_ops *ulp_ops = inet_csk(sk)->icsk_ulp_ddp_ops;
+	/* TCP payload length: total skb length minus network+transport headers */
+	int datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	struct ulp_ddp_pdu_info *pdu_info = NULL;
+	int ddgst_start, ddgst_offset, ddgst_len;
+	u32 seq = ntohl(tcp_hdr(skb)->seq);
+	u32 end_skb_seq = seq + datalen;
+	u32 first_seq = seq;
+
+	/* nothing we can do without a software digest callback */
+	if (!(ulp_ops && ulp_ops->ddp_ddgst_fallback))
+		return skb;
+
+again:
+	/* Check whether the cached pdu_info can be reused.
+	 * It cannot be used when:
+	 * 1. this is the first iteration, so pdu_info is still NULL.
+	 * 2. seq does not map into this pdu_info (out of bounds).
+	 */
+	if (!pdu_info || !between(seq, pdu_info->start_seq, pdu_info->end_seq - 1)) {
+		pdu_info = ulp_ddp_get_pdu_info(sk, seq);
+		if (!pdu_info)
+			return skb;
+	}
+
+	/* the digest occupies the last ddgst_len bytes of the pdu */
+	ddgst_start = pdu_info->end_seq - ctx->ddgst_len;
+
+	/* Patch the digest bytes if the ddgst field starts inside this skb
+	 * (skipped for pdus with no data, which carry no data digest).
+	 * NOTE(review): if the digest started in a *previous* skb, the tail
+	 * bytes landing here are not patched (ddgst_start < seq fails the
+	 * between() check) — confirm a digest cannot straddle skbs, or
+	 * handle the partial-tail case.
+	 */
+	if (between(ddgst_start, seq, end_skb_seq - 1) && pdu_info->data_len) {
+		ulp_ddp_ddgst_recalc(ulp_ops, pdu_info);
+		/* offset of the digest inside the skb payload; presumably the
+		 * linear area (skb_headlen) holds only headers and payload
+		 * starts in the frags — TODO confirm for all callers
+		 */
+		ddgst_offset = ddgst_start - first_seq + skb_headlen(skb);
+		/* clamp to however much of the digest fits in this skb */
+		ddgst_len = min_t(int, ctx->ddgst_len, end_skb_seq - ddgst_start);
+		skb_store_bits(skb, ddgst_offset, &pdu_info->ddgst, ddgst_len);
+	}
+
+	/* more pdus may overlap this skb: continue from the next pdu's start */
+	if (between(pdu_info->end_seq, seq + 1, end_skb_seq - 1)) {
+		seq = pdu_info->end_seq;
+		goto again;
+	}
+
+	return skb;
+}
+
+/* Transmit-path hook for ddp-offloaded sockets.
+ * If the skb has no ddp context, or is leaving through the netdev that
+ * owns the offload (which fills in digests in hardware), the skb is
+ * returned untouched.  Otherwise (e.g. the route moved to a different
+ * device) fall back to software digest insertion.
+ * Returns the (possibly modified) skb.
+ */
+struct sk_buff *ulp_ddp_validate_xmit_skb(struct sock *sk,
+					  struct net_device *dev,
+					  struct sk_buff *skb)
+{
+	struct ulp_ddp_ctx *ctx = ulp_ddp_get_ctx(sk);
+
+	if (!ctx)
+		return skb;
+
+	/* offload-capable device handles the digest itself */
+	if (dev == ctx->netdev)
+		return skb;
+
+	return ulp_ddp_fallback_skb(ctx, skb, sk);
+}
+EXPORT_SYMBOL(ulp_ddp_validate_xmit_skb);