@@ -154,6 +154,55 @@ static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
+#ifdef CONFIG_ULP_DDP
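+/*
+ * Describe the PDU that was just queued to the ULP DDP layer: its start
+ * sequence is derived from the socket's write_seq minus the bytes just
+ * sent, so the offload can locate the PDU and its data in the TCP stream.
+ */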
+static int nvme_tcp_map_pdu_info(struct nvme_tcp_queue *queue,
+ size_t sent_size,
+ u32 pdu_len, u32 data_len)
+{
+ u32 start_seq = tcp_sk(queue->sock->sk)->write_seq - sent_size;
+ struct nvme_tcp_request *req = queue->request;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ return ulp_ddp_map_pdu_info(queue->sock->sk, start_seq, req->pdu,
+ pdu_len, data_len, rq);
+}
+
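+/* Mark the current PDU as complete when the DDGST TX offload is active. */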
+static void nvme_tcp_close_pdu_info(struct nvme_tcp_queue *queue)
+{
+ if (queue->data_digest &&
+ test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags))
+ ulp_ddp_close_pdu_info(queue->sock->sk);
+}
+
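+/*
+ * Mapping is needed only when the data digest is offloaded on TX and the
+ * ULP DDP layer requests it for this socket.
+ */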
+static bool nvme_tcp_need_map(struct nvme_tcp_queue *queue)
+{
+	return queue->data_digest &&
+	       test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags) &&
+	       queue->sock && queue->sock->sk &&
+	       ulp_ddp_need_map(queue->sock->sk);
+}
+#else
+
+static int nvme_tcp_map_pdu_info(struct nvme_tcp_queue *queue,
+ size_t sent_size,
+ u32 pdu_len, u32 data_len)
+{
+ return 0;
+}
+
+static void nvme_tcp_close_pdu_info(struct nvme_tcp_queue *queue)
+{
+}
+
+static bool nvme_tcp_need_map(struct nvme_tcp_queue *queue)
+{
+ return false;
+}
+#endif
+
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
@@ -285,11 +334,13 @@ static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
#ifdef CONFIG_ULP_DDP
+static void nvme_tcp_ddp_ddgst_fallback(struct ulp_ddp_pdu_info *pdu_info);
static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
.resync_request = nvme_tcp_resync_request,
.ddp_teardown_done = nvme_tcp_ddp_teardown_done,
+ .ddp_ddgst_fallback = nvme_tcp_ddp_ddgst_fallback,
};
static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
@@ -371,6 +422,12 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
if (netdev->features & NETIF_F_HW_ULP_DDP) {
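+		/*
+		 * Set up the socket state needed by the DDGST TX offload; on
+		 * failure, undo the netdev registration and bail out.
+		 */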
+		if (ulp_ddp_init_tx_offload(queue->sock->sk)) {
+			netdev->ulp_ddp_ops->ulp_ddp_sk_del(netdev, queue->sock->sk);
+			inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
+			dev_put(netdev);
+			return -ENOMEM;
+		}
+
set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
set_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags);
@@ -392,6 +449,9 @@ static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
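+	/* Release TX offload state while the DDGST_TX flag is still set. */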
+	if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags))
+		ulp_ddp_release_tx_offload(queue->sock->sk);
+
 	clear_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
 	clear_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags);
netdev->ulp_ddp_ops->ulp_ddp_sk_del(netdev, queue->sock->sk);
inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
@@ -1269,6 +1329,19 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}
+#ifdef CONFIG_ULP_DDP
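+/*
+ * Invoked by the ULP DDP layer when the device cannot provide the data
+ * digest for a PDU: recompute the DDGST in software and hand it back via
+ * the PDU info.
+ */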
+static void nvme_tcp_ddp_ddgst_fallback(struct ulp_ddp_pdu_info *pdu_info)
+{
+	struct request *rq = pdu_info->req;
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_tcp_queue *queue = req->queue;
+
+	nvme_tcp_ddp_ddgst_recalc(queue->snd_hash, rq);
+	nvme_tcp_ddgst_final(queue->snd_hash, &pdu_info->ddgst);
+}
+#endif
+
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
@@ -1333,7 +1406,8 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
int len = sizeof(*pdu) + hdgst - req->offset;
struct request *rq = blk_mq_rq_from_pdu(req);
int flags = MSG_DONTWAIT;
- int ret;
+	int ret, err;
+	u32 data_len;
if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags) && queue->data_digest)
flags |= MSG_DDP_CRC;
@@ -1353,6 +1427,13 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
if (unlikely(ret <= 0))
return ret;
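+	/* Describe the freshly queued command PDU to the DDGST TX offload. */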
+ if (nvme_tcp_need_map(queue)) {
+ data_len = inline_data ? req->data_len : 0;
+		err = nvme_tcp_map_pdu_info(queue, ret, len, data_len);
+		if (unlikely(err))
+			return err;
+ }
+
len -= ret;
if (!len) {
if (inline_data) {
@@ -1360,6 +1441,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
} else {
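+			/* No data follows; the PDU mapping can be closed. */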
+ nvme_tcp_close_pdu_info(queue);
nvme_tcp_done_send_req(queue);
}
return 1;
@@ -1376,7 +1458,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
struct nvme_tcp_data_pdu *pdu = req->pdu;
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) - req->offset + hdgst;
- int ret;
+	int ret, err;
if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags) && queue->data_digest)
flags |= MSG_DDP_CRC;
@@ -1389,6 +1471,12 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (unlikely(ret <= 0))
return ret;
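+	/* Describe the freshly queued data PDU to the DDGST TX offload. */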
+ if (nvme_tcp_need_map(queue)) {
+		err = nvme_tcp_map_pdu_info(queue, ret, len, req->data_len);
+		if (unlikely(err))
+			return err;
+ }
+
len -= ret;
if (!len) {
req->state = NVME_TCP_SEND_DATA;
@@ -1424,6 +1512,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
return ret;
if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
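+		/* The digest is fully queued; close the PDU mapping. */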
+ nvme_tcp_close_pdu_info(queue);
nvme_tcp_done_send_req(queue);
return 1;
}