@@ -115,6 +115,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
NVME_TCP_Q_OFF_DDP = 3,
+ NVME_TCP_Q_OFF_DDGST_RX = 4,
};
enum nvme_tcp_recv_state {
@@ -142,6 +143,9 @@ struct nvme_tcp_queue {
size_t ddgst_remaining;
unsigned int nr_cqe;
+#ifdef CONFIG_ULP_DDP
+ bool ddp_ddgst_valid;
+
/*
* resync_req is a speculative PDU header tcp seq number (with
* an additional flag in the lower 32 bits) that the HW sends to
@@ -151,6 +155,7 @@ struct nvme_tcp_queue {
* is pending (ULP_DDP_RESYNC_PENDING).
*/
atomic64_t resync_req;
+#endif
/* send state */
struct nvme_tcp_request *request;
@@ -287,9 +292,21 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
#ifdef CONFIG_ULP_DDP
-static inline bool is_netdev_ulp_offload_active(struct net_device *netdev)
+static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
+ struct nvme_tcp_queue *queue)
{
- return test_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.active);
+ bool ddgst_offload;
+
+ if (test_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.active))
+ return true;
+
+ ddgst_offload = test_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT,
+ netdev->ulp_ddp_caps.active);
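+
+ /*
+ * When queue is NULL we only probe the device capabilities (e.g.
+ * for a limits query), so DDGST_RX support alone is enough. For an
+ * actual queue, DDGST_RX offload only matters if data digest is
+ * enabled on that queue.
+ */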
+ if (!queue && ddgst_offload)
+ return true;
+ if (queue && queue->data_digest && ddgst_offload)
+ return true;
+
+ return false;
}
static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
@@ -297,7 +314,7 @@ static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
{
int ret;
- if (!netdev || !is_netdev_ulp_offload_active(netdev) ||
+ if (!netdev || !is_netdev_ulp_offload_active(netdev, NULL) ||
!netdev->netdev_ops->ulp_ddp_ops->ulp_ddp_limits)
return false;
@@ -313,6 +330,18 @@ static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
return true;
}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return queue->ddp_ddgst_valid;
+}
+
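+ /*
+ * ddp_ddgst_valid is sticky-false: once any skb of the PDU arrives
+ * without the HW-verified CRC mark, the whole PDU digest must be
+ * verified in software.
+ */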
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{
+ if (queue->ddp_ddgst_valid)
+ queue->ddp_ddgst_valid = skb_is_ulp_crc(skb);
+}
+
static int nvme_tcp_req_map_sg(struct nvme_tcp_request *req, struct request *rq)
{
int ret;
@@ -327,6 +356,38 @@ static int nvme_tcp_req_map_sg(struct nvme_tcp_request *req, struct request *rq)
return 0;
}
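+
+/*
+ * Recompute the expected data digest in software over the request's
+ * SG list; called when the HW did not verify the CRC of every segment
+ * of the PDU.
+ */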
+static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{
+ struct nvme_tcp_request *req;
+
+ if (!rq)
+ return;
+
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (!req->offloaded) {
+ /* If we have DDGST_RX offload without DDP, the request
+ * wasn't mapped, so we need to map it here.
+ */
+ if (nvme_tcp_req_map_sg(req, rq))
+ return;
+ }
+
+ req->ddp.sg_table.sgl = req->ddp.first_sgl;
+ ahash_request_set_crypt(hash, req->ddp.sg_table.sgl, (u8 *)ddgst,
+ req->data_len);
+ crypto_ahash_digest(hash);
+
+ if (!req->offloaded) {
+ /* Without DDP, ddp_teardown() won't be called, so
+ * free the table here.
+ */
+ sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+ }
+}
+
static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
@@ -386,6 +447,9 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
struct net_device *netdev = queue->ctrl->offloading_netdev;
struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
+ bool offload_ddp = test_bit(ULP_DDP_C_NVME_TCP_BIT,
+ netdev->ulp_ddp_caps.active);
+ bool offload_ddgst_rx = test_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT,
+ netdev->ulp_ddp_caps.active);
int ret;
config.nvmeotcp.pfv = NVME_TCP_PFV_1_0;
@@ -410,7 +474,10 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
}
inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
- set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
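+ /*
+ * Enable each offload independently: DDP whenever the device
+ * supports it, DDGST_RX only when data digest is in use as well.
+ */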
+ if (offload_ddp)
+ set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ if (queue->data_digest && offload_ddgst_rx)
+ set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
return 0;
}
@@ -424,6 +491,7 @@ static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
}
clear_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ clear_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
netdev->netdev_ops->ulp_ddp_ops->ulp_ddp_sk_del(netdev, queue->sock->sk);
@@ -511,11 +579,26 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
#else
-static inline bool is_netdev_ulp_offload_active(struct net_device *netdev)
+static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
+ struct nvme_tcp_queue *queue)
{
return false;
}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return true;
+}
+
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{}
+
+static inline void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{}
+
static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
struct request *rq)
{
@@ -797,6 +880,9 @@ static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue->pdu_offset = 0;
queue->data_remaining = -1;
queue->ddgst_remaining = 0;
+#ifdef CONFIG_ULP_DDP
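+ /* assume the HW verified the digest until a segment shows otherwise */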
+ queue->ddp_ddgst_valid = true;
+#endif
}
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
@@ -999,7 +1085,8 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
int ret;
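+ /*
+ * DDGST_RX offload also depends on HW PDU tracking, so handle
+ * resync requests when either offload is enabled.
+ */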
- if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+ if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) ||
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
nvme_tcp_resync_response(queue, skb, *offset);
ret = skb_copy_bits(skb, *offset,
@@ -1062,6 +1149,10 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
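+ /* track whether the HW verified the CRC of every segment of this PDU */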
+ if (queue->data_digest &&
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
+
while (true) {
int recv_len, ret;
@@ -1090,7 +1181,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
recv_len = min_t(size_t, recv_len,
iov_iter_count(&req->iter));
- if (queue->data_digest)
+ if (queue->data_digest &&
+ !test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
ret = skb_copy_and_hash_datagram_iter(skb, *offset,
&req->iter, recv_len, queue->rcv_hash);
else
@@ -1132,8 +1224,11 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ struct request *rq;
int ret;
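+ /* the skb carrying the digest itself also counts toward CRC validity */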
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
+
ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
if (unlikely(ret))
return ret;
@@ -1144,9 +1239,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
if (queue->ddgst_remaining)
return 0;
+ rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags)) {
+ /*
+ * If HW successfully offloaded the digest
+ * verification, we can skip it
+ */
+ if (nvme_tcp_ddp_ddgst_ok(queue))
+ goto out;
+ /*
+ * Otherwise we have to recalculate and verify the
+ * digest with the software fallback.
+ */
+ nvme_tcp_ddp_ddgst_recalc(queue->rcv_hash, rq, &queue->exp_ddgst);
+ }
+
if (queue->recv_ddgst != queue->exp_ddgst) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
@@ -1157,9 +1267,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
le32_to_cpu(queue->exp_ddgst));
}
+out:
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
nvme_tcp_end_request(rq, le16_to_cpu(req->status));
@@ -1966,7 +2075,8 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
nvme_tcp_restore_sock_calls(queue);
cancel_work_sync(&queue->io_work);
- if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+ if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) ||
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
nvme_tcp_unoffload_socket(queue);
}
@@ -1996,7 +2106,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
goto err;
netdev = ctrl->queues[idx].ctrl->offloading_netdev;
- if (netdev && is_netdev_ulp_offload_active(netdev)) {
+ if (netdev && is_netdev_ulp_offload_active(netdev, &ctrl->queues[idx])) {
ret = nvme_tcp_offload_socket(&ctrl->queues[idx]);
if (ret) {
dev_err(nctrl->device,
@@ -2015,7 +2125,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
ctrl->offloading_netdev = NULL;
goto done;
}
- if (is_netdev_ulp_offload_active(netdev))
+ if (is_netdev_ulp_offload_active(netdev, &ctrl->queues[idx]))
nvme_tcp_offload_limits(&ctrl->queues[idx], netdev);
/* release the device as no offload context is established yet. */
dev_put(netdev);