@@ -754,6 +754,41 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
struct sock *sk,
struct tcp_ddp_io *ddp)
{
+ struct scatterlist *sg = ddp->sg_table.sgl;
+ struct nvmeotcp_queue_entry *q_entry;
+ struct mlx5e_nvmeotcp_queue *queue;
+ struct mlx5_core_dev *mdev;
+ int i, size = 0, count = 0;
+
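+ /* Recover the offload queue stashed in the socket's DDP context */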
+ queue = container_of(tcp_ddp_get_ctx(sk),
+ struct mlx5e_nvmeotcp_queue, tcp_ddp_ctx);
+
+ mdev = queue->priv->mdev;
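+ /* DMA-map the command's destination buffers for device placement */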
+ count = dma_map_sg(mdev->device, ddp->sg_table.sgl, ddp->nents,
+ DMA_FROM_DEVICE);
+ if (!count)
+ return -ENOMEM;
+
+ if (WARN_ON(count > mlx5e_get_max_sgl(mdev))) {
+ dma_unmap_sg(mdev->device, ddp->sg_table.sgl, ddp->nents,
+ DMA_FROM_DEVICE);
+ return -ENOSPC;
+ }
+
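+ /* Total DDP length: sum the post-mapping DMA segment lengths */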
+ for (i = 0; i < count; i++)
+ size += sg_dma_len(&sg[i]);
+
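+ /* Record the mapping under its command id for later teardown */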
+ q_entry = &queue->ccid_table[ddp->command_id];
+ q_entry->size = size;
+ q_entry->ddp = ddp;
+ q_entry->sgl = sg;
+ q_entry->ccid_gen++;
+ q_entry->sgl_length = count;
+
return 0;
}
@@ -791,11 +826,12 @@ mlx5e_nvmeotcp_ddp_teardown(struct net_device *netdev,
struct tcp_ddp_io *ddp,
void *ddp_ctx)
{
- struct mlx5e_nvmeotcp_queue *queue =
- (struct mlx5e_nvmeotcp_queue *)tcp_ddp_get_ctx(sk);
+ struct mlx5e_nvmeotcp_queue *queue;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct nvmeotcp_queue_entry *q_entry;
+ queue = container_of(tcp_ddp_get_ctx(sk),
+ struct mlx5e_nvmeotcp_queue, tcp_ddp_ctx);
q_entry = &queue->ccid_table[ddp->command_id];
WARN_ON(q_entry->sgl_length == 0);
@@ -811,6 +847,12 @@ static void
mlx5e_nvmeotcp_dev_resync(struct net_device *netdev,
struct sock *sk, u32 seq)
{
+ struct mlx5e_nvmeotcp_queue *queue =
+ container_of(tcp_ddp_get_ctx(sk), struct mlx5e_nvmeotcp_queue, tcp_ddp_ctx);
+
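+ /* Flag the resync and re-post static params at TCP sequence 'seq' */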
+ queue->after_resync_cqe = 1;
+ mlx5e_nvmeotcp_rx_post_static_params_wqe(queue, seq);
}
static const struct tcp_ddp_dev_ops mlx5e_nvmeotcp_ops = {