@@ -762,6 +762,37 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
struct sock *sk,
struct ulp_ddp_io *ddp)
{
+ struct scatterlist *sg = ddp->sg_table.sgl;
+ struct mlx5e_nvmeotcp_queue *queue;
+ struct mlx5_core_dev *mdev;
+ int i, size = 0, count = 0;
+
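+ /* Resolve the offload queue bound to this socket via its ULP DDP context. */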
+ queue = container_of(ulp_ddp_get_ctx(sk), struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+
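+ /* DMA-map the destination SGL so the device can place payload data directly. */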
+ mdev = queue->priv->mdev;
+ count = dma_map_sg(mdev->device, ddp->sg_table.sgl, ddp->nents,
+ DMA_FROM_DEVICE);
+ if (count <= 0)
+ return -EINVAL;
+
+ if (WARN_ON(count > mlx5e_get_max_sgl(mdev))) {
+ dma_unmap_sg(mdev->device, ddp->sg_table.sgl, ddp->nents,
+ DMA_FROM_DEVICE);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < count; i++)
+ size += sg_dma_len(&sg[i]);
+
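+ /* Record the mapping under this command id so a later teardown can find and unmap it. */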
+ queue->ccid_table[ddp->command_id].size = size;
+ queue->ccid_table[ddp->command_id].ddp = ddp;
+ queue->ccid_table[ddp->command_id].sgl = sg;
+ queue->ccid_table[ddp->command_id].ccid_gen++;
+ queue->ccid_table[ddp->command_id].sgl_length = count;
+
return 0;
}
@@ -819,6 +850,12 @@ static void
mlx5e_nvmeotcp_dev_resync(struct net_device *netdev,
struct sock *sk, u32 seq)
{
+ struct mlx5e_nvmeotcp_queue *queue =
+ container_of(ulp_ddp_get_ctx(sk), struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+
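+ /* Flag the resync and post a static params WQE so HW re-syncs at this TCP seq. */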
+ queue->after_resync_cqe = 1;
+ mlx5e_nvmeotcp_rx_post_static_params_wqe(queue, seq);
}
static const struct ulp_ddp_dev_ops mlx5e_nvmeotcp_ops = {