@@ -329,6 +329,15 @@ static int mlx4_poll_one(struct mlx4_cq *cq,
wc->sl = ntohs(cqe->sl_vid) >> 13;
else
wc->sl = ntohs(cqe->sl_vid) >> 12;
+
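+ /* Report IBV_WC_IP_CSUM_OK only for IPv4 packets whose IP header
+ * and TCP/UDP checksums were all verified by HW; cqe->status is
+ * big-endian, hence the htonl() on the mask. */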
+ if ((*cur_qp) && ((*cur_qp)->qp_cap_cache & MLX4_RX_CSUM_VALID)) {
+ wc->wc_flags |= ((cqe->status & htonl(MLX4_CQE_STATUS_IPV4_CSUM_OK)) ==
+ htonl(MLX4_CQE_STATUS_IPV4_CSUM_OK)) <<
+ IBV_WC_IP_CSUM_OK_SHIFT;
+ }
}
return CQ_OK;
@@ -257,6 +257,8 @@ struct mlx4_qp {
struct mlx4_wq rq;
uint8_t link_layer;
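+ /* cached csum capabilities: MLX4_CSUM_SUPPORT_*, MLX4_RX_CSUM_VALID */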
+ uint32_t qp_cap_cache;
};
struct mlx4_av {
@@ -279,6 +281,24 @@ struct mlx4_ah {
uint8_t mac[6];
};
+enum {
+ MLX4_CSUM_SUPPORT_UD_OVER_IB = (1 << 0),
+ MLX4_CSUM_SUPPORT_RAW_OVER_ETH = (1 << 1),
+ /* Report the RX checksum status only on QPs where HW actually validates it */
+ MLX4_RX_CSUM_VALID = (1 << 16),
+};
+
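+ /* Bits of the big-endian cqe->status word; poll_cq tests them with
+ * masks converted through htonl(). */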
+enum mlx4_cqe_status {
+ MLX4_CQE_STATUS_TCP_UDP_CSUM_OK = (1 << 2),
+ MLX4_CQE_STATUS_IPV4_PKT = (1 << 22),
+ MLX4_CQE_STATUS_IP_HDR_CSUM_OK = (1 << 28),
+ MLX4_CQE_STATUS_IPV4_CSUM_OK = MLX4_CQE_STATUS_IPV4_PKT |
+ MLX4_CQE_STATUS_IP_HDR_CSUM_OK |
+ MLX4_CQE_STATUS_TCP_UDP_CSUM_OK
+};
+
struct mlx4_cqe {
uint32_t vlan_my_qpn;
uint32_t immed_rss_invalid;
@@ -286,6 +306,6 @@ struct mlx4_cqe {
uint16_t sl_vid;
uint16_t rlid;
- uint32_t reserved2;
+ uint32_t status;
uint32_t byte_cnt;
uint16_t wqe_index;
uint16_t checksum;
@@ -289,12 +289,35 @@ int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
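+ /* Checksum offload was requested; allow it only if modify_qp
+ * cached UD csum support for this QP, else fail the WR. */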
+ if (wr->send_flags & IBV_SEND_IP_CSUM) {
+ if (!(qp->qp_cap_cache & MLX4_CSUM_SUPPORT_UD_OVER_IB)) {
+ ret = EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+ ctrl->srcrb_flags |= htonl(MLX4_WQE_CTRL_IP_HDR_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ }
break;
case IBV_QPT_RAW_PACKET:
/* For raw eth, the MLX4_WQE_CTRL_SOLICIT flag is used
* to indicate that no icrc should be calculated */
ctrl->srcrb_flags |= htonl(MLX4_WQE_CTRL_SOLICIT);
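+ /* Raw Ethernet QPs use the same WQE csum flags but are gated by
+ * a separate capability bit. */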
+ if (wr->send_flags & IBV_SEND_IP_CSUM) {
+ if (!(qp->qp_cap_cache & MLX4_CSUM_SUPPORT_RAW_OVER_ETH)) {
+ ret = EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+ ctrl->srcrb_flags |= htonl(MLX4_WQE_CTRL_IP_HDR_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ }
break;
default:
@@ -606,14 +606,39 @@ int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
struct ibv_modify_qp cmd;
struct ibv_port_attr port_attr;
struct mlx4_qp *mqp = to_mqp(qp);
+ struct ibv_device_attr device_attr;
int ret;
+ memset(&device_attr, 0, sizeof(device_attr));
if (attr_mask & IBV_QP_PORT) {
ret = ibv_query_port(qp->context, attr->port_num,
&port_attr);
if (ret)
return ret;
mqp->link_layer = port_attr.link_layer;
+
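+ /* Cache csum offload capabilities on the QP so the post_send and
+ * poll_cq hot paths need not query the device each time. */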
+ ret = ibv_query_device(qp->context, &device_attr);
+ if (ret)
+ return ret;
+
+ switch (qp->qp_type) {
+ case IBV_QPT_UD:
+ if ((mqp->link_layer == IBV_LINK_LAYER_INFINIBAND) &&
+ (device_attr.device_cap_flags & IBV_DEVICE_UD_IP_CSUM))
+ mqp->qp_cap_cache |= MLX4_CSUM_SUPPORT_UD_OVER_IB |
+ MLX4_RX_CSUM_VALID;
+ break;
+ case IBV_QPT_RAW_PACKET:
+ if ((mqp->link_layer == IBV_LINK_LAYER_ETHERNET) &&
+ (device_attr.device_cap_flags & IBV_DEVICE_RAW_IP_CSUM))
+ mqp->qp_cap_cache |= MLX4_CSUM_SUPPORT_RAW_OVER_ETH |
+ MLX4_RX_CSUM_VALID;
+ break;
+ default:
+ break;
+ }
}
if (qp->state == IBV_QPS_RESET &&
@@ -38,9 +38,11 @@ enum {
};
enum {
- MLX4_WQE_CTRL_FENCE = 1 << 6,
- MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
- MLX4_WQE_CTRL_SOLICIT = 1 << 1,
+ MLX4_WQE_CTRL_FENCE = 1 << 6,
+ MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
+ MLX4_WQE_CTRL_SOLICIT = 1 << 1,
+ MLX4_WQE_CTRL_IP_HDR_CSUM = 1 << 4,
+ MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
};
enum {
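
For reference, a minimal sketch of how an application might consume the
new offload once this patch is applied. QP/CQ setup and memory
registration are assumed to already exist, and post_with_csum() and
rx_csum_ok() are hypothetical helper names, not part of the patch:

#include <infiniband/verbs.h>

/* Ask HW to generate the IP and TCP/UDP checksums on transmit; the
 * provider fails with EINVAL if the QP lacks csum offload support. */
static int post_with_csum(struct ibv_qp *qp, struct ibv_sge *sge)
{
	struct ibv_send_wr wr = {
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode     = IBV_WR_SEND,
		.send_flags = IBV_SEND_SIGNALED | IBV_SEND_IP_CSUM,
	};
	struct ibv_send_wr *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}

/* On receive, trust the packet checksums only when the provider
 * reports that HW validated them. */
static int rx_csum_ok(const struct ibv_wc *wc)
{
	return wc->status == IBV_WC_SUCCESS &&
	       !!(wc->wc_flags & IBV_WC_IP_CSUM_OK);
}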