@@ -1318,6 +1318,10 @@ static int mlx4_ib_alloc_pv_bufs(struct
tun_qp->ring[i].addr,
rx_buf_size,
DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map))) {
+ WARN(1, "DMA mapping error in mlx4_ib_alloc_pv_bufs in ring");
+ goto err;
+ }
}
for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1330,6 +1334,12 @@ static int mlx4_ib_alloc_pv_bufs(struct
tun_qp->tx_ring[i].buf.addr,
tx_buf_size,
DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ctx->ib_dev, tun_qp->tx_ring[i].buf.map))) {
+ WARN(1, "DMA mapping error in mlx4_ib_alloc_pv_bufs in tx_ring");
+ goto tx_err;
+ }
+
+
tun_qp->tx_ring[i].ah = NULL;
}
spin_lock_init(&tun_qp->tx_lock);
@@ -556,6 +556,12 @@ static int alloc_proxy_bufs(struct ib_de
ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
sizeof (struct mlx4_ib_proxy_sqp_hdr),
DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map))) {
+ WARN(1, "DMA mapping error in mlx4_qp:alloc_proxy_bufs");
+ goto err;
+ }
+
+
}
return 0;
@@ -1022,12 +1022,23 @@ int ib_send_mad(struct ib_mad_send_wr_pr
mad_send_wr->send_buf.mad,
sge[0].length,
DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) {
+ WARN(1, "DMA mapping error in ib_send_mad on header");
+ return -EIO;
+ }
mad_send_wr->header_mapping = sge[0].addr;
sge[1].addr = ib_dma_map_single(mad_agent->device,
ib_get_payload(mad_send_wr),
sge[1].length,
DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+ WARN(1, "DMA mapping error in ib_send_mad on payload");
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->header_mapping,
+ sge[0].length, DMA_TO_DEVICE);
+ return -EIO;
+ }
mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -2590,6 +2601,12 @@ static int ib_mad_post_receive_mads(stru
sizeof *mad_priv -
sizeof mad_priv->header,
DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) {
+ WARN(1, "DMA mapping error in ib_mad_post_receive_mads");
+ ret = -EIO;
+ break;
+ }
+
mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
On my Fedora Rawhide boxes, I noticed warnings from the DMA API debug code saying that InfiniBand modules were not checking for DMA mapping errors. I wrote this patch to silence the warnings. I tested it on a pair of x86_64 machines running 3.9.0-0.rc7.git2.1.fc20 with InfiniBand: Mellanox Technologies MT25418 [ConnectX VPI PCIe 2.0 2.5GT/s - IB DDR / 10GigE] (rev a0) cards in them. It silences the warnings. [V2: changed return values to -EIO] Signed-off-by: Jay Fenlason <fenlason@redhat.com> -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html