@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ &buf->buf, GFP_KERNEL);
if (err)
goto out;
@@ -768,7 +768,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, &qp->buf, gfp)) {
err = -ENOMEM;
goto err_db;
}
@@ -140,8 +140,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, &srq->buf, GFP_KERNEL)) {
err = -ENOMEM;
goto err_db;
}
@@ -576,103 +576,41 @@ out:
return res;
}
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size,
struct mlx4_buf *buf, gfp_t gfp)
{
dma_addr_t t;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
+ size, &t, gfp);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- memset(buf->direct.buf, 0, size);
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- gfp);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf =
- dma_alloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE,
- &t, gfp);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
-
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
- }
+ buf->direct.map = t;
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
}
- return 0;
-
-err_free:
- mlx4_buf_free(dev, size, buf);
+ memset(buf->direct.buf, 0, size);
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->persist->pdev->dev, size,
- buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; ++i)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->persist->pdev->dev, size,
+ buf->direct.buf, buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
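
For reference, here is a minimal standalone sketch (ordinary userspace C, not driver code) of the alignment logic that survives in the new mlx4_buf_alloc(): start from the allocation order, then shrink page_shift and double npages until the bus address is aligned to (1 << page_shift). The page size, the get_order() stand-in, and the example size/address below are assumptions made up purely for illustration.

/*
 * Sketch only -- mirrors the page_shift/npages loop kept in the new
 * mlx4_buf_alloc(); all constants and the sample inputs are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12                    /* assume 4 KiB pages */
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

/* Rough stand-in for the kernel's get_order() for this sketch. */
static int sketch_get_order(unsigned long size)
{
	unsigned long n = (size + SKETCH_PAGE_SIZE - 1) >> SKETCH_PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 24 * 1024;          /* example buffer size */
	uint64_t dma_addr = 0x1f2000;            /* example bus address */
	int page_shift = sketch_get_order(size) + SKETCH_PAGE_SHIFT;
	int npages = 1;

	/* Same idea as the driver: lower the shift until dma_addr is aligned. */
	while (dma_addr & ((1ULL << page_shift) - 1)) {
		--page_shift;
		npages *= 2;
	}

	printf("size=%lu page_shift=%d npages=%d\n", size, page_shift, npages);
	return 0;
}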
@@ -789,7 +727,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct)
+ int size)
{
int err;
@@ -799,7 +737,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
*wqres->db.db = 0;
- err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_alloc(dev, size, &wqres->buf, GFP_KERNEL);
if (err)
goto err_db;
@@ -72,22 +72,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
*/
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
- cq->buf_size, 2 * PAGE_SIZE);
+ cq->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
- err = mlx4_en_map_buffer(&cq->wqres.buf);
- if (err)
- goto err_res;
-
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
-err_res:
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
kfree(cq);
*pcq = NULL;
@@ -189,7 +183,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
- mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
mlx4_release_eq(priv->mdev->dev, cq->vector);
@@ -2744,7 +2744,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
- MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
@@ -80,38 +80,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
}
}
-
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
- struct page **pages;
- int i;
-
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return 0;
-
- pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
-
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return;
-
- vunmap(buf->direct.buf);
-}
-
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
@@ -392,17 +392,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
- ring->buf_size, 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map RX buffer\n");
- goto err_hwq;
- }
ring->buf = ring->wqres.buf.direct.buf;
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -410,8 +404,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
*pring = ring;
return 0;
-err_hwq:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -498,7 +490,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -92,20 +92,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
- 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map TX buffer\n");
- goto err_hwq_res;
- }
-
ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -116,7 +109,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
MLX4_RESERVE_ETH_BF_QP);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
- goto err_map;
+ goto err_hwq_res;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -151,8 +144,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
@@ -178,7 +169,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -808,8 +808,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
@@ -802,10 +802,7 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return -ENOMEM;
for (i = 0; i < buf->npages; ++i)
- if (buf->nbufs == 1)
- page_list[i] = buf->direct.map + (i << buf->page_shift);
- else
- page_list[i] = buf->page_list[i].map;
+ page_list[i] = buf->direct.map + (i << buf->page_shift);
err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
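
With only the direct mapping left, every MTT entry is simply the base bus address plus a page-sized stride. A minimal sketch of that arithmetic follows; the base address, page shift, and page count are made-up values for illustration, not anything the driver guarantees.

/* Sketch only -- the simplified page_list fill for a contiguous buffer. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t direct_map = 0x80000000ULL;     /* example base bus address */
	int page_shift = 12;                     /* assume 4 KiB HCA pages */
	int npages = 4;
	uint64_t page_list[4];
	int i;

	for (i = 0; i < npages; ++i)
		page_list[i] = direct_map + ((uint64_t)i << page_shift);

	for (i = 0; i < npages; ++i)
		printf("mtt[%d] = 0x%llx\n", i, (unsigned long long)page_list[i]);
	return 0;
}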
@@ -573,7 +573,6 @@ struct mlx4_buf_list {
struct mlx4_buf {
struct mlx4_buf_list direct;
- struct mlx4_buf_list *page_list;
int nbufs;
int npages;
int page_shift;
@@ -982,16 +981,12 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_SLAVE;
}
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size,
struct mlx4_buf *buf, gfp_t gfp);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return buf->direct.buf + offset;
- else
- return buf->page_list[offset >> PAGE_SHIFT].buf +
- (offset & (PAGE_SIZE - 1));
+ return buf->direct.buf + offset;
}
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
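
After this change mlx4_buf_offset() is plain pointer arithmetic. The sketch below (userspace C with made-up sizes, not driver code) checks that, over one contiguous block, the removed per-page lookup and the remaining direct addition reach the same byte:

/* Sketch only -- old page_list indexing vs. new direct offsetting. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096

int main(void)
{
	size_t size = 4 * SKETCH_PAGE_SIZE;
	size_t offset = 2 * SKETCH_PAGE_SIZE + 100;
	char *direct = malloc(size);             /* stand-in for direct.buf */
	char *page_list[4];                      /* stand-in for the old page_list */
	char *new_ptr, *old_ptr;
	int i;

	if (!direct)
		return 1;
	for (i = 0; i < 4; ++i)
		page_list[i] = direct + i * SKETCH_PAGE_SIZE;

	/* New form: one addition.  Old form: pick the page, then the remainder. */
	new_ptr = direct + offset;
	old_ptr = page_list[offset / SKETCH_PAGE_SIZE] +
		  (offset & (SKETCH_PAGE_SIZE - 1));

	assert(new_ptr == old_ptr);
	printf("offset %zu -> same byte either way\n", offset);
	free(direct);
	return 0;
}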
@@ -1027,7 +1022,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct);
+ int size);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int size);