--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -75,6 +75,9 @@ u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct rxe_mr *mr);
+int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
+			    u64 iova, int access, struct rxe_mr *mr);
+
+
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum rxe_mr_copy_dir dir, u32 *crcp);
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -4,6 +4,8 @@
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
+#include <linux/dma-buf.h>
+
#include "rxe.h"
#include "rxe_loc.h"
@@ -207,6 +209,107 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	return err;
 }
+static int rxe_map_dmabuf_mr(struct rxe_mr *mr)
+{
+	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+	struct ib_umem *umem = mr->umem;
+	int err;
+
+	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+	if (err)
+		goto err1;
+
+	/* build the rxe page map from the umem */
+	err = rxe_mr_gen_map(mr, umem);
+	if (err)
+		goto err2;
+
+	return 0;	/* callers treat any non-zero value as an error */
+
+err2:
+	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+err1:
+	return err;
+}
+
+/* Callback invoked by the dma-buf exporter when the mapped pages are
+ * about to be moved and the current mapping becomes invalid.
+ */
+static void rxe_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+{
+	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+	struct rxe_mr *mr = umem_dmabuf->private;
+	int err;
+
+	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+
+	/* The whole memory region is mapped again immediately. */
+	err = rxe_map_dmabuf_mr(mr);
+	if (err)
+		pr_err("%s: failed to map the dma-buf region\n", __func__);
+}
+
+static const struct dma_buf_attach_ops rxe_ib_dmabuf_attach_ops = {
+	.move_notify = rxe_ib_dmabuf_invalidate_cb,
+};
+
+/* Initialize a umem and map the whole area of the dma-buf. */
+int rxe_mr_dmabuf_init_user(struct rxe_pd *pd, int fd, u64 start, u64 length,
+			    u64 iova, int access, struct rxe_mr *mr)
+{
+	struct ib_umem_dmabuf *umem_dmabuf;
+	int num_buf;
+	int err;
+	int i;
+
+	umem_dmabuf = ib_umem_dmabuf_get(pd->ibpd.device, start, length, fd,
+					 access, &rxe_ib_dmabuf_attach_ops);
+	if (IS_ERR(umem_dmabuf)) {
+		err = PTR_ERR(umem_dmabuf);
+		pr_err("%s: failed to get umem_dmabuf (%d)\n", __func__, err);
+		goto err1;
+	}
+
+	umem_dmabuf->private = mr;
+
+	mr->umem = &umem_dmabuf->umem;
+	mr->umem->iova = iova;
+	num_buf = ib_umem_num_pages(mr->umem);
+
+	rxe_mr_init(access, mr);
+
+	err = rxe_mr_alloc(mr, num_buf);
+	if (err)
+		goto err1;
+
+	mr->page_shift = PAGE_SHIFT;
+	mr->page_mask = PAGE_SIZE - 1;
+
+	mr->ibmr.pd = &pd->ibpd;
+	mr->access = access;
+	mr->length = length;
+	mr->iova = iova;
+	mr->va = start;
+	mr->offset = ib_umem_offset(mr->umem);
+	mr->state = RXE_MR_STATE_VALID;
+	mr->type = RXE_MR_TYPE_MR;
+
+	err = rxe_map_dmabuf_mr(mr);
+	if (err) {
+		pr_err("%s: failed to map the dma-buf region\n", __func__);
+		goto err2;
+	}
+
+	return 0;
+
+err2:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+	mr->map = NULL;	/* avoid a double free in rxe_mr_cleanup() */
+err1:
+	return err;
+}
+
 int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 {
 	int err;
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -959,6 +959,39 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	return ERR_PTR(err);
 }
+static struct ib_mr *rxe_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+					    u64 length, u64 iova, int fd,
+					    int access, struct ib_udata *udata)
+{
+	struct rxe_dev *rxe = to_rdev(ibpd->device);
+	struct rxe_pd *pd = to_rpd(ibpd);
+	struct rxe_mr *mr;
+	int err;
+
+	mr = rxe_alloc(&rxe->mr_pool);
+	if (!mr) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	rxe_add_index(mr);
+
+	rxe_add_ref(pd);
+
+	err = rxe_mr_dmabuf_init_user(pd, fd, start, length, iova, access, mr);
+	if (err)
+		goto err2;
+
+	return &mr->ibmr;
+
+err2:
+	rxe_drop_ref(pd);
+	rxe_drop_index(mr);
+	rxe_drop_ref(mr);
+err1:
+	return ERR_PTR(err);
+}
+
 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
 {
@@ -1139,6 +1172,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.query_qp = rxe_query_qp,
 	.query_srq = rxe_query_srq,
 	.reg_user_mr = rxe_reg_user_mr,
+	.reg_user_mr_dmabuf = rxe_reg_user_mr_dmabuf,
 	.req_notify_cq = rxe_req_notify_cq,
 	.resize_cq = rxe_resize_cq,
@@ -1181,6 +1215,8 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 	}
 	rxe->tfm = tfm;
+	dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+
 	err = ib_register_device(dev, ibdev_name, NULL);
 	if (err)
 		pr_warn("%s failed with error %d\n", __func__, err);
Implement an ib device operation 'reg_user_mr_dmabuf'. Import a dma-buf
using the IB core API and map the memory area linked to the dma-buf.

Signed-off-by: Shunsuke Mie <mie@igel.co.jp>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |   3 +
 drivers/infiniband/sw/rxe/rxe_mr.c    | 103 ++++++++++++++++++++++++++
 drivers/infiniband/sw/rxe/rxe_verbs.c |  36 +++++++++
 3 files changed, 142 insertions(+)
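
For reference, a minimal userspace sketch of how this path is exercised
through rdma-core's ibv_reg_dmabuf_mr() (available since rdma-core v34).
The helper name register_dmabuf_mr() is hypothetical, and the dma-buf fd
is assumed to come from some exporter (e.g. udmabuf); producing it is
outside the scope of this patch.

	#include <stdio.h>
	#include <infiniband/verbs.h>

	/* Hypothetical helper: register `len` bytes of a dma-buf,
	 * starting at `offset` inside the buffer, as an RDMA memory
	 * region. `fd` is a dma-buf file descriptor obtained from an
	 * exporter; how it is produced is not part of this patch.
	 */
	static struct ibv_mr *register_dmabuf_mr(struct ibv_pd *pd, int fd,
						 uint64_t offset, size_t len)
	{
		struct ibv_mr *mr;

		/* iova is the address the device uses for this region;
		 * reusing `offset` keeps it equal to the buffer offset,
		 * but any value the application tracks consistently works.
		 */
		mr = ibv_reg_dmabuf_mr(pd, offset, len, offset, fd,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_REMOTE_READ |
				       IBV_ACCESS_REMOTE_WRITE);
		if (!mr)
			fprintf(stderr, "ibv_reg_dmabuf_mr failed\n");

		return mr;
	}

On rxe this lands in rxe_reg_user_mr_dmabuf() above. Note that this
importer attaches dynamically (it registers a move_notify callback
rather than pinning the buffer), so the exporter may still move the
pages; in that case rxe_ib_dmabuf_invalidate_cb() unmaps them and
immediately re-maps the whole region.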