diff mbox series

[2/6] libxscale: Add support for pd and mr

Message ID 20241010081049.1448826-3-tianx@yunsilicon.com (mailing list archive)
State Changes Requested
Headers show
Series [1/6] libxscale: Introduce xscale user space RDMA provider | expand

Commit Message

Tian Xin Oct. 10, 2024, 8:10 a.m. UTC
This patch adds support for PD and MR operations, including:
1. alloc_pd
2. dealloc_pd
3. reg_mr
4. dereg_mr

Signed-off-by: Tian Xin <tianx@yunsilicon.com>
Signed-off-by: Wei Honggang <weihg@yunsilicon.com>
Signed-off-by: Zhao Qianwei <zhaoqw@yunsilicon.com>
Signed-off-by: Li Qiang <liq@yunsilicon.com>
Signed-off-by: Yan Lei <jacky@yunsilicon.com>
---
 providers/xscale/verbs.c  | 85 +++++++++++++++++++++++++++++++++++++++
 providers/xscale/xscale.c |  4 ++
 providers/xscale/xscale.h | 29 +++++++++++++
 3 files changed, 118 insertions(+)
diff mbox series

Patch

diff --git a/providers/xscale/verbs.c b/providers/xscale/verbs.c
index 943665a8..ed265d6e 100644
--- a/providers/xscale/verbs.c
+++ b/providers/xscale/verbs.c
@@ -36,6 +36,91 @@  int xsc_query_port(struct ibv_context *context, u8 port,
 	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof(cmd));
 }
 
+/*
+ * xsc_alloc_pd() - allocate a protection domain.
+ *
+ * Issues the uverbs ALLOC_PD command and records the kernel-assigned PD
+ * number (pdn) from the provider-specific response.  The reference count
+ * starts at 1; xsc_free_pd() refuses to destroy the PD while it is > 1.
+ *
+ * Return: the embedded ibv_pd on success, NULL on failure (calloc or the
+ * command failed; errno is presumably set by the failing call — confirm
+ * ibv_cmd_alloc_pd() semantics).
+ */
+struct ibv_pd *xsc_alloc_pd(struct ibv_context *context)
+{
+	struct ibv_alloc_pd cmd;
+	struct xsc_alloc_pd_resp resp;
+	struct xsc_pd *pd;
+
+	pd = calloc(1, sizeof(*pd));
+	if (!pd)
+		return NULL;
+
+	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd),
+			     &resp.ibv_resp, sizeof(resp))) {
+		free(pd);
+		return NULL;
+	}
+
+	/* Single owner at creation; dependent objects take references later. */
+	atomic_init(&pd->refcount, 1);
+	pd->pdn = resp.pdn;
+	xsc_dbg(to_xctx(context)->dbg_fp, XSC_DBG_PD, "pd number:%u\n",
+		pd->pdn);
+
+	return &pd->ibv_pd;
+}
+
+/*
+ * xsc_free_pd() - destroy a protection domain.
+ *
+ * Refuses with EBUSY while other objects still hold a reference
+ * (refcount > 1); otherwise issues the DEALLOC_PD command and frees the
+ * wrapper.
+ *
+ * Return: 0 on success, EBUSY while in use, or the command's error code.
+ */
+int xsc_free_pd(struct ibv_pd *pd)
+{
+	int ret;
+	struct xsc_pd *xpd = to_xpd(pd);
+
+	if (atomic_load(&xpd->refcount) > 1)
+		return EBUSY;
+
+	ret = ibv_cmd_dealloc_pd(pd);
+	if (ret)
+		return ret;
+
+	xsc_dbg(to_xctx(pd->context)->dbg_fp, XSC_DBG_PD, "dealloc pd\n");
+	free(xpd);
+
+	return 0;
+}
+
+/*
+ * xsc_reg_mr() - register a memory region on @pd.
+ *
+ * Thin wrapper around ibv_cmd_reg_mr(); the requested access flags are
+ * cached in alloc_flags.  @hca_va is the virtual address the HCA will use
+ * for the region (normally equal to @addr for regular MRs).
+ *
+ * Return: the embedded ibv_mr on success, NULL on failure.
+ * NOTE(review): errno is not set explicitly when ibv_cmd_reg_mr() fails —
+ * confirm the command helper sets it, or assign ret to errno here.
+ */
+struct ibv_mr *xsc_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+			  u64 hca_va, int acc)
+{
+	struct xsc_mr *mr;
+	struct ibv_reg_mr cmd;
+	int ret;
+	enum ibv_access_flags access = (enum ibv_access_flags)acc;
+	struct ib_uverbs_reg_mr_resp resp;
+
+	mr = calloc(1, sizeof(*mr));
+	if (!mr)
+		return NULL;
+
+	ret = ibv_cmd_reg_mr(pd, addr, length, hca_va, access, &mr->vmr, &cmd,
+			     sizeof(cmd), &resp, sizeof(resp));
+	if (ret) {
+		free(mr);
+		return NULL;
+	}
+	/* Remember the original flags for later rereg/invalidate decisions. */
+	mr->alloc_flags = acc;
+
+	xsc_dbg(to_xctx(pd->context)->dbg_fp, XSC_DBG_MR, "lkey:%u, rkey:%u\n",
+		mr->vmr.ibv_mr.lkey, mr->vmr.ibv_mr.rkey);
+
+	return &mr->vmr.ibv_mr;
+}
+
+/*
+ * xsc_dereg_mr() - deregister a memory region and free its wrapper.
+ *
+ * NULL MRs have no kernel object behind them, so the DEREG command is
+ * skipped for those.  NOTE(review): this series does not register an
+ * alloc_null_mr op, so the IBV_MR_TYPE_NULL_MR branch looks unreachable
+ * today — presumably groundwork for a later patch; confirm.
+ *
+ * Return: 0 on success, or the DEREG command's error code (wrapper is
+ * not freed in that case).
+ */
+int xsc_dereg_mr(struct verbs_mr *vmr)
+{
+	int ret;
+
+	if (vmr->mr_type == IBV_MR_TYPE_NULL_MR)
+		goto free;
+
+	ret = ibv_cmd_dereg_mr(vmr);
+	if (ret)
+		return ret;
+
+free:
+	free(vmr);
+	return 0;
+}
+
 static void xsc_set_fw_version(struct ibv_device_attr *attr,
 			       union xsc_ib_fw_ver *fw_ver)
 {
diff --git a/providers/xscale/xscale.c b/providers/xscale/xscale.c
index c7be8127..cdc37fbd 100644
--- a/providers/xscale/xscale.c
+++ b/providers/xscale/xscale.c
@@ -33,6 +33,10 @@  static void xsc_free_context(struct ibv_context *ibctx);
 
 static const struct verbs_context_ops xsc_ctx_common_ops = {
 	.query_port = xsc_query_port,
+	/* PD and MR verbs introduced by this patch ([2/6]). */
+	.alloc_pd = xsc_alloc_pd,
+	.dealloc_pd = xsc_free_pd,
+	.reg_mr = xsc_reg_mr,
+	.dereg_mr = xsc_dereg_mr,
 	.query_device_ex = xsc_query_device_ex,
 	.free_context = xsc_free_context,
 };
diff --git a/providers/xscale/xscale.h b/providers/xscale/xscale.h
index 85538d93..3cef6781 100644
--- a/providers/xscale/xscale.h
+++ b/providers/xscale/xscale.h
@@ -120,6 +120,17 @@  struct xsc_context {
 	struct xsc_hw_ops *hw_ops;
 };
 
+/* Provider-private protection domain; handed to callers as &ibv_pd. */
+struct xsc_pd {
+	struct ibv_pd ibv_pd;	/* embedded verbs object; see to_xpd() */
+	u32 pdn;		/* PD number from xsc_alloc_pd_resp */
+	atomic_int refcount;	/* 1 at alloc; >1 makes xsc_free_pd() return EBUSY */
+};
+
+/* Provider-private memory region wrapper around verbs_mr. */
+struct xsc_mr {
+	struct verbs_mr vmr;	/* embedded verbs MR; see to_xmr() */
+	u32 alloc_flags;	/* ibv access flags requested at reg_mr time */
+};
+
 union xsc_ib_fw_ver {
 	u64 data;
 	struct {
@@ -154,6 +165,17 @@  static inline struct xsc_context *to_xctx(struct ibv_context *ibctx)
 	return container_of(ibctx, struct xsc_context, ibv_ctx.context);
 }
 
+/* to_xpd() always returns the real xsc_pd object, i.e. the protection domain. */
+static inline struct xsc_pd *to_xpd(struct ibv_pd *ibpd)
+{
+	return container_of(ibpd, struct xsc_pd, ibv_pd);
+}
+
+/* Convert a generic ibv_mr back to the provider-private xsc_mr wrapper. */
+static inline struct xsc_mr *to_xmr(struct ibv_mr *ibmr)
+{
+	return container_of(ibmr, struct xsc_mr, vmr.ibv_mr);
+}
+
 int xsc_query_device(struct ibv_context *context, struct ibv_device_attr *attr);
 int xsc_query_device_ex(struct ibv_context *context,
 			const struct ibv_query_device_ex_input *input,
@@ -161,4 +183,11 @@  int xsc_query_device_ex(struct ibv_context *context,
 int xsc_query_port(struct ibv_context *context, u8 port,
 		   struct ibv_port_attr *attr);
 
+/* PD verbs (implemented in verbs.c). */
+struct ibv_pd *xsc_alloc_pd(struct ibv_context *context);
+int xsc_free_pd(struct ibv_pd *pd);
+
+/* MR verbs (implemented in verbs.c). */
+struct ibv_mr *xsc_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+			  u64 hca_va, int access);
+int xsc_dereg_mr(struct verbs_mr *mr);
+
 #endif /* XSC_H */