IB/srpt: Add RDMA/CM support

Message ID 20180222220943.10402-1-bart.vanassche@wdc.com (mailing list archive)
State Superseded

Commit Message

Bart Van Assche Feb. 22, 2018, 10:09 p.m. UTC
Add a parameter for configuring the port on which the ib_srpt driver
listens for incoming RDMA/CM connections, namely
/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port. The default
value for this parameter is 0, which means "do not listen for incoming
RDMA/CM connections". Add RDMA/CM support to all code that handles
connection state changes. Modify srpt_init_nodeacl() such that ACLs can
be configured for IPv4 addresses.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
---
 drivers/infiniband/ulp/srpt/ib_srpt.c | 394 ++++++++++++++++++++++++++++------
 drivers/infiniband/ulp/srpt/ib_srpt.h |   6 +
 2 files changed, 338 insertions(+), 62 deletions(-)
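
For illustration only, a minimal user-space sketch of how the new attribute
could be exercised once ib_srpt is loaded (the path is the one named in the
commit message; the port number 5000 is an arbitrary example, and writing 0
disables RDMA/CM listening again):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* configfs attribute added by this patch; assumes configfs is
	 * mounted at /sys/kernel/config and the srpt driver is loaded.
	 */
	static const char path[] =
		"/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port";
	static const char port[] = "5000\n";	/* arbitrary example port */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open rdma_cm_port");
		return 1;
	}
	if (write(fd, port, strlen(port)) != (ssize_t)strlen(port)) {
		perror("write rdma_cm_port");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}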

Comments

Doug Ledford Feb. 23, 2018, 2:07 a.m. UTC | #1
On Thu, 2018-02-22 at 14:09 -0800, Bart Van Assche wrote:
> +static struct rdma_cm_id *srpt_create_rdma_id(u16 rdma_cm_port)
> +{
> +       struct rdma_cm_id *rdma_cm_id;
> +       struct sockaddr_in addr = { };
> +       int ret;
> +
> +       rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
                                      ^^^^^^^^^
> +                                   NULL, RDMA_PS_TCP, IB_QPT_RC);
> +       if (IS_ERR(rdma_cm_id)) {
> +               pr_err("RDMA/CM ID creation failed: %ld\n",
> +                      PTR_ERR(rdma_cm_id));
> +               goto out;
> +       }
> +
> +       /* We will listen on any RDMA device. */
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The above code, and this comment, do not seem to be in agreement.  
What happens if we have some RoCE devices on non-init_net namespaces and
we get an incoming connect on their IP?  We're supposedly listening on
the wildcard address, but will we see it?

Otherwise, I'm generally happy with this patch now.
Bart Van Assche Feb. 23, 2018, 6:28 p.m. UTC | #2
On Thu, 2018-02-22 at 21:07 -0500, Doug Ledford wrote:
> On Thu, 2018-02-22 at 14:09 -0800, Bart Van Assche wrote:
> > +static struct rdma_cm_id *srpt_create_rdma_id(u16 rdma_cm_port)
> > +{
> > +       struct rdma_cm_id *rdma_cm_id;
> > +       struct sockaddr_in addr = { };
> > +       int ret;
> > +
> > +       rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
>
>                                       ^^^^^^^^^
> > +                                   NULL, RDMA_PS_TCP, IB_QPT_RC);
> > +       if (IS_ERR(rdma_cm_id)) {
> > +               pr_err("RDMA/CM ID creation failed: %ld\n",
> > +                      PTR_ERR(rdma_cm_id));
> > +               goto out;
> > +       }
> > +
> > +       /* We will listen on any RDMA device. */
>
>              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>
> The above code, and this comment, do not seem to be in agreement.
> What happens if we have some RoCE devices on non-init_net namespaces and
> we get an incoming connect on their IP?  We're supposedly listening on
> the wildcard address, but will we see it?

If I trace the following call chain:
rdma_listen()
-> cma_ib_listen()
-> cma_req_handler()
-> cma_id_from_event()
-> cma_find_listener()
-> cma_match_net_dev()

then I see that the namespace passed to rdma_create_id() is ignored if an
RDMA ID has not been bound to a specific interface (bound_dev_if == 0).
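
For reference, the check I am reading, paraphrased from
drivers/infiniband/core/cma.c (not a verbatim copy; the helper names and
details may differ slightly between kernel versions), looks roughly like
this:

/* Paraphrase of cma_match_net_dev(): when the listening ID has never
 * been bound to a specific interface (bound_dev_if == 0), the first
 * half of the || short-circuits and the namespace comparison is never
 * performed, so a wildcard listener sees requests from any namespace.
 */
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* simplified: the real code also checks the address
		 * family / RoCE here for requests without a net_device
		 */
		return !id->port_num || id->port_num == port_num;

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->ifindex);
}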

Did I perhaps overlook something?

BTW, while reviewing the code I noticed that the comment above
srpt_rdma_cm_port_show() is incorrect, so I will have to repost this patch anyway.

> Otherwise, I'm generally happy with this patch now.


Thanks for the review!

Bart.

Patch

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0373b7c40902..9f70d7074585 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@ 
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/inet.h>
 #include <rdma/ib_cache.h>
 #include <scsi/scsi_proto.h>
 #include <scsi/scsi_tcq.h>
@@ -92,6 +93,11 @@  MODULE_PARM_DESC(srpt_service_guid,
 		 " instead of using the node_guid of the first HCA.");
 
 static struct ib_client srpt_client;
+/* Protects both rdma_cm_port and rdma_cm_id. */
+static DEFINE_MUTEX(rdma_cm_mutex);
+/* Port number RDMA/CM will bind to. */
+static u16 rdma_cm_port;
+static struct rdma_cm_id *rdma_cm_id;
 static void srpt_release_cmd(struct se_cmd *se_cmd);
 static void srpt_free_ch(struct kref *kref);
 static int srpt_queue_status(struct se_cmd *cmd);
@@ -220,7 +226,10 @@  static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 
 	switch (event->event) {
 	case IB_EVENT_COMM_EST:
-		ib_cm_notify(ch->ib_cm.cm_id, event->event);
+		if (ch->using_rdma_cm)
+			rdma_notify(ch->rdma_cm.cm_id, event->event);
+		else
+			ib_cm_notify(ch->ib_cm.cm_id, event->event);
 		break;
 	case IB_EVENT_QP_LAST_WQE_REACHED:
 		pr_debug("%s-%d, state %s: received Last WQE event.\n",
@@ -1057,6 +1066,8 @@  static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 	struct ib_qp_attr *attr;
 	int ret;
 
+	WARN_ON_ONCE(ch->using_rdma_cm);
+
 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
 	if (!attr)
 		return -ENOMEM;
@@ -1096,6 +1107,8 @@  static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 	int attr_mask;
 	int ret;
 
+	WARN_ON_ONCE(ch->using_rdma_cm);
+
 	qp_attr.qp_state = IB_QPS_RTR;
 	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
 	if (ret)
@@ -1746,18 +1759,33 @@  static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 		qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
 	}
 
-	ch->qp = ib_create_qp(sdev->pd, qp_init);
-	if (IS_ERR(ch->qp)) {
-		ret = PTR_ERR(ch->qp);
-		if (ret == -ENOMEM) {
-			sq_size /= 2;
-			if (sq_size >= MIN_SRPT_SQ_SIZE) {
-				ib_destroy_cq(ch->cq);
-				goto retry;
-			}
+	if (ch->using_rdma_cm) {
+		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
+		ch->qp = ch->rdma_cm.cm_id->qp;
+	} else {
+		ch->qp = ib_create_qp(sdev->pd, qp_init);
+		if (!IS_ERR(ch->qp)) {
+			ret = srpt_init_ch_qp(ch, ch->qp);
+			if (ret)
+				ib_destroy_qp(ch->qp);
+		} else {
+			ret = PTR_ERR(ch->qp);
+		}
+	}
+	if (ret) {
+		bool retry = sq_size > MIN_SRPT_SQ_SIZE;
+
+		if (retry) {
+			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
+				 sq_size, ret);
+			ib_free_cq(ch->cq);
+			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
+			goto retry;
+		} else {
+			pr_err("failed to create queue pair with sq_size = %d (%d)\n",
+			       sq_size, ret);
+			goto err_destroy_cq;
 		}
-		pr_err("failed to create_qp ret= %d\n", ret);
-		goto err_destroy_cq;
 	}
 
 	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
@@ -1766,10 +1794,6 @@  static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
 		 qp_init->cap.max_send_wr, ch);
 
-	ret = srpt_init_ch_qp(ch, ch->qp);
-	if (ret)
-		goto err_destroy_qp;
-
 	if (!sdev->use_srq)
 		for (i = 0; i < ch->rq_size; i++)
 			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
@@ -1778,9 +1802,8 @@  static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 	kfree(qp_init);
 	return ret;
 
-err_destroy_qp:
-	ib_destroy_qp(ch->qp);
 err_destroy_cq:
+	ch->qp = NULL;
 	ib_free_cq(ch->cq);
 	goto out;
 }
@@ -1849,9 +1872,13 @@  static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
 	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
 		return -ENOTCONN;
 
-	ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
-	if (ret < 0)
-		ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
+	if (ch->using_rdma_cm) {
+		ret = rdma_disconnect(ch->rdma_cm.cm_id);
+	} else {
+		ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
+		if (ret < 0)
+			ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
+	}
 
 	if (ret < 0 && srpt_close_ch(ch))
 		ret = 0;
@@ -2002,7 +2029,10 @@  static void srpt_release_channel_work(struct work_struct *w)
 	transport_deregister_session(se_sess);
 	ch->sess = NULL;
 
-	ib_destroy_cm_id(ch->ib_cm.cm_id);
+	if (ch->using_rdma_cm)
+		rdma_destroy_id(ch->rdma_cm.cm_id);
+	else
+		ib_destroy_cm_id(ch->ib_cm.cm_id);
 
 	srpt_destroy_ch_ib(ch);
 
@@ -2026,26 +2056,33 @@  static void srpt_release_channel_work(struct work_struct *w)
 
 /**
  * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
- * @cm_id: IB/CM connection identifier.
- * @port_num: Port through which the IB/CM REQ message was received.
+ * @sdev: HCA through which the login request was received.
+ * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
+ * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
+ * @port_num: Port through which the REQ message was received.
  * @pkey: P_Key of the incoming connection.
  * @req: SRP login request.
- * @src_addr: GID of the port that submitted the login request.
+ * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
+ * the login request.
  *
  * Ownership of the cm_id is transferred to the target session if this
- * functions returns zero. Otherwise the caller remains the owner of cm_id.
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
  */
-static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+static int srpt_cm_req_recv(struct srpt_device *const sdev,
+			    struct ib_cm_id *ib_cm_id,
+			    struct rdma_cm_id *rdma_cm_id,
 			    u8 port_num, __be16 pkey,
 			    const struct srp_login_req *req,
 			    const char *src_addr)
 {
-	struct srpt_device *sdev = cm_id->context;
 	struct srpt_port *sport = &sdev->port[port_num - 1];
 	struct srpt_nexus *nexus;
 	struct srp_login_rsp *rsp = NULL;
 	struct srp_login_rej *rej = NULL;
-	struct ib_cm_rep_param *rep_param = NULL;
+	union {
+		struct rdma_conn_param rdma_cm;
+		struct ib_cm_rep_param ib_cm;
+	} *rep_param = NULL;
 	struct srpt_rdma_ch *ch;
 	char i_port_id[36];
 	u32 it_iu_len;
@@ -2115,8 +2152,14 @@  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	ch->zw_cqe.done = srpt_zerolength_write_done;
 	INIT_WORK(&ch->release_work, srpt_release_channel_work);
 	ch->sport = sport;
-	ch->ib_cm.cm_id = cm_id;
-	cm_id->context = ch;
+	if (ib_cm_id) {
+		ch->ib_cm.cm_id = ib_cm_id;
+		ib_cm_id->context = ch;
+	} else {
+		ch->using_rdma_cm = true;
+		ch->rdma_cm.cm_id = rdma_cm_id;
+		rdma_cm_id->context = ch;
+	}
 	/*
 	 * ch->rq_size should be at least as large as the initiator queue
 	 * depth to avoid that the initiator driver has to report QUEUE_FULL
@@ -2227,7 +2270,7 @@  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
 	mutex_unlock(&sport->mutex);
 
-	ret = srpt_ch_qp_rtr(ch, ch->qp);
+	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
 	if (ret) {
 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
@@ -2251,25 +2294,38 @@  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	atomic_set(&ch->req_lim_delta, 0);
 
 	/* create cm reply */
-	rep_param->qp_num = ch->qp->qp_num;
-	rep_param->private_data = (void *)rsp;
-	rep_param->private_data_len = sizeof(*rsp);
-	rep_param->rnr_retry_count = 7;
-	rep_param->flow_control = 1;
-	rep_param->failover_accepted = 0;
-	rep_param->srq = 1;
-	rep_param->responder_resources = 4;
-	rep_param->initiator_depth = 4;
+	if (ch->using_rdma_cm) {
+		rep_param->rdma_cm.private_data = (void *)rsp;
+		rep_param->rdma_cm.private_data_len = sizeof(*rsp);
+		rep_param->rdma_cm.rnr_retry_count = 7;
+		rep_param->rdma_cm.flow_control = 1;
+		rep_param->rdma_cm.responder_resources = 4;
+		rep_param->rdma_cm.initiator_depth = 4;
+	} else {
+		rep_param->ib_cm.qp_num = ch->qp->qp_num;
+		rep_param->ib_cm.private_data = (void *)rsp;
+		rep_param->ib_cm.private_data_len = sizeof(*rsp);
+		rep_param->ib_cm.rnr_retry_count = 7;
+		rep_param->ib_cm.flow_control = 1;
+		rep_param->ib_cm.failover_accepted = 0;
+		rep_param->ib_cm.srq = 1;
+		rep_param->ib_cm.responder_resources = 4;
+		rep_param->ib_cm.initiator_depth = 4;
+	}
 
 	/*
 	 * Hold the sport mutex while accepting a connection to avoid that
 	 * srpt_disconnect_ch() is invoked concurrently with this code.
 	 */
 	mutex_lock(&sport->mutex);
-	if (sport->enabled && ch->state == CH_CONNECTING)
-		ret = ib_send_cm_rep(cm_id, rep_param);
-	else
+	if (sport->enabled && ch->state == CH_CONNECTING) {
+		if (ch->using_rdma_cm)
+			ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
+		else
+			ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
+	} else {
 		ret = -EINVAL;
+	}
 	mutex_unlock(&sport->mutex);
 
 	switch (ret) {
@@ -2299,7 +2355,8 @@  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 			     ch->sport->sdev, ch->rq_size,
 			     ch->max_rsp_size, DMA_TO_DEVICE);
 free_ch:
-	cm_id->context = NULL;
+	if (ib_cm_id)
+		ib_cm_id->context = NULL;
 	kfree(ch);
 	ch = NULL;
 
@@ -2312,8 +2369,11 @@  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 				   SRP_BUF_FORMAT_INDIRECT);
 
-	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-			     (void *)rej, sizeof(*rej));
+	if (rdma_cm_id)
+		rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+	else
+		ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+			       rej, sizeof(*rej));
 
 out:
 	kfree(rep_param);
@@ -2332,10 +2392,61 @@  static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
 	srpt_format_guid(sguid, sizeof(sguid),
 			 &param->primary_path->dgid.global.interface_id);
 
-	return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
+	return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
+				param->primary_path->pkey,
 				private_data, sguid);
 }
 
+static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
+{
+	switch (((struct sockaddr *)sa)->sa_family) {
+	case AF_INET:
+		snprintf(dst, size, "%pI4",
+			 &((struct sockaddr_in *)sa)->sin_addr);
+		break;
+	case AF_INET6:
+		snprintf(dst, size, "%pI6",
+			 &((struct sockaddr_in6 *)sa)->sin6_addr);
+		break;
+	default:
+		snprintf(dst, size, "???");
+		break;
+	}
+	return dst;
+}
+
+static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
+				 struct rdma_cm_event *event)
+{
+	struct srpt_device *sdev;
+	struct srp_login_req req;
+	const struct srp_login_req_rdma *req_rdma;
+	char src_addr[40];
+
+	sdev = ib_get_client_data(cm_id->device, &srpt_client);
+	if (!sdev)
+		return -ECONNREFUSED;
+
+	if (event->param.conn.private_data_len < sizeof(*req_rdma))
+		return -EINVAL;
+
+	/* Transform srp_login_req_rdma into srp_login_req. */
+	req_rdma = event->param.conn.private_data;
+	memset(&req, 0, sizeof(req));
+	req.opcode		= req_rdma->opcode;
+	req.tag			= req_rdma->tag;
+	req.req_it_iu_len	= req_rdma->req_it_iu_len;
+	req.req_buf_fmt		= req_rdma->req_buf_fmt;
+	req.req_flags		= req_rdma->req_flags;
+	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
+	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
+
+	inet_ntop(&cm_id->route.addr.src_addr, src_addr, sizeof(src_addr));
+
+	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
+				cm_id->route.path_rec->pkey, &req, src_addr);
+}
+
 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
 			     enum ib_cm_rej_reason reason,
 			     const u8 *private_data,
@@ -2359,14 +2470,14 @@  static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
  * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
  * @ch: SRPT RDMA channel.
  *
- * An IB_CM_RTU_RECEIVED message indicates that the connection is established
- * and that the recipient may begin transmitting (RTU = ready to use).
+ * An RTU (ready to use) message indicates that the connection has been
+ * established and that the recipient may begin transmitting.
  */
 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
 {
 	int ret;
 
-	ret = srpt_ch_qp_rts(ch, ch->qp);
+	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
 	if (ret < 0) {
 		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
 		       ch->qp->qp_num);
@@ -2453,6 +2564,49 @@  static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	return ret;
 }
 
+static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+				struct rdma_cm_event *event)
+{
+	struct srpt_rdma_ch *ch = cm_id->context;
+	int ret = 0;
+
+	switch (event->event) {
+	case RDMA_CM_EVENT_CONNECT_REQUEST:
+		ret = srpt_rdma_cm_req_recv(cm_id, event);
+		break;
+	case RDMA_CM_EVENT_REJECTED:
+		srpt_cm_rej_recv(ch, event->status,
+				 event->param.conn.private_data,
+				 event->param.conn.private_data_len);
+		break;
+	case RDMA_CM_EVENT_ESTABLISHED:
+		srpt_cm_rtu_recv(ch);
+		break;
+	case RDMA_CM_EVENT_DISCONNECTED:
+		if (ch->state < CH_DISCONNECTING)
+			srpt_disconnect_ch(ch);
+		else
+			srpt_close_ch(ch);
+		break;
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+		srpt_close_ch(ch);
+		break;
+	case RDMA_CM_EVENT_UNREACHABLE:
+		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+			ch->qp->qp_num);
+		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		break;
+	default:
+		pr_err("received unrecognized RDMA CM event %d\n",
+		       event->event);
+		break;
+	}
+
+	return ret;
+}
+
 static int srpt_write_pending_status(struct se_cmd *se_cmd)
 {
 	struct srpt_send_ioctx *ioctx;
@@ -2824,7 +2978,7 @@  static void srpt_add_one(struct ib_device *device)
 {
 	struct srpt_device *sdev;
 	struct srpt_port *sport;
-	int i;
+	int i, ret;
 
 	pr_debug("device = %p\n", device);
 
@@ -2848,9 +3002,15 @@  static void srpt_add_one(struct ib_device *device)
 	if (!srpt_service_guid)
 		srpt_service_guid = be64_to_cpu(device->node_guid);
 
-	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
-	if (IS_ERR(sdev->cm_id))
-		goto err_ring;
+	if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
+		sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+	if (IS_ERR(sdev->cm_id)) {
+		pr_info("ib_create_cm_id() failed: %ld\n",
+			PTR_ERR(sdev->cm_id));
+		sdev->cm_id = NULL;
+		if (!rdma_cm_id)
+			goto err_ring;
+	}
 
 	/* print out target login information */
 	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
@@ -2863,8 +3023,14 @@  static void srpt_add_one(struct ib_device *device)
 	 * in the system as service_id; therefore, the target_id will change
 	 * if this HCA is gone bad and replaced by different HCA
 	 */
-	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
+	ret = sdev->cm_id ?
+		ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
+		0;
+	if (ret < 0) {
+		pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
+		       sdev->cm_id->state);
 		goto err_cm;
+	}
 
 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
 			      srpt_event_handler);
@@ -2904,7 +3070,8 @@  static void srpt_add_one(struct ib_device *device)
 err_event:
 	ib_unregister_event_handler(&sdev->event_handler);
 err_cm:
-	ib_destroy_cm_id(sdev->cm_id);
+	if (sdev->cm_id)
+		ib_destroy_cm_id(sdev->cm_id);
 err_ring:
 	srpt_free_srq(sdev);
 	ib_dealloc_pd(sdev->pd);
@@ -2939,7 +3106,10 @@  static void srpt_remove_one(struct ib_device *device, void *client_data)
 	for (i = 0; i < sdev->device->phys_port_cnt; i++)
 		cancel_work_sync(&sdev->port[i].work);
 
-	ib_destroy_cm_id(sdev->cm_id);
+	if (sdev->cm_id)
+		ib_destroy_cm_id(sdev->cm_id);
+
+	ib_set_client_data(device, &srpt_client, NULL);
 
 	/*
 	 * Unregistering a target must happen after destroying sdev->cm_id
@@ -3103,18 +3273,26 @@  static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
 	leading_zero_bytes = 16 - count;
 	memset(i_port_id, 0, leading_zero_bytes);
 	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
-	if (ret < 0)
-		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
+
 out:
 	return ret;
 }
 
 /*
- * configfs callback function invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ * configfs callback function invoked for mkdir
+ * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ *
+ * i_port_id must be an initiator port GUID, GID or IP address. See also the
+ * target_alloc_session() calls in this driver. Examples of valid initiator
+ * port IDs:
+ * 0x0000000000000000505400fffe4a0b7b
+ * 0000000000000000505400fffe4a0b7b
+ * 5054:00ff:fe4a:0b7b
+ * 192.168.122.76
  */
 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 {
+	struct sockaddr_storage sa;
 	u64 guid;
 	u8 i_port_id[16];
 	int ret;
@@ -3122,6 +3300,9 @@  static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 	ret = srpt_parse_guid(&guid, name);
 	if (ret < 0)
 		ret = srpt_parse_i_port_id(i_port_id, name);
+	if (ret < 0)
+		ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
+					   &sa);
 	if (ret < 0)
 		pr_err("invalid initiator port ID %s\n", name);
 	return ret;
@@ -3296,6 +3477,92 @@  static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
 	NULL,
 };
 
+static struct rdma_cm_id *srpt_create_rdma_id(u16 rdma_cm_port)
+{
+	struct rdma_cm_id *rdma_cm_id;
+	struct sockaddr_in addr = { };
+	int ret;
+
+	rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
+				    NULL, RDMA_PS_TCP, IB_QPT_RC);
+	if (IS_ERR(rdma_cm_id)) {
+		pr_err("RDMA/CM ID creation failed: %ld\n",
+		       PTR_ERR(rdma_cm_id));
+		goto out;
+	}
+
+	/* We will listen on any RDMA device. */
+	addr.sin_family = AF_INET;
+	addr.sin_port = cpu_to_be16(rdma_cm_port);
+	ret = rdma_bind_addr(rdma_cm_id, (void *)&addr);
+	if (ret) {
+		pr_err("Binding RDMA/CM ID to port %u failed: %d\n",
+		       rdma_cm_port, ret);
+		rdma_destroy_id(rdma_cm_id);
+		rdma_cm_id = ERR_PTR(ret);
+		goto out;
+	}
+
+	ret = rdma_listen(rdma_cm_id, 128);
+	if (ret) {
+		pr_err("rdma_listen() failed: %d\n", ret);
+		rdma_destroy_id(rdma_cm_id);
+		rdma_cm_id = ERR_PTR(ret);
+	}
+
+out:
+	return rdma_cm_id;
+}
+
+/*
+ * Note: although the rdma_cm_port variable is a global, it is exposed to
+ * user space as a tpg attribute because both the target core and configfs
+ * would have to be modified to add this attribute under
+ * /sys/kernel/config/target/$driver.
+ */
+static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", rdma_cm_port);
+}
+
+static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	struct rdma_cm_id *new_id;
+	u16 val;
+	int ret;
+
+	ret = kstrtou16(page, 0, &val);
+	if (ret < 0)
+		return ret;
+	ret = count;
+	if (rdma_cm_port == val)
+		goto out;
+	new_id = val ? srpt_create_rdma_id(val) : NULL;
+	if (IS_ERR(new_id)) {
+		ret = PTR_ERR(new_id);
+		goto out;
+	}
+
+	mutex_lock(&rdma_cm_mutex);
+	rdma_cm_port = val;
+	swap(rdma_cm_id, new_id);
+	mutex_unlock(&rdma_cm_mutex);
+
+	if (new_id)
+		rdma_destroy_id(new_id);
+	ret = count;
+out:
+	return ret;
+}
+
+CONFIGFS_ATTR(srpt_, rdma_cm_port);
+
+static struct configfs_attribute *srpt_da_attrs[] = {
+	&srpt_attr_rdma_cm_port,
+	NULL,
+};
+
 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
 {
 	struct se_portal_group *se_tpg = to_tpg(item);
@@ -3441,6 +3708,7 @@  static const struct target_core_fabric_ops srpt_template = {
 	.fabric_drop_tpg		= srpt_drop_tpg,
 	.fabric_init_nodeacl		= srpt_init_nodeacl,
 
+	.tfc_discovery_attrs		= srpt_da_attrs,
 	.tfc_wwn_attrs			= srpt_wwn_attrs,
 	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
 	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
@@ -3494,6 +3762,8 @@  static int __init srpt_init_module(void)
 
 static void __exit srpt_cleanup_module(void)
 {
+	if (rdma_cm_id)
+		rdma_destroy_id(rdma_cm_id);
 	ib_unregister_client(&srpt_client);
 	target_unregister_template(&srpt_template);
 }
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 4d9199fd00dc..02883f8e9c71 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -42,6 +42,7 @@ 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_sa.h>
 #include <rdma/ib_cm.h>
+#include <rdma/rdma_cm.h>
 #include <rdma/rw.h>
 
 #include <scsi/srp.h>
@@ -261,6 +262,7 @@  enum rdma_ch_state {
  * @spinlock:      Protects free_list and state.
  * @free_list:     Head of list with free send I/O contexts.
  * @state:         channel state. See also enum rdma_ch_state.
+ * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
  * @processing_wait_list: Whether or not cmd_wait_list is being processed.
  * @ioctx_ring:    Send ring.
  * @ioctx_recv_ring: Receive I/O context ring.
@@ -280,6 +282,9 @@  struct srpt_rdma_ch {
 		struct {
 			struct ib_cm_id		*cm_id;
 		} ib_cm;
+		struct {
+			struct rdma_cm_id	*cm_id;
+		} rdma_cm;
 	};
 	struct ib_cq		*cq;
 	struct ib_cqe		zw_cqe;
@@ -300,6 +305,7 @@  struct srpt_rdma_ch {
 	struct list_head	list;
 	struct list_head	cmd_wait_list;
 	uint16_t		pkey;
+	bool			using_rdma_cm;
 	bool			processing_wait_list;
 	struct se_session	*sess;
 	u8			sess_name[24];