diff mbox

[v2,13/17] IB/Verbs: Reform cma/ucma with management helpers

Message ID 5523CF74.8020004@profitbricks.com (mailing list archive)
State Rejected
Headers show

Commit Message

Michael Wang April 7, 2015, 12:37 p.m. UTC
Reform cma/ucma with management helpers.

Cc: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Michael Wang <yun.wang@profitbricks.com>
---
 drivers/infiniband/core/cma.c  | 182 +++++++++++++----------------------------
 drivers/infiniband/core/ucma.c |  25 ++----
 2 files changed, 65 insertions(+), 142 deletions(-)

Comments

Steve Wise April 7, 2015, 9:11 p.m. UTC | #1
> -----Original Message-----
> From: Michael Wang [mailto:yun.wang@profitbricks.com]
> Sent: Tuesday, April 07, 2015 7:37 AM
> To: Roland Dreier; Sean Hefty; linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org; linux-nfs@vger.kernel.org;
> netdev@vger.kernel.org
> Cc: Hal Rosenstock; Tom Tucker; Steve Wise; Hoang-Nam Nguyen; Christoph Raisch; Mike Marciniszyn; Eli Cohen; Faisal Latif; Upinder
> Malhi; Trond Myklebust; J. Bruce Fields; David S. Miller; Ira Weiny; PJ Waskiewicz; Tatyana Nikolova; Or Gerlitz; Jack Morgenstein; Haggai
> Eran; Ilya Nelkenbaum; Yann Droneaud; Bart Van Assche; Shachar Raindel; Sagi Grimberg; Devesh Sharma; Matan Barak; Moni Shoua; Jiri
> Kosina; Selvin Xavier; Mitesh Ahuja; Li RongQing; Rasmus Villemoes; Alex Estrin; Doug Ledford; Eric Dumazet; Erez Shitrit; Tom
> Gundersen; Chuck Lever; Michael Wang
> Subject: [PATCH v2 13/17] IB/Verbs: Reform cma/ucma with management helpers
> 
> 
> Reform cma/ucma with management helpers.
> 
> Cc: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
> Cc: Doug Ledford <dledford@redhat.com>
> Cc: Ira Weiny <ira.weiny@intel.com>
> Cc: Sean Hefty <sean.hefty@intel.com>
> Signed-off-by: Michael Wang <yun.wang@profitbricks.com>
> ---
>  drivers/infiniband/core/cma.c  | 182 +++++++++++++----------------------------
>  drivers/infiniband/core/ucma.c |  25 ++----
>  2 files changed, 65 insertions(+), 142 deletions(-)
> 
> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
> index d8a8ea7..c23f483 100644
> --- a/drivers/infiniband/core/cma.c
> +++ b/drivers/infiniband/core/cma.c
> @@ -435,10 +435,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
>  	pkey = ntohs(addr->sib_pkey);
> 
>  	list_for_each_entry(cur_dev, &dev_list, list) {
> -		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
> -			continue;
> -
>  		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
> +			if (!rdma_ib_mgmt(cur_dev->device, p))
> +				continue;
> +
>  			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
>  				continue;
> 
> @@ -633,10 +633,10 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
>  	if (ret)
>  		goto out;
> 
> -	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
> -	    == RDMA_TRANSPORT_IB &&
> -	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
> -	    == IB_LINK_LAYER_ETHERNET) {
> +	/* Will this happen? */
> +	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
> +
> +	if (rdma_transport_iboe(id_priv->id.device, id_priv->id.port_num)) {
>  		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
> 
>  		if (ret)
> @@ -700,8 +700,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
>  	int ret;
>  	u16 pkey;
> 
> -	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
> -	    IB_LINK_LAYER_INFINIBAND)
> +	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num))
>  		pkey = ib_addr_get_pkey(dev_addr);
>  	else
>  		pkey = 0xffff;
> @@ -735,8 +734,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
>  	int ret = 0;
> 
>  	id_priv = container_of(id, struct rdma_id_private, id);
> -	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> +	if (rdma_ib_mgmt(id_priv->id.device, id_priv->id.port_num)) {
>  		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
>  			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
>  		else
> @@ -745,19 +743,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
> 
>  		if (qp_attr->qp_state == IB_QPS_RTR)
>  			qp_attr->rq_psn = id_priv->seq_num;
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	} else if (rdma_transport_iwarp(id_priv->id.device,
> +						id_priv->id.port_num)) {
>  		if (!id_priv->cm_id.iw) {
>  			qp_attr->qp_access_flags = 0;
>  			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
>  		} else
>  			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
>  						 qp_attr_mask);
> -		break;
> -	default:
> +	} else
>  		ret = -ENOSYS;
> -		break;
> -	}
> 
>  	return ret;
>  }
> @@ -928,13 +923,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
> 
>  static void cma_cancel_route(struct rdma_id_private *id_priv)
>  {
> -	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
> -	case IB_LINK_LAYER_INFINIBAND:
> +	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num)) {
>  		if (id_priv->query)
>  			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
> -		break;
> -	default:
> -		break;
>  	}
>  }
> 
> @@ -1006,17 +997,14 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
>  		mc = container_of(id_priv->mc_list.next,
>  				  struct cma_multicast, list);
>  		list_del(&mc->list);
> -		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
> -		case IB_LINK_LAYER_INFINIBAND:
> +		if (rdma_transport_ib(id_priv->cma_dev->device,
> +				      id_priv->id.port_num)) {
>  			ib_sa_free_multicast(mc->multicast.ib);
>  			kfree(mc);
>  			break;
> -		case IB_LINK_LAYER_ETHERNET:
> +		} else if (rdma_transport_ib(id_priv->cma_dev->device,
> +					     id_priv->id.port_num))
>  			kref_put(&mc->mcref, release_mc);
> -			break;
> -		default:
> -			break;
> -		}
>  	}
>  }
>

Doesn't the above change result in:

if (rdma_transport_ib()) {
} else if (rdma_transport_ib()) {
}
 
????

> @@ -1037,17 +1025,13 @@ void rdma_destroy_id(struct rdma_cm_id *id)
>  	mutex_unlock(&id_priv->handler_mutex);
> 
>  	if (id_priv->cma_dev) {
> -		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
> -		case RDMA_TRANSPORT_IB:
> +		if (rdma_ib_mgmt(id_priv->id.device, id_priv->id.port_num)) {
>  			if (id_priv->cm_id.ib)
>  				ib_destroy_cm_id(id_priv->cm_id.ib);
> -			break;
> -		case RDMA_TRANSPORT_IWARP:
> +		} else if (rdma_transport_iwarp(id_priv->id.device,
> +							id_priv->id.port_num)) {
>  			if (id_priv->cm_id.iw)
>  				iw_destroy_cm_id(id_priv->cm_id.iw);
> -			break;
> -		default:
> -			break;
>  		}
>  		cma_leave_mc_groups(id_priv);
>  		cma_release_dev(id_priv);
> @@ -1966,26 +1950,14 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
>  		return -EINVAL;
> 
>  	atomic_inc(&id_priv->refcount);
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> -		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
> -		case IB_LINK_LAYER_INFINIBAND:
> -			ret = cma_resolve_ib_route(id_priv, timeout_ms);
> -			break;
> -		case IB_LINK_LAYER_ETHERNET:
> -			ret = cma_resolve_iboe_route(id_priv);
> -			break;
> -		default:
> -			ret = -ENOSYS;
> -		}
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	if (rdma_transport_ib(id->device, id->port_num))
> +		ret = cma_resolve_ib_route(id_priv, timeout_ms);
> +	else if (rdma_transport_iboe(id->device, id->port_num))
> +		ret = cma_resolve_iboe_route(id_priv);
> +	else if (rdma_transport_iwarp(id->device, id->port_num))
>  		ret = cma_resolve_iw_route(id_priv, timeout_ms);
> -		break;
> -	default:
> +	else
>  		ret = -ENOSYS;
> -		break;
> -	}
>  	if (ret)
>  		goto err;
> 
> @@ -2059,7 +2031,7 @@ port_found:
>  		goto out;
> 
>  	id_priv->id.route.addr.dev_addr.dev_type =
> -		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
> +		(rdma_transport_ib(cma_dev->device, p)) ?
>  		ARPHRD_INFINIBAND : ARPHRD_ETHER;
> 
>  	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
> @@ -2536,18 +2508,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
> 
>  	id_priv->backlog = backlog;
>  	if (id->device) {
> -		switch (rdma_node_get_transport(id->device->node_type)) {
> -		case RDMA_TRANSPORT_IB:
> +		if (rdma_ib_mgmt(id->device, id->port_num)) {
>  			ret = cma_ib_listen(id_priv);
>  			if (ret)
>  				goto err;
> -			break;
> -		case RDMA_TRANSPORT_IWARP:
> +		} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>  			ret = cma_iw_listen(id_priv, backlog);
>  			if (ret)
>  				goto err;
> -			break;
> -		default:
> +		} else {
>  			ret = -ENOSYS;
>  			goto err;
>  		}
> @@ -2883,20 +2852,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>  		id_priv->srq = conn_param->srq;
>  	}
> 
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>  		if (id->qp_type == IB_QPT_UD)
>  			ret = cma_resolve_ib_udp(id_priv, conn_param);
>  		else
>  			ret = cma_connect_ib(id_priv, conn_param);
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	} else if (rdma_transport_iwarp(id->device, id->port_num))
>  		ret = cma_connect_iw(id_priv, conn_param);
> -		break;
> -	default:
> +	else
>  		ret = -ENOSYS;
> -		break;
> -	}
>  	if (ret)
>  		goto err;
> 
> @@ -2999,8 +2963,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>  		id_priv->srq = conn_param->srq;
>  	}
> 
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>  		if (id->qp_type == IB_QPT_UD) {
>  			if (conn_param)
>  				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
> @@ -3016,14 +2979,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>  			else
>  				ret = cma_rep_recv(id_priv);
>  		}
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	} else if (rdma_transport_iwarp(id->device, id->port_num))
>  		ret = cma_accept_iw(id_priv, conn_param);
> -		break;
> -	default:
> +	else
>  		ret = -ENOSYS;
> -		break;
> -	}
> 
>  	if (ret)
>  		goto reject;
> @@ -3067,8 +3026,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
>  	if (!id_priv->cm_id.ib)
>  		return -EINVAL;
> 
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>  		if (id->qp_type == IB_QPT_UD)
>  			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
>  						private_data, private_data_len);
> @@ -3076,15 +3034,11 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
>  			ret = ib_send_cm_rej(id_priv->cm_id.ib,
>  					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
>  					     0, private_data, private_data_len);
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>  		ret = iw_cm_reject(id_priv->cm_id.iw,
>  				   private_data, private_data_len);
> -		break;
> -	default:
> +	} else
>  		ret = -ENOSYS;
> -		break;
> -	}
>  	return ret;
>  }
>  EXPORT_SYMBOL(rdma_reject);
> @@ -3098,22 +3052,17 @@ int rdma_disconnect(struct rdma_cm_id *id)
>  	if (!id_priv->cm_id.ib)
>  		return -EINVAL;
> 
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>  		ret = cma_modify_qp_err(id_priv);
>  		if (ret)
>  			goto out;
>  		/* Initiate or respond to a disconnect. */
>  		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
>  			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>  		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
> -		break;
> -	default:
> +	} else
>  		ret = -EINVAL;
> -		break;
> -	}
>  out:
>  	return ret;
>  }
> @@ -3359,24 +3308,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
>  	list_add(&mc->list, &id_priv->mc_list);
>  	spin_unlock(&id_priv->lock);
> 
> -	switch (rdma_node_get_transport(id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> -		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
> -		case IB_LINK_LAYER_INFINIBAND:
> -			ret = cma_join_ib_multicast(id_priv, mc);
> -			break;
> -		case IB_LINK_LAYER_ETHERNET:
> -			kref_init(&mc->mcref);
> -			ret = cma_iboe_join_multicast(id_priv, mc);
> -			break;
> -		default:
> -			ret = -EINVAL;
> -		}
> -		break;
> -	default:
> +	if (rdma_transport_iboe(id->device, id->port_num)) {
> +		kref_init(&mc->mcref);
> +		ret = cma_iboe_join_multicast(id_priv, mc);
> +	} else if (rdma_transport_ib(id->device, id->port_num))
> +		ret = cma_join_ib_multicast(id_priv, mc);
> +	else
>  		ret = -ENOSYS;
> -		break;
> -	}
> 
>  	if (ret) {
>  		spin_lock_irq(&id_priv->lock);
> @@ -3404,19 +3342,17 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
>  				ib_detach_mcast(id->qp,
>  						&mc->multicast.ib->rec.mgid,
>  						be16_to_cpu(mc->multicast.ib->rec.mlid));
> -			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
> -				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
> -				case IB_LINK_LAYER_INFINIBAND:
> -					ib_sa_free_multicast(mc->multicast.ib);
> -					kfree(mc);
> -					break;
> -				case IB_LINK_LAYER_ETHERNET:
> -					kref_put(&mc->mcref, release_mc);
> -					break;
> -				default:
> -					break;
> -				}
> -			}
> +
> +			/* Will this happen? */
> +			BUG_ON(id_priv->cma_dev->device != id->device);
> +
> +			if (rdma_transport_ib(id->device, id->port_num)) {
> +				ib_sa_free_multicast(mc->multicast.ib);
> +				kfree(mc);
> +			} else if (rdma_transport_iboe(id->device,
> +						       id->port_num))
> +				kref_put(&mc->mcref, release_mc);
> +
>  			return;
>  		}
>  	}
> diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
> index 45d67e9..42c9bf6 100644
> --- a/drivers/infiniband/core/ucma.c
> +++ b/drivers/infiniband/core/ucma.c
> @@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,
> 
>  	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
>  	resp.port_num = ctx->cm_id->port_num;
> -	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
> -	case RDMA_TRANSPORT_IB:
> -		switch (rdma_port_get_link_layer(ctx->cm_id->device,
> -			ctx->cm_id->port_num)) {
> -		case IB_LINK_LAYER_INFINIBAND:
> -			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
> -			break;
> -		case IB_LINK_LAYER_ETHERNET:
> -			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
> -			break;
> -		default:
> -			break;
> -		}
> -		break;
> -	case RDMA_TRANSPORT_IWARP:
> +
> +	if (rdma_transport_ib(ctx->cm_id->device, ctx->cm_id->port_num))
> +		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
> +	else if (rdma_transport_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
> +		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
> +	else if (rdma_transport_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
>  		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
> -		break;
> -	default:
> -		break;
> -	}
> 
>  out:
>  	if (copy_to_user((void __user *)(unsigned long)cmd.response,
> --
> 2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Hefty, Sean April 7, 2015, 9:36 p.m. UTC | #2
PiBkaWZmIC0tZ2l0IGEvZHJpdmVycy9pbmZpbmliYW5kL2NvcmUvY21hLmMgYi9kcml2ZXJzL2lu
ZmluaWJhbmQvY29yZS9jbWEuYw0KPiBpbmRleCBkOGE4ZWE3Li5jMjNmNDgzIDEwMDY0NA0KPiAt
LS0gYS9kcml2ZXJzL2luZmluaWJhbmQvY29yZS9jbWEuYw0KPiArKysgYi9kcml2ZXJzL2luZmlu
aWJhbmQvY29yZS9jbWEuYw0KPiBAQCAtNDM1LDEwICs0MzUsMTAgQEAgc3RhdGljIGludCBjbWFf
cmVzb2x2ZV9pYl9kZXYoc3RydWN0IHJkbWFfaWRfcHJpdmF0ZQ0KPiAqaWRfcHJpdikNCj4gIAlw
a2V5ID0gbnRvaHMoYWRkci0+c2liX3BrZXkpOw0KPiANCj4gIAlsaXN0X2Zvcl9lYWNoX2VudHJ5
KGN1cl9kZXYsICZkZXZfbGlzdCwgbGlzdCkgew0KPiAtCQlpZiAocmRtYV9ub2RlX2dldF90cmFu
c3BvcnQoY3VyX2Rldi0+ZGV2aWNlLT5ub2RlX3R5cGUpICE9DQo+IFJETUFfVFJBTlNQT1JUX0lC
KQ0KPiAtCQkJY29udGludWU7DQo+IC0NCj4gIAkJZm9yIChwID0gMTsgcCA8PSBjdXJfZGV2LT5k
ZXZpY2UtPnBoeXNfcG9ydF9jbnQ7ICsrcCkgew0KPiArCQkJaWYgKCFyZG1hX2liX21nbXQoY3Vy
X2Rldi0+ZGV2aWNlLCBwKSkNCj4gKwkJCQljb250aW51ZTsNCg0KVGhpcyBjaGVjayB3YW50cyB0
byBiZSBzb21ldGhpbmcgbGlrZSBpc19hZl9pYl9zdXBwb3J0ZWQoKS4gIENoZWNraW5nIGZvciBJ
QiB0cmFuc3BvcnQgbWF5IGFjdHVhbGx5IGJlIGJldHRlciB0aGFuIGNoZWNraW5nIGZvciBJQiBt
YW5hZ2VtZW50LiAgSSBkb24ndCBrbm93IGlmIElCb0UvUm9DRSBkZXZpY2VzIHN1cHBvcnQgQUZf
SUIuDQoNCg0KPiArDQo+ICAJCQlpZiAoaWJfZmluZF9jYWNoZWRfcGtleShjdXJfZGV2LT5kZXZp
Y2UsIHAsIHBrZXksDQo+ICZpbmRleCkpDQo+ICAJCQkJY29udGludWU7DQo+IA0KPiBAQCAtNjMz
LDEwICs2MzMsMTAgQEAgc3RhdGljIGludCBjbWFfbW9kaWZ5X3FwX3J0cihzdHJ1Y3QgcmRtYV9p
ZF9wcml2YXRlDQo+ICppZF9wcml2LA0KPiAgCWlmIChyZXQpDQo+ICAJCWdvdG8gb3V0Ow0KPiAN
Cj4gLQlpZiAocmRtYV9ub2RlX2dldF90cmFuc3BvcnQoaWRfcHJpdi0+Y21hX2Rldi0+ZGV2aWNl
LT5ub2RlX3R5cGUpDQo+IC0JICAgID09IFJETUFfVFJBTlNQT1JUX0lCICYmDQo+IC0JICAgIHJk
bWFfcG9ydF9nZXRfbGlua19sYXllcihpZF9wcml2LT5pZC5kZXZpY2UsIGlkX3ByaXYtDQo+ID5p
ZC5wb3J0X251bSkNCj4gLQkgICAgPT0gSUJfTElOS19MQVlFUl9FVEhFUk5FVCkgew0KPiArCS8q
IFdpbGwgdGhpcyBoYXBwZW4/ICovDQo+ICsJQlVHX09OKGlkX3ByaXYtPmNtYV9kZXYtPmRldmlj
ZSAhPSBpZF9wcml2LT5pZC5kZXZpY2UpOw0KDQpUaGlzIHNob3VsZG4ndCBoYXBwZW4uICBUaGUg
QlVHX09OIGxvb2tzIG9rYXkuDQoNCg0KPiArCWlmIChyZG1hX3RyYW5zcG9ydF9pYm9lKGlkX3By
aXYtPmlkLmRldmljZSwgaWRfcHJpdi0+aWQucG9ydF9udW0pKSB7DQo+ICAJCXJldCA9IHJkbWFf
YWRkcl9maW5kX3NtYWNfYnlfc2dpZCgmc2dpZCwgcXBfYXR0ci5zbWFjLCBOVUxMKTsNCj4gDQo+
ICAJCWlmIChyZXQpDQo+IEBAIC03MDAsOCArNzAwLDcgQEAgc3RhdGljIGludCBjbWFfaWJfaW5p
dF9xcF9hdHRyKHN0cnVjdCByZG1hX2lkX3ByaXZhdGUNCj4gKmlkX3ByaXYsDQo+ICAJaW50IHJl
dDsNCj4gIAl1MTYgcGtleTsNCj4gDQo+IC0JaWYgKHJkbWFfcG9ydF9nZXRfbGlua19sYXllcihp
ZF9wcml2LT5pZC5kZXZpY2UsIGlkX3ByaXYtDQo+ID5pZC5wb3J0X251bSkgPT0NCj4gLQkgICAg
SUJfTElOS19MQVlFUl9JTkZJTklCQU5EKQ0KPiArCWlmIChyZG1hX3RyYW5zcG9ydF9pYihpZF9w
cml2LT5pZC5kZXZpY2UsIGlkX3ByaXYtPmlkLnBvcnRfbnVtKSkNCj4gIAkJcGtleSA9IGliX2Fk
ZHJfZ2V0X3BrZXkoZGV2X2FkZHIpOw0KPiAgCWVsc2UNCj4gIAkJcGtleSA9IDB4ZmZmZjsNCg0K
Q2hlY2sgaGVyZSBzaG91bGQgYmUgYWdhaW5zdCB0aGUgbGluayBsYXllciwgbm90IHRyYW5zcG9y
dC4NCg0KDQo+IEBAIC03MzUsOCArNzM0LDcgQEAgaW50IHJkbWFfaW5pdF9xcF9hdHRyKHN0cnVj
dCByZG1hX2NtX2lkICppZCwgc3RydWN0DQo+IGliX3FwX2F0dHIgKnFwX2F0dHIsDQo+ICAJaW50
IHJldCA9IDA7DQo+IA0KPiAgCWlkX3ByaXYgPSBjb250YWluZXJfb2YoaWQsIHN0cnVjdCByZG1h
X2lkX3ByaXZhdGUsIGlkKTsNCj4gLQlzd2l0Y2ggKHJkbWFfbm9kZV9nZXRfdHJhbnNwb3J0KGlk
X3ByaXYtPmlkLmRldmljZS0+bm9kZV90eXBlKSkgew0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRf
SUI6DQo+ICsJaWYgKHJkbWFfaWJfbWdtdChpZF9wcml2LT5pZC5kZXZpY2UsIGlkX3ByaXYtPmlk
LnBvcnRfbnVtKSkgew0KPiAgCQlpZiAoIWlkX3ByaXYtPmNtX2lkLmliIHx8IChpZF9wcml2LT5p
ZC5xcF90eXBlID09IElCX1FQVF9VRCkpDQo+ICAJCQlyZXQgPSBjbWFfaWJfaW5pdF9xcF9hdHRy
KGlkX3ByaXYsIHFwX2F0dHIsDQo+IHFwX2F0dHJfbWFzayk7DQo+ICAJCWVsc2UNCj4gQEAgLTc0
NSwxOSArNzQzLDE2IEBAIGludCByZG1hX2luaXRfcXBfYXR0cihzdHJ1Y3QgcmRtYV9jbV9pZCAq
aWQsIHN0cnVjdA0KPiBpYl9xcF9hdHRyICpxcF9hdHRyLA0KPiANCj4gIAkJaWYgKHFwX2F0dHIt
PnFwX3N0YXRlID09IElCX1FQU19SVFIpDQo+ICAJCQlxcF9hdHRyLT5ycV9wc24gPSBpZF9wcml2
LT5zZXFfbnVtOw0KPiAtCQlicmVhazsNCj4gLQljYXNlIFJETUFfVFJBTlNQT1JUX0lXQVJQOg0K
PiArCX0gZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdhcnAoaWRfcHJpdi0+aWQuZGV2aWNlLA0K
PiArCQkJCQkJaWRfcHJpdi0+aWQucG9ydF9udW0pKSB7DQo+ICAJCWlmICghaWRfcHJpdi0+Y21f
aWQuaXcpIHsNCj4gIAkJCXFwX2F0dHItPnFwX2FjY2Vzc19mbGFncyA9IDA7DQo+ICAJCQkqcXBf
YXR0cl9tYXNrID0gSUJfUVBfU1RBVEUgfCBJQl9RUF9BQ0NFU1NfRkxBR1M7DQo+ICAJCX0gZWxz
ZQ0KPiAgCQkJcmV0ID0gaXdfY21faW5pdF9xcF9hdHRyKGlkX3ByaXYtPmNtX2lkLml3LCBxcF9h
dHRyLA0KPiAgCQkJCQkJIHFwX2F0dHJfbWFzayk7DQo+IC0JCWJyZWFrOw0KPiAtCWRlZmF1bHQ6
DQo+ICsJfSBlbHNlDQo+ICAJCXJldCA9IC1FTk9TWVM7DQo+IC0JCWJyZWFrOw0KPiAtCX0NCj4g
DQo+ICAJcmV0dXJuIHJldDsNCj4gIH0NCj4gQEAgLTkyOCwxMyArOTIzLDkgQEAgc3RhdGljIGlu
bGluZSBpbnQgY21hX3VzZXJfZGF0YV9vZmZzZXQoc3RydWN0DQo+IHJkbWFfaWRfcHJpdmF0ZSAq
aWRfcHJpdikNCj4gDQo+ICBzdGF0aWMgdm9pZCBjbWFfY2FuY2VsX3JvdXRlKHN0cnVjdCByZG1h
X2lkX3ByaXZhdGUgKmlkX3ByaXYpDQo+ICB7DQo+IC0Jc3dpdGNoIChyZG1hX3BvcnRfZ2V0X2xp
bmtfbGF5ZXIoaWRfcHJpdi0+aWQuZGV2aWNlLCBpZF9wcml2LQ0KPiA+aWQucG9ydF9udW0pKSB7
DQo+IC0JY2FzZSBJQl9MSU5LX0xBWUVSX0lORklOSUJBTkQ6DQo+ICsJaWYgKHJkbWFfdHJhbnNw
b3J0X2liKGlkX3ByaXYtPmlkLmRldmljZSwgaWRfcHJpdi0+aWQucG9ydF9udW0pKSB7DQoNClRo
ZSBjaGVjayBzaG91bGQgYmUgY2FwX2liX3NhKCkNCg0KDQo+ICAJCWlmIChpZF9wcml2LT5xdWVy
eSkNCj4gIAkJCWliX3NhX2NhbmNlbF9xdWVyeShpZF9wcml2LT5xdWVyeV9pZCwgaWRfcHJpdi0+
cXVlcnkpOw0KPiAtCQlicmVhazsNCj4gLQlkZWZhdWx0Og0KPiAtCQlicmVhazsNCj4gIAl9DQo+
ICB9DQo+IA0KPiBAQCAtMTAwNiwxNyArOTk3LDE0IEBAIHN0YXRpYyB2b2lkIGNtYV9sZWF2ZV9t
Y19ncm91cHMoc3RydWN0DQo+IHJkbWFfaWRfcHJpdmF0ZSAqaWRfcHJpdikNCj4gIAkJbWMgPSBj
b250YWluZXJfb2YoaWRfcHJpdi0+bWNfbGlzdC5uZXh0LA0KPiAgCQkJCSAgc3RydWN0IGNtYV9t
dWx0aWNhc3QsIGxpc3QpOw0KPiAgCQlsaXN0X2RlbCgmbWMtPmxpc3QpOw0KPiAtCQlzd2l0Y2gg
KHJkbWFfcG9ydF9nZXRfbGlua19sYXllcihpZF9wcml2LT5jbWFfZGV2LT5kZXZpY2UsDQo+IGlk
X3ByaXYtPmlkLnBvcnRfbnVtKSkgew0KPiAtCQljYXNlIElCX0xJTktfTEFZRVJfSU5GSU5JQkFO
RDoNCj4gKwkJaWYgKHJkbWFfdHJhbnNwb3J0X2liKGlkX3ByaXYtPmNtYV9kZXYtPmRldmljZSwN
Cj4gKwkJCQkgICAgICBpZF9wcml2LT5pZC5wb3J0X251bSkpIHsNCj4gIAkJCWliX3NhX2ZyZWVf
bXVsdGljYXN0KG1jLT5tdWx0aWNhc3QuaWIpOw0KPiAgCQkJa2ZyZWUobWMpOw0KPiAgCQkJYnJl
YWs7DQoNCldhbnQgY2FwX2liX21jYXN0KCkNCg0KDQo+IC0JCWNhc2UgSUJfTElOS19MQVlFUl9F
VEhFUk5FVDoNCj4gKwkJfSBlbHNlIGlmIChyZG1hX3RyYW5zcG9ydF9pYihpZF9wcml2LT5jbWFf
ZGV2LT5kZXZpY2UsDQo+ICsJCQkJCSAgICAgaWRfcHJpdi0+aWQucG9ydF9udW0pKQ0KPiAgCQkJ
a3JlZl9wdXQoJm1jLT5tY3JlZiwgcmVsZWFzZV9tYyk7DQo+IC0JCQlicmVhazsNCj4gLQkJZGVm
YXVsdDoNCj4gLQkJCWJyZWFrOw0KDQpKdXN0IHdhbnQgZWxzZSAvKiAhY2FwX2liX21jYXN0ICov
DQoNCg0KPiAtCQl9DQo+ICAJfQ0KPiAgfQ0KPiANCj4gQEAgLTEwMzcsMTcgKzEwMjUsMTMgQEAg
dm9pZCByZG1hX2Rlc3Ryb3lfaWQoc3RydWN0IHJkbWFfY21faWQgKmlkKQ0KPiAgCW11dGV4X3Vu
bG9jaygmaWRfcHJpdi0+aGFuZGxlcl9tdXRleCk7DQo+IA0KPiAgCWlmIChpZF9wcml2LT5jbWFf
ZGV2KSB7DQo+IC0JCXN3aXRjaCAocmRtYV9ub2RlX2dldF90cmFuc3BvcnQoaWRfcHJpdi0+aWQu
ZGV2aWNlLQ0KPiA+bm9kZV90eXBlKSkgew0KPiAtCQljYXNlIFJETUFfVFJBTlNQT1JUX0lCOg0K
PiArCQlpZiAocmRtYV9pYl9tZ210KGlkX3ByaXYtPmlkLmRldmljZSwgaWRfcHJpdi0+aWQucG9y
dF9udW0pKSB7DQo+ICAJCQlpZiAoaWRfcHJpdi0+Y21faWQuaWIpDQo+ICAJCQkJaWJfZGVzdHJv
eV9jbV9pZChpZF9wcml2LT5jbV9pZC5pYik7DQo+IC0JCQlicmVhazsNCj4gLQkJY2FzZSBSRE1B
X1RSQU5TUE9SVF9JV0FSUDoNCj4gKwkJfSBlbHNlIGlmIChyZG1hX3RyYW5zcG9ydF9pd2FycChp
ZF9wcml2LT5pZC5kZXZpY2UsDQo+ICsJCQkJCQkJaWRfcHJpdi0+aWQucG9ydF9udW0pKSB7DQo+
ICAJCQlpZiAoaWRfcHJpdi0+Y21faWQuaXcpDQo+ICAJCQkJaXdfZGVzdHJveV9jbV9pZChpZF9w
cml2LT5jbV9pZC5pdyk7DQo+IC0JCQlicmVhazsNCj4gLQkJZGVmYXVsdDoNCj4gLQkJCWJyZWFr
Ow0KPiAgCQl9DQo+ICAJCWNtYV9sZWF2ZV9tY19ncm91cHMoaWRfcHJpdik7DQo+ICAJCWNtYV9y
ZWxlYXNlX2RldihpZF9wcml2KTsNCj4gQEAgLTE5NjYsMjYgKzE5NTAsMTQgQEAgaW50IHJkbWFf
cmVzb2x2ZV9yb3V0ZShzdHJ1Y3QgcmRtYV9jbV9pZCAqaWQsIGludA0KPiB0aW1lb3V0X21zKQ0K
PiAgCQlyZXR1cm4gLUVJTlZBTDsNCj4gDQo+ICAJYXRvbWljX2luYygmaWRfcHJpdi0+cmVmY291
bnQpOw0KPiAtCXN3aXRjaCAocmRtYV9ub2RlX2dldF90cmFuc3BvcnQoaWQtPmRldmljZS0+bm9k
ZV90eXBlKSkgew0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRfSUI6DQo+IC0JCXN3aXRjaCAocmRt
YV9wb3J0X2dldF9saW5rX2xheWVyKGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpIHsNCj4gLQkJ
Y2FzZSBJQl9MSU5LX0xBWUVSX0lORklOSUJBTkQ6DQo+IC0JCQlyZXQgPSBjbWFfcmVzb2x2ZV9p
Yl9yb3V0ZShpZF9wcml2LCB0aW1lb3V0X21zKTsNCj4gLQkJCWJyZWFrOw0KPiAtCQljYXNlIElC
X0xJTktfTEFZRVJfRVRIRVJORVQ6DQo+IC0JCQlyZXQgPSBjbWFfcmVzb2x2ZV9pYm9lX3JvdXRl
KGlkX3ByaXYpOw0KPiAtCQkJYnJlYWs7DQo+IC0JCWRlZmF1bHQ6DQo+IC0JCQlyZXQgPSAtRU5P
U1lTOw0KPiAtCQl9DQo+IC0JCWJyZWFrOw0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRfSVdBUlA6
DQo+ICsJaWYgKHJkbWFfdHJhbnNwb3J0X2liKGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpDQo+
ICsJCXJldCA9IGNtYV9yZXNvbHZlX2liX3JvdXRlKGlkX3ByaXYsIHRpbWVvdXRfbXMpOw0KDQpC
ZXN0IGZpdCB3b3VsZCBiZSBjYXBfaWJfc2EoKQ0KDQoNCj4gKwllbHNlIGlmIChyZG1hX3RyYW5z
cG9ydF9pYm9lKGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpDQo+ICsJCXJldCA9IGNtYV9yZXNv
bHZlX2lib2Vfcm91dGUoaWRfcHJpdik7DQo+ICsJZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdh
cnAoaWQtPmRldmljZSwgaWQtPnBvcnRfbnVtKSkNCj4gIAkJcmV0ID0gY21hX3Jlc29sdmVfaXdf
cm91dGUoaWRfcHJpdiwgdGltZW91dF9tcyk7DQo+IC0JCWJyZWFrOw0KPiAtCWRlZmF1bHQ6DQo+
ICsJZWxzZQ0KPiAgCQlyZXQgPSAtRU5PU1lTOw0KPiAtCQlicmVhazsNCj4gLQl9DQo+ICAJaWYg
KHJldCkNCj4gIAkJZ290byBlcnI7DQo+IA0KPiBAQCAtMjA1OSw3ICsyMDMxLDcgQEAgcG9ydF9m
b3VuZDoNCj4gIAkJZ290byBvdXQ7DQo+IA0KPiAgCWlkX3ByaXYtPmlkLnJvdXRlLmFkZHIuZGV2
X2FkZHIuZGV2X3R5cGUgPQ0KPiAtCQkocmRtYV9wb3J0X2dldF9saW5rX2xheWVyKGNtYV9kZXYt
PmRldmljZSwgcCkgPT0NCj4gSUJfTElOS19MQVlFUl9JTkZJTklCQU5EKSA/DQo+ICsJCShyZG1h
X3RyYW5zcG9ydF9pYihjbWFfZGV2LT5kZXZpY2UsIHApKSA/DQo+ICAJCUFSUEhSRF9JTkZJTklC
QU5EIDogQVJQSFJEX0VUSEVSOw0KDQpUaGlzIHdhbnRzIHRoZSBsaW5rIGxheWVyLCBvciBtYXli
ZSB1c2UgY2FwX2lwb2liLg0KDQoNCj4gDQo+ICAJcmRtYV9hZGRyX3NldF9zZ2lkKCZpZF9wcml2
LT5pZC5yb3V0ZS5hZGRyLmRldl9hZGRyLCAmZ2lkKTsNCj4gQEAgLTI1MzYsMTggKzI1MDgsMTUg
QEAgaW50IHJkbWFfbGlzdGVuKHN0cnVjdCByZG1hX2NtX2lkICppZCwgaW50DQo+IGJhY2tsb2cp
DQo+IA0KPiAgCWlkX3ByaXYtPmJhY2tsb2cgPSBiYWNrbG9nOw0KPiAgCWlmIChpZC0+ZGV2aWNl
KSB7DQo+IC0JCXN3aXRjaCAocmRtYV9ub2RlX2dldF90cmFuc3BvcnQoaWQtPmRldmljZS0+bm9k
ZV90eXBlKSkgew0KPiAtCQljYXNlIFJETUFfVFJBTlNQT1JUX0lCOg0KPiArCQlpZiAocmRtYV9p
Yl9tZ210KGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpIHsNCg0KV2FudCBjYXBfaWJfY20oKQ0K
DQoNCj4gIAkJCXJldCA9IGNtYV9pYl9saXN0ZW4oaWRfcHJpdik7DQo+ICAJCQlpZiAocmV0KQ0K
PiAgCQkJCWdvdG8gZXJyOw0KPiAtCQkJYnJlYWs7DQo+IC0JCWNhc2UgUkRNQV9UUkFOU1BPUlRf
SVdBUlA6DQo+ICsJCX0gZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdhcnAoaWQtPmRldmljZSwg
aWQtPnBvcnRfbnVtKSkgew0KPiAgCQkJcmV0ID0gY21hX2l3X2xpc3RlbihpZF9wcml2LCBiYWNr
bG9nKTsNCj4gIAkJCWlmIChyZXQpDQo+ICAJCQkJZ290byBlcnI7DQo+IC0JCQlicmVhazsNCj4g
LQkJZGVmYXVsdDoNCj4gKwkJfSBlbHNlIHsNCj4gIAkJCXJldCA9IC1FTk9TWVM7DQo+ICAJCQln
b3RvIGVycjsNCj4gIAkJfQ0KPiBAQCAtMjg4MywyMCArMjg1MiwxNSBAQCBpbnQgcmRtYV9jb25u
ZWN0KHN0cnVjdCByZG1hX2NtX2lkICppZCwgc3RydWN0DQo+IHJkbWFfY29ubl9wYXJhbSAqY29u
bl9wYXJhbSkNCj4gIAkJaWRfcHJpdi0+c3JxID0gY29ubl9wYXJhbS0+c3JxOw0KPiAgCX0NCj4g
DQo+IC0Jc3dpdGNoIChyZG1hX25vZGVfZ2V0X3RyYW5zcG9ydChpZC0+ZGV2aWNlLT5ub2RlX3R5
cGUpKSB7DQo+IC0JY2FzZSBSRE1BX1RSQU5TUE9SVF9JQjoNCj4gKwlpZiAocmRtYV9pYl9tZ210
KGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpIHsNCg0KY2FwX2liX2NtKCkNCg0KDQo+ICAJCWlm
IChpZC0+cXBfdHlwZSA9PSBJQl9RUFRfVUQpDQo+ICAJCQlyZXQgPSBjbWFfcmVzb2x2ZV9pYl91
ZHAoaWRfcHJpdiwgY29ubl9wYXJhbSk7DQo+ICAJCWVsc2UNCj4gIAkJCXJldCA9IGNtYV9jb25u
ZWN0X2liKGlkX3ByaXYsIGNvbm5fcGFyYW0pOw0KPiAtCQlicmVhazsNCj4gLQljYXNlIFJETUFf
VFJBTlNQT1JUX0lXQVJQOg0KPiArCX0gZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdhcnAoaWQt
PmRldmljZSwgaWQtPnBvcnRfbnVtKSkNCj4gIAkJcmV0ID0gY21hX2Nvbm5lY3RfaXcoaWRfcHJp
diwgY29ubl9wYXJhbSk7DQo+IC0JCWJyZWFrOw0KPiAtCWRlZmF1bHQ6DQo+ICsJZWxzZQ0KPiAg
CQlyZXQgPSAtRU5PU1lTOw0KPiAtCQlicmVhazsNCj4gLQl9DQo+ICAJaWYgKHJldCkNCj4gIAkJ
Z290byBlcnI7DQo+IA0KPiBAQCAtMjk5OSw4ICsyOTYzLDcgQEAgaW50IHJkbWFfYWNjZXB0KHN0
cnVjdCByZG1hX2NtX2lkICppZCwgc3RydWN0DQo+IHJkbWFfY29ubl9wYXJhbSAqY29ubl9wYXJh
bSkNCj4gIAkJaWRfcHJpdi0+c3JxID0gY29ubl9wYXJhbS0+c3JxOw0KPiAgCX0NCj4gDQo+IC0J
c3dpdGNoIChyZG1hX25vZGVfZ2V0X3RyYW5zcG9ydChpZC0+ZGV2aWNlLT5ub2RlX3R5cGUpKSB7
DQo+IC0JY2FzZSBSRE1BX1RSQU5TUE9SVF9JQjoNCj4gKwlpZiAocmRtYV9pYl9tZ210KGlkLT5k
ZXZpY2UsIGlkLT5wb3J0X251bSkpIHsNCg0KY2FwX2liX2NtKCkNCg0KDQo+ICAJCWlmIChpZC0+
cXBfdHlwZSA9PSBJQl9RUFRfVUQpIHsNCj4gIAkJCWlmIChjb25uX3BhcmFtKQ0KPiAgCQkJCXJl
dCA9IGNtYV9zZW5kX3NpZHJfcmVwKGlkX3ByaXYsIElCX1NJRFJfU1VDQ0VTUywNCj4gQEAgLTMw
MTYsMTQgKzI5NzksMTAgQEAgaW50IHJkbWFfYWNjZXB0KHN0cnVjdCByZG1hX2NtX2lkICppZCwg
c3RydWN0DQo+IHJkbWFfY29ubl9wYXJhbSAqY29ubl9wYXJhbSkNCj4gIAkJCWVsc2UNCj4gIAkJ
CQlyZXQgPSBjbWFfcmVwX3JlY3YoaWRfcHJpdik7DQo+ICAJCX0NCj4gLQkJYnJlYWs7DQo+IC0J
Y2FzZSBSRE1BX1RSQU5TUE9SVF9JV0FSUDoNCj4gKwl9IGVsc2UgaWYgKHJkbWFfdHJhbnNwb3J0
X2l3YXJwKGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpDQo+ICAJCXJldCA9IGNtYV9hY2NlcHRf
aXcoaWRfcHJpdiwgY29ubl9wYXJhbSk7DQoNCklmIGNhcF9pYl9jbSgpIGlzIHVzZWQgaW4gdGhl
IHBsYWNlcyBtYXJrZWQgYWJvdmUsIG1heWJlIGFkZCBhIGNhcF9pd19jbSgpIGZvciB0aGUgZWxz
ZSBjb25kaXRpb25zLg0KDQoNCj4gLQkJYnJlYWs7DQo+IC0JZGVmYXVsdDoNCj4gKwllbHNlDQo+
ICAJCXJldCA9IC1FTk9TWVM7DQo+IC0JCWJyZWFrOw0KPiAtCX0NCj4gDQo+ICAJaWYgKHJldCkN
Cj4gIAkJZ290byByZWplY3Q7DQo+IEBAIC0zMDY3LDggKzMwMjYsNyBAQCBpbnQgcmRtYV9yZWpl
Y3Qoc3RydWN0IHJkbWFfY21faWQgKmlkLCBjb25zdCB2b2lkDQo+ICpwcml2YXRlX2RhdGEsDQo+
ICAJaWYgKCFpZF9wcml2LT5jbV9pZC5pYikNCj4gIAkJcmV0dXJuIC1FSU5WQUw7DQo+IA0KPiAt
CXN3aXRjaCAocmRtYV9ub2RlX2dldF90cmFuc3BvcnQoaWQtPmRldmljZS0+bm9kZV90eXBlKSkg
ew0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRfSUI6DQo+ICsJaWYgKHJkbWFfaWJfbWdtdChpZC0+
ZGV2aWNlLCBpZC0+cG9ydF9udW0pKSB7DQoNCmNhcF9pYl9jbSgpDQoNCg0KPiAgCQlpZiAoaWQt
PnFwX3R5cGUgPT0gSUJfUVBUX1VEKQ0KPiAgCQkJcmV0ID0gY21hX3NlbmRfc2lkcl9yZXAoaWRf
cHJpdiwgSUJfU0lEUl9SRUpFQ1QsIDAsDQo+ICAJCQkJCQlwcml2YXRlX2RhdGEsIHByaXZhdGVf
ZGF0YV9sZW4pOw0KPiBAQCAtMzA3NiwxNSArMzAzNCwxMSBAQCBpbnQgcmRtYV9yZWplY3Qoc3Ry
dWN0IHJkbWFfY21faWQgKmlkLCBjb25zdCB2b2lkDQo+ICpwcml2YXRlX2RhdGEsDQo+ICAJCQly
ZXQgPSBpYl9zZW5kX2NtX3JlaihpZF9wcml2LT5jbV9pZC5pYiwNCj4gIAkJCQkJICAgICBJQl9D
TV9SRUpfQ09OU1VNRVJfREVGSU5FRCwgTlVMTCwNCj4gIAkJCQkJICAgICAwLCBwcml2YXRlX2Rh
dGEsIHByaXZhdGVfZGF0YV9sZW4pOw0KPiAtCQlicmVhazsNCj4gLQljYXNlIFJETUFfVFJBTlNQ
T1JUX0lXQVJQOg0KPiArCX0gZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdhcnAoaWQtPmRldmlj
ZSwgaWQtPnBvcnRfbnVtKSkgew0KPiAgCQlyZXQgPSBpd19jbV9yZWplY3QoaWRfcHJpdi0+Y21f
aWQuaXcsDQo+ICAJCQkJICAgcHJpdmF0ZV9kYXRhLCBwcml2YXRlX2RhdGFfbGVuKTsNCj4gLQkJ
YnJlYWs7DQo+IC0JZGVmYXVsdDoNCj4gKwl9IGVsc2UNCj4gIAkJcmV0ID0gLUVOT1NZUzsNCj4g
LQkJYnJlYWs7DQo+IC0JfQ0KPiAgCXJldHVybiByZXQ7DQo+ICB9DQo+ICBFWFBPUlRfU1lNQk9M
KHJkbWFfcmVqZWN0KTsNCj4gQEAgLTMwOTgsMjIgKzMwNTIsMTcgQEAgaW50IHJkbWFfZGlzY29u
bmVjdChzdHJ1Y3QgcmRtYV9jbV9pZCAqaWQpDQo+ICAJaWYgKCFpZF9wcml2LT5jbV9pZC5pYikN
Cj4gIAkJcmV0dXJuIC1FSU5WQUw7DQo+IA0KPiAtCXN3aXRjaCAocmRtYV9ub2RlX2dldF90cmFu
c3BvcnQoaWQtPmRldmljZS0+bm9kZV90eXBlKSkgew0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRf
SUI6DQo+ICsJaWYgKHJkbWFfaWJfbWdtdChpZC0+ZGV2aWNlLCBpZC0+cG9ydF9udW0pKSB7DQo+
ICAJCXJldCA9IGNtYV9tb2RpZnlfcXBfZXJyKGlkX3ByaXYpOw0KPiAgCQlpZiAocmV0KQ0KPiAg
CQkJZ290byBvdXQ7DQo+ICAJCS8qIEluaXRpYXRlIG9yIHJlc3BvbmQgdG8gYSBkaXNjb25uZWN0
LiAqLw0KPiAgCQlpZiAoaWJfc2VuZF9jbV9kcmVxKGlkX3ByaXYtPmNtX2lkLmliLCBOVUxMLCAw
KSkNCj4gIAkJCWliX3NlbmRfY21fZHJlcChpZF9wcml2LT5jbV9pZC5pYiwgTlVMTCwgMCk7DQoN
CmNhcF9pYl9jbSgpDQoNCg0KPiAtCQlicmVhazsNCj4gLQljYXNlIFJETUFfVFJBTlNQT1JUX0lX
QVJQOg0KPiArCX0gZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaXdhcnAoaWQtPmRldmljZSwgaWQt
PnBvcnRfbnVtKSkgew0KPiAgCQlyZXQgPSBpd19jbV9kaXNjb25uZWN0KGlkX3ByaXYtPmNtX2lk
Lml3LCAwKTsNCj4gLQkJYnJlYWs7DQo+IC0JZGVmYXVsdDoNCj4gKwl9IGVsc2UNCj4gIAkJcmV0
ID0gLUVJTlZBTDsNCj4gLQkJYnJlYWs7DQo+IC0JfQ0KPiAgb3V0Og0KPiAgCXJldHVybiByZXQ7
DQo+ICB9DQo+IEBAIC0zMzU5LDI0ICszMzA4LDEzIEBAIGludCByZG1hX2pvaW5fbXVsdGljYXN0
KHN0cnVjdCByZG1hX2NtX2lkICppZCwNCj4gc3RydWN0IHNvY2thZGRyICphZGRyLA0KPiAgCWxp
c3RfYWRkKCZtYy0+bGlzdCwgJmlkX3ByaXYtPm1jX2xpc3QpOw0KPiAgCXNwaW5fdW5sb2NrKCZp
ZF9wcml2LT5sb2NrKTsNCj4gDQo+IC0Jc3dpdGNoIChyZG1hX25vZGVfZ2V0X3RyYW5zcG9ydChp
ZC0+ZGV2aWNlLT5ub2RlX3R5cGUpKSB7DQo+IC0JY2FzZSBSRE1BX1RSQU5TUE9SVF9JQjoNCj4g
LQkJc3dpdGNoIChyZG1hX3BvcnRfZ2V0X2xpbmtfbGF5ZXIoaWQtPmRldmljZSwgaWQtPnBvcnRf
bnVtKSkgew0KPiAtCQljYXNlIElCX0xJTktfTEFZRVJfSU5GSU5JQkFORDoNCj4gLQkJCXJldCA9
IGNtYV9qb2luX2liX211bHRpY2FzdChpZF9wcml2LCBtYyk7DQo+IC0JCQlicmVhazsNCj4gLQkJ
Y2FzZSBJQl9MSU5LX0xBWUVSX0VUSEVSTkVUOg0KPiAtCQkJa3JlZl9pbml0KCZtYy0+bWNyZWYp
Ow0KPiAtCQkJcmV0ID0gY21hX2lib2Vfam9pbl9tdWx0aWNhc3QoaWRfcHJpdiwgbWMpOw0KPiAt
CQkJYnJlYWs7DQo+IC0JCWRlZmF1bHQ6DQo+IC0JCQlyZXQgPSAtRUlOVkFMOw0KPiAtCQl9DQo+
IC0JCWJyZWFrOw0KPiAtCWRlZmF1bHQ6DQo+ICsJaWYgKHJkbWFfdHJhbnNwb3J0X2lib2UoaWQt
PmRldmljZSwgaWQtPnBvcnRfbnVtKSkgew0KPiArCQlrcmVmX2luaXQoJm1jLT5tY3JlZik7DQo+
ICsJCXJldCA9IGNtYV9pYm9lX2pvaW5fbXVsdGljYXN0KGlkX3ByaXYsIG1jKTsNCj4gKwl9IGVs
c2UgaWYgKHJkbWFfdHJhbnNwb3J0X2liKGlkLT5kZXZpY2UsIGlkLT5wb3J0X251bSkpDQo+ICsJ
CXJldCA9IGNtYV9qb2luX2liX211bHRpY2FzdChpZF9wcml2LCBtYyk7DQoNCmNhcF9pYl9tY2Fz
dCgpDQoNCg0KPiArCWVsc2UNCj4gIAkJcmV0ID0gLUVOT1NZUzsNCj4gLQkJYnJlYWs7DQo+IC0J
fQ0KPiANCj4gIAlpZiAocmV0KSB7DQo+ICAJCXNwaW5fbG9ja19pcnEoJmlkX3ByaXYtPmxvY2sp
Ow0KPiBAQCAtMzQwNCwxOSArMzM0MiwxNyBAQCB2b2lkIHJkbWFfbGVhdmVfbXVsdGljYXN0KHN0
cnVjdCByZG1hX2NtX2lkICppZCwNCj4gc3RydWN0IHNvY2thZGRyICphZGRyKQ0KPiAgCQkJCWli
X2RldGFjaF9tY2FzdChpZC0+cXAsDQo+ICAJCQkJCQkmbWMtPm11bHRpY2FzdC5pYi0+cmVjLm1n
aWQsDQo+ICAJCQkJCQliZTE2X3RvX2NwdShtYy0+bXVsdGljYXN0LmliLQ0KPiA+cmVjLm1saWQp
KTsNCj4gLQkJCWlmIChyZG1hX25vZGVfZ2V0X3RyYW5zcG9ydChpZF9wcml2LT5jbWFfZGV2LT5k
ZXZpY2UtDQo+ID5ub2RlX3R5cGUpID09IFJETUFfVFJBTlNQT1JUX0lCKSB7DQo+IC0JCQkJc3dp
dGNoIChyZG1hX3BvcnRfZ2V0X2xpbmtfbGF5ZXIoaWQtPmRldmljZSwgaWQtDQo+ID5wb3J0X251
bSkpIHsNCj4gLQkJCQljYXNlIElCX0xJTktfTEFZRVJfSU5GSU5JQkFORDoNCj4gLQkJCQkJaWJf
c2FfZnJlZV9tdWx0aWNhc3QobWMtPm11bHRpY2FzdC5pYik7DQo+IC0JCQkJCWtmcmVlKG1jKTsN
Cj4gLQkJCQkJYnJlYWs7DQo+IC0JCQkJY2FzZSBJQl9MSU5LX0xBWUVSX0VUSEVSTkVUOg0KPiAt
CQkJCQlrcmVmX3B1dCgmbWMtPm1jcmVmLCByZWxlYXNlX21jKTsNCj4gLQkJCQkJYnJlYWs7DQo+
IC0JCQkJZGVmYXVsdDoNCj4gLQkJCQkJYnJlYWs7DQo+IC0JCQkJfQ0KPiAtCQkJfQ0KPiArDQo+
ICsJCQkvKiBXaWxsIHRoaXMgaGFwcGVuPyAqLw0KPiArCQkJQlVHX09OKGlkX3ByaXYtPmNtYV9k
ZXYtPmRldmljZSAhPSBpZC0+ZGV2aWNlKTsNCg0KU2hvdWxkIG5vdCBoYXBwZW4NCg0KPiArDQo+
ICsJCQlpZiAocmRtYV90cmFuc3BvcnRfaWIoaWQtPmRldmljZSwgaWQtPnBvcnRfbnVtKSkgew0K
PiArCQkJCWliX3NhX2ZyZWVfbXVsdGljYXN0KG1jLT5tdWx0aWNhc3QuaWIpOw0KPiArCQkJCWtm
cmVlKG1jKTsNCg0KY2FwX2liX21jYXN0KCkNCg0KDQo+ICsJCQl9IGVsc2UgaWYgKHJkbWFfdHJh
bnNwb3J0X2lib2UoaWQtPmRldmljZSwNCj4gKwkJCQkJCSAgICAgICBpZC0+cG9ydF9udW0pKQ0K
PiArCQkJCWtyZWZfcHV0KCZtYy0+bWNyZWYsIHJlbGVhc2VfbWMpOw0KPiArDQo+ICAJCQlyZXR1
cm47DQo+ICAJCX0NCj4gIAl9DQo+IGRpZmYgLS1naXQgYS9kcml2ZXJzL2luZmluaWJhbmQvY29y
ZS91Y21hLmMNCj4gYi9kcml2ZXJzL2luZmluaWJhbmQvY29yZS91Y21hLmMNCj4gaW5kZXggNDVk
NjdlOS4uNDJjOWJmNiAxMDA2NDQNCj4gLS0tIGEvZHJpdmVycy9pbmZpbmliYW5kL2NvcmUvdWNt
YS5jDQo+ICsrKyBiL2RyaXZlcnMvaW5maW5pYmFuZC9jb3JlL3VjbWEuYw0KPiBAQCAtNzIyLDI2
ICs3MjIsMTMgQEAgc3RhdGljIHNzaXplX3QgdWNtYV9xdWVyeV9yb3V0ZShzdHJ1Y3QgdWNtYV9m
aWxlDQo+ICpmaWxlLA0KPiANCj4gIAlyZXNwLm5vZGVfZ3VpZCA9IChfX2ZvcmNlIF9fdTY0KSBj
dHgtPmNtX2lkLT5kZXZpY2UtPm5vZGVfZ3VpZDsNCj4gIAlyZXNwLnBvcnRfbnVtID0gY3R4LT5j
bV9pZC0+cG9ydF9udW07DQo+IC0Jc3dpdGNoIChyZG1hX25vZGVfZ2V0X3RyYW5zcG9ydChjdHgt
PmNtX2lkLT5kZXZpY2UtPm5vZGVfdHlwZSkpIHsNCj4gLQljYXNlIFJETUFfVFJBTlNQT1JUX0lC
Og0KPiAtCQlzd2l0Y2ggKHJkbWFfcG9ydF9nZXRfbGlua19sYXllcihjdHgtPmNtX2lkLT5kZXZp
Y2UsDQo+IC0JCQljdHgtPmNtX2lkLT5wb3J0X251bSkpIHsNCj4gLQkJY2FzZSBJQl9MSU5LX0xB
WUVSX0lORklOSUJBTkQ6DQo+IC0JCQl1Y21hX2NvcHlfaWJfcm91dGUoJnJlc3AsICZjdHgtPmNt
X2lkLT5yb3V0ZSk7DQo+IC0JCQlicmVhazsNCj4gLQkJY2FzZSBJQl9MSU5LX0xBWUVSX0VUSEVS
TkVUOg0KPiAtCQkJdWNtYV9jb3B5X2lib2Vfcm91dGUoJnJlc3AsICZjdHgtPmNtX2lkLT5yb3V0
ZSk7DQo+IC0JCQlicmVhazsNCj4gLQkJZGVmYXVsdDoNCj4gLQkJCWJyZWFrOw0KPiAtCQl9DQo+
IC0JCWJyZWFrOw0KPiAtCWNhc2UgUkRNQV9UUkFOU1BPUlRfSVdBUlA6DQo+ICsNCj4gKwlpZiAo
cmRtYV90cmFuc3BvcnRfaWIoY3R4LT5jbV9pZC0+ZGV2aWNlLCBjdHgtPmNtX2lkLT5wb3J0X251
bSkpDQo+ICsJCXVjbWFfY29weV9pYl9yb3V0ZSgmcmVzcCwgJmN0eC0+Y21faWQtPnJvdXRlKTsN
Cg0KY2FwX2liX3NhKCkNCg0KDQo+ICsJZWxzZSBpZiAocmRtYV90cmFuc3BvcnRfaWJvZShjdHgt
PmNtX2lkLT5kZXZpY2UsIGN0eC0+Y21faWQtDQo+ID5wb3J0X251bSkpDQo+ICsJCXVjbWFfY29w
eV9pYm9lX3JvdXRlKCZyZXNwLCAmY3R4LT5jbV9pZC0+cm91dGUpOw0KPiArCWVsc2UgaWYgKHJk
bWFfdHJhbnNwb3J0X2l3YXJwKGN0eC0+Y21faWQtPmRldmljZSwgY3R4LT5jbV9pZC0NCj4gPnBv
cnRfbnVtKSkNCj4gIAkJdWNtYV9jb3B5X2l3X3JvdXRlKCZyZXNwLCAmY3R4LT5jbV9pZC0+cm91
dGUpOw0KPiAtCQlicmVhazsNCj4gLQlkZWZhdWx0Og0KPiAtCQlicmVhazsNCj4gLQl9DQo+IA0K
PiAgb3V0Og0KPiAgCWlmIChjb3B5X3RvX3VzZXIoKHZvaWQgX191c2VyICopKHVuc2lnbmVkIGxv
bmcpY21kLnJlc3BvbnNlLA0KDQoNCi0gU2Vhbg0K
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Michael Wang April 8, 2015, 8:39 a.m. UTC | #3
On 04/07/2015 11:11 PM, Steve Wise wrote:
[snip]
>> @@ -1006,17 +997,14 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
>>  		mc = container_of(id_priv->mc_list.next,
>>  				  struct cma_multicast, list);
>>  		list_del(&mc->list);
>> -		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> +		if (rdma_transport_ib(id_priv->cma_dev->device,
>> +				      id_priv->id.port_num)) {
>>  			ib_sa_free_multicast(mc->multicast.ib);
>>  			kfree(mc);
>>  			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> +		} else if (rdma_transport_ib(id_priv->cma_dev->device,
>> +					     id_priv->id.port_num))
>>  			kref_put(&mc->mcref, release_mc);
>> -			break;
>> -		default:
>> -			break;
>> -		}
>>  	}
>>  }
>>
> 
> Doesn't the above change result in:
> 
> if (rdma_transport_ib()) {
> } else if (rdma_transport_ib()) {
> }
>  

My bad here.. I guess 'else' is enough.

Regards,
Michael Wang

> ????
> 
>> @@ -1037,17 +1025,13 @@ void rdma_destroy_id(struct rdma_cm_id *id)
>>  	mutex_unlock(&id_priv->handler_mutex);
>>
>>  	if (id_priv->cma_dev) {
>> -		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
>> -		case RDMA_TRANSPORT_IB:
>> +		if (rdma_ib_mgmt(id_priv->id.device, id_priv->id.port_num)) {
>>  			if (id_priv->cm_id.ib)
>>  				ib_destroy_cm_id(id_priv->cm_id.ib);
>> -			break;
>> -		case RDMA_TRANSPORT_IWARP:
>> +		} else if (rdma_transport_iwarp(id_priv->id.device,
>> +							id_priv->id.port_num)) {
>>  			if (id_priv->cm_id.iw)
>>  				iw_destroy_cm_id(id_priv->cm_id.iw);
>> -			break;
>> -		default:
>> -			break;
>>  		}
>>  		cma_leave_mc_groups(id_priv);
>>  		cma_release_dev(id_priv);
>> @@ -1966,26 +1950,14 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
>>  		return -EINVAL;
>>
>>  	atomic_inc(&id_priv->refcount);
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> -		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> -			ret = cma_resolve_ib_route(id_priv, timeout_ms);
>> -			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> -			ret = cma_resolve_iboe_route(id_priv);
>> -			break;
>> -		default:
>> -			ret = -ENOSYS;
>> -		}
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	if (rdma_transport_ib(id->device, id->port_num))
>> +		ret = cma_resolve_ib_route(id_priv, timeout_ms);
>> +	else if (rdma_transport_iboe(id->device, id->port_num))
>> +		ret = cma_resolve_iboe_route(id_priv);
>> +	else if (rdma_transport_iwarp(id->device, id->port_num))
>>  		ret = cma_resolve_iw_route(id_priv, timeout_ms);
>> -		break;
>> -	default:
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>  	if (ret)
>>  		goto err;
>>
>> @@ -2059,7 +2031,7 @@ port_found:
>>  		goto out;
>>
>>  	id_priv->id.route.addr.dev_addr.dev_type =
>> -		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
>> +		(rdma_transport_ib(cma_dev->device, p)) ?
>>  		ARPHRD_INFINIBAND : ARPHRD_ETHER;
>>
>>  	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
>> @@ -2536,18 +2508,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
>>
>>  	id_priv->backlog = backlog;
>>  	if (id->device) {
>> -		switch (rdma_node_get_transport(id->device->node_type)) {
>> -		case RDMA_TRANSPORT_IB:
>> +		if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  			ret = cma_ib_listen(id_priv);
>>  			if (ret)
>>  				goto err;
>> -			break;
>> -		case RDMA_TRANSPORT_IWARP:
>> +		} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>>  			ret = cma_iw_listen(id_priv, backlog);
>>  			if (ret)
>>  				goto err;
>> -			break;
>> -		default:
>> +		} else {
>>  			ret = -ENOSYS;
>>  			goto err;
>>  		}
>> @@ -2883,20 +2852,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>>  		id_priv->srq = conn_param->srq;
>>  	}
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  		if (id->qp_type == IB_QPT_UD)
>>  			ret = cma_resolve_ib_udp(id_priv, conn_param);
>>  		else
>>  			ret = cma_connect_ib(id_priv, conn_param);
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num))
>>  		ret = cma_connect_iw(id_priv, conn_param);
>> -		break;
>> -	default:
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>  	if (ret)
>>  		goto err;
>>
>> @@ -2999,8 +2963,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>>  		id_priv->srq = conn_param->srq;
>>  	}
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  		if (id->qp_type == IB_QPT_UD) {
>>  			if (conn_param)
>>  				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
>> @@ -3016,14 +2979,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>>  			else
>>  				ret = cma_rep_recv(id_priv);
>>  		}
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num))
>>  		ret = cma_accept_iw(id_priv, conn_param);
>> -		break;
>> -	default:
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>
>>  	if (ret)
>>  		goto reject;
>> @@ -3067,8 +3026,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
>>  	if (!id_priv->cm_id.ib)
>>  		return -EINVAL;
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  		if (id->qp_type == IB_QPT_UD)
>>  			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
>>  						private_data, private_data_len);
>> @@ -3076,15 +3034,11 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
>>  			ret = ib_send_cm_rej(id_priv->cm_id.ib,
>>  					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
>>  					     0, private_data, private_data_len);
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>>  		ret = iw_cm_reject(id_priv->cm_id.iw,
>>  				   private_data, private_data_len);
>> -		break;
>> -	default:
>> +	} else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>  	return ret;
>>  }
>>  EXPORT_SYMBOL(rdma_reject);
>> @@ -3098,22 +3052,17 @@ int rdma_disconnect(struct rdma_cm_id *id)
>>  	if (!id_priv->cm_id.ib)
>>  		return -EINVAL;
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  		ret = cma_modify_qp_err(id_priv);
>>  		if (ret)
>>  			goto out;
>>  		/* Initiate or respond to a disconnect. */
>>  		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
>>  			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>>  		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
>> -		break;
>> -	default:
>> +	} else
>>  		ret = -EINVAL;
>> -		break;
>> -	}
>>  out:
>>  	return ret;
>>  }
>> @@ -3359,24 +3308,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
>>  	list_add(&mc->list, &id_priv->mc_list);
>>  	spin_unlock(&id_priv->lock);
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> -		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> -			ret = cma_join_ib_multicast(id_priv, mc);
>> -			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> -			kref_init(&mc->mcref);
>> -			ret = cma_iboe_join_multicast(id_priv, mc);
>> -			break;
>> -		default:
>> -			ret = -EINVAL;
>> -		}
>> -		break;
>> -	default:
>> +	if (rdma_transport_iboe(id->device, id->port_num)) {
>> +		kref_init(&mc->mcref);
>> +		ret = cma_iboe_join_multicast(id_priv, mc);
>> +	} else if (rdma_transport_ib(id->device, id->port_num))
>> +		ret = cma_join_ib_multicast(id_priv, mc);
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>
>>  	if (ret) {
>>  		spin_lock_irq(&id_priv->lock);
>> @@ -3404,19 +3342,17 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
>>  				ib_detach_mcast(id->qp,
>>  						&mc->multicast.ib->rec.mgid,
>>  						be16_to_cpu(mc->multicast.ib->rec.mlid));
>> -			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
>> -				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
>> -				case IB_LINK_LAYER_INFINIBAND:
>> -					ib_sa_free_multicast(mc->multicast.ib);
>> -					kfree(mc);
>> -					break;
>> -				case IB_LINK_LAYER_ETHERNET:
>> -					kref_put(&mc->mcref, release_mc);
>> -					break;
>> -				default:
>> -					break;
>> -				}
>> -			}
>> +
>> +			/* Will this happen? */
>> +			BUG_ON(id_priv->cma_dev->device != id->device);
>> +
>> +			if (rdma_transport_ib(id->device, id->port_num)) {
>> +				ib_sa_free_multicast(mc->multicast.ib);
>> +				kfree(mc);
>> +			} else if (rdma_transport_iboe(id->device,
>> +						       id->port_num))
>> +				kref_put(&mc->mcref, release_mc);
>> +
>>  			return;
>>  		}
>>  	}
>> diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
>> index 45d67e9..42c9bf6 100644
>> --- a/drivers/infiniband/core/ucma.c
>> +++ b/drivers/infiniband/core/ucma.c
>> @@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,
>>
>>  	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
>>  	resp.port_num = ctx->cm_id->port_num;
>> -	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> -		switch (rdma_port_get_link_layer(ctx->cm_id->device,
>> -			ctx->cm_id->port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> -			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
>> -			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> -			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
>> -			break;
>> -		default:
>> -			break;
>> -		}
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +
>> +	if (rdma_transport_ib(ctx->cm_id->device, ctx->cm_id->port_num))
>> +		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
>> +	else if (rdma_transport_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
>> +		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
>> +	else if (rdma_transport_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
>>  		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
>> -		break;
>> -	default:
>> -		break;
>> -	}
>>
>>  out:
>>  	if (copy_to_user((void __user *)(unsigned long)cmd.response,
>> --
>> 2.1.0
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Michael Wang April 8, 2015, 9:37 a.m. UTC | #4
Hi, Sean

Thanks for the review :-) cma is the toughest part of the
reform, and I really need some guidance here.


On 04/07/2015 11:36 PM, Hefty, Sean wrote:
>> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
>> index d8a8ea7..c23f483 100644
>> --- a/drivers/infiniband/core/cma.c
>> +++ b/drivers/infiniband/core/cma.c
>> @@ -435,10 +435,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private
>> *id_priv)
>>  	pkey = ntohs(addr->sib_pkey);
>>
>>  	list_for_each_entry(cur_dev, &dev_list, list) {
>> -		if (rdma_node_get_transport(cur_dev->device->node_type) !=
>> RDMA_TRANSPORT_IB)
>> -			continue;
>> -
>>  		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
>> +			if (!rdma_ib_mgmt(cur_dev->device, p))
>> +				continue;
> 
> This check wants to be something like is_af_ib_supported().  Checking for IB transport may actually be better than checking for IB management.  I don't know if IBoE/RoCE devices support AF_IB.

The wrapper makes sense, but do we have a guarantee that an IBoE port won't
be used for an AF_IB address? I just can't locate the place where we filter it out...

> 
[snip]
>> -	    == IB_LINK_LAYER_ETHERNET) {
>> +	/* Will this happen? */
>> +	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
> 
> This shouldn't happen.  The BUG_ON looks okay.

Got it :-)

> 
> 
>> +	if (rdma_transport_iboe(id_priv->id.device, id_priv->id.port_num)) {
>>  		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
>>
>>  		if (ret)
>> @@ -700,8 +700,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private
>> *id_priv,
>>  	int ret;
>>  	u16 pkey;
>>
>> -	if (rdma_port_get_link_layer(id_priv->id.device, id_priv-
>>> id.port_num) ==
>> -	    IB_LINK_LAYER_INFINIBAND)
>> +	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num))
>>  		pkey = ib_addr_get_pkey(dev_addr);
>>  	else
>>  		pkey = 0xffff;
> 
> Check here should be against the link layer, not transport.

I guess the name is confusing us again... what if we use rdma_tech_ib() here?
it's the only tech using IB link layers, others are all ETH.

> 
> 
>> @@ -735,8 +734,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct
[snip]
>>
>>  static void cma_cancel_route(struct rdma_id_private *id_priv)
>>  {
>> -	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv-
>>> id.port_num)) {
>> -	case IB_LINK_LAYER_INFINIBAND:
>> +	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num)) {
> 
> The check should be cap_ib_sa()

Got it, will be in next version :-)

All the mcast/sa suggestion below will be applied too.

>
[snip]
>>
>>  	id_priv->id.route.addr.dev_addr.dev_type =
>> -		(rdma_port_get_link_layer(cma_dev->device, p) ==
>> IB_LINK_LAYER_INFINIBAND) ?
>> +		(rdma_transport_ib(cma_dev->device, p)) ?
>>  		ARPHRD_INFINIBAND : ARPHRD_ETHER;
> 
> This wants the link layer, or maybe use cap_ipoib.

Is this related with ipoib only?

> 
> 
>>
>>  	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
>> @@ -2536,18 +2508,15 @@ int rdma_listen(struct rdma_cm_id *id, int
>> backlog)
>>
>>  	id_priv->backlog = backlog;
>>  	if (id->device) {
>> -		switch (rdma_node_get_transport(id->device->node_type)) {
>> -		case RDMA_TRANSPORT_IB:
>> +		if (rdma_ib_mgmt(id->device, id->port_num)) {
> 
> Want cap_ib_cm()

Will be in next version :-) and the other cap_ib_cm() suggestion too.

> 
> 
>>  			ret = cma_ib_listen(id_priv);
[snip]
>> @@ -3016,14 +2979,10 @@ int rdma_accept(struct rdma_cm_id *id, struct
>> rdma_conn_param *conn_param)
>>  			else
>>  				ret = cma_rep_recv(id_priv);
>>  		}
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num))
>>  		ret = cma_accept_iw(id_priv, conn_param);
> 
> If cap_ib_cm() is used in the places marked above, maybe add a cap_iw_cm() for the else conditions.

Sounds good, will be in next version :-)

Regards,
Michael Wang

> 
> 
>> -		break;
>> -	default:
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>
>>  	if (ret)
>>  		goto reject;
>> @@ -3067,8 +3026,7 @@ int rdma_reject(struct rdma_cm_id *id, const void
>> *private_data,
>>  	if (!id_priv->cm_id.ib)
>>  		return -EINVAL;
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
> 
> cap_ib_cm()
> 
> 
>>  		if (id->qp_type == IB_QPT_UD)
>>  			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
>>  						private_data, private_data_len);
>> @@ -3076,15 +3034,11 @@ int rdma_reject(struct rdma_cm_id *id, const void
>> *private_data,
>>  			ret = ib_send_cm_rej(id_priv->cm_id.ib,
>>  					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
>>  					     0, private_data, private_data_len);
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>>  		ret = iw_cm_reject(id_priv->cm_id.iw,
>>  				   private_data, private_data_len);
>> -		break;
>> -	default:
>> +	} else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>  	return ret;
>>  }
>>  EXPORT_SYMBOL(rdma_reject);
>> @@ -3098,22 +3052,17 @@ int rdma_disconnect(struct rdma_cm_id *id)
>>  	if (!id_priv->cm_id.ib)
>>  		return -EINVAL;
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> +	if (rdma_ib_mgmt(id->device, id->port_num)) {
>>  		ret = cma_modify_qp_err(id_priv);
>>  		if (ret)
>>  			goto out;
>>  		/* Initiate or respond to a disconnect. */
>>  		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
>>  			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
> 
> cap_ib_cm()
> 
> 
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
>>  		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
>> -		break;
>> -	default:
>> +	} else
>>  		ret = -EINVAL;
>> -		break;
>> -	}
>>  out:
>>  	return ret;
>>  }
>> @@ -3359,24 +3308,13 @@ int rdma_join_multicast(struct rdma_cm_id *id,
>> struct sockaddr *addr,
>>  	list_add(&mc->list, &id_priv->mc_list);
>>  	spin_unlock(&id_priv->lock);
>>
>> -	switch (rdma_node_get_transport(id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> -		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> -			ret = cma_join_ib_multicast(id_priv, mc);
>> -			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> -			kref_init(&mc->mcref);
>> -			ret = cma_iboe_join_multicast(id_priv, mc);
>> -			break;
>> -		default:
>> -			ret = -EINVAL;
>> -		}
>> -		break;
>> -	default:
>> +	if (rdma_transport_iboe(id->device, id->port_num)) {
>> +		kref_init(&mc->mcref);
>> +		ret = cma_iboe_join_multicast(id_priv, mc);
>> +	} else if (rdma_transport_ib(id->device, id->port_num))
>> +		ret = cma_join_ib_multicast(id_priv, mc);
> 
> cap_ib_mcast()
> 
> 
>> +	else
>>  		ret = -ENOSYS;
>> -		break;
>> -	}
>>
>>  	if (ret) {
>>  		spin_lock_irq(&id_priv->lock);
>> @@ -3404,19 +3342,17 @@ void rdma_leave_multicast(struct rdma_cm_id *id,
>> struct sockaddr *addr)
>>  				ib_detach_mcast(id->qp,
>>  						&mc->multicast.ib->rec.mgid,
>>  						be16_to_cpu(mc->multicast.ib-
>>> rec.mlid));
>> -			if (rdma_node_get_transport(id_priv->cma_dev->device-
>>> node_type) == RDMA_TRANSPORT_IB) {
>> -				switch (rdma_port_get_link_layer(id->device, id-
>>> port_num)) {
>> -				case IB_LINK_LAYER_INFINIBAND:
>> -					ib_sa_free_multicast(mc->multicast.ib);
>> -					kfree(mc);
>> -					break;
>> -				case IB_LINK_LAYER_ETHERNET:
>> -					kref_put(&mc->mcref, release_mc);
>> -					break;
>> -				default:
>> -					break;
>> -				}
>> -			}
>> +
>> +			/* Will this happen? */
>> +			BUG_ON(id_priv->cma_dev->device != id->device);
> 
> Should not happen
> 
>> +
>> +			if (rdma_transport_ib(id->device, id->port_num)) {
>> +				ib_sa_free_multicast(mc->multicast.ib);
>> +				kfree(mc);
> 
> cap_ib_mcast()
> 
> 
>> +			} else if (rdma_transport_iboe(id->device,
>> +						       id->port_num))
>> +				kref_put(&mc->mcref, release_mc);
>> +
>>  			return;
>>  		}
>>  	}
>> diff --git a/drivers/infiniband/core/ucma.c
>> b/drivers/infiniband/core/ucma.c
>> index 45d67e9..42c9bf6 100644
>> --- a/drivers/infiniband/core/ucma.c
>> +++ b/drivers/infiniband/core/ucma.c
>> @@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file
>> *file,
>>
>>  	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
>>  	resp.port_num = ctx->cm_id->port_num;
>> -	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
>> -	case RDMA_TRANSPORT_IB:
>> -		switch (rdma_port_get_link_layer(ctx->cm_id->device,
>> -			ctx->cm_id->port_num)) {
>> -		case IB_LINK_LAYER_INFINIBAND:
>> -			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
>> -			break;
>> -		case IB_LINK_LAYER_ETHERNET:
>> -			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
>> -			break;
>> -		default:
>> -			break;
>> -		}
>> -		break;
>> -	case RDMA_TRANSPORT_IWARP:
>> +
>> +	if (rdma_transport_ib(ctx->cm_id->device, ctx->cm_id->port_num))
>> +		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
> 
> cap_ib_sa()
> 
> 
>> +	else if (rdma_transport_iboe(ctx->cm_id->device, ctx->cm_id-
>>> port_num))
>> +		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
>> +	else if (rdma_transport_iwarp(ctx->cm_id->device, ctx->cm_id-
>>> port_num))
>>  		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
>> -		break;
>> -	default:
>> -		break;
>> -	}
>>
>>  out:
>>  	if (copy_to_user((void __user *)(unsigned long)cmd.response,
> 
> 
> - Sean
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Hefty, Sean April 8, 2015, 5:02 p.m. UTC | #5
> On 04/07/2015 11:36 PM, Hefty, Sean wrote:

> >> diff --git a/drivers/infiniband/core/cma.c

> b/drivers/infiniband/core/cma.c

> >> index d8a8ea7..c23f483 100644

> >> --- a/drivers/infiniband/core/cma.c

> >> +++ b/drivers/infiniband/core/cma.c

> >> @@ -435,10 +435,10 @@ static int cma_resolve_ib_dev(struct

> rdma_id_private

> >> *id_priv)

> >>  	pkey = ntohs(addr->sib_pkey);

> >>

> >>  	list_for_each_entry(cur_dev, &dev_list, list) {

> >> -		if (rdma_node_get_transport(cur_dev->device->node_type) !=

> >> RDMA_TRANSPORT_IB)

> >> -			continue;

> >> -

> >>  		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {

> >> +			if (!rdma_ib_mgmt(cur_dev->device, p))

> >> +				continue;

> >

> > This check wants to be something like is_af_ib_supported().  Checking

> for IB transport may actually be better than checking for IB management.

> I don't know if IBoE/RoCE devices support AF_IB.

> 

> The wrapper make sense, but do we have the guarantee that IBoE port won't

> be used for AF_IB address? I just can't locate the place we filtered it

> out...


I can't think of a reason why IBoE wouldn't work with AF_IB, but I'm not sure if anyone has tested it.  The original check would have let IBoE through.  When I suggested checking for IB transport, I meant the actual transport protocol, which would have included both IB and IBoE.

> >> @@ -700,8 +700,7 @@ static int cma_ib_init_qp_attr(struct

> rdma_id_private

> >> *id_priv,

> >>  	int ret;

> >>  	u16 pkey;

> >>

> >> -	if (rdma_port_get_link_layer(id_priv->id.device, id_priv-

> >>> id.port_num) ==

> >> -	    IB_LINK_LAYER_INFINIBAND)

> >> +	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num))

> >>  		pkey = ib_addr_get_pkey(dev_addr);

> >>  	else

> >>  		pkey = 0xffff;

> >

> > Check here should be against the link layer, not transport.

> 

> I guess the name confusing us again... what if use rdma_tech_ib() here?

> it's the only tech using IB link layers, others are all ETH.


Yes, that would work.

> >>  	id_priv->id.route.addr.dev_addr.dev_type =

> >> -		(rdma_port_get_link_layer(cma_dev->device, p) ==

> >> IB_LINK_LAYER_INFINIBAND) ?

> >> +		(rdma_transport_ib(cma_dev->device, p)) ?

> >>  		ARPHRD_INFINIBAND : ARPHRD_ETHER;

> >

> > This wants the link layer, or maybe use cap_ipoib.

> 

> Is this related with ipoib only?


ARPHRD_INFINIBAND is related to ipoib.  In your next update, maybe go with tech_ib.  I don't know the status of ipoib over iboe.
Michael Wang April 9, 2015, 8:05 a.m. UTC | #6
On 04/08/2015 07:02 PM, Hefty, Sean wrote:
[snip]
>>
>> The wrapper make sense, but do we have the guarantee that IBoE port won't
>> be used for AF_IB address? I just can't locate the place we filtered it
>> out...
> 
> I can't think of a reason why IBoE wouldn't work with AF_IB, but I'm not sure if anyone has tested it.  The original check would have let IBoE through.  When I suggested checking for IB transport, I meant the actual transport protocol, which would have included both IB and IBoE.

Got it :-)

> 
>>>> @@ -700,8 +700,7 @@ static int cma_ib_init_qp_attr(struct
[snip]
> 
>>>>  	id_priv->id.route.addr.dev_addr.dev_type =
>>>> -		(rdma_port_get_link_layer(cma_dev->device, p) ==
>>>> IB_LINK_LAYER_INFINIBAND) ?
>>>> +		(rdma_transport_ib(cma_dev->device, p)) ?
>>>>  		ARPHRD_INFINIBAND : ARPHRD_ETHER;
>>>
>>> This wants the link layer, or maybe use cap_ipoib.
>>
>> Is this related with ipoib only?
> 
> ARPHDR_INFINIBAND is related to ipoib.  In your next update, maybe go with tech_ib.  I don't know the status of ipoib over iboe.

Will be in next version :-)

Regards,
Michael Wang

> 
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d8a8ea7..c23f483 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -435,10 +435,10 @@  static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	pkey = ntohs(addr->sib_pkey);
 
 	list_for_each_entry(cur_dev, &dev_list, list) {
-		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
-			continue;
-
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+			if (!rdma_ib_mgmt(cur_dev->device, p))
+				continue;
+
 			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
 				continue;
 
@@ -633,10 +633,10 @@  static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
-	    == RDMA_TRANSPORT_IB &&
-	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
-	    == IB_LINK_LAYER_ETHERNET) {
+	/* Will this happen? */
+	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+
+	if (rdma_transport_iboe(id_priv->id.device, id_priv->id.port_num)) {
 		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
 
 		if (ret)
@@ -700,8 +700,7 @@  static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	int ret;
 	u16 pkey;
 
-	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
-	    IB_LINK_LAYER_INFINIBAND)
+	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num))
 		pkey = ib_addr_get_pkey(dev_addr);
 	else
 		pkey = 0xffff;
@@ -735,8 +734,7 @@  int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	int ret = 0;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_ib_mgmt(id_priv->id.device, id_priv->id.port_num)) {
 		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
@@ -745,19 +743,16 @@  int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_transport_iwarp(id_priv->id.device,
+						id_priv->id.port_num)) {
 		if (!id_priv->cm_id.iw) {
 			qp_attr->qp_access_flags = 0;
 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}
 
 	return ret;
 }
@@ -928,13 +923,9 @@  static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
-	case IB_LINK_LAYER_INFINIBAND:
+	if (rdma_transport_ib(id_priv->id.device, id_priv->id.port_num)) {
 		if (id_priv->query)
 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
-		break;
-	default:
-		break;
 	}
 }
 
@@ -1006,17 +997,14 @@  static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 		mc = container_of(id_priv->mc_list.next,
 				  struct cma_multicast, list);
 		list_del(&mc->list);
-		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
+		if (rdma_transport_ib(id_priv->cma_dev->device,
+				      id_priv->id.port_num)) {
 			ib_sa_free_multicast(mc->multicast.ib);
 			kfree(mc);
 			break;
-		case IB_LINK_LAYER_ETHERNET:
+		} else if (rdma_transport_ib(id_priv->cma_dev->device,
+					     id_priv->id.port_num))
 			kref_put(&mc->mcref, release_mc);
-			break;
-		default:
-			break;
-		}
 	}
 }
 
@@ -1037,17 +1025,13 @@  void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_unlock(&id_priv->handler_mutex);
 
 	if (id_priv->cma_dev) {
-		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_ib_mgmt(id_priv->id.device, id_priv->id.port_num)) {
 			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_transport_iwarp(id_priv->id.device,
+							id_priv->id.port_num)) {
 			if (id_priv->cm_id.iw)
 				iw_destroy_cm_id(id_priv->cm_id.iw);
-			break;
-		default:
-			break;
 		}
 		cma_leave_mc_groups(id_priv);
 		cma_release_dev(id_priv);
@@ -1966,26 +1950,14 @@  int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_resolve_ib_route(id_priv, timeout_ms);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			ret = cma_resolve_iboe_route(id_priv);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	if (rdma_transport_ib(id->device, id->port_num))
+		ret = cma_resolve_ib_route(id_priv, timeout_ms);
+	else if (rdma_transport_iboe(id->device, id->port_num))
+		ret = cma_resolve_iboe_route(id_priv);
+	else if (rdma_transport_iwarp(id->device, id->port_num))
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 	if (ret)
 		goto err;
 
@@ -2059,7 +2031,7 @@  port_found:
 		goto out;
 
 	id_priv->id.route.addr.dev_addr.dev_type =
-		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+		(rdma_transport_ib(cma_dev->device, p)) ?
 		ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2536,18 +2508,15 @@  int rdma_listen(struct rdma_cm_id *id, int backlog)
 
 	id_priv->backlog = backlog;
 	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_ib_mgmt(id->device, id->port_num)) {
 			ret = cma_ib_listen(id_priv);
 			if (ret)
 				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_transport_iwarp(id->device, id->port_num)) {
 			ret = cma_iw_listen(id_priv, backlog);
 			if (ret)
 				goto err;
-			break;
-		default:
+		} else {
 			ret = -ENOSYS;
 			goto err;
 		}
@@ -2883,20 +2852,15 @@  int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_ib_mgmt(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_transport_iwarp(id->device, id->port_num))
 		ret = cma_connect_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 	if (ret)
 		goto err;
 
@@ -2999,8 +2963,7 @@  int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_ib_mgmt(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD) {
 			if (conn_param)
 				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3016,14 +2979,10 @@  int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 			else
 				ret = cma_rep_recv(id_priv);
 		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_transport_iwarp(id->device, id->port_num))
 		ret = cma_accept_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 
 	if (ret)
 		goto reject;
@@ -3067,8 +3026,7 @@  int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_ib_mgmt(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
 						private_data, private_data_len);
@@ -3076,15 +3034,11 @@  int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 			ret = ib_send_cm_rej(id_priv->cm_id.ib,
 					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
 					     0, private_data, private_data_len);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
 		ret = iw_cm_reject(id_priv->cm_id.iw,
 				   private_data, private_data_len);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}
 	return ret;
 }
 EXPORT_SYMBOL(rdma_reject);
@@ -3098,22 +3052,17 @@  int rdma_disconnect(struct rdma_cm_id *id)
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_ib_mgmt(id->device, id->port_num)) {
 		ret = cma_modify_qp_err(id_priv);
 		if (ret)
 			goto out;
 		/* Initiate or respond to a disconnect. */
 		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
 			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_transport_iwarp(id->device, id->port_num)) {
 		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
-		break;
-	default:
+	} else
 		ret = -EINVAL;
-		break;
-	}
 out:
 	return ret;
 }
@@ -3359,24 +3308,13 @@  int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_join_ib_multicast(id_priv, mc);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			kref_init(&mc->mcref);
-			ret = cma_iboe_join_multicast(id_priv, mc);
-			break;
-		default:
-			ret = -EINVAL;
-		}
-		break;
-	default:
+	if (rdma_transport_iboe(id->device, id->port_num)) {
+		kref_init(&mc->mcref);
+		ret = cma_iboe_join_multicast(id_priv, mc);
+	} else if (rdma_transport_ib(id->device, id->port_num))
+		ret = cma_join_ib_multicast(id_priv, mc);
+	else
 		ret = -ENOSYS;
-		break;
-	}
 
 	if (ret) {
 		spin_lock_irq(&id_priv->lock);
@@ -3404,19 +3342,17 @@  void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
 						be16_to_cpu(mc->multicast.ib->rec.mlid));
-			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
-				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-				case IB_LINK_LAYER_INFINIBAND:
-					ib_sa_free_multicast(mc->multicast.ib);
-					kfree(mc);
-					break;
-				case IB_LINK_LAYER_ETHERNET:
-					kref_put(&mc->mcref, release_mc);
-					break;
-				default:
-					break;
-				}
-			}
+
+			/* An id bound to a cma_dev must use that same device. */
+			BUG_ON(id_priv->cma_dev->device != id->device);
+
+			if (rdma_transport_ib(id->device, id->port_num)) {
+				ib_sa_free_multicast(mc->multicast.ib);
+				kfree(mc);
+			} else if (rdma_transport_iboe(id->device,
+						       id->port_num))
+				kref_put(&mc->mcref, release_mc);
+
 			return;
 		}
 	}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 45d67e9..42c9bf6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -722,26 +722,13 @@  static ssize_t ucma_query_route(struct ucma_file *file,
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(ctx->cm_id->device,
-			ctx->cm_id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
-			break;
-		default:
-			break;
-		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+
+	if (rdma_transport_ib(ctx->cm_id->device, ctx->cm_id->port_num))
+		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+	else if (rdma_transport_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
+		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+	else if (rdma_transport_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
-		break;
-	default:
-		break;
-	}
 
 out:
 	if (copy_to_user((void __user *)(unsigned long)cmd.response,