From f890782127b7febb11f90a1733a8220bcc944c47 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang@profitbricks.com>
Date: Tue, 6 Dec 2016 09:01:04 +0100
Subject: [PATCH] cma: resolve to first active ib_port
When resolving an address without a source address being given, the CMA core
will try to resolve to an IB device on the local host. The current logic
only checks whether a port has the same subnet_prefix, which is not
sufficient when the default well-known GID is used; we should also check
that the port is active.
v2: cache the port active state in cma_add_one; also register an event
handler to track port state events.
Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
---
drivers/infiniband/core/cma.c | 42 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 41 insertions(+), 1 deletion(-)
@@ -151,10 +151,12 @@ static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
struct cma_device {
struct list_head list;
struct ib_device *device;
+ struct ib_event_handler event_handler;
struct completion comp;
atomic_t refcount;
struct list_head id_list;
enum ib_gid_type *default_gid_type;
+ int *port_active;
};
struct rdma_bind_list {
@@ -692,7 +694,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
}
if (!cma_dev && (gid.global.subnet_prefix ==
- dgid->global.subnet_prefix)) {
+ dgid->global.subnet_prefix) &&
+ cur_dev->port_active[p]) {
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
@@ -4170,17 +4173,43 @@ static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
+static void cma_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ struct cma_device *cma_dev =
+ container_of(handler, typeof(*cma_dev), event_handler);
+ u8 port = event->element.port_num;
+
+ /* we're only interested in port Up/Down events */
+ if ( event->event != IB_EVENT_PORT_ACTIVE &&
+ event->event != IB_EVENT_PORT_ERR)
+ return;
+
+ /* cache the state of the port */
+ if (event->event == IB_EVENT_PORT_ACTIVE)
+ cma_dev->port_active[port] = 1;
+ else
+ cma_dev->port_active[port] = 0;
+}
+
static void cma_add_one(struct ib_device *device)
{
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
unsigned int i;
unsigned long supported_gids = 0;
+ struct ib_port_attr port_attr;
cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
if (!cma_dev)
return;
+ cma_dev->port_active = kmalloc(sizeof (*cma_dev->port_active) *
+ (device->phys_port_cnt + 1), GFP_KERNEL);
+ if (!cma_dev->port_active) {
+ kfree(cma_dev);
+ return;
+ }
cma_dev->device = device;
cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_gid_type),
@@ -4194,12 +4223,21 @@ static void cma_add_one(struct ib_device *device)
WARN_ON(!supported_gids);
cma_dev->default_gid_type[i - rdma_start_port(device)] =
find_first_bit(&supported_gids, BITS_PER_LONG);
+ if (!ib_query_port(cma_dev->device, i, &port_attr)) {
+ cma_dev->port_active[i] =
+ port_attr.state == IB_PORT_ACTIVE ? 1 : 0;
+ } else
+ cma_dev->port_active[i] = 0;
}
init_completion(&cma_dev->comp);
atomic_set(&cma_dev->refcount, 1);
INIT_LIST_HEAD(&cma_dev->id_list);
ib_set_client_data(device, &cma_client, cma_dev);
+ INIT_IB_EVENT_HANDLER(&cma_dev->event_handler, device,
+ cma_event_handler);
+ if (ib_register_event_handler(&cma_dev->event_handler))
+ pr_warn("fail to register event handler\n");
mutex_lock(&lock);
list_add_tail(&cma_dev->list, &dev_list);
@@ -4269,12 +4307,14 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
if (!cma_dev)
return;
+ ib_unregister_event_handler(&cma_dev->event_handler);
mutex_lock(&lock);
list_del(&cma_dev->list);
mutex_unlock(&lock);
cma_process_remove(cma_dev);
kfree(cma_dev->default_gid_type);
+ kfree(cma_dev->port_active);
kfree(cma_dev);
}
--
2.7.4