@@ -3680,7 +3680,8 @@ static void cm_add_one(struct ib_device *ib_device)
 	u8 i;
 	enum rdma_transport_type tt;
 
-	if (!rdma_is_transport_supported(ib_device, RDMA_TRANSPORT_IB))
+	if (!rdma_is_transport_supported(ib_device, RDMA_TRANSPORT_IB) &&
+	    !rdma_is_transport_supported(ib_device, RDMA_TRANSPORT_RDMAOE))
 		return;
 
 	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
@@ -3702,7 +3703,7 @@ static void cm_add_one(struct ib_device *ib_device)
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
 		tt = rdma_port_get_transport(ib_device, i);
-		if (tt != RDMA_TRANSPORT_IB)
+		if (tt != RDMA_TRANSPORT_IB && tt != RDMA_TRANSPORT_RDMAOE)
 			continue;
 
 		port = kzalloc(sizeof *port, GFP_KERNEL);
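
The two hunks above gate cm_add_one() on the transport type at two levels: the device is skipped only if it supports neither RDMA_TRANSPORT_IB nor RDMA_TRANSPORT_RDMAOE, and inside the registration loop any port whose transport is neither is skipped individually. The following standalone user-space sketch models that selection logic only; the enum values and the helpers device_supports() and port_transport() are simplified stand-ins for the rdma_is_transport_supported() and rdma_port_get_transport() helpers used by this series, not the kernel APIs themselves.

/* Standalone model of the cm_add_one() gating logic above.
 * The enum and both helpers are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_RDMAOE,	/* RDMA over Ethernet, added by this series */
};

struct fake_device {
	int phys_port_cnt;
	enum rdma_transport_type port_tt[8];	/* per-port transport, ports are 1-based */
};

/* Stand-in for rdma_port_get_transport(device, port_num). */
static enum rdma_transport_type port_transport(const struct fake_device *dev, int port)
{
	return dev->port_tt[port];
}

/* Stand-in for rdma_is_transport_supported(): true if any port matches tt. */
static bool device_supports(const struct fake_device *dev, enum rdma_transport_type tt)
{
	int i;

	for (i = 1; i <= dev->phys_port_cnt; i++)
		if (port_transport(dev, i) == tt)
			return true;
	return false;
}

static void model_cm_add_one(const struct fake_device *dev)
{
	int i;

	/* Device-level gate: bail out only if neither IB nor RDMAoE is present. */
	if (!device_supports(dev, RDMA_TRANSPORT_IB) &&
	    !device_supports(dev, RDMA_TRANSPORT_RDMAOE)) {
		printf("device skipped entirely\n");
		return;
	}

	/* Port-level gate: register only IB and RDMAoE ports. */
	for (i = 1; i <= dev->phys_port_cnt; i++) {
		enum rdma_transport_type tt = port_transport(dev, i);

		if (tt != RDMA_TRANSPORT_IB && tt != RDMA_TRANSPORT_RDMAOE)
			continue;
		printf("port %d registered with the CM\n", i);
	}
}

int main(void)
{
	struct fake_device dev = {
		.phys_port_cnt = 3,
		.port_tt = { 0, RDMA_TRANSPORT_IB, RDMA_TRANSPORT_IWARP,
			     RDMA_TRANSPORT_RDMAOE },
	};

	model_cm_add_one(&dev);
	return 0;
}

With the example device above, the device-level check passes (ports 1 and 3 qualify) and the loop registers ports 1 and 3 while skipping the iWARP port 2, mirroring what the patched cm_add_one() does.
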
@@ -1241,13 +1241,19 @@ static void ib_ucm_add_one(struct ib_device *device)
 {
 	struct ib_ucm_device *ucm_dev;
 	int i;
+	enum rdma_transport_type tt;
 
 	if (!device->alloc_ucontext)
 		return;
 
-	for (i = 1; i <= device->phys_port_cnt; ++i)
-		if (rdma_port_get_transport(device, i) != RDMA_TRANSPORT_IB)
-			return;
+	for (i = 1; i <= device->phys_port_cnt; ++i) {
+		tt = rdma_port_get_transport(device, i);
+		if (tt == RDMA_TRANSPORT_IB || tt == RDMA_TRANSPORT_RDMAOE)
+			break;
+	}
+
+	if (i > device->phys_port_cnt)
+		return;
 
 	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
 	if (!ucm_dev)
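
The ib_ucm_add_one() hunk inverts the original test: instead of returning as soon as any port is not IB, the loop now breaks on the first port that is IB or RDMAoE, and i > device->phys_port_cnt after the loop means no port qualified, so the device is skipped. Below is a minimal sketch of that find-first-match idiom in plain C, with a hypothetical port_transport() standing in for rdma_port_get_transport().

/* Minimal illustration of the "break on first match, then test the loop
 * index" idiom used in ib_ucm_add_one() above. port_transport() is a
 * hypothetical stand-in for rdma_port_get_transport().
 */
#include <stdio.h>

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_RDMAOE,
};

static enum rdma_transport_type port_transport(int port)
{
	/* Pretend port 2 is an RDMAoE port and port 1 is iWARP. */
	return port == 2 ? RDMA_TRANSPORT_RDMAOE : RDMA_TRANSPORT_IWARP;
}

int main(void)
{
	const int phys_port_cnt = 2;
	enum rdma_transport_type tt;
	int i;

	for (i = 1; i <= phys_port_cnt; ++i) {
		tt = port_transport(i);
		if (tt == RDMA_TRANSPORT_IB || tt == RDMA_TRANSPORT_RDMAOE)
			break;		/* found a usable port */
	}

	if (i > phys_port_cnt) {
		/* Loop ran to completion: no IB or RDMAoE port, skip the device. */
		printf("no usable port, device skipped\n");
		return 0;
	}

	printf("first usable port is %d\n", i);
	return 0;
}
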