diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1342,7 +1342,7 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
IB_DEVICE_MEM_MGT_EXTENSIONS |
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
+ IB_DEVICE_RDMA_NETDEV_OPA;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -525,15 +525,18 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
*
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num)
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;

if (rdi->driver_f.alloc_qpn)
return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
@@ -553,7 +556,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
}

qpn = qpt->last + qpt->incr;
- if (qpn >= RVT_QPN_MAX)
+ if (qpn >= max_qpn)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
offset = qpn & RVT_BITS_PER_PAGE_MASK;
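
The new max_qpn cap keeps every QPN handed out for a netdev QP inside a 24-bit suffix space, so that rvt_create_qp() can later OR an 8-bit prefix on top without colliding with allocator bits. For reference, a sketch of the RVT_AIP_* layout these hunks rely on, reconstructed from the rdmavt headers this series touches (include/rdma/rdmavt_qp.h); treat the exact values as assumptions, not part of this hunk:

    /* AIP (Accelerated IP) QPN layout: prefix 0x81 in bits 31..24,
     * 24-bit suffix below it.  alloc_qpn() stays under RVT_AIP_QPN_MAX
     * so the prefix can be OR'ed in later without touching suffix bits.
     */
    #define RVT_AIP_QP_PREFIX       0x81
    #define RVT_AIP_QP_SUFFIX       0xffffff
    #define RVT_AIP_QP_PREFIX_MASK  0xff000000
    #define RVT_AIP_QP_PREFIX_SHIFT 24
    #define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << RVT_AIP_QP_PREFIX_SHIFT)
    #define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
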
@@ -987,6 +990,9 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
struct rvt_qpn_map *map;

+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
if (map->page)
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
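
The free path mirrors the allocator: an AIP QPN is masked back to its suffix before indexing the bitmap, so the exact bit set by alloc_qpn() is the one cleared. A minimal sketch of that step, assuming the RVT_AIP_* values above (hypothetical helper, not in the patch):

    static inline u32 aip_qpn_to_suffix(u32 qpn)
    {
            /* e.g. 0x81000007 -> 0x00000007; ordinary QPNs pass through. */
            if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
                    return qpn & RVT_AIP_QP_SUFFIX;
            return qpn;
    }
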
@@ -1074,13 +1080,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
size_t sqsize;
+ u8 exclude_prefix = 0;

if (!rdi)
return ERR_PTR(-EINVAL);

if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags)
+ (init_attr->create_flags &&
+ init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
return ERR_PTR(-EINVAL);

/* Check receive queue parameters if no SRQ is specified. */
@@ -1199,14 +1207,20 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_driver_priv;
}

+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num,
+ exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
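
Taken together, the rvt_create_qp() changes do three things: IB_QP_CREATE_NETDEV_USE becomes the only accepted create flag, it selects the capped AIP range in alloc_qpn(), and the AIP prefix is OR'ed onto the suffix that alloc_qpn() returns. A sketch of the composition step in isolation (illustration only; the value 0x123 is an assumed allocator result):

    static u32 make_aip_qpn(u32 suffix)
    {
            /* e.g. suffix 0x123 -> QPN 0x81000123 */
            return suffix | RVT_AIP_QP_BASE;
    }
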
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,6 +206,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;

+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
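
On the ULP side this stays an opt-in handshake: ipoib requests the accelerated QP only when the device advertises IB_DEVICE_RDMA_NETDEV_OPA. Condensed into one hypothetical helper (assumes a valid PD and CQ; the queue depths are illustrative and the function name is not in-tree):

    static struct ib_qp *create_netdev_ud_qp(struct ib_pd *pd, struct ib_cq *cq)
    {
            struct ib_qp_init_attr init_attr = {
                    .send_cq      = cq,
                    .recv_cq      = cq,
                    .cap          = {
                            .max_send_wr  = 64,
                            .max_recv_wr  = 64,
                            .max_send_sge = 1,
                            .max_recv_sge = 1,
                    },
                    .sq_sig_type  = IB_SIGNAL_ALL_WR,
                    .qp_type      = IB_QPT_UD,
                    .create_flags = IB_QP_CREATE_NETDEV_USE,
            };

            return ib_create_qp(pd, &init_attr);
    }
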
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -305,7 +305,7 @@ enum ib_device_cap_flags {
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
/* The device supports padding incoming writes to cacheline. */
IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
@@ -1111,7 +1111,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
- /* FREE = 1 << 7, */
+ IB_QP_CREATE_NETDEV_USE = 1 << 7,
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -1,7 +1,7 @@
#ifndef _OPA_VNIC_H
#define _OPA_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -132,7 +132,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{
return !!(device->attrs.device_cap_flags &
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC);
+ IB_DEVICE_RDMA_NETDEV_OPA);
}

#endif /* _OPA_VNIC_H */