[for-next,v7,10/19] rdma_rxe: Add support for ibv_query_device_ex

Message ID: 20201001174847.4268-11-rpearson@hpe.com
State: Changes Requested
Series: rdma_rxe: API extensions

Commit Message

Bob Pearson Oct. 1, 2020, 5:48 p.m. UTC
Add code to initialize the new struct members in
ib_device_attr as placeholders.

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe.c       | 101 ++++++++++++++++++--------
 drivers/infiniband/sw/rxe/rxe_verbs.c |   7 +-
 2 files changed, 75 insertions(+), 33 deletions(-)
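
For reference, once IB_USER_VERBS_EX_CMD_QUERY_DEVICE is advertised (see the rxe_verbs.c hunk below), userspace can exercise the extended query through libibverbs roughly as follows. This is a minimal sketch, not part of the patch: device selection and most error handling are omitted, and the printed fields are just examples.

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs;
	struct ibv_context *ctx;
	struct ibv_device_attr_ex attr = {};

	devs = ibv_get_device_list(NULL);
	if (!devs || !devs[0])
		return 1;

	ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;

	/* a NULL input selects the default extended query */
	if (!ibv_query_device_ex(ctx, NULL, &attr))
		printf("max_qp=%d completion_timestamp_mask=%llu\n",
		       attr.orig_attr.max_qp,
		       (unsigned long long)attr.completion_timestamp_mask);

	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}

Build with -libverbs. Against rxe, the extended attributes that this patch only zero-fills should read back as 0.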

Comments

Jason Gunthorpe Oct. 3, 2020, 11:21 p.m. UTC | #1
On Thu, Oct 01, 2020 at 12:48:38PM -0500, Bob Pearson wrote:
> Add code to initialize the new struct members in
> ib_device_attr as placeholders.
> 
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe.c       | 101 ++++++++++++++++++--------
>  drivers/infiniband/sw/rxe/rxe_verbs.c |   7 +-
>  2 files changed, 75 insertions(+), 33 deletions(-)

This series should eliminate this patch and notably change the others

https://patchwork.kernel.org/project/linux-rdma/list/?series=359361

Can you take a look and check that it works for this?

Thanks,
Jason
Bob Pearson Oct. 5, 2020, 2:42 p.m. UTC | #2
On 10/3/20 6:21 PM, Jason Gunthorpe wrote:
> On Thu, Oct 01, 2020 at 12:48:38PM -0500, Bob Pearson wrote:
>> Add code to initialize the new struct members in
>> ib_device_attr as placeholders.
>>
>> Signed-off-by: Bob Pearson <rpearson@hpe.com>
>> ---
>>  drivers/infiniband/sw/rxe/rxe.c       | 101 ++++++++++++++++++--------
>>  drivers/infiniband/sw/rxe/rxe_verbs.c |   7 +-
>>  2 files changed, 75 insertions(+), 33 deletions(-)
> 
> This series should eliminate this patch and notably change the others
> 
> https://patchwork.kernel.org/project/linux-rdma/list/?series=359361
> 
> Can you take a look and check that it works for this?
> 
> Thanks,
> Jason
> 

I'll take a look. Let me know if/when you take it into for-next; it should be easy to adjust for these.
Currently I am working on improving performance. I have replaced tasklets with work queues. Low QP
count performance does not change much, but it should help high QP count performance. At the moment I
am trying to optimize UD since it is a lot simpler. Once that is done I want to look at core affinity.

Bob
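
As an aside on the tasklet-to-workqueue change Bob mentions: such a conversion usually follows the generic pattern below. This is only a sketch of the kernel APIs involved, not the actual rxe changes; the my_* names are illustrative stand-ins for whatever the real task structure and handler are.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* before: deferred completion processing runs in softirq context via a tasklet */
struct my_tasklet_ctx {
	struct tasklet_struct tasklet;
};

static void my_tasklet_fn(unsigned long data)
{
	struct my_tasklet_ctx *ctx = (struct my_tasklet_ctx *)data;

	/* ... drain the queue; cannot sleep here ... */
	(void)ctx;
}

static void my_tasklet_setup(struct my_tasklet_ctx *ctx)
{
	tasklet_init(&ctx->tasklet, my_tasklet_fn, (unsigned long)ctx);
}

static void my_tasklet_kick(struct my_tasklet_ctx *ctx)
{
	tasklet_schedule(&ctx->tasklet);
}

/* after: the same processing runs in process context from a workqueue */
struct my_work_ctx {
	struct work_struct work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_work_ctx *ctx = container_of(work, struct my_work_ctx, work);

	/* ... drain the queue; may sleep ... */
	(void)ctx;
}

static void my_work_setup(struct my_work_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_work_fn);
}

static void my_work_kick(struct my_work_ctx *ctx)
{
	queue_work(system_highpri_wq, &ctx->work);
}

Work items can also be queued to a specific CPU with queue_work_on(), which is one way to approach the core affinity experiment mentioned above.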

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8e0f9c489cab..ecc61f960c58 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -40,40 +40,77 @@  void rxe_dealloc(struct ib_device *ib_dev)
 /* initialize rxe device parameters */
 static void rxe_init_device_param(struct rxe_dev *rxe)
 {
-	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;
-
-	rxe->attr.vendor_id			= RXE_VENDOR_ID;
-	rxe->attr.max_mr_size			= RXE_MAX_MR_SIZE;
-	rxe->attr.page_size_cap			= RXE_PAGE_SIZE_CAP;
-	rxe->attr.max_qp			= RXE_MAX_QP;
-	rxe->attr.max_qp_wr			= RXE_MAX_QP_WR;
-	rxe->attr.device_cap_flags		= RXE_DEVICE_CAP_FLAGS;
-	rxe->attr.max_send_sge			= RXE_MAX_SGE;
-	rxe->attr.max_recv_sge			= RXE_MAX_SGE;
-	rxe->attr.max_sge_rd			= RXE_MAX_SGE_RD;
-	rxe->attr.max_cq			= RXE_MAX_CQ;
-	rxe->attr.max_cqe			= (1 << RXE_MAX_LOG_CQE) - 1;
-	rxe->attr.max_mr			= RXE_MAX_MR;
-	rxe->attr.max_mw			= RXE_MAX_MW;
-	rxe->attr.max_pd			= RXE_MAX_PD;
-	rxe->attr.max_qp_rd_atom		= RXE_MAX_QP_RD_ATOM;
-	rxe->attr.max_res_rd_atom		= RXE_MAX_RES_RD_ATOM;
-	rxe->attr.max_qp_init_rd_atom		= RXE_MAX_QP_INIT_RD_ATOM;
-	rxe->attr.atomic_cap			= IB_ATOMIC_HCA;
-	rxe->attr.max_mcast_grp			= RXE_MAX_MCAST_GRP;
-	rxe->attr.max_mcast_qp_attach		= RXE_MAX_MCAST_QP_ATTACH;
-	rxe->attr.max_total_mcast_qp_attach	= RXE_MAX_TOT_MCAST_QP_ATTACH;
-	rxe->attr.max_ah			= RXE_MAX_AH;
-	rxe->attr.max_srq			= RXE_MAX_SRQ;
-	rxe->attr.max_srq_wr			= RXE_MAX_SRQ_WR;
-	rxe->attr.max_srq_sge			= RXE_MAX_SRQ_SGE;
-	rxe->attr.max_fast_reg_page_list_len	= RXE_MAX_FMR_PAGE_LIST_LEN;
-	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
-	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;
-	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
-			rxe->ndev->dev_addr);
+	struct ib_device_attr *a = &rxe->attr;
 
+	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;
 	rxe->max_ucontext			= RXE_MAX_UCONTEXT;
+
+	a->atomic_cap				= IB_ATOMIC_HCA;
+	a->cq_caps.max_cq_moderation_count	= 0;
+	a->cq_caps.max_cq_moderation_period	= 0;
+	a->device_cap_flags			= RXE_DEVICE_CAP_FLAGS;
+	a->fw_ver				= 0;
+	a->hca_core_clock			= 0;
+	a->hw_ver				= 0;
+	a->local_ca_ack_delay			= RXE_LOCAL_CA_ACK_DELAY;
+	a->masked_atomic_cap			= 0;
+	a->max_ah				= RXE_MAX_AH;
+	a->max_cqe				= (1 << RXE_MAX_LOG_CQE) - 1;
+	a->max_cq				= RXE_MAX_CQ;
+	a->max_dm_size				= 0;
+	a->max_ee_init_rd_atom			= 0;
+	a->max_ee				= 0;
+	a->max_ee_rd_atom			= 0;
+	a->max_fast_reg_page_list_len		= RXE_MAX_FMR_PAGE_LIST_LEN;
+	a->max_mcast_grp			= RXE_MAX_MCAST_GRP;
+	a->max_mcast_qp_attach			= RXE_MAX_MCAST_QP_ATTACH;
+	a->max_mr				= RXE_MAX_MR;
+	a->max_mr_size				= RXE_MAX_MR_SIZE;
+	a->max_mw				= RXE_MAX_MW;
+	a->max_pd				= RXE_MAX_PD;
+	a->max_pi_fast_reg_page_list_len	= 0;
+	a->max_pkeys				= RXE_MAX_PKEYS;
+	a->max_qp_init_rd_atom			= RXE_MAX_QP_INIT_RD_ATOM;
+	a->max_qp_rd_atom			= RXE_MAX_QP_RD_ATOM;
+	a->max_qp				= RXE_MAX_QP;
+	a->max_qp_wr				= RXE_MAX_QP_WR;
+	a->max_raw_ethy_qp			= 0;
+	a->max_raw_ipv6_qp			= 0;
+	a->max_rdd				= 0;
+	a->max_recv_sge				= RXE_MAX_SGE;
+	a->max_res_rd_atom			= RXE_MAX_RES_RD_ATOM;
+	a->max_send_sge				= RXE_MAX_SGE;
+	a->max_sge_rd				= RXE_MAX_SGE_RD;
+	a->max_sgl_rd				= 0;
+	a->max_srq				= RXE_MAX_SRQ;
+	a->max_srq_sge				= RXE_MAX_SRQ_SGE;
+	a->max_srq_wr				= RXE_MAX_SRQ_WR;
+	a->max_total_mcast_qp_attach		= RXE_MAX_TOT_MCAST_QP_ATTACH;
+	a->max_wq_type_rq			= 0;
+	a->odp_caps.general_caps		= 0;
+	a->odp_caps.per_transport_caps.rc_odp_caps = 0;
+	a->odp_caps.per_transport_caps.uc_odp_caps = 0;
+	a->odp_caps.per_transport_caps.ud_odp_caps = 0;
+	a->odp_caps.per_transport_caps.xrc_odp_caps = 0;
+	a->page_size_cap			= RXE_PAGE_SIZE_CAP;
+	a->raw_packet_caps			= 0;
+	a->rss_caps.supported_qpts		= 0;
+	a->rss_caps.max_rwq_indirection_tables	= 0;
+	a->rss_caps.max_rwq_indirection_table_size = 0;
+	a->sig_guard_cap			= 0;
+	a->sig_prot_cap				= 0;
+	a->sys_image_guid			= 0;
+	a->timestamp_mask			= 0;
+	a->tm_caps.max_rndv_hdr_size		= 0;
+	a->tm_caps.max_num_tags			= 0;
+	a->tm_caps.flags			= 0;
+	a->tm_caps.max_ops			= 0;
+	a->tm_caps.max_sge			= 0;
+	a->vendor_id				= RXE_VENDOR_ID;
+	a->vendor_part_id			= 0;
+
+	addrconf_addr_eui48((unsigned char *)&a->sys_image_guid,
+			    rxe->ndev->dev_addr);
 }
 
 /* initialize port attributes */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 807c9a3b22ea..2695b286cd8e 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1150,7 +1150,8 @@  int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 	dma_coerce_mask_and_coherent(&dev->dev,
 				     dma_get_required_mask(&dev->dev));
 
-	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
+	dev->uverbs_cmd_mask =
+	      BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
 	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
 	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
 	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
@@ -1185,6 +1186,10 @@  int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW)
 	    ;
 
+	dev->uverbs_ex_cmd_mask =
+	      BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE)
+	    ;
+
 	ib_set_device_ops(dev, &rxe_dev_ops);
 	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
 	if (err)