diff mbox series

[v7,4/5] RDMA/mana_ib: Query adapter capabilities

Message ID 1697494322-26814-5-git-send-email-sharmaajay@linuxonhyperv.com (mailing list archive)
State Not Applicable
Headers show
Series RDMA/mana_ib | expand

Checks

Context Check Description
netdev/tree_selection success Not a local patch

Commit Message

sharmaajay@linuxonhyperv.com Oct. 16, 2023, 10:12 p.m. UTC
From: Ajay Sharma <sharmaajay@microsoft.com>

Query the adapter capabilities to expose them to
other clients and the VF. The queried limits are
checked against user-supplied values and protect
against overflows.

Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
---
 drivers/infiniband/hw/mana/device.c  |  4 ++
 drivers/infiniband/hw/mana/main.c    | 67 ++++++++++++++++++++++------
 drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
 3 files changed, 110 insertions(+), 14 deletions(-)

Comments

Kalesh Anakkur Purayil Oct. 17, 2023, 4:31 a.m. UTC | #1
Hi Ajay,

One comment in line.

Regards,
Kalesh

On Tue, Oct 17, 2023 at 3:42 AM <sharmaajay@linuxonhyperv.com> wrote:

> From: Ajay Sharma <sharmaajay@microsoft.com>
>
> Query the adapter capabilities to expose to
> other clients and VF. This checks against
> the user supplied values and protects against
> overflows.
>
> Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
> ---
>  drivers/infiniband/hw/mana/device.c  |  4 ++
>  drivers/infiniband/hw/mana/main.c    | 67 ++++++++++++++++++++++------
>  drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
>  3 files changed, 110 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index 4077e440657a..e15da43c73a0 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
>                 goto free_error_eq;
>         }
>
> +       ret = mana_ib_query_adapter_caps(mib_dev);
> +       if (ret)
> +               ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use
> defaults");
>
[Kalesh]: You are ignoring the failure here and continuing with the IB
register. When the FW command fails, you won't populate the
"mib_dev->adapter_caps". Subsequent "mana_ib_query_device" may return stale
values?
Is that what you want?

> +
>         ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
>                                  mdev->gdma_context->dev);
>         if (ret)
> diff --git a/drivers/infiniband/hw/mana/main.c
> b/drivers/infiniband/hw/mana/main.c
> index 5b5d7abe79ac..82923475267d 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -469,20 +469,15 @@ int mana_ib_get_port_immutable(struct ib_device
> *ibdev, u32 port_num,
>  int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr
> *props,
>                          struct ib_udata *uhw)
>  {
> -       props->max_qp = MANA_MAX_NUM_QUEUES;
> -       props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
> +       struct mana_ib_dev *mib_dev = container_of(ibdev,
> +                       struct mana_ib_dev, ib_dev);
>
> -       /*
> -        * max_cqe could be potentially much bigger.
> -        * As this version of driver only support RAW QP, set it to the
> same
> -        * value as max_qp_wr
> -        */
> -       props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> -       props->max_mr_size = MANA_IB_MAX_MR_SIZE;
> -       props->max_mr = MANA_IB_MAX_MR;
> -       props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
> -       props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
> +       props->max_qp = mib_dev->adapter_caps.max_qp_count;
> +       props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
> +       props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
> +       props->max_mr = mib_dev->adapter_caps.max_mr_count;
> +       props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
> +       props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
>
>         return 0;
>  }
> @@ -601,3 +596,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev
> *mib_dev)
>
>         return 0;
>  }
> +
> +static void assign_caps(struct mana_ib_adapter_caps *caps,
> +                       struct mana_ib_query_adapter_caps_resp *resp)
> +{
> +       caps->max_sq_id = resp->max_sq_id;
> +       caps->max_rq_id = resp->max_rq_id;
> +       caps->max_cq_id = resp->max_cq_id;
> +       caps->max_qp_count = resp->max_qp_count;
> +       caps->max_cq_count = resp->max_cq_count;
> +       caps->max_mr_count = resp->max_mr_count;
> +       caps->max_pd_count = resp->max_pd_count;
> +       caps->max_inbound_read_limit = resp->max_inbound_read_limit;
> +       caps->max_outbound_read_limit = resp->max_outbound_read_limit;
> +       caps->mw_count = resp->mw_count;
> +       caps->max_srq_count = resp->max_srq_count;
> +       caps->max_requester_sq_size = resp->max_requester_sq_size;
> +       caps->max_responder_sq_size = resp->max_responder_sq_size;
> +       caps->max_requester_rq_size = resp->max_requester_rq_size;
> +       caps->max_responder_rq_size = resp->max_responder_rq_size;
> +       caps->max_send_wqe_size = resp->max_send_wqe_size;
> +       caps->max_recv_wqe_size = resp->max_recv_wqe_size;
> +       caps->max_inline_data_size = resp->max_inline_data_size;
> +}
> +
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
> +{
> +       struct mana_ib_query_adapter_caps_resp resp = {};
> +       struct mana_ib_query_adapter_caps_req req = {};
> +       int err;
> +
> +       mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP,
> sizeof(req),
> +                            sizeof(resp));
> +       req.hdr.resp.msg_version = MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3;
> +       req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
> +
> +       err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
> +                                  sizeof(resp), &resp);
> +
> +       if (err) {
> +               ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps
> err %d", err);
> +               return err;
> +       }
> +
> +       assign_caps(&mib_dev->adapter_caps, &resp);
> +       return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 8a652bccd978..6b9406738cb2 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -20,19 +20,41 @@
>
>  /* MANA doesn't have any limit for MR size */
>  #define MANA_IB_MAX_MR_SIZE    U64_MAX
> -
> +#define MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3 3
>  /*
>   * The hardware limit of number of MRs is greater than maximum number of
> MRs
>   * that can possibly represent in 24 bits
>   */
>  #define MANA_IB_MAX_MR         0xFFFFFFu
>
> +struct mana_ib_adapter_caps {
> +       u32 max_sq_id;
> +       u32 max_rq_id;
> +       u32 max_cq_id;
> +       u32 max_qp_count;
> +       u32 max_cq_count;
> +       u32 max_mr_count;
> +       u32 max_pd_count;
> +       u32 max_inbound_read_limit;
> +       u32 max_outbound_read_limit;
> +       u32 mw_count;
> +       u32 max_srq_count;
> +       u32 max_requester_sq_size;
> +       u32 max_responder_sq_size;
> +       u32 max_requester_rq_size;
> +       u32 max_responder_rq_size;
> +       u32 max_send_wqe_size;
> +       u32 max_recv_wqe_size;
> +       u32 max_inline_data_size;
> +};
> +
>  struct mana_ib_dev {
>         struct ib_device ib_dev;
>         struct gdma_dev *gdma_dev;
>         struct gdma_context *gc;
>         struct gdma_queue *fatal_err_eq;
>         mana_handle_t adapter_handle;
> +       struct mana_ib_adapter_caps adapter_caps;
>  };
>
>  struct mana_ib_wq {
> @@ -96,6 +118,7 @@ struct mana_ib_rwq_ind_table {
>  };
>
>  enum mana_ib_command_code {
> +       MANA_IB_GET_ADAPTER_CAP = 0x30001,
>         MANA_IB_CREATE_ADAPTER  = 0x30002,
>         MANA_IB_DESTROY_ADAPTER = 0x30003,
>  };
> @@ -120,6 +143,32 @@ struct mana_ib_destroy_adapter_resp {
>         struct gdma_resp_hdr hdr;
>  }; /* HW Data */
>
> +struct mana_ib_query_adapter_caps_req {
> +       struct gdma_req_hdr hdr;
> +}; /*HW Data */
> +
> +struct mana_ib_query_adapter_caps_resp {
> +       struct gdma_resp_hdr hdr;
> +       u32 max_sq_id;
> +       u32 max_rq_id;
> +       u32 max_cq_id;
> +       u32 max_qp_count;
> +       u32 max_cq_count;
> +       u32 max_mr_count;
> +       u32 max_pd_count;
> +       u32 max_inbound_read_limit;
> +       u32 max_outbound_read_limit;
> +       u32 mw_count;
> +       u32 max_srq_count;
> +       u32 max_requester_sq_size;
> +       u32 max_responder_sq_size;
> +       u32 max_requester_rq_size;
> +       u32 max_responder_rq_size;
> +       u32 max_send_wqe_size;
> +       u32 max_recv_wqe_size;
> +       u32 max_inline_data_size;
> +}; /* HW Data */
> +
>  int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
>                                  struct ib_umem *umem,
>                                  mana_handle_t *gdma_region);
> @@ -194,4 +243,6 @@ int mana_ib_create_adapter(struct mana_ib_dev
> *mib_dev);
>
>  int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
>
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
> +
>  #endif
> --
> 2.25.1
>
>
>
Kalesh Anakkur Purayil Oct. 18, 2023, 4:10 a.m. UTC | #2
On Tue, Oct 17, 2023 at 11:17 AM Ajay Sharma <sharmaajay@microsoft.com>
wrote:

>
>
> On Oct 16, 2023, at 9:32 PM, Kalesh Anakkur Purayil <
> kalesh-anakkur.purayil@broadcom.com> wrote:
>
> 
> Hi Ajay,
>
> One comment in line.
>
> Regards,
> Kalesh
>
> On Tue, Oct 17, 2023 at 3:42 AM <sharmaajay@linuxonhyperv.com> wrote:
>
>> From: Ajay Sharma <sharmaajay@microsoft.com>
>>
>> Query the adapter capabilities to expose to
>> other clients and VF. This checks against
>> the user supplied values and protects against
>> overflows.
>>
>> Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
>> ---
>>  drivers/infiniband/hw/mana/device.c  |  4 ++
>>  drivers/infiniband/hw/mana/main.c    | 67 ++++++++++++++++++++++------
>>  drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
>>  3 files changed, 110 insertions(+), 14 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/mana/device.c
>> b/drivers/infiniband/hw/mana/device.c
>> index 4077e440657a..e15da43c73a0 100644
>> --- a/drivers/infiniband/hw/mana/device.c
>> +++ b/drivers/infiniband/hw/mana/device.c
>> @@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
>>                 goto free_error_eq;
>>         }
>>
>> +       ret = mana_ib_query_adapter_caps(mib_dev);
>> +       if (ret)
>> +               ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use
>> defaults");
>>
> [Kalesh]: You are ignoring the failure here and continuing with the IB
> register. When the FW command fails, you won't populate the
> "mib_dev->adapter_caps". Subsequent "mana_ib_query_device" may return stale
> values?
> Is that what you want?
>
> It will use default capabilities.
>
[Kalesh]: Maybe I am missing something here. I could not see that code
where you are initializing "mib_dev->adapter_caps" with default values.

> +
>>         ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
>>                                  mdev->gdma_context->dev);
>>         if (ret)
>> diff --git a/drivers/infiniband/hw/mana/main.c
>> b/drivers/infiniband/hw/mana/main.c
>> index 5b5d7abe79ac..82923475267d 100644
>> --- a/drivers/infiniband/hw/mana/main.c
>> +++ b/drivers/infiniband/hw/mana/main.c
>> @@ -469,20 +469,15 @@ int mana_ib_get_port_immutable(struct ib_device
>> *ibdev, u32 port_num,
>>  int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr
>> *props,
>>                          struct ib_udata *uhw)
>>  {
>> -       props->max_qp = MANA_MAX_NUM_QUEUES;
>> -       props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
>> +       struct mana_ib_dev *mib_dev = container_of(ibdev,
>> +                       struct mana_ib_dev, ib_dev);
>>
>> -       /*
>> -        * max_cqe could be potentially much bigger.
>> -        * As this version of driver only support RAW QP, set it to the
>> same
>> -        * value as max_qp_wr
>> -        */
>> -       props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
>> -
>> -       props->max_mr_size = MANA_IB_MAX_MR_SIZE;
>> -       props->max_mr = MANA_IB_MAX_MR;
>> -       props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
>> -       props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
>> +       props->max_qp = mib_dev->adapter_caps.max_qp_count;
>> +       props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
>> +       props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
>> +       props->max_mr = mib_dev->adapter_caps.max_mr_count;
>> +       props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
>> +       props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
>>
>>         return 0;
>>  }
>> @@ -601,3 +596,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev
>> *mib_dev)
>>
>>         return 0;
>>  }
>> +
>> +static void assign_caps(struct mana_ib_adapter_caps *caps,
>> +                       struct mana_ib_query_adapter_caps_resp *resp)
>> +{
>> +       caps->max_sq_id = resp->max_sq_id;
>> +       caps->max_rq_id = resp->max_rq_id;
>> +       caps->max_cq_id = resp->max_cq_id;
>> +       caps->max_qp_count = resp->max_qp_count;
>> +       caps->max_cq_count = resp->max_cq_count;
>> +       caps->max_mr_count = resp->max_mr_count;
>> +       caps->max_pd_count = resp->max_pd_count;
>> +       caps->max_inbound_read_limit = resp->max_inbound_read_limit;
>> +       caps->max_outbound_read_limit = resp->max_outbound_read_limit;
>> +       caps->mw_count = resp->mw_count;
>> +       caps->max_srq_count = resp->max_srq_count;
>> +       caps->max_requester_sq_size = resp->max_requester_sq_size;
>> +       caps->max_responder_sq_size = resp->max_responder_sq_size;
>> +       caps->max_requester_rq_size = resp->max_requester_rq_size;
>> +       caps->max_responder_rq_size = resp->max_responder_rq_size;
>> +       caps->max_send_wqe_size = resp->max_send_wqe_size;
>> +       caps->max_recv_wqe_size = resp->max_recv_wqe_size;
>> +       caps->max_inline_data_size = resp->max_inline_data_size;
>> +}
>> +
>> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
>> +{
>> +       struct mana_ib_query_adapter_caps_resp resp = {};
>> +       struct mana_ib_query_adapter_caps_req req = {};
>> +       int err;
>> +
>> +       mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP,
>> sizeof(req),
>> +                            sizeof(resp));
>> +       req.hdr.resp.msg_version = MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3;
>> +       req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
>> +
>> +       err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
>> +                                  sizeof(resp), &resp);
>> +
>> +       if (err) {
>> +               ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps
>> err %d", err);
>> +               return err;
>> +       }
>> +
>> +       assign_caps(&mib_dev->adapter_caps, &resp);
>> +       return 0;
>> +}
>> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
>> b/drivers/infiniband/hw/mana/mana_ib.h
>> index 8a652bccd978..6b9406738cb2 100644
>> --- a/drivers/infiniband/hw/mana/mana_ib.h
>> +++ b/drivers/infiniband/hw/mana/mana_ib.h
>> @@ -20,19 +20,41 @@
>>
>>  /* MANA doesn't have any limit for MR size */
>>  #define MANA_IB_MAX_MR_SIZE    U64_MAX
>> -
>> +#define MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3 3
>>  /*
>>   * The hardware limit of number of MRs is greater than maximum number of
>> MRs
>>   * that can possibly represent in 24 bits
>>   */
>>  #define MANA_IB_MAX_MR         0xFFFFFFu
>>
>> +struct mana_ib_adapter_caps {
>> +       u32 max_sq_id;
>> +       u32 max_rq_id;
>> +       u32 max_cq_id;
>> +       u32 max_qp_count;
>> +       u32 max_cq_count;
>> +       u32 max_mr_count;
>> +       u32 max_pd_count;
>> +       u32 max_inbound_read_limit;
>> +       u32 max_outbound_read_limit;
>> +       u32 mw_count;
>> +       u32 max_srq_count;
>> +       u32 max_requester_sq_size;
>> +       u32 max_responder_sq_size;
>> +       u32 max_requester_rq_size;
>> +       u32 max_responder_rq_size;
>> +       u32 max_send_wqe_size;
>> +       u32 max_recv_wqe_size;
>> +       u32 max_inline_data_size;
>> +};
>> +
>>  struct mana_ib_dev {
>>         struct ib_device ib_dev;
>>         struct gdma_dev *gdma_dev;
>>         struct gdma_context *gc;
>>         struct gdma_queue *fatal_err_eq;
>>         mana_handle_t adapter_handle;
>> +       struct mana_ib_adapter_caps adapter_caps;
>>  };
>>
>>  struct mana_ib_wq {
>> @@ -96,6 +118,7 @@ struct mana_ib_rwq_ind_table {
>>  };
>>
>>  enum mana_ib_command_code {
>> +       MANA_IB_GET_ADAPTER_CAP = 0x30001,
>>         MANA_IB_CREATE_ADAPTER  = 0x30002,
>>         MANA_IB_DESTROY_ADAPTER = 0x30003,
>>  };
>> @@ -120,6 +143,32 @@ struct mana_ib_destroy_adapter_resp {
>>         struct gdma_resp_hdr hdr;
>>  }; /* HW Data */
>>
>> +struct mana_ib_query_adapter_caps_req {
>> +       struct gdma_req_hdr hdr;
>> +}; /*HW Data */
>> +
>> +struct mana_ib_query_adapter_caps_resp {
>> +       struct gdma_resp_hdr hdr;
>> +       u32 max_sq_id;
>> +       u32 max_rq_id;
>> +       u32 max_cq_id;
>> +       u32 max_qp_count;
>> +       u32 max_cq_count;
>> +       u32 max_mr_count;
>> +       u32 max_pd_count;
>> +       u32 max_inbound_read_limit;
>> +       u32 max_outbound_read_limit;
>> +       u32 mw_count;
>> +       u32 max_srq_count;
>> +       u32 max_requester_sq_size;
>> +       u32 max_responder_sq_size;
>> +       u32 max_requester_rq_size;
>> +       u32 max_responder_rq_size;
>> +       u32 max_send_wqe_size;
>> +       u32 max_recv_wqe_size;
>> +       u32 max_inline_data_size;
>> +}; /* HW Data */
>> +
>>  int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
>>                                  struct ib_umem *umem,
>>                                  mana_handle_t *gdma_region);
>> @@ -194,4 +243,6 @@ int mana_ib_create_adapter(struct mana_ib_dev
>> *mib_dev);
>>
>>  int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
>>
>> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
>> +
>>  #endif
>> --
>> 2.25.1
>>
>>
>>
>
> --
> Regards,
> Kalesh A P
>
>
Kalesh Anakkur Purayil Oct. 18, 2023, 5:25 p.m. UTC | #3
On Wed, Oct 18, 2023 at 10:46 PM Ajay Sharma <sharmaajay@microsoft.com>
wrote:

>
>
>
>
> *From:* Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
> *Sent:* Tuesday, October 17, 2023 9:11 PM
> *To:* Ajay Sharma <sharmaajay@microsoft.com>
> *Cc:* sharmaajay@linuxonhyperv.com; Long Li <longli@microsoft.com>; Jason
> Gunthorpe <jgg@ziepe.ca>; Leon Romanovsky <leon@kernel.org>; Dexuan Cui <
> decui@microsoft.com>; Wei Liu <wei.liu@kernel.org>; David S. Miller <
> davem@davemloft.net>; Eric Dumazet <edumazet@google.com>; Jakub Kicinski <
> kuba@kernel.org>; Paolo Abeni <pabeni@redhat.com>;
> linux-rdma@vger.kernel.org; linux-hyperv@vger.kernel.org;
> netdev@vger.kernel.org; linux-kernel@vger.kernel.org
> *Subject:* Re: [EXTERNAL] Re: [Patch v7 4/5] RDMA/mana_ib: Query adapter
> capabilities
>
>
>
>
>
>
>
> On Tue, Oct 17, 2023 at 11:17 AM Ajay Sharma <sharmaajay@microsoft.com>
> wrote:
>
>
>
>
>
> On Oct 16, 2023, at 9:32 PM, Kalesh Anakkur Purayil <
> kalesh-anakkur.purayil@broadcom.com> wrote:
>
> 
>
> Hi Ajay,
>
>
>
> One comment in line.
>
>
>
> Regards,
>
> Kalesh
>
>
>
> On Tue, Oct 17, 2023 at 3:42 AM <sharmaajay@linuxonhyperv.com> wrote:
>
> From: Ajay Sharma <sharmaajay@microsoft.com>
>
> Query the adapter capabilities to expose to
> other clients and VF. This checks against
> the user supplied values and protects against
> overflows.
>
> Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
> ---
>  drivers/infiniband/hw/mana/device.c  |  4 ++
>  drivers/infiniband/hw/mana/main.c    | 67 ++++++++++++++++++++++------
>  drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
>  3 files changed, 110 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index 4077e440657a..e15da43c73a0 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
>                 goto free_error_eq;
>         }
>
> +       ret = mana_ib_query_adapter_caps(mib_dev);
> +       if (ret)
> +               ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use
> defaults");
>
> [Kalesh]: You are ignoring the failure here and continuing with the IB
> register. When the FW command fails, you won't populate the
> "mib_dev->adapter_caps". Subsequent "mana_ib_query_device" may return stale
> values?
>
> Is that what you want?
>
> It will use default capabilities.
>
> [Kalesh]: Maybe I am missing something here. I could not see that code
> where you are initializing "mib_dev->adapter_caps" with default values.
>
>
>
> [Ajay]: If this call fails, then any subsequent call to management
> will also fail, because this call is the basis of communication with
> management. So ignoring the failure is fine in this case.
>
[Kalesh]: OK, thank you for the response.
I thought failing mana_ib_probe() would be best in this case as
mana_ib_query_device() will return some uninitialized values.

> +
>         ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
>                                  mdev->gdma_context->dev);
>         if (ret)
> diff --git a/drivers/infiniband/hw/mana/main.c
> b/drivers/infiniband/hw/mana/main.c
> index 5b5d7abe79ac..82923475267d 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -469,20 +469,15 @@ int mana_ib_get_port_immutable(struct ib_device
> *ibdev, u32 port_num,
>  int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr
> *props,
>                          struct ib_udata *uhw)
>  {
> -       props->max_qp = MANA_MAX_NUM_QUEUES;
> -       props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
> +       struct mana_ib_dev *mib_dev = container_of(ibdev,
> +                       struct mana_ib_dev, ib_dev);
>
> -       /*
> -        * max_cqe could be potentially much bigger.
> -        * As this version of driver only support RAW QP, set it to the
> same
> -        * value as max_qp_wr
> -        */
> -       props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> -       props->max_mr_size = MANA_IB_MAX_MR_SIZE;
> -       props->max_mr = MANA_IB_MAX_MR;
> -       props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
> -       props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
> +       props->max_qp = mib_dev->adapter_caps.max_qp_count;
> +       props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
> +       props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
> +       props->max_mr = mib_dev->adapter_caps.max_mr_count;
> +       props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
> +       props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
>
>         return 0;
>  }
> @@ -601,3 +596,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev
> *mib_dev)
>
>         return 0;
>  }
> +
> +static void assign_caps(struct mana_ib_adapter_caps *caps,
> +                       struct mana_ib_query_adapter_caps_resp *resp)
> +{
> +       caps->max_sq_id = resp->max_sq_id;
> +       caps->max_rq_id = resp->max_rq_id;
> +       caps->max_cq_id = resp->max_cq_id;
> +       caps->max_qp_count = resp->max_qp_count;
> +       caps->max_cq_count = resp->max_cq_count;
> +       caps->max_mr_count = resp->max_mr_count;
> +       caps->max_pd_count = resp->max_pd_count;
> +       caps->max_inbound_read_limit = resp->max_inbound_read_limit;
> +       caps->max_outbound_read_limit = resp->max_outbound_read_limit;
> +       caps->mw_count = resp->mw_count;
> +       caps->max_srq_count = resp->max_srq_count;
> +       caps->max_requester_sq_size = resp->max_requester_sq_size;
> +       caps->max_responder_sq_size = resp->max_responder_sq_size;
> +       caps->max_requester_rq_size = resp->max_requester_rq_size;
> +       caps->max_responder_rq_size = resp->max_responder_rq_size;
> +       caps->max_send_wqe_size = resp->max_send_wqe_size;
> +       caps->max_recv_wqe_size = resp->max_recv_wqe_size;
> +       caps->max_inline_data_size = resp->max_inline_data_size;
> +}
> +
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
> +{
> +       struct mana_ib_query_adapter_caps_resp resp = {};
> +       struct mana_ib_query_adapter_caps_req req = {};
> +       int err;
> +
> +       mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP,
> sizeof(req),
> +                            sizeof(resp));
> +       req.hdr.resp.msg_version = MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3;
> +       req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
> +
> +       err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
> +                                  sizeof(resp), &resp);
> +
> +       if (err) {
> +               ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps
> err %d", err);
> +               return err;
> +       }
> +
> +       assign_caps(&mib_dev->adapter_caps, &resp);
> +       return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 8a652bccd978..6b9406738cb2 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -20,19 +20,41 @@
>
>  /* MANA doesn't have any limit for MR size */
>  #define MANA_IB_MAX_MR_SIZE    U64_MAX
> -
> +#define MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3 3
>  /*
>   * The hardware limit of number of MRs is greater than maximum number of
> MRs
>   * that can possibly represent in 24 bits
>   */
>  #define MANA_IB_MAX_MR         0xFFFFFFu
>
> +struct mana_ib_adapter_caps {
> +       u32 max_sq_id;
> +       u32 max_rq_id;
> +       u32 max_cq_id;
> +       u32 max_qp_count;
> +       u32 max_cq_count;
> +       u32 max_mr_count;
> +       u32 max_pd_count;
> +       u32 max_inbound_read_limit;
> +       u32 max_outbound_read_limit;
> +       u32 mw_count;
> +       u32 max_srq_count;
> +       u32 max_requester_sq_size;
> +       u32 max_responder_sq_size;
> +       u32 max_requester_rq_size;
> +       u32 max_responder_rq_size;
> +       u32 max_send_wqe_size;
> +       u32 max_recv_wqe_size;
> +       u32 max_inline_data_size;
> +};
> +
>  struct mana_ib_dev {
>         struct ib_device ib_dev;
>         struct gdma_dev *gdma_dev;
>         struct gdma_context *gc;
>         struct gdma_queue *fatal_err_eq;
>         mana_handle_t adapter_handle;
> +       struct mana_ib_adapter_caps adapter_caps;
>  };
>
>  struct mana_ib_wq {
> @@ -96,6 +118,7 @@ struct mana_ib_rwq_ind_table {
>  };
>
>  enum mana_ib_command_code {
> +       MANA_IB_GET_ADAPTER_CAP = 0x30001,
>         MANA_IB_CREATE_ADAPTER  = 0x30002,
>         MANA_IB_DESTROY_ADAPTER = 0x30003,
>  };
> @@ -120,6 +143,32 @@ struct mana_ib_destroy_adapter_resp {
>         struct gdma_resp_hdr hdr;
>  }; /* HW Data */
>
> +struct mana_ib_query_adapter_caps_req {
> +       struct gdma_req_hdr hdr;
> +}; /*HW Data */
> +
> +struct mana_ib_query_adapter_caps_resp {
> +       struct gdma_resp_hdr hdr;
> +       u32 max_sq_id;
> +       u32 max_rq_id;
> +       u32 max_cq_id;
> +       u32 max_qp_count;
> +       u32 max_cq_count;
> +       u32 max_mr_count;
> +       u32 max_pd_count;
> +       u32 max_inbound_read_limit;
> +       u32 max_outbound_read_limit;
> +       u32 mw_count;
> +       u32 max_srq_count;
> +       u32 max_requester_sq_size;
> +       u32 max_responder_sq_size;
> +       u32 max_requester_rq_size;
> +       u32 max_responder_rq_size;
> +       u32 max_send_wqe_size;
> +       u32 max_recv_wqe_size;
> +       u32 max_inline_data_size;
> +}; /* HW Data */
> +
>  int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
>                                  struct ib_umem *umem,
>                                  mana_handle_t *gdma_region);
> @@ -194,4 +243,6 @@ int mana_ib_create_adapter(struct mana_ib_dev
> *mib_dev);
>
>  int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
>
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
> +
>  #endif
> --
> 2.25.1
>
>
>
>
> --
>
> Regards,
>
> Kalesh A P
>
>
>
>
> --
>
> Regards,
>
> Kalesh A P
>
diff mbox series

Patch

diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 4077e440657a..e15da43c73a0 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -97,6 +97,10 @@  static int mana_ib_probe(struct auxiliary_device *adev,
 		goto free_error_eq;
 	}
 
+	ret = mana_ib_query_adapter_caps(mib_dev);
+	if (ret)
+		ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use defaults");
+
 	ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
 				 mdev->gdma_context->dev);
 	if (ret)
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 5b5d7abe79ac..82923475267d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -469,20 +469,15 @@  int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 			 struct ib_udata *uhw)
 {
-	props->max_qp = MANA_MAX_NUM_QUEUES;
-	props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
+	struct mana_ib_dev *mib_dev = container_of(ibdev,
+			struct mana_ib_dev, ib_dev);
 
-	/*
-	 * max_cqe could be potentially much bigger.
-	 * As this version of driver only support RAW QP, set it to the same
-	 * value as max_qp_wr
-	 */
-	props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
-
-	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
-	props->max_mr = MANA_IB_MAX_MR;
-	props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
-	props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
+	props->max_qp = mib_dev->adapter_caps.max_qp_count;
+	props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
+	props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
+	props->max_mr = mib_dev->adapter_caps.max_mr_count;
+	props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
+	props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
 
 	return 0;
 }
@@ -601,3 +596,49 @@  int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev)
 
 	return 0;
 }
+
+static void assign_caps(struct mana_ib_adapter_caps *caps,
+			struct mana_ib_query_adapter_caps_resp *resp)
+{
+	caps->max_sq_id = resp->max_sq_id;
+	caps->max_rq_id = resp->max_rq_id;
+	caps->max_cq_id = resp->max_cq_id;
+	caps->max_qp_count = resp->max_qp_count;
+	caps->max_cq_count = resp->max_cq_count;
+	caps->max_mr_count = resp->max_mr_count;
+	caps->max_pd_count = resp->max_pd_count;
+	caps->max_inbound_read_limit = resp->max_inbound_read_limit;
+	caps->max_outbound_read_limit = resp->max_outbound_read_limit;
+	caps->mw_count = resp->mw_count;
+	caps->max_srq_count = resp->max_srq_count;
+	caps->max_requester_sq_size = resp->max_requester_sq_size;
+	caps->max_responder_sq_size = resp->max_responder_sq_size;
+	caps->max_requester_rq_size = resp->max_requester_rq_size;
+	caps->max_responder_rq_size = resp->max_responder_rq_size;
+	caps->max_send_wqe_size = resp->max_send_wqe_size;
+	caps->max_recv_wqe_size = resp->max_recv_wqe_size;
+	caps->max_inline_data_size = resp->max_inline_data_size;
+}
+
+int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
+{
+	struct mana_ib_query_adapter_caps_resp resp = {};
+	struct mana_ib_query_adapter_caps_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
+			     sizeof(resp));
+	req.hdr.resp.msg_version = MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3;
+	req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
+
+	err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
+				   sizeof(resp), &resp);
+
+	if (err) {
+		ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps err %d", err);
+		return err;
+	}
+
+	assign_caps(&mib_dev->adapter_caps, &resp);
+	return 0;
+}
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 8a652bccd978..6b9406738cb2 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -20,19 +20,41 @@ 
 
 /* MANA doesn't have any limit for MR size */
 #define MANA_IB_MAX_MR_SIZE	U64_MAX
-
+#define MANA_IB_GET_ADAPTER_CAP_RESPONSE_V3 3
 /*
  * The hardware limit of number of MRs is greater than maximum number of MRs
  * that can possibly represent in 24 bits
  */
 #define MANA_IB_MAX_MR		0xFFFFFFu
 
+struct mana_ib_adapter_caps {
+	u32 max_sq_id;
+	u32 max_rq_id;
+	u32 max_cq_id;
+	u32 max_qp_count;
+	u32 max_cq_count;
+	u32 max_mr_count;
+	u32 max_pd_count;
+	u32 max_inbound_read_limit;
+	u32 max_outbound_read_limit;
+	u32 mw_count;
+	u32 max_srq_count;
+	u32 max_requester_sq_size;
+	u32 max_responder_sq_size;
+	u32 max_requester_rq_size;
+	u32 max_responder_rq_size;
+	u32 max_send_wqe_size;
+	u32 max_recv_wqe_size;
+	u32 max_inline_data_size;
+};
+
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
 	struct gdma_context *gc;
 	struct gdma_queue *fatal_err_eq;
 	mana_handle_t adapter_handle;
+	struct mana_ib_adapter_caps adapter_caps;
 };
 
 struct mana_ib_wq {
@@ -96,6 +118,7 @@  struct mana_ib_rwq_ind_table {
 };
 
 enum mana_ib_command_code {
+	MANA_IB_GET_ADAPTER_CAP = 0x30001,
 	MANA_IB_CREATE_ADAPTER  = 0x30002,
 	MANA_IB_DESTROY_ADAPTER = 0x30003,
 };
@@ -120,6 +143,32 @@  struct mana_ib_destroy_adapter_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW Data */
 
+struct mana_ib_query_adapter_caps_req {
+	struct gdma_req_hdr hdr;
+}; /*HW Data */
+
+struct mana_ib_query_adapter_caps_resp {
+	struct gdma_resp_hdr hdr;
+	u32 max_sq_id;
+	u32 max_rq_id;
+	u32 max_cq_id;
+	u32 max_qp_count;
+	u32 max_cq_count;
+	u32 max_mr_count;
+	u32 max_pd_count;
+	u32 max_inbound_read_limit;
+	u32 max_outbound_read_limit;
+	u32 mw_count;
+	u32 max_srq_count;
+	u32 max_requester_sq_size;
+	u32 max_responder_sq_size;
+	u32 max_requester_rq_size;
+	u32 max_responder_rq_size;
+	u32 max_send_wqe_size;
+	u32 max_recv_wqe_size;
+	u32 max_inline_data_size;
+}; /* HW Data */
+
 int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
 				 struct ib_umem *umem,
 				 mana_handle_t *gdma_region);
@@ -194,4 +243,6 @@  int mana_ib_create_adapter(struct mana_ib_dev *mib_dev);
 
 int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
 
+int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
+
 #endif