[rdma-next,1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues

Message ID 1710336299-27344-2-git-send-email-kotaranov@linux.microsoft.com (mailing list archive)
State Superseded
Series Define and use mana queues for CQs and WQs

Commit Message

Konstantin Taranov March 13, 2024, 1:24 p.m. UTC
From: Konstantin Taranov <kotaranov@microsoft.com>

Introduce helpers to work with mana ib queues (struct mana_ib_queue).
A queue always consists of umem, gdma_region, and id.
A queue can be used for a WQ or a CQ.
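
For illustration only (not part of this patch), a later change could back a
user-space buffer with a mana_ib_queue roughly as in the sketch below; the
example_cq structure and its fields are hypothetical:

struct example_cq {
	struct mana_ib_queue queue;
	u32 cqe;
};

static int example_create_cq_queue(struct mana_ib_dev *mdev, struct example_cq *cq,
				   u64 buf_addr, u32 buf_size)
{
	int err;

	/* Pin the user buffer and create a zero-offset DMA region for it */
	err = mana_ib_create_queue(mdev, buf_addr, buf_size, &cq->queue);
	if (err)
		return err;

	/*
	 * cq->queue.gdma_region can now be passed to the hardware; the caller
	 * fills in cq->queue.id once the hardware object exists, and
	 * mana_ib_destroy_queue() undoes everything on teardown.
	 */
	return 0;
}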

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/main.c    | 40 ++++++++++++++++++++++++++++
 drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
 2 files changed, 50 insertions(+)

Comments

Long Li March 15, 2024, 4:45 p.m. UTC | #1
> -----Original Message-----
> From: Konstantin Taranov <kotaranov@linux.microsoft.com>
> Sent: Wednesday, March 13, 2024 6:25 AM
> To: Konstantin Taranov <kotaranov@microsoft.com>;
> sharmaajay@microsoft.com; Long Li <longli@microsoft.com>; jgg@ziepe.ca;
> leon@kernel.org
> Cc: linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and
> destroy mana queues
> 
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> A queue always consists of umem, gdma_region, and id.
> A queue can be used for a WQ or a CQ.
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>

Reviewed-by: Long Li <longli@microsoft.com>
Zhu Yanjun March 17, 2024, 6:42 a.m. UTC | #2
On 2024/3/13 14:24, Konstantin Taranov wrote:
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> A queue always consists of umem, gdma_region, and id.
> A queue can be used for a WQ or a CQ.
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> ---
>   drivers/infiniband/hw/mana/main.c    | 40 ++++++++++++++++++++++++++++
>   drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
>   2 files changed, 50 insertions(+)
> 
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 71e33feee..0ec940b97 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
>   		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
>   }
>   
> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> +			 struct mana_ib_queue *queue)
> +{
> +	struct ib_umem *umem;
> +	int err;
> +
> +	queue->umem = NULL;
> +	queue->id = INVALID_QUEUE_ID;
> +	queue->gdma_region = GDMA_INVALID_DMA_REGION;
> +
> +	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
> +	if (IS_ERR(umem)) {
> +		err = PTR_ERR(umem);
> +		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
> +		return err;
> +	}
> +
> +	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
> +	if (err) {
> +		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
> +		goto free_umem;
> +	}
> +	queue->umem = umem;
> +
> +	ibdev_dbg(&mdev->ib_dev,
> +		  "create_dma_region ret %d gdma_region 0x%llx\n",
> +		  err, queue->gdma_region);
> +
> +	return 0;
> +free_umem:
> +	ib_umem_release(umem);
> +	return err;
> +}
> +
> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
> +{
> +	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);

The function mana_ib_gd_destroy_dma_region calls
mana_gd_destroy_dma_region. In mana_gd_destroy_dma_region, if
mana_gd_send_request fails or the response reports an error status,
-EPROTO is returned. The call path and code are shown below. Should
mana_ib_destroy_queue also handle this error?

mana_ib_gd_destroy_dma_region --- > mana_gd_destroy_dma_region

  693 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 
dma_region_handle)
  694 {

...

  706         err = mana_gd_send_request(gc, sizeof(req), &req, 
sizeof(resp), &resp);
  707         if (err || resp.hdr.status) {
  708                 dev_err(gc->dev, "Failed to destroy DMA region: 
%d, 0x%x\n",
  709                         err, resp.hdr.status);
  710                 return -EPROTO;
  711         }

...

  714 }
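
For example, an error-propagating variant could look roughly like this (just a
sketch, assuming the callers are updated to check the return value):

int mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	int err;

	/* May fail with -EPROTO if the destroy request is rejected */
	err = mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);

	return err;
}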

Zhu Yanjun

> +	ib_umem_release(queue->umem);
> +}
> +
>   static int
>   mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
>   			    struct gdma_context *gc,
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index f83390eeb..859fd3bfc 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
>   	u32 max_inline_data_size;
>   };
>   
> +struct mana_ib_queue {
> +	struct ib_umem *umem;
> +	u64 gdma_region;
> +	u64 id;
> +};
> +
>   struct mana_ib_dev {
>   	struct ib_device ib_dev;
>   	struct gdma_dev *gdma_dev;
> @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
>   int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
>   				  mana_handle_t gdma_region);
>   
> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> +			 struct mana_ib_queue *queue);
> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
> +
>   struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>   				struct ib_wq_init_attr *init_attr,
>   				struct ib_udata *udata);
Konstantin Taranov March 18, 2024, 9:31 a.m. UTC | #3
> > From: Konstantin Taranov <kotaranov@microsoft.com>
> >
> > Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> > A queue always consists of umem, gdma_region, and id.
> > A queue can be used for a WQ or a CQ.
> >
> > Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> > ---
> >   drivers/infiniband/hw/mana/main.c    | 40
> ++++++++++++++++++++++++++++
> >   drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
> >   2 files changed, 50 insertions(+)
> >
> > diff --git a/drivers/infiniband/hw/mana/main.c
> > b/drivers/infiniband/hw/mana/main.c
> > index 71e33feee..0ec940b97 100644
> > --- a/drivers/infiniband/hw/mana/main.c
> > +++ b/drivers/infiniband/hw/mana/main.c
> > @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct
> ib_ucontext *ibcontext)
> >               ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
> >   }
> >
> > +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> > +                      struct mana_ib_queue *queue) {
> > +     struct ib_umem *umem;
> > +     int err;
> > +
> > +     queue->umem = NULL;
> > +     queue->id = INVALID_QUEUE_ID;
> > +     queue->gdma_region = GDMA_INVALID_DMA_REGION;
> > +
> > +     umem = ib_umem_get(&mdev->ib_dev, addr, size,
> IB_ACCESS_LOCAL_WRITE);
> > +     if (IS_ERR(umem)) {
> > +             err = PTR_ERR(umem);
> > +             ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
> > +             return err;
> > +     }
> > +
> > +     err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue-
> >gdma_region);
> > +     if (err) {
> > +             ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n",
> err);
> > +             goto free_umem;
> > +     }
> > +     queue->umem = umem;
> > +
> > +     ibdev_dbg(&mdev->ib_dev,
> > +               "create_dma_region ret %d gdma_region 0x%llx\n",
> > +               err, queue->gdma_region);
> > +
> > +     return 0;
> > +free_umem:
> > +     ib_umem_release(umem);
> > +     return err;
> > +}
> > +
> > +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
> > +mana_ib_queue *queue) {
> > +     mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
> 
> The function mana_ib_gd_destroy_dma_region calls
> mana_gd_destroy_dma_region. In mana_gd_destroy_dma_region, if
> mana_gd_send_request fails or the response reports an error status,
> -EPROTO is returned. The call path and code are shown below. Should
> mana_ib_destroy_queue also handle this error?

Thanks for the comment!
This error can be ignored and it was ignored before this commit.
I checked the corresponding Windows driver code, and it is also intentionally ignored there.
I can add a comment that the error is ignored intentionally if you want. 
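
Something along these lines (just a sketch of where the comment would go and
what it could say):

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/*
	 * The return value is intentionally ignored: the queue is being torn
	 * down and there is nothing useful the caller can do about a failure.
	 * The corresponding Windows driver ignores this error as well.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
}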

> 
> mana_ib_gd_destroy_dma_region --- > mana_gd_destroy_dma_region
> 
>   693 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64
> dma_region_handle)
>   694 {
> 
> ...
> 
>   706         err = mana_gd_send_request(gc, sizeof(req), &req,
> sizeof(resp), &resp);
>   707         if (err || resp.hdr.status) {
>   708                 dev_err(gc->dev, "Failed to destroy DMA region:
> %d, 0x%x\n",
>   709                         err, resp.hdr.status);
>   710                 return -EPROTO;
>   711         }
> 
> ...
> 
>   714 }
> 
> Zhu Yanjun
> 
> > +     ib_umem_release(queue->umem);
> > +}
> > +
> >   static int
> >   mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
> >                           struct gdma_context *gc, diff --git
> > a/drivers/infiniband/hw/mana/mana_ib.h
> > b/drivers/infiniband/hw/mana/mana_ib.h
> > index f83390eeb..859fd3bfc 100644
> > --- a/drivers/infiniband/hw/mana/mana_ib.h
> > +++ b/drivers/infiniband/hw/mana/mana_ib.h
> > @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
> >       u32 max_inline_data_size;
> >   };
> >
> > +struct mana_ib_queue {
> > +     struct ib_umem *umem;
> > +     u64 gdma_region;
> > +     u64 id;
> > +};
> > +
> >   struct mana_ib_dev {
> >       struct ib_device ib_dev;
> >       struct gdma_dev *gdma_dev;
> > @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> >   int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
> >                                 mana_handle_t gdma_region);
> >
> > +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> > +                      struct mana_ib_queue *queue); void
> > +mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
> mana_ib_queue
> > +*queue);
> > +
> >   struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> >                               struct ib_wq_init_attr *init_attr,
> >                               struct ib_udata *udata);
Zhu Yanjun March 18, 2024, 1:56 p.m. UTC | #4
On 18.03.24 10:31, Konstantin Taranov wrote:
>>> From: Konstantin Taranov <kotaranov@microsoft.com>
>>>
>>> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
>>> A queue always consists of umem, gdma_region, and id.
>>> A queue can be used for a WQ or a CQ.
>>>
>>> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
>>> ---
>>>    drivers/infiniband/hw/mana/main.c    | 40
>> ++++++++++++++++++++++++++++
>>>    drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
>>>    2 files changed, 50 insertions(+)
>>>
>>> diff --git a/drivers/infiniband/hw/mana/main.c
>>> b/drivers/infiniband/hw/mana/main.c
>>> index 71e33feee..0ec940b97 100644
>>> --- a/drivers/infiniband/hw/mana/main.c
>>> +++ b/drivers/infiniband/hw/mana/main.c
>>> @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct
>> ib_ucontext *ibcontext)
>>>                ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
>>>    }
>>>
>>> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
>>> +                      struct mana_ib_queue *queue) {
>>> +     struct ib_umem *umem;
>>> +     int err;
>>> +
>>> +     queue->umem = NULL;
>>> +     queue->id = INVALID_QUEUE_ID;
>>> +     queue->gdma_region = GDMA_INVALID_DMA_REGION;
>>> +
>>> +     umem = ib_umem_get(&mdev->ib_dev, addr, size,
>> IB_ACCESS_LOCAL_WRITE);
>>> +     if (IS_ERR(umem)) {
>>> +             err = PTR_ERR(umem);
>>> +             ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
>>> +             return err;
>>> +     }
>>> +
>>> +     err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue-
>>> gdma_region);
>>> +     if (err) {
>>> +             ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n",
>> err);
>>> +             goto free_umem;
>>> +     }
>>> +     queue->umem = umem;
>>> +
>>> +     ibdev_dbg(&mdev->ib_dev,
>>> +               "create_dma_region ret %d gdma_region 0x%llx\n",
>>> +               err, queue->gdma_region);
>>> +
>>> +     return 0;
>>> +free_umem:
>>> +     ib_umem_release(umem);
>>> +     return err;
>>> +}
>>> +
>>> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
>>> +mana_ib_queue *queue) {
>>> +     mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
>> The function mana_ib_gd_destroy_dma_region calls
>> mana_gd_destroy_dma_region. In mana_gd_destroy_dma_region, if
>> mana_gd_send_request fails or the response reports an error status,
>> -EPROTO is returned. The call path and code are shown below. Should
>> mana_ib_destroy_queue also handle this error?
> Thanks for the comment!
> This error can be ignored and it was ignored before this commit.
> I checked the corresponding Windows driver code, and it is also intentionally ignored there.
> I can add a comment that the error is ignored intentionally if you want.

Sure. Thanks a lot.

Zhu Yanjun

>
>> mana_ib_gd_destroy_dma_region --- > mana_gd_destroy_dma_region
>>
>>    693 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64
>> dma_region_handle)
>>    694 {
>>
>> ...
>>
>>    706         err = mana_gd_send_request(gc, sizeof(req), &req,
>> sizeof(resp), &resp);
>>    707         if (err || resp.hdr.status) {
>>    708                 dev_err(gc->dev, "Failed to destroy DMA region:
>> %d, 0x%x\n",
>>    709                         err, resp.hdr.status);
>>    710                 return -EPROTO;
>>    711         }
>>
>> ...
>>
>>    714 }
>>
>> Zhu Yanjun
>>
>>> +     ib_umem_release(queue->umem);
>>> +}
>>> +
>>>    static int
>>>    mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
>>>                            struct gdma_context *gc, diff --git
>>> a/drivers/infiniband/hw/mana/mana_ib.h
>>> b/drivers/infiniband/hw/mana/mana_ib.h
>>> index f83390eeb..859fd3bfc 100644
>>> --- a/drivers/infiniband/hw/mana/mana_ib.h
>>> +++ b/drivers/infiniband/hw/mana/mana_ib.h
>>> @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
>>>        u32 max_inline_data_size;
>>>    };
>>>
>>> +struct mana_ib_queue {
>>> +     struct ib_umem *umem;
>>> +     u64 gdma_region;
>>> +     u64 id;
>>> +};
>>> +
>>>    struct mana_ib_dev {
>>>        struct ib_device ib_dev;
>>>        struct gdma_dev *gdma_dev;
>>> @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct
>> mana_ib_dev *dev, struct ib_umem *umem,
>>>    int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
>>>                                  mana_handle_t gdma_region);
>>>
>>> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
>>> +                      struct mana_ib_queue *queue); void
>>> +mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
>> mana_ib_queue
>>> +*queue);
>>> +
>>>    struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>>>                                struct ib_wq_init_attr *init_attr,
>>>                                struct ib_udata *udata);

Patch

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee..0ec940b97 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,46 @@  void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue)
+{
+	struct ib_umem *umem;
+	int err;
+
+	queue->umem = NULL;
+	queue->id = INVALID_QUEUE_ID;
+	queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(umem)) {
+		err = PTR_ERR(umem);
+		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+		return err;
+	}
+
+	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+	if (err) {
+		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+		goto free_umem;
+	}
+	queue->umem = umem;
+
+	ibdev_dbg(&mdev->ib_dev,
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
+		  err, queue->gdma_region);
+
+	return 0;
+free_umem:
+	ib_umem_release(umem);
+	return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+	ib_umem_release(queue->umem);
+}
+
 static int
 mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 			    struct gdma_context *gc,
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eeb..859fd3bfc 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,6 +45,12 @@  struct mana_ib_adapter_caps {
 	u32 max_inline_data_size;
 };
 
+struct mana_ib_queue {
+	struct ib_umem *umem;
+	u64 gdma_region;
+	u64 id;
+};
+
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
@@ -169,6 +175,10 @@  int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
 struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);