
[rdma-next,1/1] RDMA/mana_ib: Fix integer overflow during queue creation

Message ID 1741287713-13812-1-git-send-email-kotaranov@linux.microsoft.com (mailing list archive)
State New
Series [rdma-next,1/1] RDMA/mana_ib: Fix integer overflow during queue creation

Commit Message

Konstantin Taranov March 6, 2025, 7:01 p.m. UTC
From: Konstantin Taranov <kotaranov@microsoft.com>

Use size_t instead of u32 in the helpers for queue creation
to detect overflow of the queue size. The queue size cannot
exceed U32_MAX.

Fixes: bd4ee700870a ("RDMA/mana_ib: UD/GSI QP creation for kernel")
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/cq.c      |  9 +++++----
 drivers/infiniband/hw/mana/main.c    | 15 +++++++++++++--
 drivers/infiniband/hw/mana/mana_ib.h |  4 ++--
 drivers/infiniband/hw/mana/qp.c      | 11 ++++++-----
 4 files changed, 26 insertions(+), 13 deletions(-)
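
For illustration only (not part of the patch), the following userspace sketch shows how the 32-bit multiplication wraps while the size_t form keeps the full value; it assumes COMP_ENTRY_SIZE is 64 and a 64-bit size_t:

#include <stdint.h>
#include <stdio.h>

#define COMP_ENTRY_SIZE 64	/* assumed value, for illustration only */

int main(void)
{
	uint32_t cqe = 1U << 27;			/* a very large CQE count */
	uint32_t wrapped = cqe * COMP_ENTRY_SIZE;	/* 2^33 wraps to 0 in u32 */
	size_t full = (size_t)cqe * COMP_ENTRY_SIZE;	/* 2^33 preserved on LP64 */

	printf("wrapped=%u full=%zu\n", wrapped, full);
	if (full > UINT32_MAX)		/* the check the patched helpers perform */
		printf("queue size exceeds limit, reject with -EINVAL\n");
	return 0;
}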

Comments

Long Li March 6, 2025, 8:50 p.m. UTC | #1
> Subject: [PATCH rdma-next 1/1] RDMA/mana_ib: Fix integer overflow during
> queue creation
> 
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Use size_t instead of u32 in the helpers for queue creation to detect overflow
> of the queue size. The queue size cannot exceed U32_MAX.
> 
> Fixes: bd4ee700870a ("RDMA/mana_ib: UD/GSI QP creation for kernel")
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> ---
>  drivers/infiniband/hw/mana/cq.c      |  9 +++++----
>  drivers/infiniband/hw/mana/main.c    | 15 +++++++++++++--
>  drivers/infiniband/hw/mana/mana_ib.h |  4 ++--
>  drivers/infiniband/hw/mana/qp.c      | 11 ++++++-----
>  4 files changed, 26 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
> index 5c325ef..07b97da 100644
> --- a/drivers/infiniband/hw/mana/cq.c
> +++ b/drivers/infiniband/hw/mana/cq.c
> @@ -18,7 +18,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
>  	struct gdma_context *gc;
>  	bool is_rnic_cq;
>  	u32 doorbell;
> -	u32 buf_size;
> +	size_t buf_size;
>  	int err;
> 
>  	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> @@ -45,7 +45,8 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
>  		}
> 
>  		cq->cqe = attr->cqe;
> -		err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
> +		buf_size = (size_t)cq->cqe * COMP_ENTRY_SIZE;
> +		err = mana_ib_create_queue(mdev, ucmd.buf_addr, buf_size,
>  					   &cq->queue);
>  		if (err) {
>  			ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
> @@ -57,8 +58,8 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
>  		doorbell = mana_ucontext->doorbell;
>  	} else {
>  		is_rnic_cq = true;
> -		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
> -		cq->cqe = buf_size / COMP_ENTRY_SIZE;
> +		cq->cqe = attr->cqe;
> +		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two((size_t)attr->cqe * COMP_ENTRY_SIZE));

Why not do a check like:

if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE)
	return -EINVAL;

Then you don't need to check in mana_ib_create_kernel_queue() and mana_ib_create_queue().
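
For reference, a sketch of where this caller-side check would sit (illustrative only, untested), near the top of mana_ib_create_cq() before any size arithmetic:

	/* hypothetical early validation, replacing the helper-side checks */
	if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE)
		return -EINVAL;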
Konstantin Taranov March 11, 2025, 10:05 a.m. UTC | #2
> > Subject: [PATCH rdma-next 1/1] RDMA/mana_ib: Fix integer overflow
> > during queue creation
> >
> > From: Konstantin Taranov <kotaranov@microsoft.com>
> >
> > Use size_t instead of u32 in the helpers for queue creation to detect
> > overflow of the queue size. The queue size cannot exceed U32_MAX.
> >
> > Fixes: bd4ee700870a ("RDMA/mana_ib: UD/GSI QP creation for kernel")
> > Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> > ---
> >  drivers/infiniband/hw/mana/cq.c      |  9 +++++----
> >  drivers/infiniband/hw/mana/main.c    | 15 +++++++++++++--
> >  drivers/infiniband/hw/mana/mana_ib.h |  4 ++--
> >  drivers/infiniband/hw/mana/qp.c      | 11 ++++++-----
> >  4 files changed, 26 insertions(+), 13 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
> > index 5c325ef..07b97da 100644
> > --- a/drivers/infiniband/hw/mana/cq.c
> > +++ b/drivers/infiniband/hw/mana/cq.c
> > @@ -18,7 +18,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> >  	struct gdma_context *gc;
> >  	bool is_rnic_cq;
> >  	u32 doorbell;
> > -	u32 buf_size;
> > +	size_t buf_size;
> >  	int err;
> >
> >  	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> > @@ -45,7 +45,8 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> >  		}
> >
> >  		cq->cqe = attr->cqe;
> > -		err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
> > +		buf_size = (size_t)cq->cqe * COMP_ENTRY_SIZE;
> > +		err = mana_ib_create_queue(mdev, ucmd.buf_addr, buf_size,
> >  					   &cq->queue);
> >  		if (err) {
> >  			ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
> > @@ -57,8 +58,8 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> >  		doorbell = mana_ucontext->doorbell;
> >  	} else {
> >  		is_rnic_cq = true;
> > -		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
> > -		cq->cqe = buf_size / COMP_ENTRY_SIZE;
> > +		cq->cqe = attr->cqe;
> > +		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two((size_t)attr->cqe * COMP_ENTRY_SIZE));
> 
> Why not do a check like:
> 
> if (attr->cqe > U32_MAX / COMP_ENTRY_SIZE)
> 	return -EINVAL;
> 
> Then you don't need to check in mana_ib_create_kernel_queue() and
> mana_ib_create_queue().
> 

Yes, I initially considered the small fix you propose, but ended up adding checks to all paths.
As I see it, the same overflow can happen if a user asks for a large RC WQ.
I believe a kernel client can also cause this overflow: we plan to add kernel RC support soon
and, as far as I understand, a kernel user can also ask to create a large CQ, resulting in a similar overflow.

- Konstantin

Patch

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 5c325ef..07b97da 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -18,7 +18,7 @@  int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct gdma_context *gc;
 	bool is_rnic_cq;
 	u32 doorbell;
-	u32 buf_size;
+	size_t buf_size;
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -45,7 +45,8 @@  int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		}
 
 		cq->cqe = attr->cqe;
-		err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
+		buf_size = (size_t)cq->cqe * COMP_ENTRY_SIZE;
+		err = mana_ib_create_queue(mdev, ucmd.buf_addr, buf_size,
 					   &cq->queue);
 		if (err) {
 			ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
@@ -57,8 +58,8 @@  int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		doorbell = mana_ucontext->doorbell;
 	} else {
 		is_rnic_cq = true;
-		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
-		cq->cqe = buf_size / COMP_ENTRY_SIZE;
+		cq->cqe = attr->cqe;
+		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two((size_t)attr->cqe * COMP_ENTRY_SIZE));
 		err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
 		if (err) {
 			ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 091e6b2..cc9de4b 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -240,7 +240,7 @@  void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
-int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, size_t size, enum gdma_queue_type type,
 				struct mana_ib_queue *queue)
 {
 	struct gdma_context *gc = mdev_to_gc(mdev);
@@ -249,6 +249,12 @@  int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
 
 	queue->id = INVALID_QUEUE_ID;
 	queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+	if (size > U32_MAX) {
+		ibdev_dbg(&mdev->ib_dev, "Queue size exceeding limit %zu\n", size);
+		return -EINVAL;
+	}
+
 	spec.type = type;
 	spec.monitor_avl_buf = false;
 	spec.queue_size = size;
@@ -261,7 +267,7 @@  int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
 	return 0;
 }
 
-int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, size_t size,
 			 struct mana_ib_queue *queue)
 {
 	struct ib_umem *umem;
@@ -271,6 +277,11 @@  int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
 	queue->id = INVALID_QUEUE_ID;
 	queue->gdma_region = GDMA_INVALID_DMA_REGION;
 
+	if (size > U32_MAX) {
+		ibdev_dbg(&mdev->ib_dev, "Queue size exceeding limit %zu\n", size);
+		return -EINVAL;
+	}
+
 	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index bd47b7f..282b0ae 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -589,9 +589,9 @@  int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
 
-int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, size_t size, enum gdma_queue_type type,
 				struct mana_ib_queue *queue);
-int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, size_t size,
 			 struct mana_ib_queue *queue);
 void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
 
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c92465d..36050e7 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -377,18 +377,18 @@  static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
 	return ALIGN(wqe_size, GDMA_WQE_BU_SIZE);
 }
 
-static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
+static size_t mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
 {
-	u32 queue_size;
+	size_t queue_size;
 
 	switch (attr->qp_type) {
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
 		if (queue_type == MANA_UD_SEND_QUEUE)
-			queue_size = attr->cap.max_send_wr *
+			queue_size = (size_t)attr->cap.max_send_wr *
 				mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
 		else
-			queue_size = attr->cap.max_recv_wr *
+			queue_size = (size_t)attr->cap.max_recv_wr *
 				mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
 		break;
 	default:
@@ -608,7 +608,8 @@  static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
 	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	u32 doorbell, queue_size;
+	size_t queue_size;
+	u32 doorbell;
 	int i, err;
 
 	if (udata) {