Message ID | 20210622175232.439-2-tatyana.e.nikolova@intel.com (mailing list archive)
State | Superseded
Series | irdma coverity fixes
On Tue, Jun 22, 2021 at 12:52:30PM -0500, Tatyana Nikolova wrote:
> From: Shiraz Saleem <shiraz.saleem@intel.com>
>
> The contents of the user-space req object are used in array indexing
> in irdma_handle_q_mem without checking for valid values.
>
> Guard against bad input for each of these req object page counts by
> limiting them to the number of pages that make up the region.
>
> Reported-by: coverity-bot <keescook+coverity-bot@chromium.org>
> Addresses-Coverity-ID: 1505160 ("TAINTED_SCALAR")
> Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
> Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
> Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 18 ++++++++++++++----
>  1 file changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index e8b170f0d997..8bd31656a83a 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2360,10 +2360,8 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
>  	u64 *arr = iwmr->pgaddrmem;
>  	u32 pg_size;
>  	int err = 0;
> -	int total;
>  	bool ret = true;
>
> -	total = req->sq_pages + req->rq_pages + req->cq_pages;
>  	pg_size = iwmr->page_size;
>  	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
>  	if (err)
> @@ -2381,7 +2379,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
>  	switch (iwmr->type) {
>  	case IRDMA_MEMREG_TYPE_QP:
>  		hmc_p = &qpmr->sq_pbl;
> -		qpmr->shadow = (dma_addr_t)arr[total];
> +		qpmr->shadow = (dma_addr_t)arr[req->sq_pages + req->rq_pages];
>
>  		if (use_pbles) {
>  			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
> @@ -2406,7 +2404,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
>  		hmc_p = &cqmr->cq_pbl;
>
>  		if (!cqmr->split)
> -			cqmr->shadow = (dma_addr_t)arr[total];
> +			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
>
>  		if (use_pbles)
>  			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
> @@ -2748,6 +2746,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
>  	u32 stag = 0;
> +	u8 shadow_pgcnt = 1;
>  	bool use_pbles = false;
>  	unsigned long flags;
>  	int err = -EINVAL;
> @@ -2795,6 +2794,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> +		if (req.sq_pages + req.rq_pages + shadow_pgcnt > iwmr->page_cnt) {

Math on values from userspace should use the check overflow helpers or
otherwise be designed to be overflow safe

Jason
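For reference, the overflow helpers Jason refers to live in <linux/overflow.h>; check_add_overflow() returns true if the addition wrapped. Below is a minimal sketch of how the QP-path validation could be written with them. The helper function and its name are hypothetical; only the field names mirror the patch (struct irdma_mem_reg_req is the irdma uapi request).

#include <linux/errno.h>
#include <linux/overflow.h>
#include <rdma/irdma-abi.h>	/* struct irdma_mem_reg_req (uapi) */

/* Hypothetical helper, sketched for illustration: validate the
 * user-supplied QP page counts without relying on implicit integer
 * promotion. All operands are widened to u32 before the checked adds.
 */
static int irdma_validate_qp_pages(const struct irdma_mem_reg_req *req,
				   u32 page_cnt, u8 shadow_pgcnt)
{
	u32 total;

	if (check_add_overflow((u32)req->sq_pages, (u32)req->rq_pages, &total) ||
	    check_add_overflow(total, (u32)shadow_pgcnt, &total))
		return -EINVAL;	/* arithmetic wrapped */

	if (total > page_cnt)
		return -EINVAL;	/* request exceeds the registered region */

	return 0;
}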
> -----Original Message-----
> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Tuesday, June 22, 2021 12:59 PM
> To: Nikolova, Tatyana E <tatyana.e.nikolova@intel.com>
> Cc: dledford@redhat.com; linux-rdma@vger.kernel.org; Saleem, Shiraz
> <shiraz.saleem@intel.com>; Ismail, Mustafa <mustafa.ismail@intel.com>;
> coverity-bot <keescook+coverity-bot@chromium.org>
> Subject: Re: [PATCH rdma-next 1/3] RDMA/irdma: Check contents of
> user-space irdma_mem_reg_req object
>
> On Tue, Jun 22, 2021 at 12:52:30PM -0500, Tatyana Nikolova wrote:
> >  	switch (req.reg_type) {
> >  	case IRDMA_MEMREG_TYPE_QP:
> > +		if (req.sq_pages + req.rq_pages + shadow_pgcnt > iwmr->page_cnt) {
>
> Math on values from userspace should use the check overflow helpers or
> otherwise be designed to be overflow safe

Hi Jason,

The mem_reg_req fields sq_pages and rq_pages are u16 and the variable
shadow_pgcnt is u8. They should be promoted to u32 when compared with
iwmr->page_cnt, which is u32. Isn't this overflow safe?

Is the issue you are mentioning about this line:

> > +		qpmr->shadow = (dma_addr_t)arr[req->sq_pages + req->rq_pages];

Thank you,
Tatyana
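The promotion argument can be checked outside the kernel with a small standalone program (plain C, illustrative only; the variable names echo the uapi request fields). With the current field widths the usual arithmetic conversions promote every operand to int, so the sum cannot wrap:

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Worst-case values for the current uapi field widths. */
	uint16_t sq_pages = UINT16_MAX;		/* 65535 */
	uint16_t rq_pages = UINT16_MAX;		/* 65535 */
	uint8_t shadow_pgcnt = UINT8_MAX;	/* 255, larger than the real 1 */

	/* All three operands are promoted to int, so the sum is computed
	 * as int: 65535 + 65535 + 255 = 131325, far below INT_MAX.
	 */
	int total = sq_pages + rq_pages + shadow_pgcnt;
	assert(total == 131325 && total < INT_MAX);
	printf("worst-case total: %d\n", total);
	return 0;
}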
On Tue, Jun 22, 2021 at 09:56:42PM +0000, Nikolova, Tatyana E wrote:
> > >  	switch (req.reg_type) {
> > >  	case IRDMA_MEMREG_TYPE_QP:
> > > +		if (req.sq_pages + req.rq_pages + shadow_pgcnt > iwmr->page_cnt) {
> >
> > Math on values from userspace should use the check overflow helpers or
> > otherwise be designed to be overflow safe
>
> The mem_reg_req fields sq_pages and rq_pages are u16 and the
> variable shadow_pgcnt is u8. They should be promoted to u32 when
> compared with iwmr->page_cnt which is u32. Isn't this overflow safe?

I didn't check the sizes carefully, and I'm always nervous about
relying on implicit promotion for security properties as it is so
subtle and easy to get screwed up during maintenance

> Is the issue you are mentioning about this line:
>
> > +		qpmr->shadow = (dma_addr_t)arr[req->sq_pages + req->rq_pages];

I assume this is safe because of the if above?

Jason
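To make the maintenance hazard concrete, here is a hypothetical scenario, not the current irdma code: if the page-count fields were ever widened to u32, the same expression would be evaluated in unsigned 32-bit arithmetic and could silently wrap before the comparison. A standalone snippet showing that failure mode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical future where the page-count fields are u32. */
	uint32_t sq_pages = UINT32_MAX;	/* attacker-controlled extreme */
	uint32_t rq_pages = 0;
	uint32_t shadow_pgcnt = 1;
	uint32_t page_cnt = 16;

	/* No promotion to a wider type happens here; the addition wraps
	 * modulo 2^32 and yields 0, so the bounds check wrongly passes.
	 */
	uint32_t total = sq_pages + rq_pages + shadow_pgcnt;
	printf("total = %u, check rejects request: %s\n",
	       total, total > page_cnt ? "yes" : "no");	/* total = 0, "no" */
	return 0;
}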
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index e8b170f0d997..8bd31656a83a 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2360,10 +2360,8 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 	u64 *arr = iwmr->pgaddrmem;
 	u32 pg_size;
 	int err = 0;
-	int total;
 	bool ret = true;
 
-	total = req->sq_pages + req->rq_pages + req->cq_pages;
 	pg_size = iwmr->page_size;
 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
 	if (err)
@@ -2381,7 +2379,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 	switch (iwmr->type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		hmc_p = &qpmr->sq_pbl;
-		qpmr->shadow = (dma_addr_t)arr[total];
+		qpmr->shadow = (dma_addr_t)arr[req->sq_pages + req->rq_pages];
 
 		if (use_pbles) {
 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
@@ -2406,7 +2404,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 		hmc_p = &cqmr->cq_pbl;
 
 		if (!cqmr->split)
-			cqmr->shadow = (dma_addr_t)arr[total];
+			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
 
 		if (use_pbles)
 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
@@ -2748,6 +2746,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
 	u32 stag = 0;
+	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -EINVAL;
@@ -2795,6 +2794,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
+		if (req.sq_pages + req.rq_pages + shadow_pgcnt > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
 		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
@@ -2808,6 +2811,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
+		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+			shadow_pgcnt = 0;
+		if (req.cq_pages + shadow_pgcnt > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
+
 		use_pbles = (req.cq_pages > 1);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
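For readers following the thread, the reason the arr[req->sq_pages + req->rq_pages] shadow index is safe after this patch (the point Jason raises above) can be spelled out as an invariant: the new QP-path check guarantees sq_pages + rq_pages + shadow_pgcnt <= page_cnt with shadow_pgcnt = 1, so the index is at most page_cnt - 1. A standalone sketch of that reasoning (plain C, illustrative only; names mirror the patch, the function itself is not driver code):

#include <assert.h>
#include <stdint.h>

/* Mirrors the patch's bounds check, then asserts the derived invariant:
 * if sq_pages + rq_pages + shadow_pgcnt <= page_cnt and shadow_pgcnt >= 1,
 * the shadow index sq_pages + rq_pages is strictly less than page_cnt,
 * i.e. within the populated page-address array.
 */
static void shadow_index_is_in_bounds(uint16_t sq_pages, uint16_t rq_pages,
				      uint8_t shadow_pgcnt, uint32_t page_cnt)
{
	assert(shadow_pgcnt >= 1);	/* always 1 on the QP path */

	/* The driver's check; u16/u8 operands promote to int, no wrap. */
	if ((uint32_t)(sq_pages + rq_pages + shadow_pgcnt) > page_cnt)
		return;	/* the driver rejects this request with -EINVAL */

	uint32_t shadow_idx = (uint32_t)sq_pages + rq_pages;
	assert(shadow_idx < page_cnt);	/* in bounds */
}

int main(void)
{
	shadow_index_is_in_bounds(4, 4, 1, 16);			/* index 8 < 16 */
	shadow_index_is_in_bounds(UINT16_MAX, UINT16_MAX, 1, 16);	/* rejected */
	return 0;
}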