Message ID | 1566393276-42555-3-git-send-email-oulijun@huawei.com (mailing list archive) |
---|---|
State | Changes Requested |
Headers | show |
Series | Fixes for hip08 driver | expand |
On Wed, 2019-08-21 at 21:14 +0800, Lijun Ou wrote: > +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp > *hr_qp, > + struct ib_qp_init_attr > *init_attr) > +{ > + int ret; > + int i; > + > + /* allocate recv inline buf */ > + hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, > + sizeof(struct > hns_roce_rinl_wqe), > + GFP_KERNEL); > + if (!hr_qp->rq_inl_buf.wqe_list) { > + ret = -ENOMEM; > + goto err; > + } > + > + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; > + > + /* Firstly, allocate a list of sge space buffer */ > + hr_qp->rq_inl_buf.wqe_list[0].sg_list = > + kcalloc(hr_qp- > >rq_inl_buf.wqe_cnt, > + init_attr->cap.max_recv_sge * > + sizeof(struct > hns_roce_rinl_sge), > + GFP_KERNEL); > + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { > + ret = -ENOMEM; > + goto err_wqe_list; > + } > + > + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) > + /* Secondly, reallocate the buffer */ > + hr_qp->rq_inl_buf.wqe_list[i].sg_list = > + &hr_qp- > >rq_inl_buf.wqe_list[0].sg_list[i * > + init_attr->cap.max_recv_sge]; > + > + return 0; > + > +err_wqe_list: > + kfree(hr_qp->rq_inl_buf.wqe_list); > + > +err: > + return ret; > +} This function is klunky. You don't need int ret; at all as there are only two possible return values and you have distinct locations for each return, so each return can use a constant. 
It would be much more readable like this: +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) +{ + int num_sge = init_attr->cap.max_recv_sge; + int wqe_cnt = hr_qp->rq.wqe_cnt; + int i; + + /* allocate recv inline WQE bufs */ + hr_qp->rq_inl_buf.wqe_list = kcalloc(wqe_cnt, + sizeof(struct hns_roce_rinl_wqe), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list) + goto err; + + hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt; + + /* allocate a single sge array for all WQEs */ + hr_qp->rq_inl_buf.wqe_list[0].sg_list = + kcalloc(wqe_cnt, + num_sge * + sizeof(struct hns_roce_rinl_sge), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) + goto err_wqe_list; + + for (i = 1; i < wqe_cnt; i++) + /* give each WQE a pointer to its array space */ + hr_qp->rq_inl_buf.wqe_list[i].sg_list = + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * num_sge]; + + return 0; + +err_wqe_list: + kfree(hr_qp->rq_inl_buf.wqe_list); +err: + return -ENOMEM; +}
在 2019/8/28 23:19, Doug Ledford 写道: > On Wed, 2019-08-21 at 21:14 +0800, Lijun Ou wrote: >> +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp >> *hr_qp, >> + struct ib_qp_init_attr >> *init_attr) >> +{ >> + int ret; >> + int i; >> + >> + /* allocate recv inline buf */ >> + hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, >> + sizeof(struct >> hns_roce_rinl_wqe), >> + GFP_KERNEL); >> + if (!hr_qp->rq_inl_buf.wqe_list) { >> + ret = -ENOMEM; >> + goto err; >> + } >> + >> + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; >> + >> + /* Firstly, allocate a list of sge space buffer */ >> + hr_qp->rq_inl_buf.wqe_list[0].sg_list = >> + kcalloc(hr_qp- >>> rq_inl_buf.wqe_cnt, >> + init_attr->cap.max_recv_sge * >> + sizeof(struct >> hns_roce_rinl_sge), >> + GFP_KERNEL); >> + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { >> + ret = -ENOMEM; >> + goto err_wqe_list; >> + } >> + >> + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) >> + /* Secondly, reallocate the buffer */ >> + hr_qp->rq_inl_buf.wqe_list[i].sg_list = >> + &hr_qp- >>> rq_inl_buf.wqe_list[0].sg_list[i * >> + init_attr->cap.max_recv_sge]; >> + >> + return 0; >> + >> +err_wqe_list: >> + kfree(hr_qp->rq_inl_buf.wqe_list); >> + >> +err: >> + return ret; >> +} > This function is klunky. You don't need int ret; at all as there are > only two possible return values and you have distinct locations for each > return, so each return can use a constant. 
It would be much more > readable like this: > > +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp *hr_qp, > + struct ib_qp_init_attr *init_attr) > +{ > + int num_sge = init_attr->cap.max_recv_sge; > + int wqe_cnt = hr_qp->rq.wqe_cnt; > + int i; > + > + /* allocate recv inline WQE bufs */ > + hr_qp->rq_inl_buf.wqe_list = kcalloc(wqe_cnt, > + sizeof(struct hns_roce_rinl_wqe), > + GFP_KERNEL); > + if (!hr_qp->rq_inl_buf.wqe_list) > + goto err; > + > + hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt; > + > + /* allocate a single sge array for all WQEs */ > + hr_qp->rq_inl_buf.wqe_list[0].sg_list = > + kcalloc(wqe_cnt, > + num_sge * > + sizeof(struct hns_roce_rinl_sge), > + GFP_KERNEL); > + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) > + goto err_wqe_list; > + > + for (i = 1; i < wqe_cnt; i++) > + /* give each WQE a pointer to its array space */ > + hr_qp->rq_inl_buf.wqe_list[i].sg_list = > + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * num_sge]; > + > + return 0; > + > +err_wqe_list: > + kfree(hr_qp->rq_inl_buf.wqe_list); > +err: > + return -ENOMEM; > +} > Thanks, I will consider accept your advice and fixes it.
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index ec6b5dd..7e10820 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -635,6 +635,55 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) return 1; } +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) +{ + int ret; + int i; + + /* allocate recv inline buf */ + hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, + sizeof(struct hns_roce_rinl_wqe), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list) { + ret = -ENOMEM; + goto err; + } + + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; + + /* Firstly, allocate a list of sge space buffer */ + hr_qp->rq_inl_buf.wqe_list[0].sg_list = + kcalloc(hr_qp->rq_inl_buf.wqe_cnt, + init_attr->cap.max_recv_sge * + sizeof(struct hns_roce_rinl_sge), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { + ret = -ENOMEM; + goto err_wqe_list; + } + + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) + /* Secondly, reallocate the buffer */ + hr_qp->rq_inl_buf.wqe_list[i].sg_list = + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * + init_attr->cap.max_recv_sge]; + + return 0; + +err_wqe_list: + kfree(hr_qp->rq_inl_buf.wqe_list); + +err: + return ret; +} + +static void hns_roce_free_recv_inline_buffer(struct hns_roce_qp *hr_qp) +{ + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); + kfree(hr_qp->rq_inl_buf.wqe_list); +} + static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -676,33 +725,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && hns_roce_qp_has_rq(init_attr)) { - /* allocate recv inline buf */ - hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, - sizeof(struct hns_roce_rinl_wqe), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list) { - ret = -ENOMEM; + ret = 
hns_roce_alloc_recv_inline_buffer(hr_qp, init_attr); + if (ret) { + dev_err(dev, "allocate receive inline buffer failed\n"); goto err_out; } - - hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; - - /* Firstly, allocate a list of sge space buffer */ - hr_qp->rq_inl_buf.wqe_list[0].sg_list = - kcalloc(hr_qp->rq_inl_buf.wqe_cnt, - init_attr->cap.max_recv_sge * - sizeof(struct hns_roce_rinl_sge), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { - ret = -ENOMEM; - goto err_wqe_list; - } - - for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) - /* Secondly, reallocate the buffer */ - hr_qp->rq_inl_buf.wqe_list[i].sg_list = - &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * - init_attr->cap.max_recv_sge]; } page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; @@ -710,14 +737,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); ret = -EFAULT; - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, &ucmd); if (ret) { dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, @@ -725,7 +752,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (IS_ERR(hr_qp->umem)) { dev_err(dev, "ib_umem_get error for create qp\n"); ret = PTR_ERR(hr_qp->umem); - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions, ARRAY_SIZE(hr_qp->regions), @@ -786,13 +813,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { dev_err(dev, 
"init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } /* Set SQ size */ @@ -800,7 +827,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } /* QP doorbell register address */ @@ -814,7 +841,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); if (ret) { dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_rq_sge_list; + goto err_alloc_recv_inline_buffer; } *hr_qp->rdb.db_record = 0; hr_qp->rdb_en = 1; @@ -980,15 +1007,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); -err_rq_sge_list: - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - -err_wqe_list: +err_alloc_recv_inline_buffer: if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && hns_roce_qp_has_rq(init_attr)) - kfree(hr_qp->rq_inl_buf.wqe_list); + hns_roce_free_recv_inline_buffer(hr_qp); err_out: return ret;
This patch packages the code that allocates the receive RQ inline buffer in the hns_roce_create_qp_common function into a separate helper in order to reduce the function's complexity. Signed-off-by: Lijun Ou <oulijun@huawei.com> --- drivers/infiniband/hw/hns/hns_roce_qp.c | 100 +++++++++++++++++++------------- 1 file changed, 61 insertions(+), 39 deletions(-)