Message ID | 20230109195402.1339737-3-yanjun.zhu@intel.com (mailing list archive)
---|---
State | Superseded
Series | RDMA/irdma: Refactor irdma_reg_user_mr function
> Subject: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new
> functions
>
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>
> In the function irdma_reg_user_mr, the mr allocation and free will be used by other
> functions. As such, the source codes related with mr allocation and free are split
> into the new functions.
>
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 78 ++++++++++++++++++-----------
>  1 file changed, 50 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 40109da6489a..5cff8656d79e 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
>  	return err;
>  }
>
> +static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
> +					 struct ib_pd *pd, u64 virt,
> +					 __u16 reg_type,

enum irdma_memreg_type

> +					 struct irdma_device *iwdev)
> +{
> +	struct irdma_mr *iwmr;
> +	struct irdma_pbl *iwpbl;
> +
> +	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
> +	if (!iwmr)
> +		return ERR_PTR(-ENOMEM);
> +
> +	iwpbl = &iwmr->iwpbl;
> +	iwpbl->iwmr = iwmr;
> +	iwmr->region = region;
> +	iwmr->ibmr.pd = pd;
> +	iwmr->ibmr.device = pd->device;
> +	iwmr->ibmr.iova = virt;
> +	iwmr->page_size = PAGE_SIZE;

Delete this and see comment below,

> +	iwmr->type = reg_type;
> +
> +	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
> +		iwmr->page_size = ib_umem_find_best_pgsz(region,
> +							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,

I think Jason made the comment to always validate the page size with this function before use in rdma_umem_for_each_dma_block.

we can move it out of this if block with something like,

	pgsz_bitmask = reg_type == IRDMA_MEMREG_TYPE_MEM ?
		       iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;

	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmask, virt);

> +							 virt);
> +		if (unlikely(!iwmr->page_size)) {
> +			kfree(iwmr);
> +			return ERR_PTR(-EOPNOTSUPP);
> +		}
> +	}
> +
> +	iwmr->len = region->length;
> +	iwpbl->user_base = virt;
> +	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
> +
> +	return iwmr;
> +}
> +
> +/*
> + * This function frees the resources from irdma_alloc_iwmr
> + */

This doesn't follow kdoc format? And not very useful. I would delete it.

> +static void irdma_free_iwmr(struct irdma_mr *iwmr)
> +{
> +	kfree(iwmr);
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  		return ERR_PTR(-EFAULT);
>  	}
>
> -	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
> -	if (!iwmr) {
> +	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
> +	if (IS_ERR(iwmr)) {
>  		ib_umem_release(region);
> -		return ERR_PTR(-ENOMEM);
> +		return (struct ib_mr *)iwmr;
>  	}
>
>  	iwpbl = &iwmr->iwpbl;
> -	iwpbl->iwmr = iwmr;
> -	iwmr->region = region;
> -	iwmr->ibmr.pd = pd;
> -	iwmr->ibmr.device = pd->device;
> -	iwmr->ibmr.iova = virt;
> -	iwmr->page_size = PAGE_SIZE;
> -
> -	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
> -		iwmr->page_size = ib_umem_find_best_pgsz(region,
> -							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
> -							 virt);
> -		if (unlikely(!iwmr->page_size)) {
> -			kfree(iwmr);
> -			ib_umem_release(region);
> -			return ERR_PTR(-EOPNOTSUPP);
> -		}
> -	}
> -	iwmr->len = region->length;
> -	iwpbl->user_base = virt;
> -	iwmr->type = req.reg_type;
> -	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> @@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  		goto error;
>  	}
>
> -	iwmr->type = req.reg_type;
> -
>  	return &iwmr->ibmr;
> -
>  error:
>  	ib_umem_release(region);
> -	kfree(iwmr);
> +	irdma_free_iwmr(iwmr);
>
>  	return ERR_PTR(err);
>  }
> --
> 2.27.0
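Putting the review comments above together, the reworked helper might look roughly like the sketch below. This is only an illustration of the suggested direction (an enum irdma_memreg_type parameter, the PAGE_SIZE assignment dropped, and the page size always validated through ib_umem_find_best_pgsz() using the pgsz_bitmask ternary from the review), not the v2 patch that was actually posted:

static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
					 struct ib_pd *pd, u64 virt,
					 enum irdma_memreg_type reg_type,
					 struct irdma_device *iwdev)
{
	unsigned long pgsz_bitmask;
	struct irdma_pbl *iwpbl;
	struct irdma_mr *iwmr;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwmr->ibmr.iova = virt;
	iwmr->type = reg_type;

	/* Always validate the page size before it feeds
	 * ib_umem_num_dma_blocks() / rdma_umem_for_each_dma_block();
	 * non-MEM registrations are restricted to PAGE_SIZE.
	 */
	pgsz_bitmask = reg_type == IRDMA_MEMREG_TYPE_MEM ?
		       iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;

	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmask, virt);
	if (unlikely(!iwmr->page_size)) {
		kfree(iwmr);
		return ERR_PTR(-EOPNOTSUPP);
	}

	iwmr->len = region->length;
	iwpbl->user_base = virt;
	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);

	return iwmr;
}

With the ternary, the MEM case still uses the device's page_size_cap while the other registration types are limited to a PAGE_SIZE bitmap, so every path goes through the same validation before the DMA block count is computed.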
On 2023/1/10 12:11, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new
>> functions
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> In the function irdma_reg_user_mr, the mr allocation and free will be used by other
>> functions. As such, the source codes related with mr allocation and free are split
>> into the new functions.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>  drivers/infiniband/hw/irdma/verbs.c | 78 ++++++++++++++++++-----------
>>  1 file changed, 50 insertions(+), 28 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 40109da6489a..5cff8656d79e 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
>>  	return err;
>>  }
>>
>> +static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
>> +					 struct ib_pd *pd, u64 virt,
>> +					 __u16 reg_type,
> enum irdma_memreg_type

Good catch

>
>> +					 struct irdma_device *iwdev)
>> +{
>> +	struct irdma_mr *iwmr;
>> +	struct irdma_pbl *iwpbl;
>> +
>> +	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
>> +	if (!iwmr)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	iwpbl = &iwmr->iwpbl;
>> +	iwpbl->iwmr = iwmr;
>> +	iwmr->region = region;
>> +	iwmr->ibmr.pd = pd;
>> +	iwmr->ibmr.device = pd->device;
>> +	iwmr->ibmr.iova = virt;
>> +	iwmr->page_size = PAGE_SIZE;
> Delete this and see comment below,

Will delete.

>
>> +	iwmr->type = reg_type;
>> +
>> +	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
>> +		iwmr->page_size = ib_umem_find_best_pgsz(region,
>> +							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
> I think Jason made the comment to always validate the page size with this function before use in rdma_umem_for_each_dma_block.
>
> we can move it out of this if block with something like,
>
> 	pgsz_bitmask = reg_type == IRDMA_MEMREG_TYPE_MEM ?
> 		       iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
>
> 	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmask, virt);

Wonderful. I followed your suggestions in the latest commits.

>
>> +							 virt);
>> +		if (unlikely(!iwmr->page_size)) {
>> +			kfree(iwmr);
>> +			return ERR_PTR(-EOPNOTSUPP);
>> +		}
>> +	}
>> +
>> +	iwmr->len = region->length;
>> +	iwpbl->user_base = virt;
>> +	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>> +
>> +	return iwmr;
>> +}
>> +
>> +/*
>> + * This function frees the resources from irdma_alloc_iwmr
>> + */
> This doesn't follow kdoc format? And not very useful. I would delete it.

Will delete. Appreciate your help. I will send out the latest commits very soon.

Zhu Yanjun

>
>> +static void irdma_free_iwmr(struct irdma_mr *iwmr)
>> +{
>> +	kfree(iwmr);
>> +}
>> +
>>  /**
>>   * irdma_reg_user_mr - Register a user memory region
>>   * @pd: ptr of pd
>> @@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>>  		return ERR_PTR(-EFAULT);
>>  	}
>>
>> -	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
>> -	if (!iwmr) {
>> +	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
>> +	if (IS_ERR(iwmr)) {
>>  		ib_umem_release(region);
>> -		return ERR_PTR(-ENOMEM);
>> +		return (struct ib_mr *)iwmr;
>>  	}
>>
>>  	iwpbl = &iwmr->iwpbl;
>> -	iwpbl->iwmr = iwmr;
>> -	iwmr->region = region;
>> -	iwmr->ibmr.pd = pd;
>> -	iwmr->ibmr.device = pd->device;
>> -	iwmr->ibmr.iova = virt;
>> -	iwmr->page_size = PAGE_SIZE;
>> -
>> -	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
>> -		iwmr->page_size = ib_umem_find_best_pgsz(region,
>> -							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
>> -							 virt);
>> -		if (unlikely(!iwmr->page_size)) {
>> -			kfree(iwmr);
>> -			ib_umem_release(region);
>> -			return ERR_PTR(-EOPNOTSUPP);
>> -		}
>> -	}
>> -	iwmr->len = region->length;
>> -	iwpbl->user_base = virt;
>> -	iwmr->type = req.reg_type;
>> -	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>>
>>  	switch (req.reg_type) {
>>  	case IRDMA_MEMREG_TYPE_QP:
>> @@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>>  		goto error;
>>  	}
>>
>> -	iwmr->type = req.reg_type;
>> -
>>  	return &iwmr->ibmr;
>> -
>>  error:
>>  	ib_umem_release(region);
>> -	kfree(iwmr);
>> +	irdma_free_iwmr(iwmr);
>>
>>  	return ERR_PTR(err);
>>  }
>> --
>> 2.27.0
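For reference, the new helper and its caller rely on the kernel's encoded error pointer convention from <linux/err.h>: irdma_alloc_iwmr() returns ERR_PTR(-errno) on failure and the caller tests the result with IS_ERR(). A minimal sketch of that call-site pattern follows; ERR_CAST() is shown only as the idiomatic equivalent of the open-coded (struct ib_mr *) cast in the patch, it is not something requested in this review:

	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
	if (IS_ERR(iwmr)) {
		/* The umem is released here because the helper only owns
		 * the iwmr allocation, not the region.
		 */
		ib_umem_release(region);
		return ERR_CAST(iwmr);	/* forward the encoded errno */
	}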
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 40109da6489a..5cff8656d79e 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
 	return err;
 }
 
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+					 struct ib_pd *pd, u64 virt,
+					 __u16 reg_type,
+					 struct irdma_device *iwdev)
+{
+	struct irdma_mr *iwmr;
+	struct irdma_pbl *iwpbl;
+
+	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+	if (!iwmr)
+		return ERR_PTR(-ENOMEM);
+
+	iwpbl = &iwmr->iwpbl;
+	iwpbl->iwmr = iwmr;
+	iwmr->region = region;
+	iwmr->ibmr.pd = pd;
+	iwmr->ibmr.device = pd->device;
+	iwmr->ibmr.iova = virt;
+	iwmr->page_size = PAGE_SIZE;
+	iwmr->type = reg_type;
+
+	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
+		iwmr->page_size = ib_umem_find_best_pgsz(region,
+							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
+							 virt);
+		if (unlikely(!iwmr->page_size)) {
+			kfree(iwmr);
+			return ERR_PTR(-EOPNOTSUPP);
+		}
+	}
+
+	iwmr->len = region->length;
+	iwpbl->user_base = virt;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+	return iwmr;
+}
+
+/*
+ * This function frees the resources from irdma_alloc_iwmr
+ */
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+	kfree(iwmr);
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return ERR_PTR(-EFAULT);
 	}
 
-	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
-	if (!iwmr) {
+	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
+	if (IS_ERR(iwmr)) {
 		ib_umem_release(region);
-		return ERR_PTR(-ENOMEM);
+		return (struct ib_mr *)iwmr;
 	}
 
 	iwpbl = &iwmr->iwpbl;
-	iwpbl->iwmr = iwmr;
-	iwmr->region = region;
-	iwmr->ibmr.pd = pd;
-	iwmr->ibmr.device = pd->device;
-	iwmr->ibmr.iova = virt;
-	iwmr->page_size = PAGE_SIZE;
-
-	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
-		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
-							 virt);
-		if (unlikely(!iwmr->page_size)) {
-			kfree(iwmr);
-			ib_umem_release(region);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-	}
-	iwmr->len = region->length;
-	iwpbl->user_base = virt;
-	iwmr->type = req.reg_type;
-	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
@@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		goto error;
 	}
 
-	iwmr->type = req.reg_type;
-
 	return &iwmr->ibmr;
-
 error:
 	ib_umem_release(region);
-	kfree(iwmr);
+	irdma_free_iwmr(iwmr);
 
 	return ERR_PTR(err);
 }