diff mbox series

[for-next] RDMA/rxe: Fix memory leak in error path code

Message ID 20210704223506.12795-1-rpearsonhpe@gmail.com (mailing list archive)
State Superseded
Headers show
Series [for-next] RDMA/rxe: Fix memory leak in error path code | expand

Commit Message

Bob Pearson July 4, 2021, 10:35 p.m. UTC
In rxe_mr_init_user() in rxe_mr.c, at the third error the driver fails to
free the memory at mr->map. This patch adds code to do that.
This error only occurs if page_address() returns a zero (NULL) address,
which should never happen for 64-bit architectures.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
 1 file changed, 24 insertions(+), 17 deletions(-)

Comments

Xiao Yang July 5, 2021, 2:09 a.m. UTC | #1
On 2021/7/5 6:35, Bob Pearson wrote:
> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> free the memory at mr->map. This patch adds code to do that.
> This error only occurs if page_address() fails to return a non zero address
> which should never happen for 64 bit architectures.
Hi Bob,

Thanks for your quick fix.

It looks good to me.
Reviewed-by: Xiao Yang <yangx.jy@fujitsu.com>

Best Regards,
Xiao Yang
> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>  1 file changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 6aabcb4de235..f49baff9ca3d 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  		     int access, struct rxe_mr *mr)
>  {
> -	struct rxe_map		**map;
> -	struct rxe_phys_buf	*buf = NULL;
> -	struct ib_umem		*umem;
> -	struct sg_page_iter	sg_iter;
> -	int			num_buf;
> -	void			*vaddr;
> +	struct rxe_map **map;
> +	struct rxe_phys_buf *buf = NULL;
> +	struct ib_umem *umem;
> +	struct sg_page_iter sg_iter;
> +	int num_buf;
> +	void *vaddr;
>  	int err;
> +	int i;
>  
>  	umem = ib_umem_get(pd->ibpd.device, start, length, access);
>  	if (IS_ERR(umem)) {
> -		pr_warn("err %d from rxe_umem_get\n",
> -			(int)PTR_ERR(umem));
> +		pr_warn("%s: Unable to pin memory region err = %d\n",
> +			__func__, (int)PTR_ERR(umem));
>  		err = PTR_ERR(umem);
> -		goto err1;
> +		goto err_out;
>  	}
>  
>  	mr->umem = umem;
> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  	err = rxe_mr_alloc(mr, num_buf);
>  	if (err) {
> -		pr_warn("err %d from rxe_mr_alloc\n", err);
> -		ib_umem_release(umem);
> -		goto err1;
> +		pr_warn("%s: Unable to allocate memory for map\n",
> +				__func__);
> +		goto err_release_umem;
>  	}
>  
>  	mr->page_shift = PAGE_SHIFT;
>  	mr->page_mask = PAGE_SIZE - 1;
>  
> -	num_buf			= 0;
> +	num_buf = 0;
>  	map = mr->map;
>  	if (length > 0) {
>  		buf = map[0]->buf;
> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  			vaddr = page_address(sg_page_iter_page(&sg_iter));
>  			if (!vaddr) {
> -				pr_warn("null vaddr\n");
> -				ib_umem_release(umem);
> +				pr_warn("%s: Unable to get virtual address\n",
> +						__func__);
>  				err = -ENOMEM;
> -				goto err1;
> +				goto err_cleanup_map;
>  			}
>  
>  			buf->addr = (uintptr_t)vaddr;
> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  	return 0;
>  
> -err1:
> +err_cleanup_map:
> +	for (i = 0; i < mr->num_map; i++)
> +		kfree(mr->map[i]);
> +	kfree(mr->map);
> +err_release_umem:
> +	ib_umem_release(umem);
> +err_out:
>  	return err;
>  }
>
Zhu Yanjun July 5, 2021, 3:42 a.m. UTC | #2
On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> free the memory at mr->map. This patch adds code to do that.
> This error only occurs if page_address() fails to return a non zero address
> which should never happen for 64 bit architectures.

If this will never happen for 64 bit architectures, is it possible to
exclude 64 bit architecture with some MACROs or others?

Thanks,

Zhu Yanjun

>
> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>  1 file changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 6aabcb4de235..f49baff9ca3d 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>                      int access, struct rxe_mr *mr)
>  {
> -       struct rxe_map          **map;
> -       struct rxe_phys_buf     *buf = NULL;
> -       struct ib_umem          *umem;
> -       struct sg_page_iter     sg_iter;
> -       int                     num_buf;
> -       void                    *vaddr;
> +       struct rxe_map **map;
> +       struct rxe_phys_buf *buf = NULL;
> +       struct ib_umem *umem;
> +       struct sg_page_iter sg_iter;
> +       int num_buf;
> +       void *vaddr;
>         int err;
> +       int i;
>
>         umem = ib_umem_get(pd->ibpd.device, start, length, access);
>         if (IS_ERR(umem)) {
> -               pr_warn("err %d from rxe_umem_get\n",
> -                       (int)PTR_ERR(umem));
> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> +                       __func__, (int)PTR_ERR(umem));
>                 err = PTR_ERR(umem);
> -               goto err1;
> +               goto err_out;
>         }
>
>         mr->umem = umem;
> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>         err = rxe_mr_alloc(mr, num_buf);
>         if (err) {
> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> -               ib_umem_release(umem);
> -               goto err1;
> +               pr_warn("%s: Unable to allocate memory for map\n",
> +                               __func__);
> +               goto err_release_umem;
>         }
>
>         mr->page_shift = PAGE_SHIFT;
>         mr->page_mask = PAGE_SIZE - 1;
>
> -       num_buf                 = 0;
> +       num_buf = 0;
>         map = mr->map;
>         if (length > 0) {
>                 buf = map[0]->buf;
> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>                         vaddr = page_address(sg_page_iter_page(&sg_iter));
>                         if (!vaddr) {
> -                               pr_warn("null vaddr\n");
> -                               ib_umem_release(umem);
> +                               pr_warn("%s: Unable to get virtual address\n",
> +                                               __func__);
>                                 err = -ENOMEM;
> -                               goto err1;
> +                               goto err_cleanup_map;
>                         }
>
>                         buf->addr = (uintptr_t)vaddr;
> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>         return 0;
>
> -err1:
> +err_cleanup_map:
> +       for (i = 0; i < mr->num_map; i++)
> +               kfree(mr->map[i]);
> +       kfree(mr->map);
> +err_release_umem:
> +       ib_umem_release(umem);
> +err_out:
>         return err;
>  }
>
> --
> 2.30.2
>
Haakon Bugge July 5, 2021, 8:16 a.m. UTC | #3
> On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> 
> On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>> 
>> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
>> free the memory at mr->map. This patch adds code to do that.
>> This error only occurs if page_address() fails to return a non zero address
>> which should never happen for 64 bit architectures.
> 
> If this will never happen for 64 bit architectures, is it possible to
> exclude 64 bit architecture with some MACROs or others?
> 
> Thanks,
> 
> Zhu Yanjun
> 
>> 
>> Fixes: 8700e3e7c485 ("Soft RoCE driver")
>> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
>> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
>> ---
>> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>> 1 file changed, 24 insertions(+), 17 deletions(-)
>> 
>> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
>> index 6aabcb4de235..f49baff9ca3d 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
>> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>                     int access, struct rxe_mr *mr)
>> {
>> -       struct rxe_map          **map;
>> -       struct rxe_phys_buf     *buf = NULL;
>> -       struct ib_umem          *umem;
>> -       struct sg_page_iter     sg_iter;
>> -       int                     num_buf;
>> -       void                    *vaddr;
>> +       struct rxe_map **map;
>> +       struct rxe_phys_buf *buf = NULL;
>> +       struct ib_umem *umem;
>> +       struct sg_page_iter sg_iter;
>> +       int num_buf;
>> +       void *vaddr;

This white-space stripping must be another issue, not related to the memleak?

>>        int err;
>> +       int i;
>> 
>>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
>>        if (IS_ERR(umem)) {
>> -               pr_warn("err %d from rxe_umem_get\n",
>> -                       (int)PTR_ERR(umem));
>> +               pr_warn("%s: Unable to pin memory region err = %d\n",
>> +                       __func__, (int)PTR_ERR(umem));
>>                err = PTR_ERR(umem);
>> -               goto err1;
>> +               goto err_out;
>>        }
>> 
>>        mr->umem = umem;
>> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>        err = rxe_mr_alloc(mr, num_buf);
>>        if (err) {
>> -               pr_warn("err %d from rxe_mr_alloc\n", err);
>> -               ib_umem_release(umem);
>> -               goto err1;
>> +               pr_warn("%s: Unable to allocate memory for map\n",
>> +                               __func__);
>> +               goto err_release_umem;
>>        }
>> 
>>        mr->page_shift = PAGE_SHIFT;
>>        mr->page_mask = PAGE_SIZE - 1;
>> 
>> -       num_buf                 = 0;
>> +       num_buf = 0;

White-space change.

Otherwise:

Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>


Thxs, Håkon



>>        map = mr->map;
>>        if (length > 0) {
>>                buf = map[0]->buf;
>> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
>>                        if (!vaddr) {
>> -                               pr_warn("null vaddr\n");
>> -                               ib_umem_release(umem);
>> +                               pr_warn("%s: Unable to get virtual address\n",
>> +                                               __func__);
>>                                err = -ENOMEM;
>> -                               goto err1;
>> +                               goto err_cleanup_map;
>>                        }
>> 
>>                        buf->addr = (uintptr_t)vaddr;
>> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>        return 0;
>> 
>> -err1:
>> +err_cleanup_map:
>> +       for (i = 0; i < mr->num_map; i++)
>> +               kfree(mr->map[i]);
>> +       kfree(mr->map);
>> +err_release_umem:
>> +       ib_umem_release(umem);
>> +err_out:
>>        return err;
>> }
>> 
>> --
>> 2.30.2
>>
Zhu Yanjun July 5, 2021, 8:35 a.m. UTC | #4
On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
>
>
>
> > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> >
> > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> >>
> >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> >> free the memory at mr->map. This patch adds code to do that.
> >> This error only occurs if page_address() fails to return a non zero address
> >> which should never happen for 64 bit architectures.
> >
> > If this will never happen for 64 bit architectures, is it possible to
> > exclude 64 bit architecture with some MACROs or others?
> >
> > Thanks,
> >
> > Zhu Yanjun
> >
> >>
> >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> >> ---
> >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> >> 1 file changed, 24 insertions(+), 17 deletions(-)
> >>
> >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> >> index 6aabcb4de235..f49baff9ca3d 100644
> >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>                     int access, struct rxe_mr *mr)
> >> {
> >> -       struct rxe_map          **map;
> >> -       struct rxe_phys_buf     *buf = NULL;
> >> -       struct ib_umem          *umem;
> >> -       struct sg_page_iter     sg_iter;
> >> -       int                     num_buf;
> >> -       void                    *vaddr;
> >> +       struct rxe_map **map;
> >> +       struct rxe_phys_buf *buf = NULL;
> >> +       struct ib_umem *umem;
> >> +       struct sg_page_iter sg_iter;
> >> +       int num_buf;
> >> +       void *vaddr;
>
> This white-space stripping must be another issue, not related to the memleak?
>
> >>        int err;
> >> +       int i;
> >>
> >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> >>        if (IS_ERR(umem)) {
> >> -               pr_warn("err %d from rxe_umem_get\n",
> >> -                       (int)PTR_ERR(umem));
> >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> >> +                       __func__, (int)PTR_ERR(umem));
> >>                err = PTR_ERR(umem);
> >> -               goto err1;
> >> +               goto err_out;
> >>        }
> >>
> >>        mr->umem = umem;
> >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>        err = rxe_mr_alloc(mr, num_buf);
> >>        if (err) {
> >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> >> -               ib_umem_release(umem);
> >> -               goto err1;
> >> +               pr_warn("%s: Unable to allocate memory for map\n",
> >> +                               __func__);
> >> +               goto err_release_umem;
> >>        }
> >>
> >>        mr->page_shift = PAGE_SHIFT;
> >>        mr->page_mask = PAGE_SIZE - 1;
> >>
> >> -       num_buf                 = 0;
> >> +       num_buf = 0;
>
> White-space change.

Yeah. It seems that some white-space changes in this commit.

Zhu Yanjun

>
> Otherwise:
>
> Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
>
>
> Thxs, Håkon
>
>
>
> >>        map = mr->map;
> >>        if (length > 0) {
> >>                buf = map[0]->buf;
> >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> >>                        if (!vaddr) {
> >> -                               pr_warn("null vaddr\n");
> >> -                               ib_umem_release(umem);
> >> +                               pr_warn("%s: Unable to get virtual address\n",
> >> +                                               __func__);
> >>                                err = -ENOMEM;
> >> -                               goto err1;
> >> +                               goto err_cleanup_map;
> >>                        }
> >>
> >>                        buf->addr = (uintptr_t)vaddr;
> >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>        return 0;
> >>
> >> -err1:
> >> +err_cleanup_map:
> >> +       for (i = 0; i < mr->num_map; i++)
> >> +               kfree(mr->map[i]);
> >> +       kfree(mr->map);
> >> +err_release_umem:
> >> +       ib_umem_release(umem);
> >> +err_out:
> >>        return err;
> >> }
> >>
> >> --
> >> 2.30.2
> >>
>
Bob Pearson July 5, 2021, 3:40 p.m. UTC | #5
Jason has been asking for patches to pass clang-format-patch so I've
been cleaning up the code near functional changes since it doesn't
like extra spaces such as for vertical alignment.

If I could figure out how ib_umem_works there is a chance that it
would fail if it couldn't map all the user space virtual memory into
kernel virtual addresses. But so far I have failed. It's fairly
complex.

Bob

On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>
> On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
> >
> >
> >
> > > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> > >
> > > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> > >>
> > >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> > >> free the memory at mr->map. This patch adds code to do that.
> > >> This error only occurs if page_address() fails to return a non zero address
> > >> which should never happen for 64 bit architectures.
> > >
> > > If this will never happen for 64 bit architectures, is it possible to
> > > exclude 64 bit architecture with some MACROs or others?
> > >
> > > Thanks,
> > >
> > > Zhu Yanjun
> > >
> > >>
> > >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> > >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> > >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > >> ---
> > >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> > >> 1 file changed, 24 insertions(+), 17 deletions(-)
> > >>
> > >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> index 6aabcb4de235..f49baff9ca3d 100644
> > >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> > >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>                     int access, struct rxe_mr *mr)
> > >> {
> > >> -       struct rxe_map          **map;
> > >> -       struct rxe_phys_buf     *buf = NULL;
> > >> -       struct ib_umem          *umem;
> > >> -       struct sg_page_iter     sg_iter;
> > >> -       int                     num_buf;
> > >> -       void                    *vaddr;
> > >> +       struct rxe_map **map;
> > >> +       struct rxe_phys_buf *buf = NULL;
> > >> +       struct ib_umem *umem;
> > >> +       struct sg_page_iter sg_iter;
> > >> +       int num_buf;
> > >> +       void *vaddr;
> >
> > This white-space stripping must be another issue, not related to the memleak?
> >
> > >>        int err;
> > >> +       int i;
> > >>
> > >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> > >>        if (IS_ERR(umem)) {
> > >> -               pr_warn("err %d from rxe_umem_get\n",
> > >> -                       (int)PTR_ERR(umem));
> > >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> > >> +                       __func__, (int)PTR_ERR(umem));
> > >>                err = PTR_ERR(umem);
> > >> -               goto err1;
> > >> +               goto err_out;
> > >>        }
> > >>
> > >>        mr->umem = umem;
> > >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>        err = rxe_mr_alloc(mr, num_buf);
> > >>        if (err) {
> > >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> > >> -               ib_umem_release(umem);
> > >> -               goto err1;
> > >> +               pr_warn("%s: Unable to allocate memory for map\n",
> > >> +                               __func__);
> > >> +               goto err_release_umem;
> > >>        }
> > >>
> > >>        mr->page_shift = PAGE_SHIFT;
> > >>        mr->page_mask = PAGE_SIZE - 1;
> > >>
> > >> -       num_buf                 = 0;
> > >> +       num_buf = 0;
> >
> > White-space change.
>
> Yeah. It seems that some white-space changes in this commit.
>
> Zhu Yanjun
>
> >
> > Otherwise:
> >
> > Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
> >
> >
> > Thxs, Håkon
> >
> >
> >
> > >>        map = mr->map;
> > >>        if (length > 0) {
> > >>                buf = map[0]->buf;
> > >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> > >>                        if (!vaddr) {
> > >> -                               pr_warn("null vaddr\n");
> > >> -                               ib_umem_release(umem);
> > >> +                               pr_warn("%s: Unable to get virtual address\n",
> > >> +                                               __func__);
> > >>                                err = -ENOMEM;
> > >> -                               goto err1;
> > >> +                               goto err_cleanup_map;
> > >>                        }
> > >>
> > >>                        buf->addr = (uintptr_t)vaddr;
> > >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>        return 0;
> > >>
> > >> -err1:
> > >> +err_cleanup_map:
> > >> +       for (i = 0; i < mr->num_map; i++)
> > >> +               kfree(mr->map[i]);
> > >> +       kfree(mr->map);
> > >> +err_release_umem:
> > >> +       ib_umem_release(umem);
> > >> +err_out:
> > >>        return err;
> > >> }
> > >>
> > >> --
> > >> 2.30.2
> > >>
> >
Bob Pearson July 5, 2021, 3:41 p.m. UTC | #6
Sorry that was ib_umem_get().

On Mon, Jul 5, 2021 at 10:40 AM Robert Pearson <rpearsonhpe@gmail.com> wrote:
>
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.
>
> If I could figure out how ib_umem_works there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.
>
> Bob
>
> On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> >
> > On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
> > >
> > >
> > >
> > > > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> > > >
> > > > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> > > >>
> > > >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> > > >> free the memory at mr->map. This patch adds code to do that.
> > > >> This error only occurs if page_address() fails to return a non zero address
> > > >> which should never happen for 64 bit architectures.
> > > >
> > > > If this will never happen for 64 bit architectures, is it possible to
> > > > exclude 64 bit architecture with some MACROs or others?
> > > >
> > > > Thanks,
> > > >
> > > > Zhu Yanjun
> > > >
> > > >>
> > > >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> > > >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> > > >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > > >> ---
> > > >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> > > >> 1 file changed, 24 insertions(+), 17 deletions(-)
> > > >>
> > > >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> index 6aabcb4de235..f49baff9ca3d 100644
> > > >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> > > >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>                     int access, struct rxe_mr *mr)
> > > >> {
> > > >> -       struct rxe_map          **map;
> > > >> -       struct rxe_phys_buf     *buf = NULL;
> > > >> -       struct ib_umem          *umem;
> > > >> -       struct sg_page_iter     sg_iter;
> > > >> -       int                     num_buf;
> > > >> -       void                    *vaddr;
> > > >> +       struct rxe_map **map;
> > > >> +       struct rxe_phys_buf *buf = NULL;
> > > >> +       struct ib_umem *umem;
> > > >> +       struct sg_page_iter sg_iter;
> > > >> +       int num_buf;
> > > >> +       void *vaddr;
> > >
> > > This white-space stripping must be another issue, not related to the memleak?
> > >
> > > >>        int err;
> > > >> +       int i;
> > > >>
> > > >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> > > >>        if (IS_ERR(umem)) {
> > > >> -               pr_warn("err %d from rxe_umem_get\n",
> > > >> -                       (int)PTR_ERR(umem));
> > > >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> > > >> +                       __func__, (int)PTR_ERR(umem));
> > > >>                err = PTR_ERR(umem);
> > > >> -               goto err1;
> > > >> +               goto err_out;
> > > >>        }
> > > >>
> > > >>        mr->umem = umem;
> > > >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>        err = rxe_mr_alloc(mr, num_buf);
> > > >>        if (err) {
> > > >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> > > >> -               ib_umem_release(umem);
> > > >> -               goto err1;
> > > >> +               pr_warn("%s: Unable to allocate memory for map\n",
> > > >> +                               __func__);
> > > >> +               goto err_release_umem;
> > > >>        }
> > > >>
> > > >>        mr->page_shift = PAGE_SHIFT;
> > > >>        mr->page_mask = PAGE_SIZE - 1;
> > > >>
> > > >> -       num_buf                 = 0;
> > > >> +       num_buf = 0;
> > >
> > > White-space change.
> >
> > Yeah. It seems that some white-space changes in this commit.
> >
> > Zhu Yanjun
> >
> > >
> > > Otherwise:
> > >
> > > Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
> > >
> > >
> > > Thxs, Håkon
> > >
> > >
> > >
> > > >>        map = mr->map;
> > > >>        if (length > 0) {
> > > >>                buf = map[0]->buf;
> > > >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> > > >>                        if (!vaddr) {
> > > >> -                               pr_warn("null vaddr\n");
> > > >> -                               ib_umem_release(umem);
> > > >> +                               pr_warn("%s: Unable to get virtual address\n",
> > > >> +                                               __func__);
> > > >>                                err = -ENOMEM;
> > > >> -                               goto err1;
> > > >> +                               goto err_cleanup_map;
> > > >>                        }
> > > >>
> > > >>                        buf->addr = (uintptr_t)vaddr;
> > > >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>        return 0;
> > > >>
> > > >> -err1:
> > > >> +err_cleanup_map:
> > > >> +       for (i = 0; i < mr->num_map; i++)
> > > >> +               kfree(mr->map[i]);
> > > >> +       kfree(mr->map);
> > > >> +err_release_umem:
> > > >> +       ib_umem_release(umem);
> > > >> +err_out:
> > > >>        return err;
> > > >> }
> > > >>
> > > >> --
> > > >> 2.30.2
> > > >>
> > >
Jason Gunthorpe July 5, 2021, 3:54 p.m. UTC | #7
On Mon, Jul 05, 2021 at 10:40:14AM -0500, Robert Pearson wrote:
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.

don't mix things though, new code should be closer to the standard
style, but don't mix significant style cleanups with bug fixes

> If I could figure out how ib_umem_works there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.

It can fail for lots of reasons?

Jason
Haakon Bugge July 5, 2021, 4:16 p.m. UTC | #8
> On 5 Jul 2021, at 17:40, Robert Pearson <rpearsonhpe@gmail.com> wrote:
> 
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.

One of my former colleague "trained" me on this, and almost wrote "not related to commit" on my forehead :-)

My preference is that you make one commit with style changes, another with functional changes. If the latter is reverted, Jason would still be happy about the style, right? And, it makes the process of reviewing simpler (at least for me).

> If I could figure out how ib_umem_works there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.

;-)


Thxs, Håkon

> 
> Bob
> 
> On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>> 
>> On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
>>> 
>>> 
>>> 
>>>> On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>>>> 
>>>> On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>>>>> 
>>>>> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
>>>>> free the memory at mr->map. This patch adds code to do that.
>>>>> This error only occurs if page_address() fails to return a non zero address
>>>>> which should never happen for 64 bit architectures.
>>>> 
>>>> If this will never happen for 64 bit architectures, is it possible to
>>>> exclude 64 bit architecture with some MACROs or others?
>>>> 
>>>> Thanks,
>>>> 
>>>> Zhu Yanjun
>>>> 
>>>>> 
>>>>> Fixes: 8700e3e7c485 ("Soft RoCE driver")
>>>>> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
>>>>> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
>>>>> ---
>>>>> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>>>>> 1 file changed, 24 insertions(+), 17 deletions(-)
>>>>> 
>>>>> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> index 6aabcb4de235..f49baff9ca3d 100644
>>>>> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>>>>> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>>                    int access, struct rxe_mr *mr)
>>>>> {
>>>>> -       struct rxe_map          **map;
>>>>> -       struct rxe_phys_buf     *buf = NULL;
>>>>> -       struct ib_umem          *umem;
>>>>> -       struct sg_page_iter     sg_iter;
>>>>> -       int                     num_buf;
>>>>> -       void                    *vaddr;
>>>>> +       struct rxe_map **map;
>>>>> +       struct rxe_phys_buf *buf = NULL;
>>>>> +       struct ib_umem *umem;
>>>>> +       struct sg_page_iter sg_iter;
>>>>> +       int num_buf;
>>>>> +       void *vaddr;
>>> 
>>> This white-space stripping must be another issue, not related to the memleak?
>>> 
>>>>>       int err;
>>>>> +       int i;
>>>>> 
>>>>>       umem = ib_umem_get(pd->ibpd.device, start, length, access);
>>>>>       if (IS_ERR(umem)) {
>>>>> -               pr_warn("err %d from rxe_umem_get\n",
>>>>> -                       (int)PTR_ERR(umem));
>>>>> +               pr_warn("%s: Unable to pin memory region err = %d\n",
>>>>> +                       __func__, (int)PTR_ERR(umem));
>>>>>               err = PTR_ERR(umem);
>>>>> -               goto err1;
>>>>> +               goto err_out;
>>>>>       }
>>>>> 
>>>>>       mr->umem = umem;
>>>>> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>       err = rxe_mr_alloc(mr, num_buf);
>>>>>       if (err) {
>>>>> -               pr_warn("err %d from rxe_mr_alloc\n", err);
>>>>> -               ib_umem_release(umem);
>>>>> -               goto err1;
>>>>> +               pr_warn("%s: Unable to allocate memory for map\n",
>>>>> +                               __func__);
>>>>> +               goto err_release_umem;
>>>>>       }
>>>>> 
>>>>>       mr->page_shift = PAGE_SHIFT;
>>>>>       mr->page_mask = PAGE_SIZE - 1;
>>>>> 
>>>>> -       num_buf                 = 0;
>>>>> +       num_buf = 0;
>>> 
>>> White-space change.
>> 
>> Yeah. It seems that some white-space changes in this commit.
>> 
>> Zhu Yanjun
>> 
>>> 
>>> Otherwise:
>>> 
>>> Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
>>> 
>>> 
>>> Thxs, Håkon
>>> 
>>> 
>>> 
>>>>>       map = mr->map;
>>>>>       if (length > 0) {
>>>>>               buf = map[0]->buf;
>>>>> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>                       vaddr = page_address(sg_page_iter_page(&sg_iter));
>>>>>                       if (!vaddr) {
>>>>> -                               pr_warn("null vaddr\n");
>>>>> -                               ib_umem_release(umem);
>>>>> +                               pr_warn("%s: Unable to get virtual address\n",
>>>>> +                                               __func__);
>>>>>                               err = -ENOMEM;
>>>>> -                               goto err1;
>>>>> +                               goto err_cleanup_map;
>>>>>                       }
>>>>> 
>>>>>                       buf->addr = (uintptr_t)vaddr;
>>>>> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>       return 0;
>>>>> 
>>>>> -err1:
>>>>> +err_cleanup_map:
>>>>> +       for (i = 0; i < mr->num_map; i++)
>>>>> +               kfree(mr->map[i]);
>>>>> +       kfree(mr->map);
>>>>> +err_release_umem:
>>>>> +       ib_umem_release(umem);
>>>>> +err_out:
>>>>>       return err;
>>>>> }
>>>>> 
>>>>> --
>>>>> 2.30.2
>>>>> 
>>>
diff mbox series

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..f49baff9ca3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -106,20 +106,21 @@  void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
-	struct ib_umem		*umem;
-	struct sg_page_iter	sg_iter;
-	int			num_buf;
-	void			*vaddr;
+	struct rxe_map **map;
+	struct rxe_phys_buf *buf = NULL;
+	struct ib_umem *umem;
+	struct sg_page_iter sg_iter;
+	int num_buf;
+	void *vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,15 +130,15 @@  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+				__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
 	mr->page_mask = PAGE_SIZE - 1;
 
-	num_buf			= 0;
+	num_buf = 0;
 	map = mr->map;
 	if (length > 0) {
 		buf = map[0]->buf;
@@ -151,10 +152,10 @@  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+						__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }