diff mbox series

RDMA/siw: Fix user page pinning accounting

Message ID 20230201115540.360353-1-bmt@zurich.ibm.com (mailing list archive)
State Superseded
Headers show
Series RDMA/siw: Fix user page pinning accounting | expand

Commit Message

Bernard Metzler Feb. 1, 2023, 11:55 a.m. UTC
To avoid racing with other user memory reservations, immediately
account for the full number of pages to be pinned.

Fixes: 2251334dcac9 ("rdma/siw: application buffer management")
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Suggested-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
---
 drivers/infiniband/sw/siw/siw_mem.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

Comments

Alistair Popple Feb. 2, 2023, 7:44 a.m. UTC | #1
Thanks for cleaning this up, will make any potential follow on I do
much easier. Feel free to add:

Reviewed-by: Alistair Popple <apopple@nvidia.com>

Bernard Metzler <bmt@zurich.ibm.com> writes:

> To avoid racing with other user memory reservations, immediately
> account full amount of pages to be pinned.
>
> Fixes: 2251334dcac9 ("rdma/siw: application buffer management")
> Reported-by: Jason Gunthorpe <jgg@nvidia.com>
> Suggested-by: Alistair Popple <apopple@nvidia.com>
> Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
> ---
>  drivers/infiniband/sw/siw/siw_mem.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
> index b2b33dd3b4fa..f51ab2ccf151 100644
> --- a/drivers/infiniband/sw/siw/siw_mem.c
> +++ b/drivers/infiniband/sw/siw/siw_mem.c
> @@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
>  
>  	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>  
> -	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
> +	if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
>  		rv = -ENOMEM;
>  		goto out_sem_up;
>  	}
> @@ -411,30 +411,27 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
>  		goto out_sem_up;
>  	}
>  	for (i = 0; num_pages; i++) {
> -		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
> -
> -		umem->page_chunk[i].plist =
> +		int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
> +		struct page **plist =
>  			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
> -		if (!umem->page_chunk[i].plist) {
> +
> +		if (!plist) {
>  			rv = -ENOMEM;
>  			goto out_sem_up;
>  		}
> -		got = 0;
> +		umem->page_chunk[i].plist = plist;
>  		while (nents) {
> -			struct page **plist = &umem->page_chunk[i].plist[got];
> -
>  			rv = pin_user_pages(first_page_va, nents, foll_flags,
>  					    plist, NULL);
>  			if (rv < 0)
>  				goto out_sem_up;
>  
>  			umem->num_pages += rv;
> -			atomic64_add(rv, &mm_s->pinned_vm);
>  			first_page_va += rv * PAGE_SIZE;
> +			plist += rv;
>  			nents -= rv;
> -			got += rv;
> +			num_pages -= rv;
>  		}
> -		num_pages -= got;
>  	}
>  out_sem_up:
>  	mmap_read_unlock(mm_s);
> @@ -442,6 +439,10 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
>  	if (rv > 0)
>  		return umem;
>  
> +	/* Adjust accounting for pages not pinned */
> +	if (num_pages)
> +		atomic64_sub(num_pages, &mm_s->pinned_vm);
> +
>  	siw_umem_release(umem, false);
>  
>  	return ERR_PTR(rv);
Bernard Metzler Feb. 2, 2023, 9:04 a.m. UTC | #2
> -----Original Message-----
> From: Alistair Popple <apopple@nvidia.com>
> Sent: Thursday, 2 February 2023 08:44
> To: Bernard Metzler <BMT@zurich.ibm.com>
> Cc: linux-rdma@vger.kernel.org; jgg@nvidia.com; leonro@nvidia.com
> Subject: [EXTERNAL] Re: [PATCH] RDMA/siw: Fix user page pinning
> accounting
> 
> 
> Thanks for cleaning this up, will make any potential follow on I do
> much easier. Feel free to add:
> 
> Reviewed-by: Alistair Popple <apopple@nvidia.com>
> 

Thanks a lot! I'll resend for the last time (I hope), having you
as a reviewer.

Many thanks,
Bernard.
Leon Romanovsky Feb. 5, 2023, 1:05 p.m. UTC | #3
On Thu, Feb 02, 2023 at 09:04:06AM +0000, Bernard Metzler wrote:
> 
> 
> > -----Original Message-----
> > From: Alistair Popple <apopple@nvidia.com>
> > Sent: Thursday, 2 February 2023 08:44
> > To: Bernard Metzler <BMT@zurich.ibm.com>
> > Cc: linux-rdma@vger.kernel.org; jgg@nvidia.com; leonro@nvidia.com
> > Subject: [EXTERNAL] Re: [PATCH] RDMA/siw: Fix user page pinning
> > accounting
> > 
> > 
> > Thanks for cleaning this up, will make any potential follow on I do
> > much easier. Feel free to add:
> > 
> > Reviewed-by: Alistair Popple <apopple@nvidia.com>
> > 
> 
> Thanks a lot! I'll resend for the last time (I hope), having you
> as a reviewer.

You don't need to resend for this. Patchwork catches all *-by tags and
allows us to add them automatically.

However, it would be great if you noted the target tree (rdma-next or
rdma-rc) in the subject of the patch.

Thanks

> 
> Many thanks,
> Bernard.
>
Jason Gunthorpe Feb. 6, 2023, 1:21 p.m. UTC | #4
On Sun, Feb 05, 2023 at 03:05:52PM +0200, Leon Romanovsky wrote:
> On Thu, Feb 02, 2023 at 09:04:06AM +0000, Bernard Metzler wrote:
> > 
> > 
> > > -----Original Message-----
> > > From: Alistair Popple <apopple@nvidia.com>
> > > Sent: Thursday, 2 February 2023 08:44
> > > To: Bernard Metzler <BMT@zurich.ibm.com>
> > > Cc: linux-rdma@vger.kernel.org; jgg@nvidia.com; leonro@nvidia.com
> > > Subject: [EXTERNAL] Re: [PATCH] RDMA/siw: Fix user page pinning
> > > accounting
> > > 
> > > 
> > > Thanks for cleaning this up, will make any potential follow on I do
> > > much easier. Feel free to add:
> > > 
> > > Reviewed-by: Alistair Popple <apopple@nvidia.com>
> > > 
> > 
> > Thanks a lot! I'll resend for the last time (I hope), having you
> > as a reviewer.
> 
> You don't need to resend for this. Patchworks catches all *-by tags and
> allows us to add them automatically.
> 
> However, it will be great if you write target rdma-next/rdma-rc in
> subject of the patch.

And use version numbers..

Jason
diff mbox series

Patch

diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index b2b33dd3b4fa..f51ab2ccf151 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -398,7 +398,7 @@  struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 
 	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
+	if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
 		rv = -ENOMEM;
 		goto out_sem_up;
 	}
@@ -411,30 +411,27 @@  struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 		goto out_sem_up;
 	}
 	for (i = 0; num_pages; i++) {
-		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
-
-		umem->page_chunk[i].plist =
+		int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+		struct page **plist =
 			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
-		if (!umem->page_chunk[i].plist) {
+
+		if (!plist) {
 			rv = -ENOMEM;
 			goto out_sem_up;
 		}
-		got = 0;
+		umem->page_chunk[i].plist = plist;
 		while (nents) {
-			struct page **plist = &umem->page_chunk[i].plist[got];
-
 			rv = pin_user_pages(first_page_va, nents, foll_flags,
 					    plist, NULL);
 			if (rv < 0)
 				goto out_sem_up;
 
 			umem->num_pages += rv;
-			atomic64_add(rv, &mm_s->pinned_vm);
 			first_page_va += rv * PAGE_SIZE;
+			plist += rv;
 			nents -= rv;
-			got += rv;
+			num_pages -= rv;
 		}
-		num_pages -= got;
 	}
 out_sem_up:
 	mmap_read_unlock(mm_s);
@@ -442,6 +439,10 @@  struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 	if (rv > 0)
 		return umem;
 
+	/* Adjust accounting for pages not pinned */
+	if (num_pages)
+		atomic64_sub(num_pages, &mm_s->pinned_vm);
+
 	siw_umem_release(umem, false);
 
 	return ERR_PTR(rv);