diff mbox series

sunrpc: clean-up cache downcall

Message ID 20201127161451.17922-1-rbergant@redhat.com (mailing list archive)
State New
Headers show
Series sunrpc: clean-up cache downcall | expand

Commit Message

Roberto Bergantinos Corpas Nov. 27, 2020, 4:14 p.m. UTC
We can simplifly code around cache_downcall unifying memory
allocations using kvmalloc, this have the benefit of getting rid of
cache_slow_downcall (and queue_io_mutex), and also matches userland
allocation size and limits

Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
---
 net/sunrpc/cache.c | 41 +++++++++++------------------------------
 1 file changed, 11 insertions(+), 30 deletions(-)

Comments

Chuck Lever Nov. 27, 2020, 4:50 p.m. UTC | #1
Hi Roberto-

I spotted some mechanical problems.


> On Nov 27, 2020, at 11:14 AM, Roberto Bergantinos Corpas <rbergant@redhat.com> wrote:
> 
> We can simplifly code around cache_downcall unifying memory

^simplifly^simplify

> allocations using kvmalloc, this have the benefit of getting rid of

^, this have^. This has

> cache_slow_downcall (and queue_io_mutex), and also matches userland
> allocation size and limits
> 
> Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>

Assuming Bruce is copacetic with this patch, the change looks
appropriate for the v5.11 merge window. However, this patch
doesn't appear to apply to v5.10-rc5. Might be because
27a1e8a0f79e ("sunrpc: raise kernel RPC channel buffer size")
was already merged?


> ---
> net/sunrpc/cache.c | 41 +++++++++++------------------------------
> 1 file changed, 11 insertions(+), 30 deletions(-)
> 
> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> index baef5ee43dbb..1347ecae9c84 100644
> --- a/net/sunrpc/cache.c
> +++ b/net/sunrpc/cache.c
> @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
>  */
> 
> static DEFINE_SPINLOCK(queue_lock);
> -static DEFINE_MUTEX(queue_io_mutex);
> 
> struct cache_queue {
> 	struct list_head	list;
> @@ -905,44 +904,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
> 	return ret;
> }
> 
> -static ssize_t cache_slow_downcall(const char __user *buf,
> -				   size_t count, struct cache_detail *cd)
> -{
> -	static char write_buf[8192]; /* protected by queue_io_mutex */
> -	ssize_t ret = -EINVAL;
> -
> -	if (count >= sizeof(write_buf))
> -		goto out;
> -	mutex_lock(&queue_io_mutex);
> -	ret = cache_do_downcall(write_buf, buf, count, cd);
> -	mutex_unlock(&queue_io_mutex);
> -out:
> -	return ret;
> -}
> -
> static ssize_t cache_downcall(struct address_space *mapping,
> 			      const char __user *buf,
> 			      size_t count, struct cache_detail *cd)
> {
> -	struct page *page;
> -	char *kaddr;
> +	char *write_buf;
> 	ssize_t ret = -ENOMEM;
> 
> -	if (count >= PAGE_SIZE)
> -		goto out_slow;
> +	if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
> +		ret = -EINVAL;
> +		goto out;
> +	}
> 
> -	page = find_or_create_page(mapping, 0, GFP_KERNEL);
> -	if (!page)
> -		goto out_slow;
> +	write_buf = kvmalloc(count + 1, GFP_KERNEL);
> +	if (!write_buf)
> +		goto out;
> 
> -	kaddr = kmap(page);
> -	ret = cache_do_downcall(kaddr, buf, count, cd);
> -	kunmap(page);
> -	unlock_page(page);
> -	put_page(page);
> +	ret = cache_do_downcall(write_buf, buf, count, cd);
> +	kvfree(write_buf);
> +out:
> 	return ret;
> -out_slow:
> -	return cache_slow_downcall(buf, count, cd);
> }
> 
> static ssize_t cache_write(struct file *filp, const char __user *buf,
> -- 
> 2.21.0
> 

--
Chuck Lever
Roberto Bergantinos Corpas Nov. 27, 2020, 5:14 p.m. UTC | #2
Hi Bruce!

 Thanks for the comments! I'll send a v2 with the mechanical errors fixed,
based on v5.10-rc5.

rgds
roberto

On Fri, Nov 27, 2020 at 5:52 PM Chuck Lever <chuck.lever@oracle.com> wrote:
>
> Hi Roberto-
>
> I spotted some mechanical problems.
>
>
> > On Nov 27, 2020, at 11:14 AM, Roberto Bergantinos Corpas <rbergant@redhat.com> wrote:
> >
> > We can simplifly code around cache_downcall unifying memory
>
> ^simplifly^simplify
>
> > allocations using kvmalloc, this have the benefit of getting rid of
>
> ^, this have^. This has
>
> > cache_slow_downcall (and queue_io_mutex), and also matches userland
> > allocation size and limits
> >
> > Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
>
> Assuming Bruce is copacetic with this patch, the change looks
> appropriate for the v5.11 merge window. However, this patch
> doesn't appear to apply to v5.10-rc5. Might be because
> 27a1e8a0f79e ("sunrpc: raise kernel RPC channel buffer size")
> was already merged?
>
>
> > ---
> > net/sunrpc/cache.c | 41 +++++++++++------------------------------
> > 1 file changed, 11 insertions(+), 30 deletions(-)
> >
> > diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> > index baef5ee43dbb..1347ecae9c84 100644
> > --- a/net/sunrpc/cache.c
> > +++ b/net/sunrpc/cache.c
> > @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
> >  */
> >
> > static DEFINE_SPINLOCK(queue_lock);
> > -static DEFINE_MUTEX(queue_io_mutex);
> >
> > struct cache_queue {
> >       struct list_head        list;
> > @@ -905,44 +904,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
> >       return ret;
> > }
> >
> > -static ssize_t cache_slow_downcall(const char __user *buf,
> > -                                size_t count, struct cache_detail *cd)
> > -{
> > -     static char write_buf[8192]; /* protected by queue_io_mutex */
> > -     ssize_t ret = -EINVAL;
> > -
> > -     if (count >= sizeof(write_buf))
> > -             goto out;
> > -     mutex_lock(&queue_io_mutex);
> > -     ret = cache_do_downcall(write_buf, buf, count, cd);
> > -     mutex_unlock(&queue_io_mutex);
> > -out:
> > -     return ret;
> > -}
> > -
> > static ssize_t cache_downcall(struct address_space *mapping,
> >                             const char __user *buf,
> >                             size_t count, struct cache_detail *cd)
> > {
> > -     struct page *page;
> > -     char *kaddr;
> > +     char *write_buf;
> >       ssize_t ret = -ENOMEM;
> >
> > -     if (count >= PAGE_SIZE)
> > -             goto out_slow;
> > +     if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
> > +             ret = -EINVAL;
> > +             goto out;
> > +     }
> >
> > -     page = find_or_create_page(mapping, 0, GFP_KERNEL);
> > -     if (!page)
> > -             goto out_slow;
> > +     write_buf = kvmalloc(count + 1, GFP_KERNEL);
> > +     if (!write_buf)
> > +             goto out;
> >
> > -     kaddr = kmap(page);
> > -     ret = cache_do_downcall(kaddr, buf, count, cd);
> > -     kunmap(page);
> > -     unlock_page(page);
> > -     put_page(page);
> > +     ret = cache_do_downcall(write_buf, buf, count, cd);
> > +     kvfree(write_buf);
> > +out:
> >       return ret;
> > -out_slow:
> > -     return cache_slow_downcall(buf, count, cd);
> > }
> >
> > static ssize_t cache_write(struct file *filp, const char __user *buf,
> > --
> > 2.21.0
> >
>
> --
> Chuck Lever
>
>
>
diff mbox series

Patch

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index baef5ee43dbb..1347ecae9c84 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -777,7 +777,6 @@  void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head	list;
@@ -905,44 +904,26 @@  static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 	return ret;
 }
 
-static ssize_t cache_slow_downcall(const char __user *buf,
-				   size_t count, struct cache_detail *cd)
-{
-	static char write_buf[8192]; /* protected by queue_io_mutex */
-	ssize_t ret = -EINVAL;
-
-	if (count >= sizeof(write_buf))
-		goto out;
-	mutex_lock(&queue_io_mutex);
-	ret = cache_do_downcall(write_buf, buf, count, cd);
-	mutex_unlock(&queue_io_mutex);
-out:
-	return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
 			      const char __user *buf,
 			      size_t count, struct cache_detail *cd)
 {
-	struct page *page;
-	char *kaddr;
+	char *write_buf;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_SIZE)
-		goto out_slow;
+	if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
+		ret = -EINVAL;
+		goto out;
+	}
 
-	page = find_or_create_page(mapping, 0, GFP_KERNEL);
-	if (!page)
-		goto out_slow;
+	write_buf = kvmalloc(count + 1, GFP_KERNEL);
+	if (!write_buf)
+		goto out;
 
-	kaddr = kmap(page);
-	ret = cache_do_downcall(kaddr, buf, count, cd);
-	kunmap(page);
-	unlock_page(page);
-	put_page(page);
+	ret = cache_do_downcall(write_buf, buf, count, cd);
+	kvfree(write_buf);
+out:
 	return ret;
-out_slow:
-	return cache_slow_downcall(buf, count, cd);
 }
 
 static ssize_t cache_write(struct file *filp, const char __user *buf,