
sunrpc: make RPC channel buffer dynamic for slow case

Message ID 20201026150530.29019-1-rbergant@redhat.com (mailing list archive)
State New, archived
Series: sunrpc: make RPC channel buffer dynamic for slow case

Commit Message

Roberto Bergantinos Corpas Oct. 26, 2020, 3:05 p.m. UTC
The RPC channel buffer for the slow case (user buffer bigger than
one page) can be allocated dynamically, which also lets us dispense
with queue_io_mutex.

Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
---
 net/sunrpc/cache.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

Comments

J. Bruce Fields Nov. 6, 2020, 9:51 p.m. UTC | #1
On Mon, Oct 26, 2020 at 04:05:30PM +0100, Roberto Bergantinos Corpas wrote:
> The RPC channel buffer for the slow case (user buffer bigger than
> one page) can be allocated dynamically, which also lets us dispense
> with queue_io_mutex.

Sorry for the slow response.

Let's just remove cache_slow_downcall and the find_or_create_page()
thing and just do a kvmalloc() from the start.  I don't understand why
we need to be more complicated.

--b.

> 
> Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
> ---
>  net/sunrpc/cache.c | 13 ++++++++-----
>  1 file changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> index baef5ee43dbb..325393f75e17 100644
> --- a/net/sunrpc/cache.c
> +++ b/net/sunrpc/cache.c
> @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
>   */
>  
>  static DEFINE_SPINLOCK(queue_lock);
> -static DEFINE_MUTEX(queue_io_mutex);
>  
>  struct cache_queue {
>  	struct list_head	list;
> @@ -908,14 +907,18 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
>  static ssize_t cache_slow_downcall(const char __user *buf,
>  				   size_t count, struct cache_detail *cd)
>  {
> -	static char write_buf[8192]; /* protected by queue_io_mutex */
> +	char *write_buf;
>  	ssize_t ret = -EINVAL;
>  
> -	if (count >= sizeof(write_buf))
> +	if (count >= 32768) /* 32k is max userland buffer, let's check anyway */
>  		goto out;
> -	mutex_lock(&queue_io_mutex);
> +
> +	write_buf = kvmalloc(count + 1, GFP_KERNEL);
> +	if (!write_buf)
> +		return -ENOMEM;
> +
>  	ret = cache_do_downcall(write_buf, buf, count, cd);
> -	mutex_unlock(&queue_io_mutex);
> +	kvfree(write_buf);
>  out:
>  	return ret;
>  }
> -- 
> 2.21.0
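
For context on Bruce's suggestion: kvmalloc() first attempts a kmalloc()
and falls back to vmalloc() when the kmalloc cannot be satisfied, and
kvfree() releases memory from either allocator. An unconditional per-call
allocation therefore needs no shared static buffer, and no mutex to
serialize writers. A minimal sketch of the pattern (downcall_copy is a
hypothetical helper for illustration only; the actual patch follows in
the next message):

#include <linux/mm.h>      /* kvmalloc, kvfree */
#include <linux/uaccess.h> /* copy_from_user */

static ssize_t downcall_copy(const char __user *buf, size_t count)
{
	char *kbuf;
	ssize_t ret;

	/* Per-call buffer: no shared state, hence no lock needed. */
	kbuf = kvmalloc(count + 1, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, buf, count)) {
		ret = -EFAULT;
	} else {
		kbuf[count] = '\0';	/* parsers expect a NUL-terminated string */
		ret = count;		/* the real code would parse kbuf here */
	}

	kvfree(kbuf);	/* works for kmalloc- and vmalloc-backed memory alike */
	return ret;
}
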
Roberto Bergantinos Corpas Nov. 21, 2020, 10:54 a.m. UTC | #2
Hi Bruce,

  Sorry for the late response as well.

    OK, here's a possible patch; let me know your thoughts.

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index baef5ee43dbb..1347ecae9c84 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
  */

 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);

 struct cache_queue {
        struct list_head        list;
@@ -905,44 +904,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
        return ret;
 }

-static ssize_t cache_slow_downcall(const char __user *buf,
-                                  size_t count, struct cache_detail *cd)
-{
-       static char write_buf[8192]; /* protected by queue_io_mutex */
-       ssize_t ret = -EINVAL;
-
-       if (count >= sizeof(write_buf))
-               goto out;
-       mutex_lock(&queue_io_mutex);
-       ret = cache_do_downcall(write_buf, buf, count, cd);
-       mutex_unlock(&queue_io_mutex);
-out:
-       return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
 {
-       struct page *page;
-       char *kaddr;
+       char *write_buf;
        ssize_t ret = -ENOMEM;

-       if (count >= PAGE_SIZE)
-               goto out_slow;
+       if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
+               ret = -EINVAL;
+               goto out;
+       }

-       page = find_or_create_page(mapping, 0, GFP_KERNEL);
-       if (!page)
-               goto out_slow;
+       write_buf = kvmalloc(count + 1, GFP_KERNEL);
+       if (!write_buf)
+               goto out;

-       kaddr = kmap(page);
-       ret = cache_do_downcall(kaddr, buf, count, cd);
-       kunmap(page);
-       unlock_page(page);
-       put_page(page);
+       ret = cache_do_downcall(write_buf, buf, count, cd);
+       kvfree(write_buf);
+out:
        return ret;
-out_slow:
-       return cache_slow_downcall(buf, count, cd);
 }

 static ssize_t cache_write(struct file *filp, const char __user *buf,

On Fri, Nov 6, 2020 at 10:51 PM J. Bruce Fields <bfields@fieldses.org> wrote:
>
> On Mon, Oct 26, 2020 at 04:05:30PM +0100, Roberto Bergantinos Corpas wrote:
> > The RPC channel buffer for the slow case (user buffer bigger than
> > one page) can be allocated dynamically, which also lets us dispense
> > with queue_io_mutex.
>
> Sorry for the slow response.
>
> Let's just remove cache_slow_downcall and the find_or_create_page()
> thing and just do a kvmalloc() from the start.  I don't understand why
> we need to be more complicated.
>
> --b.
>
> >
> > Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
> > ---
> >  net/sunrpc/cache.c | 13 ++++++++-----
> >  1 file changed, 8 insertions(+), 5 deletions(-)
> >
> > diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> > index baef5ee43dbb..325393f75e17 100644
> > --- a/net/sunrpc/cache.c
> > +++ b/net/sunrpc/cache.c
> > @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
> >   */
> >
> >  static DEFINE_SPINLOCK(queue_lock);
> > -static DEFINE_MUTEX(queue_io_mutex);
> >
> >  struct cache_queue {
> >       struct list_head        list;
> > @@ -908,14 +907,18 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
> >  static ssize_t cache_slow_downcall(const char __user *buf,
> >                                  size_t count, struct cache_detail *cd)
> >  {
> > -     static char write_buf[8192]; /* protected by queue_io_mutex */
> > +     char *write_buf;
> >       ssize_t ret = -EINVAL;
> >
> > -     if (count >= sizeof(write_buf))
> > +     if (count >= 32768) /* 32k is max userland buffer, let's check anyway */
> >               goto out;
> > -     mutex_lock(&queue_io_mutex);
> > +
> > +     write_buf = kvmalloc(count + 1, GFP_KERNEL);
> > +     if (!write_buf)
> > +             return -ENOMEM;
> > +
> >       ret = cache_do_downcall(write_buf, buf, count, cd);
> > -     mutex_unlock(&queue_io_mutex);
> > +     kvfree(write_buf);
> >  out:
> >       return ret;
> >  }
> > --
> > 2.21.0
>
J. Bruce Fields Nov. 23, 2020, 3:36 p.m. UTC | #3
On Sat, Nov 21, 2020 at 11:54:30AM +0100, Roberto Bergantinos Corpas wrote:
> Hi Bruce,
> 
>   Sorry for the late response as well.
> 
>     OK, here's a possible patch; let me know your thoughts.

Looks good to me!  Could you just submit with changelog and
Signed-off-by?

--b.

> 
> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> index baef5ee43dbb..1347ecae9c84 100644
> --- a/net/sunrpc/cache.c
> +++ b/net/sunrpc/cache.c
> @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
>   */
> 
>  static DEFINE_SPINLOCK(queue_lock);
> -static DEFINE_MUTEX(queue_io_mutex);
> 
>  struct cache_queue {
>         struct list_head        list;
> @@ -905,44 +904,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
>         return ret;
>  }
> 
> -static ssize_t cache_slow_downcall(const char __user *buf,
> -                                  size_t count, struct cache_detail *cd)
> -{
> -       static char write_buf[8192]; /* protected by queue_io_mutex */
> -       ssize_t ret = -EINVAL;
> -
> -       if (count >= sizeof(write_buf))
> -               goto out;
> -       mutex_lock(&queue_io_mutex);
> -       ret = cache_do_downcall(write_buf, buf, count, cd);
> -       mutex_unlock(&queue_io_mutex);
> -out:
> -       return ret;
> -}
> -
>  static ssize_t cache_downcall(struct address_space *mapping,
>                               const char __user *buf,
>                               size_t count, struct cache_detail *cd)
>  {
> -       struct page *page;
> -       char *kaddr;
> +       char *write_buf;
>         ssize_t ret = -ENOMEM;
> 
> -       if (count >= PAGE_SIZE)
> -               goto out_slow;
> +       if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
> +               ret = -EINVAL;
> +               goto out;
> +       }
> 
> -       page = find_or_create_page(mapping, 0, GFP_KERNEL);
> -       if (!page)
> -               goto out_slow;
> +       write_buf = kvmalloc(count + 1, GFP_KERNEL);
> +       if (!write_buf)
> +               goto out;
> 
> -       kaddr = kmap(page);
> -       ret = cache_do_downcall(kaddr, buf, count, cd);
> -       kunmap(page);
> -       unlock_page(page);
> -       put_page(page);
> +       ret = cache_do_downcall(write_buf, buf, count, cd);
> +       kvfree(write_buf);
> +out:
>         return ret;
> -out_slow:
> -       return cache_slow_downcall(buf, count, cd);
>  }
> 
>  static ssize_t cache_write(struct file *filp, const char __user *buf,
> 
> On Fri, Nov 6, 2020 at 10:51 PM J. Bruce Fields <bfields@fieldses.org> wrote:
> >
> > On Mon, Oct 26, 2020 at 04:05:30PM +0100, Roberto Bergantinos Corpas wrote:
> > > The RPC channel buffer for the slow case (user buffer bigger than
> > > one page) can be allocated dynamically, which also lets us dispense
> > > with queue_io_mutex.
> >
> > Sorry for the slow response.
> >
> > Let's just remove cache_slow_downcall and the find_or_create_page()
> > thing and just do a kvmalloc() from the start.  I don't understand why
> > we need to be more complicated.
> >
> > --b.
> >
> > >
> > > Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
> > > ---
> > >  net/sunrpc/cache.c | 13 ++++++++-----
> > >  1 file changed, 8 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> > > index baef5ee43dbb..325393f75e17 100644
> > > --- a/net/sunrpc/cache.c
> > > +++ b/net/sunrpc/cache.c
> > > @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
> > >   */
> > >
> > >  static DEFINE_SPINLOCK(queue_lock);
> > > -static DEFINE_MUTEX(queue_io_mutex);
> > >
> > >  struct cache_queue {
> > >       struct list_head        list;
> > > @@ -908,14 +907,18 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
> > >  static ssize_t cache_slow_downcall(const char __user *buf,
> > >                                  size_t count, struct cache_detail *cd)
> > >  {
> > > -     static char write_buf[8192]; /* protected by queue_io_mutex */
> > > +     char *write_buf;
> > >       ssize_t ret = -EINVAL;
> > >
> > > -     if (count >= sizeof(write_buf))
> > > +     if (count >= 32768) /* 32k is max userland buffer, let's check anyway */
> > >               goto out;
> > > -     mutex_lock(&queue_io_mutex);
> > > +
> > > +     write_buf = kvmalloc(count + 1, GFP_KERNEL);
> > > +     if (!write_buf)
> > > +             return -ENOMEM;
> > > +
> > >       ret = cache_do_downcall(write_buf, buf, count, cd);
> > > -     mutex_unlock(&queue_io_mutex);
> > > +     kvfree(write_buf);
> > >  out:
> > >       return ret;
> > >  }
> > > --
> > > 2.21.0
> >
Chuck Lever Nov. 23, 2020, 3:48 p.m. UTC | #4
> On Nov 23, 2020, at 10:36 AM, J. Bruce Fields <bfields@fieldses.org> wrote:
> 
> On Sat, Nov 21, 2020 at 11:54:30AM +0100, Roberto Bergantinos Corpas wrote:
>> Hi Bruce,
>> 
>>  Sorry for the late response as well.
>> 
>>    OK, here's a possible patch; let me know your thoughts.
> 
> Looks good to me!  Could you just submit with changelog and
> Signed-off-by?

Bruce, are you taking this for v5.10-rc, or shall I include it
with v5.11?

--
Chuck Lever
J. Bruce Fields Nov. 23, 2020, 4:05 p.m. UTC | #5
On Mon, Nov 23, 2020 at 10:48:02AM -0500, Chuck Lever wrote:
> 
> 
> > On Nov 23, 2020, at 10:36 AM, J. Bruce Fields <bfields@fieldses.org> wrote:
> > 
> > On Sat, Nov 21, 2020 at 11:54:30AM +0100, Roberto Bergantinos Corpas wrote:
> >> Hi Bruce,
> >> 
> >>  Sorry for the late response as well.
> >> 
> >>    OK, here's a possible patch; let me know your thoughts.
> > 
> > Looks good to me!  Could you just submit with changelog and
> > Signed-off-by?
> 
> Bruce, are you taking this for v5.10-rc, or shall I include it
> with v5.11 ?

I think the immediate problem was fixed by 27a1e8a0f79e and this is more
of a cleanup, so it can wait for v5.11, if you don't mind taking it.

--b.

Patch

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index baef5ee43dbb..325393f75e17 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head	list;
@@ -908,14 +907,18 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 static ssize_t cache_slow_downcall(const char __user *buf,
 				   size_t count, struct cache_detail *cd)
 {
-	static char write_buf[8192]; /* protected by queue_io_mutex */
+	char *write_buf;
 	ssize_t ret = -EINVAL;
 
-	if (count >= sizeof(write_buf))
+	if (count >= 32768) /* 32k is max userland buffer, let's check anyway */
 		goto out;
-	mutex_lock(&queue_io_mutex);
+
+	write_buf = kvmalloc(count + 1, GFP_KERNEL);
+	if (!write_buf)
+		return -ENOMEM;
+
 	ret = cache_do_downcall(write_buf, buf, count, cd);
-	mutex_unlock(&queue_io_mutex);
+	kvfree(write_buf);
 out:
 	return ret;
 }
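
For reference on the count + 1 sizing: the shared helper NUL-terminates
the buffer before handing it to the cache parser, so it needs one byte
beyond count. Paraphrased from net/sunrpc/cache.c of the same era (a
sketch, not the exact upstream source):

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';	/* consumes the extra byte allocated by the caller */
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}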