[v2,03/18] nfsd: convert laundry_wq to something less nfsd4 specific

Message ID 1438809216-4846-4-git-send-email-jeff.layton@primarydata.com (mailing list archive)
State New, archived

Commit Message

Jeff Layton Aug. 5, 2015, 9:13 p.m. UTC
Currently, nfsd uses a singlethread workqueue for this, but that's not
necessary. If we have multiple namespaces, then there's no need to
serialize the laundromat runs since they are only requeued at the end of
the work itself.

Also, create_singlethread_workqueue adds the WQ_MEM_RECLAIM flag, which
doesn't really seem to be necessary. The laundromat jobs are always
kicked off via a timer, and not from memory reclaim paths. There's no
need for a rescuer thread.

Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
---
 fs/nfsd/nfs4state.c | 23 +++++------------------
 fs/nfsd/nfsd.h      |  1 +
 fs/nfsd/nfssvc.c    | 14 +++++++++++++-
 3 files changed, 19 insertions(+), 19 deletions(-)

Comments

Kinglong Mee Aug. 7, 2015, 3:26 p.m. UTC | #1
On 8/6/2015 05:13, Jeff Layton wrote:
> Currently, nfsd uses a singlethread workqueue for this, but that's not
> necessary. If we have multiple namespaces, then there's no need to
> serialize the laundromat runs since they are only requeued at the end of
> the work itself.
> 
> Also, create_singlethread_workqueue adds the WQ_MEM_RECLAIM flag, which
> doesn't really seem to be necessary. The laundromat jobs are always
> kicked off via a timer, and not from memory reclaim paths. There's no
> need for a rescuer thread.
> 
> Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
> ---
>  fs/nfsd/nfs4state.c | 23 +++++------------------
>  fs/nfsd/nfsd.h      |  1 +
>  fs/nfsd/nfssvc.c    | 14 +++++++++++++-
>  3 files changed, 19 insertions(+), 19 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index b3b306bd1830..c94859122e6f 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -4366,7 +4366,6 @@ nfs4_laundromat(struct nfsd_net *nn)
>  	return new_timeo;
>  }
>  
> -static struct workqueue_struct *laundry_wq;
>  static void laundromat_main(struct work_struct *);
>  
>  static void
> @@ -4380,7 +4379,7 @@ laundromat_main(struct work_struct *laundry)
>  
>  	t = nfs4_laundromat(nn);
>  	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
> -	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
> +	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work, t*HZ);
>  }
>  
>  static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
> @@ -6569,7 +6568,8 @@ nfs4_state_start_net(struct net *net)
>  	nfsd4_client_tracking_init(net);
>  	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
>  	       nn->nfsd4_grace, net);
> -	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
> +	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work,
> +				nn->nfsd4_grace * HZ);
>  	return 0;
>  }
>  
> @@ -6583,22 +6583,10 @@ nfs4_state_start(void)
>  	ret = set_callback_cred();
>  	if (ret)
>  		return -ENOMEM;
> -	laundry_wq = create_singlethread_workqueue("nfsd4");
> -	if (laundry_wq == NULL) {
> -		ret = -ENOMEM;
> -		goto out_recovery;
> -	}
>  	ret = nfsd4_create_callback_queue();
> -	if (ret)
> -		goto out_free_laundry;
> -
> -	set_max_delegations();
> -
> -	return 0;
> +	if (!ret)
> +		set_max_delegations();
>  
> -out_free_laundry:
> -	destroy_workqueue(laundry_wq);
> -out_recovery:
>  	return ret;
>  }
>  
> @@ -6635,7 +6623,6 @@ nfs4_state_shutdown_net(struct net *net)
>  void
>  nfs4_state_shutdown(void)
>  {
> -	destroy_workqueue(laundry_wq);
>  	nfsd4_destroy_callback_queue();
>  }
>  
> diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
> index cf980523898b..0199415344ff 100644
> --- a/fs/nfsd/nfsd.h
> +++ b/fs/nfsd/nfsd.h
> @@ -62,6 +62,7 @@ struct readdir_cd {
>  extern struct svc_program	nfsd_program;
>  extern struct svc_version	nfsd_version2, nfsd_version3,
>  				nfsd_version4;
> +extern struct workqueue_struct	*nfsd_laundry_wq;
>  extern struct mutex		nfsd_mutex;
>  extern spinlock_t		nfsd_drc_lock;
>  extern unsigned long		nfsd_drc_max_mem;
> diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> index ad4e2377dd63..ced9944201a0 100644
> --- a/fs/nfsd/nfssvc.c
> +++ b/fs/nfsd/nfssvc.c
> @@ -17,6 +17,7 @@
>  #include <linux/lockd/bind.h>
>  #include <linux/nfsacl.h>
>  #include <linux/seq_file.h>
> +#include <linux/workqueue.h>
>  #include <net/net_namespace.h>
>  #include "nfsd.h"
>  #include "cache.h"
> @@ -28,6 +29,9 @@
>  extern struct svc_program	nfsd_program;
>  static int			nfsd(void *vrqstp);
>  
> +/* A workqueue for nfsd-related cleanup tasks */
> +struct workqueue_struct		*nfsd_laundry_wq;
> +
>  /*
>   * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
>   * of the svc_serv struct. In particular, ->sv_nrthreads but also to some
> @@ -224,11 +228,19 @@ static int nfsd_startup_generic(int nrservs)
>  	if (ret)
>  		goto dec_users;
>  
> +	ret = -ENOMEM;
> +	nfsd_laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd-laundry");
> +	if (!nfsd_laundry_wq)
> +		goto out_racache;
> +
>  	ret = nfs4_state_start();
>  	if (ret)
> -		goto out_racache;
> +		goto out_wq;
>  	return 0;
>  
> +out_wq:
> +	destroy_workqueue(nfsd_laundry_wq);
> +	nfsd_laundry_wq = NULL;
>  out_racache:
>  	nfsd_racache_shutdown();
>  dec_users:

Is a destroy_workqueue(nfsd_laundry_wq) needed in nfsd_shutdown_generic()?

thanks,
Kinglong Mee
Jeff Layton Aug. 7, 2015, 5:12 p.m. UTC | #2
On Fri, 7 Aug 2015 23:26:02 +0800
Kinglong Mee <kinglongmee@gmail.com> wrote:

> On 8/6/2015 05:13, Jeff Layton wrote:
> > Currently, nfsd uses a singlethread workqueue for this, but that's not
> > necessary. If we have multiple namespaces, then there's no need to
> > serialize the laundromat runs since they are only requeued at the end of
> > the work itself.
> > 
> > Also, create_singlethread_workqueue adds the WQ_MEM_RECLAIM flag, which
> > doesn't really seem to be necessary. The laundromat jobs are always
> > kicked off via a timer, and not from memory reclaim paths. There's no
> > need for a rescuer thread.
> > 
> > Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
> > ---
> >  fs/nfsd/nfs4state.c | 23 +++++------------------
> >  fs/nfsd/nfsd.h      |  1 +
> >  fs/nfsd/nfssvc.c    | 14 +++++++++++++-
> >  3 files changed, 19 insertions(+), 19 deletions(-)
> > 
> > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> > index b3b306bd1830..c94859122e6f 100644
> > --- a/fs/nfsd/nfs4state.c
> > +++ b/fs/nfsd/nfs4state.c
> > @@ -4366,7 +4366,6 @@ nfs4_laundromat(struct nfsd_net *nn)
> >  	return new_timeo;
> >  }
> >  
> > -static struct workqueue_struct *laundry_wq;
> >  static void laundromat_main(struct work_struct *);
> >  
> >  static void
> > @@ -4380,7 +4379,7 @@ laundromat_main(struct work_struct *laundry)
> >  
> >  	t = nfs4_laundromat(nn);
> >  	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
> > -	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
> > +	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work, t*HZ);
> >  }
> >  
> >  static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
> > @@ -6569,7 +6568,8 @@ nfs4_state_start_net(struct net *net)
> >  	nfsd4_client_tracking_init(net);
> >  	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
> >  	       nn->nfsd4_grace, net);
> > -	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
> > +	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work,
> > +				nn->nfsd4_grace * HZ);
> >  	return 0;
> >  }
> >  
> > @@ -6583,22 +6583,10 @@ nfs4_state_start(void)
> >  	ret = set_callback_cred();
> >  	if (ret)
> >  		return -ENOMEM;
> > -	laundry_wq = create_singlethread_workqueue("nfsd4");
> > -	if (laundry_wq == NULL) {
> > -		ret = -ENOMEM;
> > -		goto out_recovery;
> > -	}
> >  	ret = nfsd4_create_callback_queue();
> > -	if (ret)
> > -		goto out_free_laundry;
> > -
> > -	set_max_delegations();
> > -
> > -	return 0;
> > +	if (!ret)
> > +		set_max_delegations();
> >  
> > -out_free_laundry:
> > -	destroy_workqueue(laundry_wq);
> > -out_recovery:
> >  	return ret;
> >  }
> >  
> > @@ -6635,7 +6623,6 @@ nfs4_state_shutdown_net(struct net *net)
> >  void
> >  nfs4_state_shutdown(void)
> >  {
> > -	destroy_workqueue(laundry_wq);
> >  	nfsd4_destroy_callback_queue();
> >  }
> >  
> > diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
> > index cf980523898b..0199415344ff 100644
> > --- a/fs/nfsd/nfsd.h
> > +++ b/fs/nfsd/nfsd.h
> > @@ -62,6 +62,7 @@ struct readdir_cd {
> >  extern struct svc_program	nfsd_program;
> >  extern struct svc_version	nfsd_version2, nfsd_version3,
> >  				nfsd_version4;
> > +extern struct workqueue_struct	*nfsd_laundry_wq;
> >  extern struct mutex		nfsd_mutex;
> >  extern spinlock_t		nfsd_drc_lock;
> >  extern unsigned long		nfsd_drc_max_mem;
> > diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> > index ad4e2377dd63..ced9944201a0 100644
> > --- a/fs/nfsd/nfssvc.c
> > +++ b/fs/nfsd/nfssvc.c
> > @@ -17,6 +17,7 @@
> >  #include <linux/lockd/bind.h>
> >  #include <linux/nfsacl.h>
> >  #include <linux/seq_file.h>
> > +#include <linux/workqueue.h>
> >  #include <net/net_namespace.h>
> >  #include "nfsd.h"
> >  #include "cache.h"
> > @@ -28,6 +29,9 @@
> >  extern struct svc_program	nfsd_program;
> >  static int			nfsd(void *vrqstp);
> >  
> > +/* A workqueue for nfsd-related cleanup tasks */
> > +struct workqueue_struct		*nfsd_laundry_wq;
> > +
> >  /*
> >   * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
> >   * of the svc_serv struct. In particular, ->sv_nrthreads but also to some
> > @@ -224,11 +228,19 @@ static int nfsd_startup_generic(int nrservs)
> >  	if (ret)
> >  		goto dec_users;
> >  
> > +	ret = -ENOMEM;
> > +	nfsd_laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd-laundry");
> > +	if (!nfsd_laundry_wq)
> > +		goto out_racache;
> > +
> >  	ret = nfs4_state_start();
> >  	if (ret)
> > -		goto out_racache;
> > +		goto out_wq;
> >  	return 0;
> >  
> > +out_wq:
> > +	destroy_workqueue(nfsd_laundry_wq);
> > +	nfsd_laundry_wq = NULL;
> >  out_racache:
> >  	nfsd_racache_shutdown();
> >  dec_users:
> 
> Is a destroy_workqueue(nfsd_laundry_wq) needed in nfsd_shutdown_generic()?
> 
> thanks,
> Kinglong Mee

Yes! I had that in an earlier version of this, but somehow it got
dropped. Good catch. I'll fix that for the next respin.
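
[Illustrative sketch of the fix being discussed. The surrounding body of
nfsd_shutdown_generic() is assumed here and only the destroy_workqueue()
cleanup is the point; the actual respin may look different.]

static void nfsd_shutdown_generic(void)
{
	nfs4_state_shutdown();

	/* proposed: mirror the workqueue cleanup in nfsd_startup_generic() */
	if (nfsd_laundry_wq) {
		destroy_workqueue(nfsd_laundry_wq);
		nfsd_laundry_wq = NULL;
	}

	nfsd_racache_shutdown();
}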
Christoph Hellwig Aug. 9, 2015, 7:14 a.m. UTC | #3
On Wed, Aug 05, 2015 at 05:13:21PM -0400, Jeff Layton wrote:
> Currently, nfsd uses a singlethread workqueue for this, but that's not
> necessary. If we have multiple namespaces, then there's no need to
> serialize the laundromat runs since they are only requeued at the end of
> the work itself.
> 
> Also, create_singlethread_workqueue adds the WQ_MEM_RECLAIM flag, which
> doesn't really seem to be necessary. The laundromat jobs are always
> kicked off via a timer, and not from memory reclaim paths. There's no
> need for a rescuer thread.

Why would you change it to an unbound WQ?  I'd really prefer to split
the change of workqueue semantics from making it globally available, too.
Jeff Layton Aug. 9, 2015, 11:11 a.m. UTC | #4
On Sun, 9 Aug 2015 00:14:38 -0700
Christoph Hellwig <hch@infradead.org> wrote:

> On Wed, Aug 05, 2015 at 05:13:21PM -0400, Jeff Layton wrote:
> > Currently, nfsd uses a singlethread workqueue for this, but that's not
> > necessary. If we have multiple namespaces, then there's no need to
> > serialize the laundromat runs since they are only requeued at the end of
> > the work itself.
> > 
> > Also, create_singlethread_workqueue adds the WQ_MEM_RECLAIM flag, which
> > doesn't really seem to be necessary. The laundromat jobs are always
> > kicked off via a timer, and not from memory reclaim paths. There's no
> > need for a rescuer thread.
> 
> Why would you change it to an unbound WQ?  I'd really prefer to split
> the change of workqueue semantics from making it globally available, too.

create_singlethread_workqueue already makes an unbound workqueue. This
patch just lifts the "max_active" value to the default, and removes the
WQ_MEM_RECLAIM flag.
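
[For context, a rough before/after of the allocation call. The expansion of
create_singlethread_workqueue() below is paraphrased and should be treated as
an approximation of include/linux/workqueue.h rather than a verbatim quote.]

/* old: roughly what create_singlethread_workqueue("nfsd4") boils down to */
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND | __WQ_ORDERED | WQ_MEM_RECLAIM,
			     1, "nfsd4");

/* new (from the patch): still unbound, default max_active, no rescuer */
nfsd_laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd-laundry");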

We certainly could turn this into a bound workqueue, but given the sort
of job that the laundromat runs I'm not sure we'd benefit much from the
locality.

...and sure, I can turn this into two patches if you'd prefer.
Christoph Hellwig Aug. 10, 2015, 8:26 a.m. UTC | #5
On Sun, Aug 09, 2015 at 07:11:37AM -0400, Jeff Layton wrote:
> create_singlethread_workqueue already makes an unbound workqueue. This
> patch just lifts the "max_active" value to the default, and removes the
> WQ_MEM_RECLAIM flag.
> 
> We certainly could turn this into a bound workqueue, but given the sort
> of job that the laundromat runs I'm not sure we'd benefit much from the
> locality.
> 
> ...and sure, I can turn this into two patches if you'd prefer.

The patch was just rather confusing to me.  Do you want the existing
laundromat to scale better with lots of namespaces?  Sounds reasonable,
but I don't really see the use case.  

Looking at the later patches I now see you're overloading a totally
different job onto it.  I don't think there's a point given how cheap
workqueues are these days.  Even more it seems like you really should
use the mm/list_lru.c infrastructure and a shrinker for your file
cache.
Jeff Layton Aug. 10, 2015, 11:23 a.m. UTC | #6
On Mon, 10 Aug 2015 01:26:22 -0700
Christoph Hellwig <hch@infradead.org> wrote:

> On Sun, Aug 09, 2015 at 07:11:37AM -0400, Jeff Layton wrote:
> > create_singlethread_workqueue already makes an unbound workqueue. This
> > patch just lifts the "max_active" value to the default, and removes the
> > WQ_MEM_RECLAIM flag.
> > 
> > We certainly could turn this into a bound workqueue, but given the sort
> > of job that the laundromat runs I'm not sure we'd benefit much from the
> > locality.
> > 
> > ...and sure, I can turn this into two patches if you'd prefer.
> 
> The patch was just rather confusing to me.  Do you want the existing
> laundromat to scale better with lots of namespaces?  Sounds reasonable,
> but I don't really see the use case.  
> 
> Looking at the later patches I now see you're overloading a totally
> different job onto it.  I don't think there's a point given how cheap
> workqueues are these days.  Even more it seems like you really should
> use the mm/list_lru.c infrastructure and a shrinker for your file
> cache.

Right, it's a laundry job of a different sort, so I figured using the
laundry_wq would make sense. I also just saw absolutely no reason to
serialize all of the nfsd4 laundromat jobs (if there were ever more
than one on the box at a time), so it was an opportunity to clean that
up.

I did consider a shrinker and LRU list for this. The problem there is
that shrinkers are triggered on memory pressure. Keeping these files
open after they've been idle for a long period of time would prevent
the kernel from handing out leases on them, so closing them after a
reasonable idle period seemed like the right thing to do.

I suppose however we could use a shrinker/LRU _and_ add a mechanism
that would cause the kernel to close idle nfsd_files for an inode when
there is an attempt to do a F_SETLEASE. That would probably work,
unless I'm missing other reasons that keeping unused files open might
be problematic. Are there any?
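
[A very rough sketch of that shrinker/LRU idea, purely for illustration.
nfsd_file_lru, nfsd_file_lru_cb and the setlease hook are hypothetical names
invented here, not code from this series.]

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru nfsd_file_lru;	/* hypothetical LRU of cached nfsd_files */

static unsigned long
nfsd_file_shrinker_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_shrinker_scan(struct shrinker *s, struct shrink_control *sc)
{
	/* nfsd_file_lru_cb (not shown) would isolate and close idle files */
	return list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, NULL,
			     sc->nr_to_scan);
}

static struct shrinker nfsd_file_shrinker = {
	.count_objects	= nfsd_file_shrinker_count,
	.scan_objects	= nfsd_file_shrinker_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* registered once at cache init time, plus a hook in the setlease path that
 * closes any idle cached nfsd_files for the inode before handing out a lease */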
Christoph Hellwig Aug. 10, 2015, 12:10 p.m. UTC | #7
On Mon, Aug 10, 2015 at 07:23:51AM -0400, Jeff Layton wrote:
> I did consider a shrinker and LRU list for this. The problem there is
> that shrinkers are triggered on memory pressure. Keeping these files
> open after they've been idle for a long period of time would prevent
> the kernel from handing out leases on them, so closing them after a
> reasonable idle period seemed like the right thing to do.

True.

> I suppose however we could use a shrinker/LRU _and_ add a mechanism
> that would cause the kernel to close idle nfsd_files for an inode when
> there is an attempt to do a F_SETLEASE. That would probably work,
> unless I'm missing other reasons that keeping unused files open might
> be problematic. Are there any?

That seems reasonable.  Keeping the file open also will prevent
unmounting the file system, although currently any NFS export already
causes that as well.
Jeff Layton Aug. 10, 2015, 12:14 p.m. UTC | #8
On Mon, 10 Aug 2015 05:10:35 -0700
Christoph Hellwig <hch@infradead.org> wrote:

> On Mon, Aug 10, 2015 at 07:23:51AM -0400, Jeff Layton wrote:
> > I did consider a shrinker and LRU list for this. The problem there is
> > that shrinkers are triggered on memory pressure. Keeping these files
> > open after they've been idle for a long period of time would prevent
> > the kernel from handing out leases on them, so closing them after a
> > reasonable idle period seemed like the right thing to do.
> 
> True.
> 
> > I suppose however we could use a shrinker/LRU _and_ add a mechanism
> > that would cause the kernel to close idle nfsd_files for an inode when
> > there is an attempt to do a F_SETLEASE. That would probably work,
> > unless I'm missing other reasons that keeping unused files open might
> > be problematic. Are there any?
> 
> That seems reasonable.  Keeping the file open also will prevent
> unmounting the file system, although currently any NFS export already
> causes that as well.

Yes, though that's the reason for the new ->flush hook in the sunrpc
cache code. On any export table change, we'll clean out the nfsd_file
cache to help ensure that you'll be able to unmount soon after
unexporting a filesystem.
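
[Sketch of the idea only. The hook signature and nfsd_file_cache_purge() are
assumptions for illustration, not the actual sunrpc cache change from the
later patches in the series.]

/* hypothetical: called whenever the export cache contents are flushed,
 * e.g. on an export table change, so cached opens stop pinning the mount */
static void nfsd_export_flush(struct cache_detail *cd)
{
	nfsd_file_cache_purge();	/* assumed helper that closes all cached files */
}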

In any case, I'll look at the shrinker/lru thing for the next respin
and see whether adding a hook into the setlease code might be
reasonable.

Thanks for the review so far,
J. Bruce Fields Aug. 10, 2015, 2:33 p.m. UTC | #9
On Mon, Aug 10, 2015 at 08:14:56AM -0400, Jeff Layton wrote:
> On Mon, 10 Aug 2015 05:10:35 -0700
> Christoph Hellwig <hch@infradead.org> wrote:
> 
> > On Mon, Aug 10, 2015 at 07:23:51AM -0400, Jeff Layton wrote:
> > > I did consider a shrinker and LRU list for this. The problem there is
> > > that shrinkers are triggered on memory pressure. Keeping these files
> > > open after they've been idle for a long period of time would prevent
> > > the kernel from handing out leases on them, so closing them after a
> > > reasonable idle period seemed like the right thing to do.
> > 
> > True.
> > 
> > > I suppose however we could use a shrinker/LRU _and_ add a mechanism
> > > that would cause the kernel to close idle nfsd_files for an inode when
> > > there is an attempt to do a F_SETLEASE. That would probably work,
> > > unless I'm missing other reasons that keeping unused files open might
> > > be problematic. Are there any?
> > 
> > That seems reasonable.  Keeping the file open also will prevent
> > unmounting the file system, although currently any NFS export already
> > causes that as well.
> 
> Yes, though that's the reason for the new ->flush hook in the sunrpc
> cache code. On any export table change, we'll clean out the nfsd_file
> cache to help ensure that you'll be able to unmount soon after
> unexporting a filesystem.

There are definitely people with scripts that try to unexport and then
immediately unmount, e.g. to migrate a filesystem elsewhere.  They
already run into problems thanks to export caches, locks, and v4 state.
A complete shutdown of nfsd is currently the only supported way to
unmount.  Still, I wouldn't be surprised if there are people who
(possibly just out of luck) have a working setup now that will start
failing after we take these additional references.

Extending the unlock_* interfaces or getting Kinglong's stuff working
would help.

--b.

> 
> In any case, I'll look at the shrinker/lru thing for the next respin
> and see whether adding a hook into the setlease code might be
> reasonable.
> 
> Thanks for the review so far,
> -- 
> Jeff Layton <jlayton@poochiereds.net>

Patch

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b3b306bd1830..c94859122e6f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4366,7 +4366,6 @@  nfs4_laundromat(struct nfsd_net *nn)
 	return new_timeo;
 }
 
-static struct workqueue_struct *laundry_wq;
 static void laundromat_main(struct work_struct *);
 
 static void
@@ -4380,7 +4379,7 @@  laundromat_main(struct work_struct *laundry)
 
 	t = nfs4_laundromat(nn);
 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
-	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
+	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
@@ -6569,7 +6568,8 @@  nfs4_state_start_net(struct net *net)
 	nfsd4_client_tracking_init(net);
 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
 	       nn->nfsd4_grace, net);
-	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
+	queue_delayed_work(nfsd_laundry_wq, &nn->laundromat_work,
+				nn->nfsd4_grace * HZ);
 	return 0;
 }
 
@@ -6583,22 +6583,10 @@  nfs4_state_start(void)
 	ret = set_callback_cred();
 	if (ret)
 		return -ENOMEM;
-	laundry_wq = create_singlethread_workqueue("nfsd4");
-	if (laundry_wq == NULL) {
-		ret = -ENOMEM;
-		goto out_recovery;
-	}
 	ret = nfsd4_create_callback_queue();
-	if (ret)
-		goto out_free_laundry;
-
-	set_max_delegations();
-
-	return 0;
+	if (!ret)
+		set_max_delegations();
 
-out_free_laundry:
-	destroy_workqueue(laundry_wq);
-out_recovery:
 	return ret;
 }
 
@@ -6635,7 +6623,6 @@  nfs4_state_shutdown_net(struct net *net)
 void
 nfs4_state_shutdown(void)
 {
-	destroy_workqueue(laundry_wq);
 	nfsd4_destroy_callback_queue();
 }
 
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index cf980523898b..0199415344ff 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -62,6 +62,7 @@  struct readdir_cd {
 extern struct svc_program	nfsd_program;
 extern struct svc_version	nfsd_version2, nfsd_version3,
 				nfsd_version4;
+extern struct workqueue_struct	*nfsd_laundry_wq;
 extern struct mutex		nfsd_mutex;
 extern spinlock_t		nfsd_drc_lock;
 extern unsigned long		nfsd_drc_max_mem;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ad4e2377dd63..ced9944201a0 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -17,6 +17,7 @@ 
 #include <linux/lockd/bind.h>
 #include <linux/nfsacl.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>
 #include <net/net_namespace.h>
 #include "nfsd.h"
 #include "cache.h"
@@ -28,6 +29,9 @@ 
 extern struct svc_program	nfsd_program;
 static int			nfsd(void *vrqstp);
 
+/* A workqueue for nfsd-related cleanup tasks */
+struct workqueue_struct		*nfsd_laundry_wq;
+
 /*
  * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
  * of the svc_serv struct. In particular, ->sv_nrthreads but also to some
@@ -224,11 +228,19 @@  static int nfsd_startup_generic(int nrservs)
 	if (ret)
 		goto dec_users;
 
+	ret = -ENOMEM;
+	nfsd_laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd-laundry");
+	if (!nfsd_laundry_wq)
+		goto out_racache;
+
 	ret = nfs4_state_start();
 	if (ret)
-		goto out_racache;
+		goto out_wq;
 	return 0;
 
+out_wq:
+	destroy_workqueue(nfsd_laundry_wq);
+	nfsd_laundry_wq = NULL;
 out_racache:
 	nfsd_racache_shutdown();
 dec_users: