@@ -63,17 +63,19 @@ static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
struct nfsd_fcache_disposal {
spinlock_t lock;
+ struct list_lru file_lru;
struct list_head freeme;
+ struct delayed_work filecache_laundrette;
+ struct shrinker *file_shrinker;
};
static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
-static struct list_lru nfsd_file_lru;
static unsigned long nfsd_file_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
-static struct delayed_work nfsd_filecache_laundrette;
static struct rhltable nfsd_file_rhltable
____cacheline_aligned_in_smp;
+static atomic_long_t nfsd_lru_total = ATOMIC_LONG_INIT(0);
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
@@ -109,11 +111,18 @@ static const struct rhashtable_params nfsd_file_rhash_params = {
};
static void
-nfsd_file_schedule_laundrette(void)
+nfsd_file_schedule_laundrette(struct nfsd_fcache_disposal *l)
{
- if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
- queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
- NFSD_LAUNDRETTE_DELAY);
+ queue_delayed_work(system_unbound_wq, &l->filecache_laundrette,
+ NFSD_LAUNDRETTE_DELAY);
+}
+
+static void
+nfsd_file_schedule_laundrette_net(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfsd_file_schedule_laundrette(nn->fcache_disposal);
}
static void
@@ -318,11 +327,14 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
-
static bool nfsd_file_lru_add(struct nfsd_file *nf)
{
+ struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru)) {
+ if (list_lru_add_obj(&l->file_lru, &nf->nf_lru)) {
+ atomic_long_inc(&nfsd_lru_total);
trace_nfsd_file_lru_add(nf);
return true;
}
@@ -331,7 +343,11 @@ static bool nfsd_file_lru_add(struct nfsd_file *nf)
static bool nfsd_file_lru_remove(struct nfsd_file *nf)
{
- if (list_lru_del_obj(&nfsd_file_lru, &nf->nf_lru)) {
+ struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+
+ if (list_lru_del_obj(&l->file_lru, &nf->nf_lru)) {
+ atomic_long_dec(&nfsd_lru_total);
trace_nfsd_file_lru_del(nf);
return true;
}
@@ -373,7 +389,7 @@ nfsd_file_put(struct nfsd_file *nf)
if (nfsd_file_lru_add(nf)) {
/* If it's still hashed, we're done */
if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- nfsd_file_schedule_laundrette();
+ nfsd_file_schedule_laundrette_net(nf->nf_net);
return;
}
@@ -539,18 +555,18 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
}
static void
-nfsd_file_gc(void)
+nfsd_file_gc(struct nfsd_fcache_disposal *l)
{
- unsigned long remaining = list_lru_count(&nfsd_file_lru);
+ unsigned long remaining = list_lru_count(&l->file_lru);
LIST_HEAD(dispose);
unsigned long ret;
while (remaining > 0) {
unsigned long num_to_scan = min(remaining, NFSD_FILE_GC_BATCH);
- ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
+ ret = list_lru_walk(&l->file_lru, nfsd_file_lru_cb,
&dispose, num_to_scan);
- trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
+ trace_nfsd_file_gc_removed(ret, list_lru_count(&l->file_lru));
nfsd_file_dispose_list_delayed(&dispose);
remaining -= num_to_scan;
}
@@ -559,32 +575,36 @@ nfsd_file_gc(void)
static void
nfsd_file_gc_worker(struct work_struct *work)
{
- nfsd_file_gc();
- if (list_lru_count(&nfsd_file_lru))
- nfsd_file_schedule_laundrette();
+ struct nfsd_fcache_disposal *l = container_of(
+ work, struct nfsd_fcache_disposal, filecache_laundrette.work);
+ nfsd_file_gc(l);
+ if (list_lru_count(&l->file_lru))
+ nfsd_file_schedule_laundrette(l);
}
static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
- return list_lru_count(&nfsd_file_lru);
+ struct nfsd_fcache_disposal *l = s->private_data;
+
+ return list_lru_count(&l->file_lru);
}
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
+ struct nfsd_fcache_disposal *l = s->private_data;
+
LIST_HEAD(dispose);
unsigned long ret;
- ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+ ret = list_lru_shrink_walk(&l->file_lru, sc,
nfsd_file_lru_cb, &dispose);
- trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
+ trace_nfsd_file_shrinker_removed(ret, list_lru_count(&l->file_lru));
nfsd_file_dispose_list_delayed(&dispose);
return ret;
}
-static struct shrinker *nfsd_file_shrinker;
-
/**
* nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
* @nf: nfsd_file to attempt to queue
@@ -764,29 +784,10 @@ nfsd_file_cache_init(void)
goto out_err;
}
- ret = list_lru_init(&nfsd_file_lru);
- if (ret) {
- pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
- goto out_err;
- }
-
- nfsd_file_shrinker = shrinker_alloc(0, "nfsd-filecache");
- if (!nfsd_file_shrinker) {
- ret = -ENOMEM;
- pr_err("nfsd: failed to allocate nfsd_file_shrinker\n");
- goto out_lru;
- }
-
- nfsd_file_shrinker->count_objects = nfsd_file_lru_count;
- nfsd_file_shrinker->scan_objects = nfsd_file_lru_scan;
- nfsd_file_shrinker->seeks = 1;
-
- shrinker_register(nfsd_file_shrinker);
-
ret = lease_register_notifier(&nfsd_file_lease_notifier);
if (ret) {
pr_err("nfsd: unable to register lease notifier: %d\n", ret);
- goto out_shrinker;
+ goto out_err;
}
nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops,
@@ -799,17 +800,12 @@ nfsd_file_cache_init(void)
goto out_notifier;
}
- INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
if (ret)
clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags);
return ret;
out_notifier:
lease_unregister_notifier(&nfsd_file_lease_notifier);
-out_shrinker:
- shrinker_free(nfsd_file_shrinker);
-out_lru:
- list_lru_destroy(&nfsd_file_lru);
out_err:
kmem_cache_destroy(nfsd_file_slab);
nfsd_file_slab = NULL;
@@ -861,13 +857,36 @@ nfsd_alloc_fcache_disposal(void)
if (!l)
return NULL;
spin_lock_init(&l->lock);
+ INIT_DELAYED_WORK(&l->filecache_laundrette, nfsd_file_gc_worker);
INIT_LIST_HEAD(&l->freeme);
+ l->file_shrinker = shrinker_alloc(0, "nfsd-filecache");
+ if (!l->file_shrinker) {
+ pr_err("nfsd: failed to allocate nfsd_file_shrinker\n");
+ kfree(l);
+ return NULL;
+ }
+ l->file_shrinker->count_objects = nfsd_file_lru_count;
+ l->file_shrinker->scan_objects = nfsd_file_lru_scan;
+ l->file_shrinker->seeks = 1;
+ l->file_shrinker->private_data = l;
+
+ if (list_lru_init(&l->file_lru)) {
+ pr_err("nfsd: failed to init nfsd_file_lru\n");
+ shrinker_free(l->file_shrinker);
+ kfree(l);
+ return NULL;
+ }
+
+ shrinker_register(l->file_shrinker);
return l;
}
static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
+ cancel_delayed_work_sync(&l->filecache_laundrette);
+ shrinker_free(l->file_shrinker);
+ list_lru_destroy(&l->file_lru);
nfsd_file_dispose_list(&l->freeme);
kfree(l);
}
@@ -899,8 +918,7 @@ void
nfsd_file_cache_purge(struct net *net)
{
lockdep_assert_held(&nfsd_mutex);
- if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
- __nfsd_file_cache_purge(net);
+ __nfsd_file_cache_purge(net);
}
void
@@ -920,14 +938,7 @@ nfsd_file_cache_shutdown(void)
return;
lease_unregister_notifier(&nfsd_file_lease_notifier);
- shrinker_free(nfsd_file_shrinker);
- /*
- * make sure all callers of nfsd_file_lru_cb are done before
- * calling nfsd_file_cache_purge
- */
- cancel_delayed_work_sync(&nfsd_filecache_laundrette);
__nfsd_file_cache_purge(NULL);
- list_lru_destroy(&nfsd_file_lru);
rcu_barrier();
fsnotify_put_group(nfsd_file_fsnotify_group);
nfsd_file_fsnotify_group = NULL;
@@ -1298,7 +1309,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
struct bucket_table *tbl;
struct rhashtable *ht;
- lru = list_lru_count(&nfsd_file_lru);
+ lru = atomic_long_read(&nfsd_lru_total);
rcu_read_lock();
ht = &nfsd_file_rhltable.ht;
The final freeing of nfsd files is done by per-net nfsd threads (which call nfsd_file_net_dispose()) so it makes some sense to make more of the freeing infrastructure per-net - in struct nfsd_fcache_disposal. This is a step towards replacing the list_lru with simple lists which each share the per-net lock in nfsd_fcache_disposal and will require less list walking. As the net is always shut down before there is any chance that the rest of the filecache is shut down, we can remove the tests on NFSD_FILE_CACHE_UP. For the filecache stats file, which assumes a global lru, we keep a separate counter which includes all files in all netns lrus. Signed-off-by: NeilBrown <neilb@suse.de> --- fs/nfsd/filecache.c | 125 ++++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 57 deletions(-)