@@ -28,7 +28,7 @@
#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
-#define NFSD_FILE_SHUTDOWN (1)
+#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
@@ -59,7 +59,7 @@ static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
static struct list_lru nfsd_file_lru;
-static long nfsd_file_lru_flags;
+static unsigned long nfsd_file_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
@@ -67,9 +67,8 @@ static struct delayed_work nfsd_filecache_laundrette;
static void
nfsd_file_schedule_laundrette(void)
{
- long count = atomic_long_read(&nfsd_filecache_count);
-
- if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
+ if ((atomic_long_read(&nfsd_filecache_count) == 0) ||
+ test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
return;
queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
@@ -704,9 +703,8 @@ nfsd_file_cache_init(void)
int ret = -ENOMEM;
unsigned int i;
- clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
-
- if (nfsd_file_hashtbl)
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
return 0;
nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
@@ -792,8 +790,8 @@ nfsd_file_cache_init(void)
/*
* Note this can deadlock with nfsd_file_lru_cb.
*/
-void
-nfsd_file_cache_purge(struct net *net)
+static void
+__nfsd_file_cache_purge(struct net *net)
{
unsigned int i;
struct nfsd_file *nf;
@@ -801,9 +799,6 @@ nfsd_file_cache_purge(struct net *net)
LIST_HEAD(dispose);
bool del;
- if (!nfsd_file_hashtbl)
- return;
-
for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
@@ -864,6 +859,19 @@ nfsd_file_cache_start_net(struct net *net)
return nn->fcache_disposal ? 0 : -ENOMEM;
}
+/**
+ * nfsd_file_cache_purge - Remove all cache items associated with @net
+ * @net: target net namespace
+ *
+ */
+void
+nfsd_file_cache_purge(struct net *net)
+{
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
+ __nfsd_file_cache_purge(net);
+}
+
void
nfsd_file_cache_shutdown_net(struct net *net)
{
@@ -876,7 +884,9 @@ nfsd_file_cache_shutdown(void)
{
int i;
- set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
+ return;
lease_unregister_notifier(&nfsd_file_lease_notifier);
unregister_shrinker(&nfsd_file_shrinker);
@@ -885,7 +895,7 @@ nfsd_file_cache_shutdown(void)
* calling nfsd_file_cache_purge
*/
cancel_delayed_work_sync(&nfsd_filecache_laundrette);
- nfsd_file_cache_purge(NULL);
+ __nfsd_file_cache_purge(NULL);
list_lru_destroy(&nfsd_file_lru);
rcu_barrier();
fsnotify_put_group(nfsd_file_fsnotify_group);
@@ -1163,7 +1173,7 @@ static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
* don't end up racing with server shutdown
*/
mutex_lock(&nfsd_mutex);
- if (nfsd_file_hashtbl) {
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1) {
for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
count += nfsd_file_hashtbl[i].nfb_count;
longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
In a moment, the nfsd_file_hashtbl global will be replaced with an
rhashtable. Replace the one or two spots that need to check if the
hash table is available. We can easily reuse the SHUTDOWN flag for
this purpose.

Document that this mechanism relies on callers to hold the nfsd_mutex
to prevent init, shutdown, and purging from running concurrently.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 fs/nfsd/filecache.c | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)
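For readers less familiar with the idiom, here is a minimal userspace sketch (hypothetical names such as cache_init/cache_purge/cache_shutdown; not part of the patch) of the mechanism the patch relies on: a single "cache is up" bit toggled with test-and-set/test-and-clear semantics, while callers hold a mutex standing in for the nfsd_mutex so that init, purge, and shutdown never run concurrently.

/*
 * Minimal sketch of the "cache is up" flag pattern. Compile with
 * -pthread. All names below are illustrative, not kernel APIs.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CACHE_UP	0	/* bit number, like NFSD_FILE_CACHE_UP */

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong cache_flags;

/* Rough userspace equivalents of test_and_set_bit()/test_and_clear_bit(). */
static bool test_and_set_up(void)
{
	return atomic_fetch_or(&cache_flags, 1UL << CACHE_UP) & (1UL << CACHE_UP);
}

static bool test_and_clear_up(void)
{
	return atomic_fetch_and(&cache_flags, ~(1UL << CACHE_UP)) & (1UL << CACHE_UP);
}

/* Caller holds cache_mutex, mirroring the lockdep assertion in init. */
static int cache_init(void)
{
	if (test_and_set_up())
		return 0;		/* already up, nothing to do */
	/* ... allocate hash table, workqueue, shrinker, ... */
	return 0;
}

/* Caller holds cache_mutex; touch the table only when it exists. */
static void cache_purge(void)
{
	if (!(atomic_load(&cache_flags) & (1UL << CACHE_UP)))
		return;
	/* ... walk the hash table and dispose of entries ... */
}

/* Caller holds cache_mutex; tear down only if init actually ran. */
static void cache_shutdown(void)
{
	if (!test_and_clear_up())
		return;
	/* ... free the hash table and other resources ... */
}

int main(void)
{
	pthread_mutex_lock(&cache_mutex);
	cache_init();
	cache_purge();
	cache_shutdown();
	pthread_mutex_unlock(&cache_mutex);

	printf("cache_flags = %lu\n", (unsigned long)atomic_load(&cache_flags));
	return 0;
}

Because the serialization comes from the caller-held mutex rather than from the flag itself, the flag only needs to record whether setup has completed; the test_and_set/test_and_clear pairs make init and shutdown idempotent without any additional locking.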