
[v2,5/7] cifs: Fix potential deadlock when updating vol in cifs_reconnect()

Message ID 20191125165758.3793-6-pc@cjr.nz
State New, archived
Series DFS fixes

Commit Message

Paulo Alcantara Nov. 25, 2019, 4:57 p.m. UTC
We can't hold the vol_lock spinlock while refreshing the DFS cache
because cifs_reconnect() may call dfs_cache_update_vol(), which takes
vol_lock again, while we are still walking through the volume list.
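
To make the failure mode concrete, here is a minimal userspace sketch
of the pre-patch shape. A pthread mutex stands in for the vol_lock
spinlock, and update_vol(), reconnect() and broken_refresh_worker()
are hypothetical stand-ins for dfs_cache_update_vol(), cifs_reconnect()
and the old refresh_cache_worker(), not the actual fs/cifs code:

#include <pthread.h>

struct vol {
	struct vol *next;
};

static pthread_mutex_t vols_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vol *vols;	/* volume list, guarded by vols_lock */

/* Stand-in for dfs_cache_update_vol(): needs the list lock. */
static void update_vol(void)
{
	pthread_mutex_lock(&vols_lock);	/* never acquired: the worker owns it */
	/* ... find and update the matching volume ... */
	pthread_mutex_unlock(&vols_lock);
}

/* Stand-in for the reconnect path reached while refreshing. */
static void reconnect(void)
{
	update_vol();
}

/* Pre-patch worker shape: the whole walk runs under the lock, so a
 * default (non-recursive) mutex deadlocks here, like the spinlock. */
static void broken_refresh_worker(void)
{
	pthread_mutex_lock(&vols_lock);
	for (struct vol *v = vols; v; v = v->next)
		reconnect();	/* self-deadlock on the first volume */
	pthread_mutex_unlock(&vols_lock);
}

int main(void)
{
	broken_refresh_worker();	/* hangs as soon as vols is non-empty */
	return 0;
}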

To prevent that, make vol_info refcounted, create a temp list with all
volumes eligible for refreshing, and then use it without any locks
held.
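
And a sketch of the pattern the patch adopts, under the same
hypothetical userspace model (names are again illustrative; the
eligibility checks, error handling and tcon work are omitted): pin
each entry with an extra reference while the lock is held, then do the
slow work over a private list with the lock dropped:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t vols_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for struct vol_info: refcount and list membership are both
 * guarded by vols_lock, as vol_count is by vol_lock in the patch. */
struct vol {
	int id;
	int refcount;
	struct vol *next;	/* main list ("list" in the patch) */
	struct vol *rnext;	/* private refresh list ("rlist") */
};
static struct vol *vols;

/* Must be called with vols_lock held, mirroring put_vol(). */
static void put_vol(struct vol *v)
{
	if (--v->refcount)
		return;
	for (struct vol **pp = &vols; *pp; pp = &(*pp)->next) {
		if (*pp == v) {
			*pp = v->next;	/* unlink, like list_del_init() */
			break;
		}
	}
	free(v);
}

static void refresh_worker(void)
{
	struct vol *v, *rlist = NULL;

	/* Phase 1: under the lock, pin each eligible volume with an
	 * extra reference and queue it on a list private to us. */
	pthread_mutex_lock(&vols_lock);
	for (v = vols; v; v = v->next) {
		v->refcount++;
		v->rnext = rlist;
		rlist = v;
	}
	pthread_mutex_unlock(&vols_lock);

	/* Phase 2: lock dropped, so the reconnect path can take
	 * vols_lock freely while we do the slow refresh work. */
	while (rlist) {
		v = rlist;
		rlist = v->rnext;
		printf("refreshing vol %d\n", v->id);	/* the slow part */

		pthread_mutex_lock(&vols_lock);
		put_vol(v);	/* may free v if del_vol() ran meanwhile */
		pthread_mutex_unlock(&vols_lock);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct vol *v = calloc(1, sizeof(*v));
		v->id = i;
		v->refcount = 1;	/* initial ref, as in dfs_cache_add_vol() */
		v->next = vols;
		vols = v;
	}
	refresh_worker();	/* entries intentionally leak at exit */
	return 0;
}

Because put_vol() can drop the last reference and free the entry, the
decrement and the unlink have to happen atomically with respect to list
walkers, which is why the patch documents that put_vol() must be called
with vol_lock held.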

Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
---
v1 -> v2:
  - mention that vol_info is now refcounted in commit msg
  - document put_vol() to say that it must be called with vol_lock held
---
 fs/cifs/dfs_cache.c | 46 +++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)

Patch

diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index b790f37c0060..263d42d46acc 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -49,6 +49,8 @@ struct vol_info {
 	struct smb_vol smb_vol;
 	char *mntdata;
 	struct list_head list;
+	struct list_head rlist;
+	int vol_count;
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
@@ -516,13 +518,16 @@ static struct cache_entry *lookup_cache_entry(const char *path,
 	return ce;
 }
 
-static inline void free_vol(struct vol_info *vi)
+/* Must be called with vol_lock held */
+static void put_vol(struct vol_info *vi)
 {
-	list_del(&vi->list);
-	kfree(vi->fullpath);
-	kfree(vi->mntdata);
-	cifs_cleanup_volume_info_contents(&vi->smb_vol);
-	kfree(vi);
+	if (!--vi->vol_count) {
+		list_del_init(&vi->list);
+		kfree(vi->fullpath);
+		kfree(vi->mntdata);
+		cifs_cleanup_volume_info_contents(&vi->smb_vol);
+		kfree(vi);
+	}
 }
 
 static inline void free_vol_list(void)
@@ -530,7 +535,7 @@ static inline void free_vol_list(void)
 	struct vol_info *vi, *nvi;
 
 	list_for_each_entry_safe(vi, nvi, &vol_list, list)
-		free_vol(vi);
+		put_vol(vi);
 }
 
 /**
@@ -1150,6 +1155,7 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 		goto err_free_fullpath;
 
 	vi->mntdata = mntdata;
+	vi->vol_count++;
 
 	spin_lock(&vol_lock);
 	list_add_tail(&vi->list, &vol_list);
@@ -1229,11 +1235,9 @@ void dfs_cache_del_vol(const char *fullpath)
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
 	spin_lock(&vol_lock);
-
 	vi = find_vol(fullpath);
 	if (!IS_ERR(vi))
-		free_vol(vi);
-
+		put_vol(vi);
 	spin_unlock(&vol_lock);
 }
 
@@ -1456,18 +1460,31 @@ static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
  */
 static void refresh_cache_worker(struct work_struct *work)
 {
-	struct vol_info *vi;
+	struct vol_info *vi, *nvi;
 	struct TCP_Server_Info *server;
+	LIST_HEAD(vols);
 	LIST_HEAD(tcons);
 	struct cifs_tcon *tcon, *ntcon;
 	int rc;
 
+	/* Get SMB volumes that are eligible (CifsGood) for refreshing */
 	spin_lock(&vol_lock);
 	list_for_each_entry(vi, &vol_list, list) {
 		server = get_tcp_server(&vi->smb_vol);
 		if (!server)
 			continue;
 
+		vi->vol_count++;
+		list_add_tail(&vi->rlist, &vols);
+		put_tcp_server(server);
+	}
+	spin_unlock(&vol_lock);
+
+	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+		server = get_tcp_server(&vi->smb_vol);
+		if (!server)
+			goto next_vol;
+
 		get_tcons(server, &tcons);
 		rc = 0;
 
@@ -1484,8 +1501,13 @@ static void refresh_cache_worker(struct work_struct *work)
 		}
 
 		put_tcp_server(server);
+
+next_vol:
+		list_del_init(&vi->rlist);
+		spin_lock(&vol_lock);
+		put_vol(vi);
+		spin_unlock(&vol_lock);
 	}
-	spin_unlock(&vol_lock);
 
 	queue_delayed_work(dfscache_wq, &refresh_task,
 			   atomic_read(&cache_ttl) * HZ);
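
The reference lifecycle the diff implies: dfs_cache_add_vol() takes the
initial reference before publishing the entry on vol_list, which is
presumably why that one increment is safe outside vol_lock;
refresh_cache_worker() takes one extra reference per eligible entry
under vol_lock and drops it through put_vol() once the refresh is done;
dfs_cache_del_vol() and free_vol_list() drop the initial reference, so
the entry is only unlinked and freed when the last reference goes away.
With every other vol_count access serialized by vol_lock, a plain int
suffices here in place of a kref or refcount_t.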