
[5/7] cifs: Fix potential deadlock when updating vol in cifs_reconnect()

Message ID: 20191122153057.6608-6-pc@cjr.nz
State: New, archived
Series: DFS fixes

Commit Message

Paulo Alcantara Nov. 22, 2019, 3:30 p.m. UTC
We can't hold the vol_lock spinlock while refreshing the DFS cache
because cifs_reconnect() may call dfs_cache_update_vol() while we are
walking through the volume list.

Create a temp list with all volumes eligible for refreshing and then
use it without any locks held.

Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
---
 fs/cifs/dfs_cache.c | 45 +++++++++++++++++++++++++++++++++------------
 1 file changed, 33 insertions(+), 12 deletions(-)
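
In outline, the deadlock being avoided is the refresh worker re-entering vol_lock through the reconnect path. The following is a simplified sketch of the call chain, not the literal code, and it assumes dfs_cache_update_vol() takes vol_lock (which is what makes holding the lock across the refresh unsafe):

	refresh_cache_worker()
	  spin_lock(&vol_lock)
	  list_for_each_entry(vi, &vol_list, list)
	    refresh_tcon(vi, tcon)           /* may trigger a reconnect */
	      cifs_reconnect()
	        dfs_cache_update_vol()
	          spin_lock(&vol_lock)       /* deadlock: lock already held */

With the patch, the worker only takes vol_lock long enough to bump vi->vol_count and move each eligible vol_info onto a private rlist; the actual refresh then runs with no spinlocks held, and the reference is dropped under vol_lock afterwards.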

Comments

Aurélien Aptel Nov. 25, 2019, 12:01 p.m. UTC | #1
"Paulo Alcantara (SUSE)" <pc@cjr.nz> writes:

> We can't hold the vol_lock spinlock while refreshing the DFS cache
> because cifs_reconnect() may call dfs_cache_update_vol() while we are
> walking through the volume list.
>
> Create a temp list with all volumes eligible for refreshing and then
> use it without any locks held.

Commit msg should mention that it makes vol_info refcounted.

> Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
> ---
>  fs/cifs/dfs_cache.c | 45 +++++++++++++++++++++++++++++++++------------
>  1 file changed, 33 insertions(+), 12 deletions(-)
>
> diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
> index b082603c793a..5b9d7281dd67 100644
> --- a/fs/cifs/dfs_cache.c
> +++ b/fs/cifs/dfs_cache.c
> @@ -49,6 +49,8 @@ struct vol_info {
>  	struct smb_vol smb_vol;
>  	char *mntdata;
>  	struct list_head list;
> +	struct list_head rlist;
> +	int vol_count;
>  };
>  
>  static struct kmem_cache *cache_slab __read_mostly;
> @@ -516,13 +518,15 @@ static struct cache_entry *lookup_cache_entry(const char *path,
>  	return ce;
>  }
>  
> -static inline void free_vol(struct vol_info *vi)
> +static void put_vol(struct vol_info *vi)
>  {
> -	list_del(&vi->list);
> -	kfree(vi->fullpath);
> -	kfree(vi->mntdata);
> -	cifs_cleanup_volume_info_contents(&vi->smb_vol);
> -	kfree(vi);
> +	if (!--vi->vol_count) {
> +		list_del_init(&vi->list);
> +		kfree(vi->fullpath);
> +		kfree(vi->mntdata);
> +		cifs_cleanup_volume_info_contents(&vi->smb_vol);
> +		kfree(vi);
> +	}
>  }

Can we document that put_vol() assumes vol_lock is held?
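
Something along these lines would make the assumption explicit (a sketch only; the lockdep_assert_held() annotation is a suggestion, not part of the posted patch):

	/* Must be called with vol_lock held. */
	static void put_vol(struct vol_info *vi)
	{
		lockdep_assert_held(&vol_lock);

		if (!--vi->vol_count) {
			list_del_init(&vi->list);
			kfree(vi->fullpath);
			kfree(vi->mntdata);
			cifs_cleanup_volume_info_contents(&vi->smb_vol);
			kfree(vi);
		}
	}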

Patch

diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index b082603c793a..5b9d7281dd67 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -49,6 +49,8 @@  struct vol_info {
 	struct smb_vol smb_vol;
 	char *mntdata;
 	struct list_head list;
+	struct list_head rlist;
+	int vol_count;
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
@@ -516,13 +518,15 @@  static struct cache_entry *lookup_cache_entry(const char *path,
 	return ce;
 }
 
-static inline void free_vol(struct vol_info *vi)
+static void put_vol(struct vol_info *vi)
 {
-	list_del(&vi->list);
-	kfree(vi->fullpath);
-	kfree(vi->mntdata);
-	cifs_cleanup_volume_info_contents(&vi->smb_vol);
-	kfree(vi);
+	if (!--vi->vol_count) {
+		list_del_init(&vi->list);
+		kfree(vi->fullpath);
+		kfree(vi->mntdata);
+		cifs_cleanup_volume_info_contents(&vi->smb_vol);
+		kfree(vi);
+	}
 }
 
 static inline void free_vol_list(void)
@@ -530,7 +534,7 @@  static inline void free_vol_list(void)
 	struct vol_info *vi, *nvi;
 
 	list_for_each_entry_safe(vi, nvi, &vol_list, list)
-		free_vol(vi);
+		put_vol(vi);
 }
 
 /**
@@ -1140,6 +1144,7 @@  int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
 		goto err_free_fullpath;
 
 	vi->mntdata = mntdata;
+	vi->vol_count++;
 
 	spin_lock(&vol_lock);
 	list_add_tail(&vi->list, &vol_list);
@@ -1219,11 +1224,9 @@  void dfs_cache_del_vol(const char *fullpath)
 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
 
 	spin_lock(&vol_lock);
-
 	vi = find_vol(fullpath);
 	if (!IS_ERR(vi))
-		free_vol(vi);
-
+		put_vol(vi);
 	spin_unlock(&vol_lock);
 }
 
@@ -1446,18 +1449,31 @@  static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
  */
 static void refresh_cache_worker(struct work_struct *work)
 {
-	struct vol_info *vi;
+	struct vol_info *vi, *nvi;
 	struct TCP_Server_Info *server;
+	LIST_HEAD(vols);
 	LIST_HEAD(tcons);
 	struct cifs_tcon *tcon, *ntcon;
 	int rc;
 
+	/* Get SMB volumes that are eligible (CifsGood) for refreshing */
 	spin_lock(&vol_lock);
 	list_for_each_entry(vi, &vol_list, list) {
 		server = get_tcp_server(&vi->smb_vol);
 		if (!server)
 			continue;
 
+		vi->vol_count++;
+		list_add_tail(&vi->rlist, &vols);
+		put_tcp_server(server);
+	}
+	spin_unlock(&vol_lock);
+
+	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+		server = get_tcp_server(&vi->smb_vol);
+		if (!server)
+			goto next_vol;
+
 		get_tcons(server, &tcons);
 		rc = 0;
 
@@ -1474,8 +1490,13 @@  static void refresh_cache_worker(struct work_struct *work)
 		}
 
 		put_tcp_server(server);
+
+next_vol:
+		list_del_init(&vi->rlist);
+		spin_lock(&vol_lock);
+		put_vol(vi);
+		spin_unlock(&vol_lock);
 	}
-	spin_unlock(&vol_lock);
 
 	queue_delayed_work(dfscache_wq, &refresh_task,
 			   atomic_read(&cache_ttl) * HZ);