[v2] ceph: get snap_rwsem read lock in handle_cap_export for ceph_add_cap

Message ID: 20220315152946.12912-1-dossche.niels@gmail.com
State: New, archived

Commit Message

Niels Dossche March 15, 2022, 3:29 p.m. UTC
The function documentation of ceph_add_cap states that the caller
should hold the read lock on the session snap_rwsem. Furthermore,
ceph_add_cap is not the only function that needs the lock: it calls
ceph_lookup_snap_realm, which eventually calls ceph_get_snap_realm,
and that function asserts via lockdep that snap_rwsem must be held.
However, handle_cap_export calls ceph_add_cap without holding
mdsc->snap_rwsem. Since ceph_get_snap_realm and ceph_add_cap both
need the lock, the common place to acquire it is in handle_cap_export.

Signed-off-by: Niels Dossche <dossche.niels@gmail.com>
---
 fs/ceph/caps.c | 3 +++
 1 file changed, 3 insertions(+)
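
For context, a minimal sketch of the locking relationship described
above (illustrative stubs only; just the lockdep assertion and the
down_read/up_read pairing mirror the real fs/ceph code):

#include <linux/rwsem.h>
#include <linux/lockdep.h>

struct ceph_mds_client {
	struct rw_semaphore snap_rwsem;
	/* ... */
};

/* ceph_get_snap_realm() asserts that its caller holds snap_rwsem: */
static void get_snap_realm_sketch(struct ceph_mds_client *mdsc)
{
	lockdep_assert_held(&mdsc->snap_rwsem);
	/* take a reference on the inode's snap realm ... */
}

/*
 * ... so handle_cap_export() must hold the read lock across the
 * ceph_add_cap() -> ceph_lookup_snap_realm() -> ceph_get_snap_realm()
 * path, which is what the patch adds:
 */
static void handle_cap_export_sketch(struct ceph_mds_client *mdsc)
{
	down_read(&mdsc->snap_rwsem);
	get_snap_realm_sketch(mdsc);	/* reached via ceph_add_cap() */
	up_read(&mdsc->snap_rwsem);
}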

Comments

Xiubo Li March 16, 2022, 4:30 a.m. UTC | #1
On 3/15/22 11:29 PM, Niels Dossche wrote:
> [...]

Reviewed-by: Xiubo Li <xiubli@redhat.com>

Jeffrey Layton March 16, 2022, 2:12 p.m. UTC | #2
On Tue, 2022-03-15 at 16:29 +0100, Niels Dossche wrote:
> [...]

Looks good. Merged into testing branch.

Thanks!

Patch

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b472cd066d1c..a23cf2a528bc 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3856,6 +3856,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 	dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
 	     inode, ci, mds, mseq, target);
 retry:
+	down_read(&mdsc->snap_rwsem);
 	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
 	if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
@@ -3919,6 +3920,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 	}
 
 	spin_unlock(&ci->i_ceph_lock);
+	up_read(&mdsc->snap_rwsem);
 	mutex_unlock(&session->s_mutex);
 
 	/* open target session */
@@ -3944,6 +3946,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 
 out_unlock:
 	spin_unlock(&ci->i_ceph_lock);
+	up_read(&mdsc->snap_rwsem);
 	mutex_unlock(&session->s_mutex);
 	if (tsession) {
 		mutex_unlock(&tsession->s_mutex);
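
A brief note on the placement, as a sketch of the resulting order
(assuming nothing beyond what the diff shows): snap_rwsem is a
sleeping rw_semaphore while i_ceph_lock is a spinlock, so the read
lock must be taken before the spinlock and released only after the
spinlock is dropped, since sleeping while holding a spinlock is not
allowed.

	down_read(&mdsc->snap_rwsem);	/* sleeping lock: taken first */
	spin_lock(&ci->i_ceph_lock);	/* spinlock: no sleeping inside */
	/* ... export handling ... */
	spin_unlock(&ci->i_ceph_lock);
	up_read(&mdsc->snap_rwsem);	/* released after the spinlock */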