
ceph: defer flushing the capsnap if the Fb is used

Message ID 20210106014726.77614-1-xiubli@redhat.com (mailing list archive)
State New, archived
Series: ceph: defer flushing the capsnap if the Fb is used

Commit Message

Xiubo Li Jan. 6, 2021, 1:47 a.m. UTC
From: Xiubo Li <xiubli@redhat.com>

If the Fb cap is in use, it means the client is still flushing dirty
data to the OSDs, so just defer flushing the capsnap.

URL: https://tracker.ceph.com/issues/48679
URL: https://tracker.ceph.com/issues/48640
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
 fs/ceph/caps.c | 33 +++++++++++++++++++--------------
 fs/ceph/snap.c |  6 +++---
 2 files changed, 22 insertions(+), 17 deletions(-)
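
The idea is to funnel both the Fb and Fw release paths in
__ceph_put_cap_refs() through a single capsnap check. A condensed
sketch of the new flow (paraphrasing the hunks below, with the
unrelated bookkeeping trimmed, so not the literal patch):

	bool check_flushsnaps = false;

	if ((had & CEPH_CAP_FILE_BUFFER) && --ci->i_wb_ref == 0)
		check_flushsnaps = true;	/* last Fb ref dropped */
	if ((had & CEPH_CAP_FILE_WR) && --ci->i_wr_ref == 0)
		check_flushsnaps = true;	/* last Fw ref dropped */

	if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap, ci_item);

		capsnap->writing = 0;
		if (ceph_try_drop_cap_snap(ci, capsnap))
			put++;
		else if (__ceph_finish_cap_snap(ci, capsnap))
			flushsnaps = 1;
		wake = 1;
	}

With ceph_queue_cap_snap() also marking the capsnap as writing while Fb
is in use, the capsnap flush is deferred until the last Fb/Fw reference
is put.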

Comments

Jeff Layton Jan. 6, 2021, 5:58 p.m. UTC | #1
On Wed, 2021-01-06 at 09:47 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> If the Fb cap is in use, it means the client is still flushing dirty
> data to the OSDs, so just defer flushing the capsnap.
> 
> URL: https://tracker.ceph.com/issues/48679
> URL: https://tracker.ceph.com/issues/48640
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
>  fs/ceph/caps.c | 33 +++++++++++++++++++--------------
>  fs/ceph/snap.c |  6 +++---
>  2 files changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index abbf48fc6230..a61ca9183f41 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -3047,6 +3047,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>  {
>  	struct inode *inode = &ci->vfs_inode;
>  	int last = 0, put = 0, flushsnaps = 0, wake = 0;
> +	bool check_flushsnaps = false;
>  
>  	spin_lock(&ci->i_ceph_lock);
>  	if (had & CEPH_CAP_PIN)
> @@ -3063,26 +3064,15 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>  	if (had & CEPH_CAP_FILE_BUFFER) {
>  		if (--ci->i_wb_ref == 0) {
>  			last++;
> -			put++;
> +			check_flushsnaps = true;
>  		}

The "put" variable here is a counter of how many iputs need to be done
at the end of this function. If you just set check_flushsnaps instead of
incrementing "put", then you won't do an iput unless both
__ceph_have_pending_cap_snap and ceph_try_drop_cap_snap both return
true.

In the case where you're not dealing with snapshots at all, is this
going to cause an inode refcount leak?
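
For reference, the tail of __ceph_put_cap_refs() looks roughly like
this (a trimmed sketch, not the verbatim code):

	spin_unlock(&ci->i_ceph_lock);
	...
	if (last)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);	/* one iput for every put++ above */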

>  		dout("put_cap_refs %p wb %d -> %d (?)\n",
>  		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
>  	}
> -	if (had & CEPH_CAP_FILE_WR)
> +	if (had & CEPH_CAP_FILE_WR) {
>  		if (--ci->i_wr_ref == 0) {
>  			last++;
> -			if (__ceph_have_pending_cap_snap(ci)) {
> -				struct ceph_cap_snap *capsnap =
> -					list_last_entry(&ci->i_cap_snaps,
> -							struct ceph_cap_snap,
> -							ci_item);
> -				capsnap->writing = 0;
> -				if (ceph_try_drop_cap_snap(ci, capsnap))
> -					put++;
> -				else if (__ceph_finish_cap_snap(ci, capsnap))
> -					flushsnaps = 1;
> -				wake = 1;
> -			}
> +			check_flushsnaps = true;
>  			if (ci->i_wrbuffer_ref_head == 0 &&
>  			    ci->i_dirty_caps == 0 &&
>  			    ci->i_flushing_caps == 0) {
> @@ -3094,6 +3084,21 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>  			if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
>  				drop_inode_snap_realm(ci);
>  		}
> +	}
> +	if (check_flushsnaps) {
> +		if (__ceph_have_pending_cap_snap(ci)) {
> +			struct ceph_cap_snap *capsnap =
> +				list_last_entry(&ci->i_cap_snaps,
> +						struct ceph_cap_snap,
> +						ci_item);
> +			capsnap->writing = 0;
> +			if (ceph_try_drop_cap_snap(ci, capsnap))
> +				put++;
> +			else if (__ceph_finish_cap_snap(ci, capsnap))
> +				flushsnaps = 1;
> +			wake = 1;
> +		}
> +	}
>  	spin_unlock(&ci->i_ceph_lock);
>  
>  	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
> diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
> index b611f829cb61..639fb91cc9db 100644
> --- a/fs/ceph/snap.c
> +++ b/fs/ceph/snap.c
> @@ -561,10 +561,10 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
>  	capsnap->context = old_snapc;
>  	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
>  
> -	if (used & CEPH_CAP_FILE_WR) {
> +	if (used & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) {
>  		dout("queue_cap_snap %p cap_snap %p snapc %p"
> -		     " seq %llu used WR, now pending\n", inode,
> -		     capsnap, old_snapc, old_snapc->seq);
> +		     " seq %llu used WR | BUFFER, now pending\n",
> +		     inode, capsnap, old_snapc, old_snapc->seq);
>  		capsnap->writing = 1;
>  	} else {
>  		/* note mtime, size NOW. */
Xiubo Li Jan. 7, 2021, 2:15 a.m. UTC | #2
On 2021/1/7 1:58, Jeff Layton wrote:
> On Wed, 2021-01-06 at 09:47 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> If the Fb cap is in use, it means the client is still flushing dirty
>> data to the OSDs, so just defer flushing the capsnap.
>>
>> URL: https://tracker.ceph.com/issues/48679
>> URL: https://tracker.ceph.com/issues/48640
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>   fs/ceph/caps.c | 33 +++++++++++++++++++--------------
>>   fs/ceph/snap.c |  6 +++---
>>   2 files changed, 22 insertions(+), 17 deletions(-)
>>
>> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
>> index abbf48fc6230..a61ca9183f41 100644
>> --- a/fs/ceph/caps.c
>> +++ b/fs/ceph/caps.c
>> @@ -3047,6 +3047,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>>   {
>>   	struct inode *inode = &ci->vfs_inode;
>>   	int last = 0, put = 0, flushsnaps = 0, wake = 0;
>> +	bool check_flushsnaps = false;
>>   
>>   	spin_lock(&ci->i_ceph_lock);
>>   	if (had & CEPH_CAP_PIN)
>> @@ -3063,26 +3064,15 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>>   	if (had & CEPH_CAP_FILE_BUFFER) {
>>   		if (--ci->i_wb_ref == 0) {
>>   			last++;
>> -			put++;
>> +			check_flushsnaps = true;
>>   		}
> The "put" variable here is a counter of how many iputs need to be done
> at the end of this function. If you just set check_flushsnaps instead of
> incrementing "put", then you won't do an iput unless both
> __ceph_have_pending_cap_snap and ceph_try_drop_cap_snap both return
> true.
>
> In the case where you're not dealing with snapshots at all, is this
> going to cause an inode refcount leak?

Yeah, I misread the code. I just found that the inode reference is
taken in "ceph_take_cap_refs()" for the "CEPH_CAP_FILE_BUFFER" cap.
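
The relevant piece of ceph_take_cap_refs() is roughly this (a trimmed
sketch from memory, not verbatim):

	if (got & CEPH_CAP_FILE_BUFFER) {
		if (ci->i_wb_ref == 0)
			ihold(&ci->vfs_inode);	/* pin the inode on the first Fb ref */
		ci->i_wb_ref++;
	}

So when the last Fb reference is put, __ceph_put_cap_refs() still needs
to do the matching iput, whether or not a capsnap is pending.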

I will fix it in V2.

Thanks.

>
>>   		dout("put_cap_refs %p wb %d -> %d (?)\n",
>>   		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
>>   	}
>> -	if (had & CEPH_CAP_FILE_WR)
>> +	if (had & CEPH_CAP_FILE_WR) {
>>   		if (--ci->i_wr_ref == 0) {
>>   			last++;
>> -			if (__ceph_have_pending_cap_snap(ci)) {
>> -				struct ceph_cap_snap *capsnap =
>> -					list_last_entry(&ci->i_cap_snaps,
>> -							struct ceph_cap_snap,
>> -							ci_item);
>> -				capsnap->writing = 0;
>> -				if (ceph_try_drop_cap_snap(ci, capsnap))
>> -					put++;
>> -				else if (__ceph_finish_cap_snap(ci, capsnap))
>> -					flushsnaps = 1;
>> -				wake = 1;
>> -			}
>> +			check_flushsnaps = true;
>>   			if (ci->i_wrbuffer_ref_head == 0 &&
>>   			    ci->i_dirty_caps == 0 &&
>>   			    ci->i_flushing_caps == 0) {
>> @@ -3094,6 +3084,21 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
>>   			if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
>>   				drop_inode_snap_realm(ci);
>>   		}
>> +	}
>> +	if (check_flushsnaps) {
>> +		if (__ceph_have_pending_cap_snap(ci)) {
>> +			struct ceph_cap_snap *capsnap =
>> +				list_last_entry(&ci->i_cap_snaps,
>> +						struct ceph_cap_snap,
>> +						ci_item);
>> +			capsnap->writing = 0;
>> +			if (ceph_try_drop_cap_snap(ci, capsnap))
>> +				put++;
>> +			else if (__ceph_finish_cap_snap(ci, capsnap))
>> +				flushsnaps = 1;
>> +			wake = 1;
>> +		}
>> +	}
>>   	spin_unlock(&ci->i_ceph_lock);
>>   
>>   	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
>> diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
>> index b611f829cb61..639fb91cc9db 100644
>> --- a/fs/ceph/snap.c
>> +++ b/fs/ceph/snap.c
>> @@ -561,10 +561,10 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
>>   	capsnap->context = old_snapc;
>>   	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
>>   
>> -	if (used & CEPH_CAP_FILE_WR) {
>> +	if (used & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) {
>>   		dout("queue_cap_snap %p cap_snap %p snapc %p"
>> -		     " seq %llu used WR, now pending\n", inode,
>> -		     capsnap, old_snapc, old_snapc->seq);
>> +		     " seq %llu used WR | BUFFER, now pending\n",
>> +		     inode, capsnap, old_snapc, old_snapc->seq);
>>   		capsnap->writing = 1;
>>   	} else {
>>   		/* note mtime, size NOW. */

Patch

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index abbf48fc6230..a61ca9183f41 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3047,6 +3047,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
 {
 	struct inode *inode = &ci->vfs_inode;
 	int last = 0, put = 0, flushsnaps = 0, wake = 0;
+	bool check_flushsnaps = false;
 
 	spin_lock(&ci->i_ceph_lock);
 	if (had & CEPH_CAP_PIN)
@@ -3063,26 +3064,15 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
 	if (had & CEPH_CAP_FILE_BUFFER) {
 		if (--ci->i_wb_ref == 0) {
 			last++;
-			put++;
+			check_flushsnaps = true;
 		}
 		dout("put_cap_refs %p wb %d -> %d (?)\n",
 		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
 	}
-	if (had & CEPH_CAP_FILE_WR)
+	if (had & CEPH_CAP_FILE_WR) {
 		if (--ci->i_wr_ref == 0) {
 			last++;
-			if (__ceph_have_pending_cap_snap(ci)) {
-				struct ceph_cap_snap *capsnap =
-					list_last_entry(&ci->i_cap_snaps,
-							struct ceph_cap_snap,
-							ci_item);
-				capsnap->writing = 0;
-				if (ceph_try_drop_cap_snap(ci, capsnap))
-					put++;
-				else if (__ceph_finish_cap_snap(ci, capsnap))
-					flushsnaps = 1;
-				wake = 1;
-			}
+			check_flushsnaps = true;
 			if (ci->i_wrbuffer_ref_head == 0 &&
 			    ci->i_dirty_caps == 0 &&
 			    ci->i_flushing_caps == 0) {
@@ -3094,6 +3084,21 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
 			if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
 				drop_inode_snap_realm(ci);
 		}
+	}
+	if (check_flushsnaps) {
+		if (__ceph_have_pending_cap_snap(ci)) {
+			struct ceph_cap_snap *capsnap =
+				list_last_entry(&ci->i_cap_snaps,
+						struct ceph_cap_snap,
+						ci_item);
+			capsnap->writing = 0;
+			if (ceph_try_drop_cap_snap(ci, capsnap))
+				put++;
+			else if (__ceph_finish_cap_snap(ci, capsnap))
+				flushsnaps = 1;
+			wake = 1;
+		}
+	}
 	spin_unlock(&ci->i_ceph_lock);
 
 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index b611f829cb61..639fb91cc9db 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -561,10 +561,10 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	capsnap->context = old_snapc;
 	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
-	if (used & CEPH_CAP_FILE_WR) {
+	if (used & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) {
 		dout("queue_cap_snap %p cap_snap %p snapc %p"
-		     " seq %llu used WR, now pending\n", inode,
-		     capsnap, old_snapc, old_snapc->seq);
+		     " seq %llu used WR | BUFFER, now pending\n",
+		     inode, capsnap, old_snapc, old_snapc->seq);
 		capsnap->writing = 1;
 	} else {
 		/* note mtime, size NOW. */