
[v2] ceph: stop forwarding the request when exceeding 256 times

Message ID 20220329122003.77740-1-xiubli@redhat.com (mailing list archive)
State New, archived
Series [v2] ceph: stop forwarding the request when exceeding 256 times

Commit Message

Xiubo Li March 29, 2022, 12:20 p.m. UTC
From: Xiubo Li <xiubli@redhat.com>

The type of 'num_fwd' in the ceph 'MClientRequestForward' message is
'int32_t', while in 'ceph_mds_request_head' it is '__u8'. So if a
request bounces between MDSes more than 256 times, the 8-bit counter
wraps and the client gets stuck.

In that case it's usually a bug in the MDS, and continuing to bounce
the request makes no sense.

Signed-off-by: Xiubo Li <xiubli@redhat.com>
---

V2:
- s/EIO/EMULTIHOP/
- Fixed a NULL 'seq' dereference bug
- Removed the out label


 fs/ceph/mds_client.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)
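
To make the failure mode concrete, here is a minimal userspace sketch
(illustrative only, not kernel code) of the wraparound described above:
the client tracks the forward count in a wide type, but the on-wire
'ceph_mds_request_head' field is a '__u8', so once the count reaches 256
the value echoed back by the MDS can never exceed the stored count, and
every further forward looks stale:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int r_num_fwd = 256;              /* client-side count, wide type */
	uint8_t wire_num_fwd = r_num_fwd; /* truncated to 0 on the wire */
	int fwd_seq = wire_num_fwd + 1;   /* MDS increments and echoes: 1 */

	/* Mirrors the handle_forward() staleness check below: 1 <= 256,
	 * so the forward is treated as old and the request is never
	 * resent -- the client is stuck. */
	if (fwd_seq <= r_num_fwd)
		printf("forward seq %d <= %d: request stuck\n",
		       fwd_seq, r_num_fwd);
	return 0;
}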

Comments

Jeffrey Layton March 29, 2022, 2:48 p.m. UTC | #1
On Tue, 2022-03-29 at 20:20 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> The type of 'num_fwd' in the ceph 'MClientRequestForward' message is
> 'int32_t', while in 'ceph_mds_request_head' it is '__u8'. So if a
> request bounces between MDSes more than 256 times, the 8-bit counter
> wraps and the client gets stuck.
> 
> In that case it's usually a bug in the MDS, and continuing to bounce
> the request makes no sense.
> 
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
> 
> V2:
> - s/EIO/EMULTIHOP/
> - Fixed a NULL 'seq' dereference bug
> - Removed the out label
> 
> 
>  fs/ceph/mds_client.c | 34 +++++++++++++++++++++++++++++-----
>  1 file changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
> index a89ee866ebbb..82c1f783feba 100644
> --- a/fs/ceph/mds_client.c
> +++ b/fs/ceph/mds_client.c
> @@ -3293,6 +3293,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>  	int err = -EINVAL;
>  	void *p = msg->front.iov_base;
>  	void *end = p + msg->front.iov_len;
> +	bool aborted = false;
>  
>  	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
>  	next_mds = ceph_decode_32(&p);
> @@ -3301,16 +3302,36 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>  	mutex_lock(&mdsc->mutex);
>  	req = lookup_get_request(mdsc, tid);
>  	if (!req) {
> +		mutex_unlock(&mdsc->mutex);
>  		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
> -		goto out;  /* dup reply? */
> +		return;  /* dup reply? */
>  	}
>  
>  	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
>  		dout("forward tid %llu aborted, unregistering\n", tid);
>  		__unregister_request(mdsc, req);
>  	} else if (fwd_seq <= req->r_num_fwd) {
> -		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
> -		     tid, next_mds, req->r_num_fwd, fwd_seq);
> +		/*
> +		 * The type of 'num_fwd' in ceph 'MClientRequestForward'
> +		 * is 'int32_t', while in 'ceph_mds_request_head' the
> +		 * type is '__u8'. So if the request bounces between
> +		 * MDSes more than 256 times, the client will get stuck.
> +		 *
> +		 * In this case it's usually a bug in the MDS and
> +		 * continuing to bounce the request makes no sense.
> +		 */
> +		if (req->r_num_fwd == 256) {

Can you also fix this to be expressed as "> UCHAR_MAX"? Or preferably,
if you have a way to get to the ceph_mds_request_head from here, then
express it in terms of sizeof() the field that limits this.
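
A rough sketch of what that suggestion could look like, using a
userspace stand-in for the real struct in include/linux/ceph/ceph_fs.h
(the exact v3 form is up to the author):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the on-wire struct, which declares '__u8 num_fwd'. */
struct ceph_mds_request_head_sketch {
	uint8_t num_fwd;
};

int main(void)
{
	int r_num_fwd = 256;

	/* First form: compare against the type's maximum. */
	if (r_num_fwd > UCHAR_MAX)
		printf("over UCHAR_MAX: abort with -EMULTIHOP\n");

	/* Preferred form: derive the bound from the field that limits
	 * it, so the check survives a future widening of num_fwd. */
	if (r_num_fwd >= (1 << (8 * (int)sizeof(
	    ((struct ceph_mds_request_head_sketch *)0)->num_fwd))))
		printf("field width exceeded: abort with -EMULTIHOP\n");
	return 0;
}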

> +			mutex_lock(&req->r_fill_mutex);
> +			req->r_err = -EMULTIHOP;
> +			set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
> +			mutex_unlock(&req->r_fill_mutex);
> +			aborted = true;
> +			pr_warn_ratelimited("forward tid %llu seq overflow\n",
> +					    tid);
> +		} else {
> +			dout("forward tid %llu to mds%d - old seq %d <= %d\n",
> +			     tid, next_mds, req->r_num_fwd, fwd_seq);
> +		}
>  	} else {
>  		/* resend. forward race not possible; mds would drop */
>  		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
> @@ -3322,9 +3343,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>  		put_request_session(req);
>  		__do_request(mdsc, req);
>  	}
> -	ceph_mdsc_put_request(req);
> -out:
>  	mutex_unlock(&mdsc->mutex);
> +
> +	/* kick calling process */
> +	if (aborted)
> +		complete_request(mdsc, req);
> +	ceph_mdsc_put_request(req);
>  	return;
>  
>  bad:

The code looks fine otherwise though...
Xiubo Li March 29, 2022, 11:57 p.m. UTC | #2
On 3/29/22 10:48 PM, Jeff Layton wrote:
> On Tue, 2022-03-29 at 20:20 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> The type of 'num_fwd' in the ceph 'MClientRequestForward' message is
>> 'int32_t', while in 'ceph_mds_request_head' it is '__u8'. So if a
>> request bounces between MDSes more than 256 times, the 8-bit counter
>> wraps and the client gets stuck.
>>
>> In that case it's usually a bug in the MDS, and continuing to bounce
>> the request makes no sense.
>>
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>
>> V2:
>> - s/EIO/EMULTIHOP/
>> - Fixed a NULL 'seq' dereference bug
>> - Removed the out label
>>
>>
>>   fs/ceph/mds_client.c | 34 +++++++++++++++++++++++++++++-----
>>   1 file changed, 29 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
>> index a89ee866ebbb..82c1f783feba 100644
>> --- a/fs/ceph/mds_client.c
>> +++ b/fs/ceph/mds_client.c
>> @@ -3293,6 +3293,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>>   	int err = -EINVAL;
>>   	void *p = msg->front.iov_base;
>>   	void *end = p + msg->front.iov_len;
>> +	bool aborted = false;
>>   
>>   	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
>>   	next_mds = ceph_decode_32(&p);
>> @@ -3301,16 +3302,36 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>>   	mutex_lock(&mdsc->mutex);
>>   	req = lookup_get_request(mdsc, tid);
>>   	if (!req) {
>> +		mutex_unlock(&mdsc->mutex);
>>   		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
>> -		goto out;  /* dup reply? */
>> +		return;  /* dup reply? */
>>   	}
>>   
>>   	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
>>   		dout("forward tid %llu aborted, unregistering\n", tid);
>>   		__unregister_request(mdsc, req);
>>   	} else if (fwd_seq <= req->r_num_fwd) {
>> -		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
>> -		     tid, next_mds, req->r_num_fwd, fwd_seq);
>> +		/*
>> +		 * The type of 'num_fwd' in ceph 'MClientRequestForward'
>> +		 * is 'int32_t', while in 'ceph_mds_request_head' the
>> +		 * type is '__u8'. So if the request bounces between
>> +		 * MDSes more than 256 times, the client will get stuck.
>> +		 *
>> +		 * In this case it's usually a bug in the MDS and
>> +		 * continuing to bounce the request makes no sense.
>> +		 */
>> +		if (req->r_num_fwd == 256) {
> Can you also fix this to be expressed as "> UCHAR_MAX"? Or preferably,
> if you have a way to get to the ceph_mds_request_head from here, then
> express it in terms of sizeof() the field that limits this.

Sure, let me try the second one first.

-- Xiubo

>> +			mutex_lock(&req->r_fill_mutex);
>> +			req->r_err = -EMULTIHOP;
>> +			set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
>> +			mutex_unlock(&req->r_fill_mutex);
>> +			aborted = true;
>> +			pr_warn_ratelimited("forward tid %llu seq overflow\n",
>> +					    tid);
>> +		} else {
>> +			dout("forward tid %llu to mds%d - old seq %d <= %d\n",
>> +			     tid, next_mds, req->r_num_fwd, fwd_seq);
>> +		}
>>   	} else {
>>   		/* resend. forward race not possible; mds would drop */
>>   		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
>> @@ -3322,9 +3343,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
>>   		put_request_session(req);
>>   		__do_request(mdsc, req);
>>   	}
>> -	ceph_mdsc_put_request(req);
>> -out:
>>   	mutex_unlock(&mdsc->mutex);
>> +
>> +	/* kick calling process */
>> +	if (aborted)
>> +		complete_request(mdsc, req);
>> +	ceph_mdsc_put_request(req);
>>   	return;
>>   
>>   bad:
> The code looks fine otherwise though...

Patch

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a89ee866ebbb..82c1f783feba 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3293,6 +3293,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
 	int err = -EINVAL;
 	void *p = msg->front.iov_base;
 	void *end = p + msg->front.iov_len;
+	bool aborted = false;
 
 	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
 	next_mds = ceph_decode_32(&p);
@@ -3301,16 +3302,36 @@ static void handle_forward(struct ceph_mds_client *mdsc,
 	mutex_lock(&mdsc->mutex);
 	req = lookup_get_request(mdsc, tid);
 	if (!req) {
+		mutex_unlock(&mdsc->mutex);
 		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
-		goto out;  /* dup reply? */
+		return;  /* dup reply? */
 	}
 
 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
 		dout("forward tid %llu aborted, unregistering\n", tid);
 		__unregister_request(mdsc, req);
 	} else if (fwd_seq <= req->r_num_fwd) {
-		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
-		     tid, next_mds, req->r_num_fwd, fwd_seq);
+		/*
+		 * The type of 'num_fwd' in ceph 'MClientRequestForward'
+		 * is 'int32_t', while in 'ceph_mds_request_head' the
+		 * type is '__u8'. So if the request bounces between
+		 * MDSes more than 256 times, the client will get stuck.
+		 *
+		 * In this case it's usually a bug in the MDS and
+		 * continuing to bounce the request makes no sense.
+		 */
+		if (req->r_num_fwd == 256) {
+			mutex_lock(&req->r_fill_mutex);
+			req->r_err = -EMULTIHOP;
+			set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+			mutex_unlock(&req->r_fill_mutex);
+			aborted = true;
+			pr_warn_ratelimited("forward tid %llu seq overflow\n",
+					    tid);
+		} else {
+			dout("forward tid %llu to mds%d - old seq %d <= %d\n",
+			     tid, next_mds, req->r_num_fwd, fwd_seq);
+		}
 	} else {
 		/* resend. forward race not possible; mds would drop */
 		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
@@ -3322,9 +3343,12 @@  static void handle_forward(struct ceph_mds_client *mdsc,
 		put_request_session(req);
 		__do_request(mdsc, req);
 	}
-	ceph_mdsc_put_request(req);
-out:
 	mutex_unlock(&mdsc->mutex);
+
+	/* kick calling process */
+	if (aborted)
+		complete_request(mdsc, req);
+	ceph_mdsc_put_request(req);
 	return;
 
 bad:
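
As a closing note on the s/EIO/EMULTIHOP/ change in v2: once the request
is aborted with r_err = -EMULTIHOP, the error propagates back to the
blocked syscall, so an application caught behind the forwarding loop now
fails with a distinctive errno instead of hanging. A hypothetical
caller's view (the mount path is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* With the patch applied, a request bounced past the 8-bit
	 * forward limit fails promptly instead of blocking forever. */
	if (stat("/mnt/cephfs/somefile", &st) == -1 && errno == EMULTIHOP)
		fprintf(stderr, "MDS forwarding loop: %s\n",
			strerror(errno));
	return 0;
}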