
[V2] io_uring: consider the overflow of sequence for timeout req

Message ID 20191014115156.43151-1-yangerkun@huawei.com (mailing list archive)
State New, archived
Series [V2] io_uring: consider the overflow of sequence for timeout req

Commit Message

yangerkun Oct. 14, 2019, 11:51 a.m. UTC
The sequence for a timeout req may overflow, and that leads to a wrong
order in the timeout req list. Two situations need to be considered:

1. ctx->cached_sq_head + count - 1 may overflow;
2. the current cached_sq_head may have wrapped around compared with an
earlier cached_sq_head.

Fix the wrong logic by recording the count as well, and by using type
long long to handle the overflow.

Signed-off-by: yangerkun <yangerkun@huawei.com>
---
 fs/io_uring.c | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)
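
To make situation 1 concrete, here is a minimal standalone sketch (made-up
values, not part of the patch): a timeout submitted just before the 32-bit
sq head wraps gets a *smaller* target sequence than one submitted long
before it, so a naive unsigned comparison sorts it first.

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Timeout A queued early; timeout B queued much later, just
	 * before the 32-bit sq head wraps around. */
	unsigned head_a = 100,          count_a = 10;
	unsigned head_b = UINT_MAX - 2, count_b = 10;

	unsigned seq_a = head_a + count_a - 1;	/* 109 */
	unsigned seq_b = head_b + count_b - 1;	/* wraps around to 6 */

	/* B must expire after A, yet the naive compare disagrees. */
	printf("seq_b >= seq_a? %d (expected 1)\n", seq_b >= seq_a);
	return 0;
}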

Comments

Jens Axboe Oct. 14, 2019, 8:10 p.m. UTC | #1
On 10/14/19 5:51 AM, yangerkun wrote:
> The sequence for a timeout req may overflow, and that leads to a wrong
> order in the timeout req list. Two situations need to be considered:
> 
> 1. ctx->cached_sq_head + count - 1 may overflow;
> 2. the current cached_sq_head may have wrapped around compared with an
> earlier cached_sq_head.
> 
> Fix the wrong logic by recording the count as well, and by using type
> long long to handle the overflow.
> 
> Signed-off-by: yangerkun <yangerkun@huawei.com>
> ---
>   fs/io_uring.c | 31 +++++++++++++++++++++++++------
>   1 file changed, 25 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 76fdbe84aff5..c8dbf85c1c91 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -288,6 +288,7 @@ struct io_poll_iocb {
>   struct io_timeout {
>   	struct file			*file;
>   	struct hrtimer			timer;
> +	unsigned			count;
>   };

Can we reuse io_kiocb->submit.sequence for this? Unfortunately doing it
the way that you did, which does make the most logical sense, means that
struct io_kiocb will now spill into a 4th cacheline.

Or maybe fold ->sequence and ->submit.sequence to reclaim that space?
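
For reference, a hedged sketch of what that reuse could look like in
io_timeout(), assuming req->submit.sequence is dead once the timeout is
armed; this only illustrates the suggestion, it is not the follow-up patch:

	req->sequence = ctx->cached_sq_head + count - 1;
	/* The submission-side sequence is no longer needed once the
	 * timeout is armed, so stash the user-supplied count there
	 * instead of adding a field to struct io_timeout (and thereby
	 * growing struct io_kiocb into a 4th cacheline). */
	req->submit.sequence = count;
	req->flags |= REQ_F_TIMEOUT;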

> @@ -1907,21 +1908,39 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>   		count = 1;
>   
>   	req->sequence = ctx->cached_sq_head + count - 1;
> +	req->timeout.count = count;
>   	req->flags |= REQ_F_TIMEOUT;
>   
>   	/*
>   	 * Insertion sort, ensuring the first entry in the list is always
>   	 * the one we need first.
>   	 */
> -	tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
> -	req_dist = req->sequence - tail_index;
>   	spin_lock_irq(&ctx->completion_lock);
>   	list_for_each_prev(entry, &ctx->timeout_list) {
>   		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
> -		unsigned dist;
> +		unsigned nxt_sq_head;
> +		long long tmp, tmp_nxt;
>   
> -		dist = nxt->sequence - tail_index;
> -		if (req_dist >= dist)
> +		/* A count no smaller than an earlier req's can break directly. */
> +		if (count >= nxt->timeout.count)
> +			break;

Took me a bit, but I guess that's true. It's an optimization, maybe it'd be
cleaner if we just stuck to the sequence checking?
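
For what it's worth, a worked example of why the early break holds (made-up
numbers, with the sq head viewed as a non-wrapping counter): every nxt
already on the list was queued at a base head no later than the current
one, so count >= nxt->timeout.count forces the new target sequence to be
at least nxt's.

#include <assert.h>

int main(void)
{
	/* nxt entered the list earlier, so its base sq head is no
	 * later than ours (both viewed as non-wrapping counters). */
	unsigned long long nxt_base = 100, nxt_count = 8;	/* target 107 */
	unsigned long long req_base = 150, req_count = 8;	/* target 157 */

	/* A monotonic base plus count >= nxt's count implies
	 * target >= nxt's target, so the scan can stop here. */
	assert(req_base >= nxt_base && req_count >= nxt_count);
	assert(req_base + req_count - 1 >= nxt_base + nxt_count - 1);
	return 0;
}
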
yangerkun Oct. 15, 2019, 1:04 p.m. UTC | #2
On 2019/10/15 4:10, Jens Axboe wrote:
> On 10/14/19 5:51 AM, yangerkun wrote:
>> The sequence for a timeout req may overflow, and that leads to a wrong
>> order in the timeout req list. Two situations need to be considered:
>>
>> 1. ctx->cached_sq_head + count - 1 may overflow;
>> 2. the current cached_sq_head may have wrapped around compared with an
>> earlier cached_sq_head.
>>
>> Fix the wrong logic by recording the count as well, and by using type
>> long long to handle the overflow.
>>
>> Signed-off-by: yangerkun <yangerkun@huawei.com>
>> ---
>>    fs/io_uring.c | 31 +++++++++++++++++++++++++------
>>    1 file changed, 25 insertions(+), 6 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 76fdbe84aff5..c8dbf85c1c91 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -288,6 +288,7 @@ struct io_poll_iocb {
>>    struct io_timeout {
>>    	struct file			*file;
>>    	struct hrtimer			timer;
>> +	unsigned			count;
>>    };
> 
> Can we reuse io_kiocb->submit.sequence for this? Unfortunately doing it
> the way that you did, which does make the most logical sense, means that
> struct io_kiocb will now spill into a 4th cacheline.
> 
> Or maybe fold ->sequence and ->submit.sequence to reclaim that space?

Yeah, I'd prefer to reuse ->submit.sequence to hold the count. I had never
thought about the cacheline before. Thanks a lot!

> 
>> @@ -1907,21 +1908,39 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>>    		count = 1;
>>    
>>    	req->sequence = ctx->cached_sq_head + count - 1;
>> +	req->timeout.count = count;
>>    	req->flags |= REQ_F_TIMEOUT;
>>    
>>    	/*
>>    	 * Insertion sort, ensuring the first entry in the list is always
>>    	 * the one we need first.
>>    	 */
>> -	tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
>> -	req_dist = req->sequence - tail_index;
>>    	spin_lock_irq(&ctx->completion_lock);
>>    	list_for_each_prev(entry, &ctx->timeout_list) {
>>    		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
>> -		unsigned dist;
>> +		unsigned nxt_sq_head;
>> +		long long tmp, tmp_nxt;
>>    
>> -		dist = nxt->sequence - tail_index;
>> -		if (req_dist >= dist)
>> +		/* A count no smaller than an earlier req's can break directly. */
>> +		if (count >= nxt->timeout.count)
>> +			break;
> 
> Took me a bit, but I guess that's true. It's an optimization, maybe it'd be
> cleaner if we just stuck to the sequence checking?

It's a good idea, and thanks for your suggestion! I will resend the patch
soon!

Thanks,
Kun.


Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 76fdbe84aff5..c8dbf85c1c91 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -288,6 +288,7 @@ struct io_poll_iocb {
 struct io_timeout {
 	struct file			*file;
 	struct hrtimer			timer;
+	unsigned			count;
 };
 
 /*
@@ -1884,7 +1885,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 
 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	unsigned count, req_dist, tail_index;
+	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct list_head *entry;
 	struct timespec64 ts;
@@ -1907,21 +1908,39 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		count = 1;
 
 	req->sequence = ctx->cached_sq_head + count - 1;
+	req->timeout.count = count;
 	req->flags |= REQ_F_TIMEOUT;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
 	 */
-	tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
-	req_dist = req->sequence - tail_index;
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_prev(entry, &ctx->timeout_list) {
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-		unsigned dist;
+		unsigned nxt_sq_head;
+		long long tmp, tmp_nxt;
 
-		dist = nxt->sequence - tail_index;
-		if (req_dist >= dist)
+		/* A count no smaller than an earlier req's can break directly. */
+		if (count >= nxt->timeout.count)
+			break;
+
+		/*
+		 * Since cached_sq_head + count - 1 can overflow, use type long
+		 * long to store it.
+		 */
+		tmp = (long long)ctx->cached_sq_head + count - 1;
+		nxt_sq_head = nxt->sequence - nxt->timeout.count + 1;
+		tmp_nxt = (long long)nxt_sq_head + nxt->timeout.count - 1;
+
+		/*
+		 * cached_sq_head may overflow, but it can never overflow twice
+		 * while some timeout req is still pending.
+		 */
+		if (ctx->cached_sq_head < nxt_sq_head)
+			tmp += UINT_MAX;
+
+		if (tmp >= tmp_nxt)
 			break;
 	}
 	list_add(&req->list, entry);
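
As a sanity check, the comparison above can be lifted into a userspace
harness (helper name hypothetical, values made up) and exercised against
both situations from the commit message:

#include <assert.h>
#include <limits.h>

/* Mirrors the patch's widened compare: nonzero when the new req
 * (cur_head, cur_count) should be placed after the existing one. */
static int sorts_after(unsigned cur_head, unsigned cur_count,
		       unsigned nxt_head, unsigned nxt_count)
{
	long long tmp = (long long)cur_head + cur_count - 1;
	long long tmp_nxt = (long long)nxt_head + nxt_count - 1;

	/* The head can wrap at most once while nxt is still pending. */
	if (cur_head < nxt_head)
		tmp += UINT_MAX;

	return tmp >= tmp_nxt;
}

int main(void)
{
	/* Situation 1: the new target sequence itself wraps. */
	assert(sorts_after(UINT_MAX - 2, 10, 100, 10));

	/* Situation 2: the sq head wrapped after nxt was queued. */
	assert(sorts_after(5, 3, UINT_MAX - 2, 10));
	return 0;
}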