@@ -1043,32 +1043,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
-void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
-{
- struct task_struct *tsk = current;
-
- /*
- * If this is a nested plug, don't actually assign it.
- */
- if (tsk->plug)
- return;
-
- plug->mq_list = NULL;
- plug->cached_rq = NULL;
- plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
- plug->rq_count = 0;
- plug->multiple_queues = false;
- plug->has_elevator = false;
- plug->nowait = false;
- INIT_LIST_HEAD(&plug->cb_list);
-
- /*
- * Store ordering should not be needed here, since a potential
- * preempt will imply a full memory barrier
- */
- tsk->plug = plug;
-}
-
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
* @plug: The &struct blk_plug that needs to be initialized
@@ -1094,7 +1068,28 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
*/
void blk_start_plug(struct blk_plug *plug)
{
- blk_start_plug_nr_ios(plug, 1);
+ struct task_struct *tsk = current;
+
+ /*
+ * If this is a nested plug, don't actually assign it.
+ */
+ if (tsk->plug)
+ return;
+
+ plug->mq_list = NULL;
+ plug->cached_rq = NULL;
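+ /* submitters may grow nr_ios while the plug is active */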
+ plug->nr_ios = 1;
+ plug->rq_count = 0;
+ plug->multiple_queues = false;
+ plug->has_elevator = false;
+ plug->nowait = false;
+ INIT_LIST_HEAD(&plug->cb_list);
+
+ /*
+ * Store ordering should not be needed here, since a potential
+ * preempt will imply a full memory barrier
+ */
+ tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);
@@ -524,7 +524,8 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
.q = q,
.flags = flags,
.cmd_flags = opf,
- .nr_tags = plug->nr_ios,
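+ /* nr_ios is no longer clamped when the plug is set up, cap it here */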
+ .nr_tags = min_t(unsigned int, plug->nr_ios,
+ BLK_MAX_REQUEST_COUNT),
.cached_rq = &plug->cached_rq,
};
struct request *rq;
@@ -2867,7 +2868,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_throttle(q, bio);
if (plug) {
- data.nr_tags = plug->nr_ios;
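+ /* as in blk_mq_rq_cache_fill(), cap the submitter-grown nr_ios */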
+ data.nr_tags = min_t(unsigned int, plug->nr_ios,
+ BLK_MAX_REQUEST_COUNT);
plug->nr_ios = 1;
data.cached_rq = &plug->cached_rq;
}
@@ -175,6 +175,7 @@ struct io_submit_state {
bool need_plug;
unsigned short submit_nr;
unsigned int cqes_count;
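+ /* fd of the request that started the current plug */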
+ int fd;
struct blk_plug plug;
struct io_uring_cqe cqes[16];
};
@@ -2209,18 +2209,25 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EINVAL;
if (def->needs_file) {
- struct io_submit_state *state = &ctx->submit_state;
-
req->cqe.fd = READ_ONCE(sqe->fd);
/*
* Plug now if we have more than 2 IO left after this, and the
* target is potentially a read/write to block based storage.
*/
- if (state->need_plug && def->plug) {
- state->plug_started = true;
- state->need_plug = false;
- blk_start_plug_nr_ios(&state->plug, state->submit_nr);
+ if (def->plug) {
+ struct io_submit_state *state = &ctx->submit_state;
+
+ if (state->need_plug) {
+ state->plug_started = true;
+ state->need_plug = false;
+ state->fd = req->cqe.fd;
+ blk_start_plug(&state->plug);
+ } else if (state->plug_started &&
+ state->fd == req->cqe.fd &&
+ !state->link.head) {
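+ /*
+  * Same fd as the request that started this plug: bump nr_ios
+  * so the batched request allocation covers this one as well.
+  */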
+ state->plug.nr_ios++;
+ }
}
}