@@ -150,6 +150,7 @@ struct ublk_io {
int res;
struct io_uring_cmd *cmd;
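+	/* task serving this io; ref held from FETCH_REQ until queue deinit/reinit */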
+ struct task_struct *task;
};
struct ublk_queue {
@@ -157,11 +158,9 @@ struct ublk_queue {
int q_depth;
unsigned long flags;
- struct task_struct *ubq_daemon;
struct ublksrv_io_desc *io_cmd_buf;
bool force_abort;
- bool timeout;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
unsigned short nr_io_ready; /* how many ios setup */
@@ -1072,11 +1071,6 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
-static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
-{
- return ubq->ubq_daemon->flags & PF_EXITING;
-}
-
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req)
{
@@ -1224,13 +1218,13 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
/*
* Task is exiting if either:
*
- * (1) current != ubq_daemon.
+ * (1) current != io->task.
* io_uring_cmd_complete_in_task() tries to run task_work
- * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
+	 * in a workqueue if the cmd's task is PF_EXITING.
*
* (2) current->flags & PF_EXITING.
*/
- if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
+ if (unlikely(current != io->task || current->flags & PF_EXITING)) {
__ublk_abort_rq(ubq, req);
return;
}
@@ -1336,23 +1330,20 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
unsigned int nr_inflight = 0;
int i;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- if (!ubq->timeout) {
- send_sig(SIGKILL, ubq->ubq_daemon, 0);
- ubq->timeout = true;
- }
-
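+		/* SIGKILL is idempotent, so resending it on each timeout is safe */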
+ send_sig(SIGKILL, io->task, 0);
return BLK_EH_DONE;
}
- if (!ubq_daemon_is_dying(ubq))
+ if (!(io->task->flags & PF_EXITING))
return BLK_EH_RESET_TIMER;
for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
+ io = &ubq->ios[i];
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
nr_inflight++;
@@ -1552,8 +1543,8 @@ static void ublk_commit_completion(struct ublk_device *ub,
}
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
+ * Called from the io task context via the cancel fn, while the ublk
+ * blk-mq queue is quiesced; hence we run exclusively with blk-mq and the io task
* context, so everything is serialized.
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
@@ -1669,13 +1660,13 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
return;
task = io_uring_cmd_get_task(cmd);
- if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
+ io = &ubq->ios[pdu->tag];
+ if (WARN_ON_ONCE(task && task != io->task))
return;
ub = ubq->dev;
need_schedule = ublk_abort_requests(ub, ubq);
- io = &ubq->ios[pdu->tag];
WARN_ON_ONCE(io->cmd != cmd);
ublk_cancel_cmd(ubq, io, issue_flags);
@@ -1836,8 +1827,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
mutex_lock(&ub->mutex);
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
- ubq->ubq_daemon = current;
- get_task_struct(ubq->ubq_daemon);
ub->nr_queues_ready++;
if (capable(CAP_SYS_ADMIN))
@@ -1934,6 +1923,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
const struct ublksrv_io_cmd *ub_cmd)
{
struct ublk_device *ub = cmd->file->private_data;
+ struct task_struct *task;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
@@ -1952,13 +1942,13 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!ubq || ub_cmd->q_id != ubq->q_id)
goto out;
- if (ubq->ubq_daemon && ubq->ubq_daemon != current)
- goto out;
-
if (tag >= ubq->q_depth)
goto out;
io = &ubq->ios[tag];
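+	/* paired with the WRITE_ONCE() in the UBLK_IO_FETCH_REQ path below */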
+ task = READ_ONCE(io->task);
+ if (task && task != current)
+ goto out;
/* there is pending io cmd, something must be wrong */
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
@@ -2011,6 +2001,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
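+		/* take a ref on current; dropped in ublk_deinit_queue()/ublk_queue_reinit() */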
+ WRITE_ONCE(io->task, get_task_struct(current));
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
@@ -2248,9 +2239,15 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
int size = ublk_queue_cmd_buf_size(ub, q_id);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ struct ublk_io *io;
+ int i;
+
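+	/* drop the per-io task refs taken in the FETCH_REQ path */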
+ for (i = 0; i < ubq->q_depth; i++) {
+ io = &ubq->ios[i];
+ if (io->task)
+ put_task_struct(io->task);
+ }
- if (ubq->ubq_daemon)
- put_task_struct(ubq->ubq_daemon);
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}
@@ -2936,15 +2933,8 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
/* All old ioucmds have to be completed */
ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
ubq->canceling = false;
for (i = 0; i < ubq->q_depth; i++) {
@@ -2954,6 +2944,10 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
io->flags = 0;
io->cmd = NULL;
io->addr = 0;
+
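+		/* the old task is PF_EXITING, put it now */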
+ WARN_ON_ONCE(!(io->task && (io->task->flags & PF_EXITING)));
+ put_task_struct(io->task);
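+		/* reset to NULL, otherwise ub won't accept a new FETCH_REQ */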
+ io->task = NULL;
}
}
@@ -2993,7 +2987,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+	/* set to NULL, otherwise new io tasks cannot mmap the io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
@@ -3011,14 +3005,14 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
int ret = -EINVAL;
int i;
- pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
- /* wait until new ubq_daemon sending all FETCH_REQ */
+ pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__,
+ header->dev_id);
+
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
- pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
+ header->dev_id);
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))