Use the added target io handling helpers to simplify loop io
completion.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
Note: two hedged sketches of the helpers' assumed shapes follow the
diff, for review context only; they are not part of this patch.

 tools/testing/selftests/ublk/file_backed.c | 91 +++++++++++-----------
 tools/testing/selftests/ublk/kublk.h       |  4 -
 2 files changed, 47 insertions(+), 48 deletions(-)

diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c
--- a/tools/testing/selftests/ublk/file_backed.c
+++ b/tools/testing/selftests/ublk/file_backed.c
@@ -13,8 +13,22 @@ static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int
assert(0);
}

+static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+{
+ unsigned ublk_op = ublksrv_get_op(iod);
+ struct io_uring_sqe *sqe[1];
+
+ ublk_queue_alloc_sqes(q, sqe, 1);
+ io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
+ io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
+ /* bit63 marks us as tgt io */
+ sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
+ return 1;
+}
+
static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
+ unsigned ublk_op = ublksrv_get_op(iod);
int zc = ublk_queue_use_zc(q);
enum io_uring_op op = ublk_to_uring_op(iod, zc);
struct io_uring_sqe *sqe[3];
@@ -29,98 +43,87 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
iod->nr_sectors << 9,
iod->start_sector << 9);
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
- q->io_inflight++;
/* bit63 marks us as tgt io */
- sqe[0]->user_data = build_user_data(tag, op, UBLK_IO_TGT_NORMAL, 1);
- return 0;
+ sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
+ return 1;
}
ublk_queue_alloc_sqes(q, sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
- sqe[0]->user_data = build_user_data(tag, 0xfe, 1, 1);
- sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS;
- sqe[0]->flags |= IOSQE_IO_LINK;
+ sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
+ sqe[0]->user_data = build_user_data(tag,
+ ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
iod->nr_sectors << 9,
iod->start_sector << 9);
sqe[1]->buf_index = tag;
- sqe[1]->flags |= IOSQE_FIXED_FILE;
- sqe[1]->flags |= IOSQE_IO_LINK;
- sqe[1]->user_data = build_user_data(tag, op, UBLK_IO_TGT_ZC_OP, 1);
- q->io_inflight++;
+ sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
+ sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);
io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
- sqe[2]->user_data = build_user_data(tag, 0xff, UBLK_IO_TGT_ZC_BUF, 1);
- q->io_inflight++;
+ sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);

- return 0;
+ return 2;
}

static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
unsigned ublk_op = ublksrv_get_op(iod);
- struct io_uring_sqe *sqe[1];
+ int ret;
switch (ublk_op) {
case UBLK_IO_OP_FLUSH:
- ublk_queue_alloc_sqes(q, sqe, 1);
- if (!sqe[0])
- return -ENOMEM;
- io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
- io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
- q->io_inflight++;
- sqe[0]->user_data = build_user_data(tag, ublk_op, UBLK_IO_TGT_NORMAL, 1);
+ ret = loop_queue_flush_io(q, iod, tag);
break;
case UBLK_IO_OP_WRITE_ZEROES:
case UBLK_IO_OP_DISCARD:
- return -ENOTSUP;
+ ret = -ENOTSUP;
+ break;
case UBLK_IO_OP_READ:
case UBLK_IO_OP_WRITE:
- loop_queue_tgt_rw_io(q, iod, tag);
+ ret = loop_queue_tgt_rw_io(q, iod, tag);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
- return 1;
+ return ret;
}

static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
{
int queued = loop_queue_tgt_io(q, tag);
- if (queued < 0)
- ublk_complete_io(q, tag, queued);
-
+ ublk_queued_tgt_io(q, tag, queued);
return 0;
}

static void ublk_loop_io_done(struct ublk_queue *q, int tag,
const struct io_uring_cqe *cqe)
{
- int cqe_tag = user_data_to_tag(cqe->user_data);
- unsigned tgt_data = user_data_to_tgt_data(cqe->user_data);
- int res = cqe->res;
+ unsigned op = user_data_to_op(cqe->user_data);
+ struct ublk_io *io = ublk_get_io(q, tag);
+
+ if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
+ if (!io->result)
+ io->result = cqe->res;
+ if (cqe->res < 0)
+ ublk_err("%s: io failed op %x user_data %lx\n",
+ __func__, op, cqe->user_data);
+ }

- if (res < 0 || tgt_data == UBLK_IO_TGT_NORMAL)
- goto complete;
+ /* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
+ if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
+ io->tgt_ios += 1;

- if (tgt_data == UBLK_IO_TGT_ZC_OP) {
- ublk_set_io_res(q, tag, cqe->res);
- goto exit;
- }
- assert(tgt_data == UBLK_IO_TGT_ZC_BUF);
- res = ublk_get_io_res(q, tag);
-complete:
- assert(tag == cqe_tag);
- ublk_complete_io(q, tag, res);
-exit:
- q->io_inflight--;
+ if (ublk_completed_tgt_io(q, tag))
+ ublk_complete_io(q, tag, io->result);
}

static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -44,10 +44,6 @@
#define UBLK_MAX_QUEUES 4
#define UBLK_QUEUE_DEPTH 128

-#define UBLK_IO_TGT_NORMAL 0
-#define UBLK_IO_TGT_ZC_BUF 1
-#define UBLK_IO_TGT_ZC_OP 2
-
#define UBLK_DBG_DEV (1U << 0)
#define UBLK_DBG_QUEUE (1U << 1)
#define UBLK_DBG_IO_CMD (1U << 2)
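
For review context, here is a plausible shape for the target-io
accounting helpers the commit message refers to (ublk_queued_tgt_io(),
ublk_completed_tgt_io(), ublk_get_io()), reconstructed only from their
call sites in this diff; the struct layout and bodies below are
assumptions, not quotes from kublk.h:

/* Illustrative only: fields inferred from this diff, not the tree's own. */
struct ublk_io {
	int result;	/* first meaningful/failed CQE result for this tag */
	int tgt_ios;	/* backing io_uring CQEs still expected */
};

struct ublk_queue;	/* opaque here */
struct ublk_io *ublk_get_io(struct ublk_queue *q, int tag);
void ublk_complete_io(struct ublk_queue *q, int tag, int res);

/*
 * Called right after the ->queue_io handler: 'queued' is either the
 * number of target SQEs submitted (see the new return values above)
 * or a negative errno, in which case the io fails immediately.
 */
static inline void ublk_queued_tgt_io(struct ublk_queue *q, int tag, int queued)
{
	if (queued < 0) {
		ublk_complete_io(q, tag, queued);
	} else {
		struct ublk_io *io = ublk_get_io(q, tag);

		io->tgt_ios = queued;
		io->result = 0;
	}
}

/*
 * Called once per target CQE; returns true when the last expected
 * CQE for this tag has arrived, so the caller can complete the io.
 */
static inline int ublk_completed_tgt_io(struct ublk_queue *q, int tag)
{
	return --ublk_get_io(q, tag)->tgt_ios == 0;
}

This squares with the IOSQE_CQE_SKIP_SUCCESS register SQE: its CQE is
only delivered on failure, which is why ublk_loop_io_done() bumps
tgt_ios when a register CQE does show up.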
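Similarly, a minimal sketch of the user_data cookie helpers the diff
uses (build_user_data(), user_data_to_op(), ublk_cmd_op_nr()); the
exact field widths are an assumption inferred from the call sites and
the "bit63 marks us as tgt io" comment:

#include <stdint.h>
#include <linux/ioctl.h>	/* _IOC_NR() */

/* Pack tag/op/tgt_data into one SQE cookie; bit 63 flags target io. */
static inline uint64_t build_user_data(unsigned tag, unsigned op,
				       unsigned tgt_data, unsigned is_target_io)
{
	return tag | ((uint64_t)op << 16) | ((uint64_t)tgt_data << 24) |
		(uint64_t)is_target_io << 63;
}

static inline unsigned user_data_to_tag(uint64_t user_data)
{
	return user_data & 0xffff;
}

static inline unsigned user_data_to_op(uint64_t user_data)
{
	return (user_data >> 16) & 0xff;
}

/* Reduce a UBLK_U_* ioctl-style command code to its bare op number,
 * so it fits the narrow op field above. */
static inline unsigned ublk_cmd_op_nr(unsigned op)
{
	return _IOC_NR(op);
}

Usage matches the diff: sqe[2]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1), and user_data_to_op() later
recovers the op so ublk_loop_io_done() can tell the unregister CQE
apart from the rw and register ones.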