--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -67,6 +67,10 @@ struct io_file_table {
unsigned int alloc_hint;
};
+struct io_buf_table {
+ struct io_rsrc_data data;
+};
+
struct io_hash_bucket {
struct hlist_head list;
} ____cacheline_aligned_in_smp;
@@ -290,7 +294,7 @@ struct io_ring_ctx {
struct io_wq_work_list iopoll_list;
struct io_file_table file_table;
- struct io_rsrc_data buf_table;
+ struct io_buf_table buf_table;
struct io_submit_state submit_state;
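The two hunks above are the type-level change; the hunks that follow convert the call sites so that they reach the registered buffer table through the embedded ctx->buf_table.data (its nr and nodes[] members, and its address when passed to io_rsrc_node_lookup() or io_rsrc_data_free()). The only conversion that is not a pure .data rewrite is in io_clone_buffers(), where the IORING_REGISTER_DST_REPLACE path now calls io_sqe_buffers_unregister() instead of freeing the table directly. A minimal sketch of the resulting lookup pattern follows (illustrative only, not part of the patch; the helper name buf_lookup_sketch is invented here):

	static struct io_rsrc_node *buf_lookup_sketch(struct io_ring_ctx *ctx,
						      unsigned int index)
	{
		/* the rsrc table now lives in the io_buf_table wrapper's .data */
		return io_rsrc_node_lookup(&ctx->buf_table.data, index);
	}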
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -217,12 +217,12 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_puts(m, "\n");
}
}
- seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
- for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
+ seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.data.nr);
+ for (i = 0; has_lock && i < ctx->buf_table.data.nr; i++) {
struct io_mapped_ubuf *buf = NULL;
- if (ctx->buf_table.nodes[i])
- buf = ctx->buf_table.nodes[i]->buf;
+ if (ctx->buf_table.data.nodes[i])
+ buf = ctx->buf_table.data.nodes[i]->buf;
if (buf)
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
else
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1348,7 +1348,7 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
+ node = io_rsrc_node_lookup(&ctx->buf_table.data, sr->buf_index);
if (node) {
io_req_assign_buf_node(sr->notif, node);
ret = 0;
--- a/io_uring/nop.c
+++ b/io_uring/nop.c
@@ -69,7 +69,7 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
+ node = io_rsrc_node_lookup(&ctx->buf_table.data, nop->buffer);
if (node) {
io_req_assign_buf_node(req, node);
ret = 0;
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -919,7 +919,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
ret = __io_uring_register(ctx, opcode, arg, nr_args);
trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
- ctx->buf_table.nr, ret);
+ ctx->buf_table.data.nr, ret);
mutex_unlock(&ctx->uring_lock);
fput(file);
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -234,17 +234,17 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
__u32 done;
int i, err;
- if (!ctx->buf_table.nr)
+ if (!ctx->buf_table.data.nr)
return -ENXIO;
- if (up->offset + nr_args > ctx->buf_table.nr)
+ if (up->offset + nr_args > ctx->buf_table.data.nr)
return -EINVAL;
for (done = 0; done < nr_args; done++) {
struct io_rsrc_node *node;
u64 tag = 0;
- i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
- node = io_rsrc_node_lookup(&ctx->buf_table, i);
+ i = array_index_nospec(up->offset + done, ctx->buf_table.data.nr);
+ node = io_rsrc_node_lookup(&ctx->buf_table.data, i);
if (node && node->type != IORING_RSRC_BUFFER) {
err = -EBUSY;
break;
@@ -276,8 +276,8 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
}
node->tag = tag;
}
- io_reset_rsrc_node(ctx, &ctx->buf_table, i);
- ctx->buf_table.nodes[i] = node;
+ io_reset_rsrc_node(ctx, &ctx->buf_table.data, i);
+ ctx->buf_table.data.nodes[i] = node;
if (ctx->compat)
user_data += sizeof(struct compat_iovec);
else
@@ -556,9 +556,9 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->buf_table.nr)
+ if (!ctx->buf_table.data.nr)
return -ENXIO;
- io_rsrc_data_free(ctx, &ctx->buf_table);
+ io_rsrc_data_free(ctx, &ctx->buf_table.data);
return 0;
}
@@ -585,8 +585,8 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
}
/* check previously registered pages */
- for (i = 0; i < ctx->buf_table.nr; i++) {
- struct io_rsrc_node *node = ctx->buf_table.nodes[i];
+ for (i = 0; i < ctx->buf_table.data.nr; i++) {
+ struct io_rsrc_node *node = ctx->buf_table.data.nodes[i];
struct io_mapped_ubuf *imu;
if (!node)
@@ -812,7 +812,7 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
- if (ctx->buf_table.nr)
+ if (ctx->buf_table.data.nr)
return -EBUSY;
if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
return -EINVAL;
@@ -865,7 +865,7 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
data.nodes[i] = node;
}
- ctx->buf_table = data;
+ ctx->buf_table.data = data;
if (ret)
io_sqe_buffers_unregister(ctx);
return ret;
@@ -901,7 +901,7 @@ static struct io_rsrc_node *io_buffer_alloc_node(struct io_ring_ctx *ctx,
int io_buffer_register_bvec(struct io_ring_ctx *ctx, const struct request *rq,
unsigned int index)
{
- struct io_rsrc_data *data = &ctx->buf_table;
+ struct io_rsrc_data *data = &ctx->buf_table.data;
u16 nr_bvecs = blk_rq_nr_phys_segments(rq);
struct req_iterator rq_iter;
struct io_rsrc_node *node;
@@ -938,7 +938,7 @@ EXPORT_SYMBOL_GPL(io_buffer_register_bvec);
void io_buffer_unregister_bvec(struct io_ring_ctx *ctx, unsigned int index)
{
- struct io_rsrc_data *data = &ctx->buf_table;
+ struct io_rsrc_data *data = &ctx->buf_table.data;
struct io_rsrc_node *node;
lockdep_assert_held(&ctx->uring_lock);
@@ -1054,10 +1054,10 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
if (!arg->nr && (arg->dst_off || arg->src_off))
return -EINVAL;
/* not allowed unless REPLACE is set */
- if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
+ if (ctx->buf_table.data.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
return -EBUSY;
- nbufs = src_ctx->buf_table.nr;
+ nbufs = src_ctx->buf_table.data.nr;
if (!arg->nr)
arg->nr = nbufs;
else if (arg->nr > nbufs)
@@ -1067,13 +1067,13 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
return -EOVERFLOW;
- ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.nr));
+ ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.data.nr));
if (ret)
return ret;
/* Fill entries in data from dst that won't overlap with src */
- for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
- struct io_rsrc_node *src_node = ctx->buf_table.nodes[i];
+ for (i = 0; i < min(arg->dst_off, ctx->buf_table.data.nr); i++) {
+ struct io_rsrc_node *src_node = ctx->buf_table.data.nodes[i];
if (src_node) {
data.nodes[i] = src_node;
@@ -1082,7 +1082,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
}
ret = -ENXIO;
- nbufs = src_ctx->buf_table.nr;
+ nbufs = src_ctx->buf_table.data.nr;
if (!nbufs)
goto out_free;
ret = -EINVAL;
@@ -1102,7 +1102,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
while (nr--) {
struct io_rsrc_node *dst_node, *src_node;
- src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
+ src_node = io_rsrc_node_lookup(&src_ctx->buf_table.data, i);
if (!src_node) {
dst_node = NULL;
} else {
@@ -1124,7 +1124,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
* old and new nodes at this point.
*/
if (arg->flags & IORING_REGISTER_DST_REPLACE)
- io_rsrc_data_free(ctx, &ctx->buf_table);
+ io_sqe_buffers_unregister(ctx);
/*
* ctx->buf_table must be empty now - either the contents are being
@@ -1132,10 +1132,9 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
* copied to a ring that does not have buffers yet (checked at function
* entry).
*/
- WARN_ON_ONCE(ctx->buf_table.nr);
- ctx->buf_table = data;
+ WARN_ON_ONCE(ctx->buf_table.data.nr);
+ ctx->buf_table.data = data;
return 0;
-
out_free:
io_rsrc_data_free(ctx, &data);
return ret;
@@ -1160,7 +1159,7 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
return -EFAULT;
if (buf.flags & ~(IORING_REGISTER_SRC_REGISTERED|IORING_REGISTER_DST_REPLACE))
return -EINVAL;
- if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.nr)
+ if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.data.nr)
return -EBUSY;
if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
return -EINVAL;
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -387,7 +387,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
if (unlikely(ret))
return ret;
- node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
+ node = io_rsrc_node_lookup(&ctx->buf_table.data, req->buf_index);
if (!node)
return -EFAULT;
io_req_assign_buf_node(req, node);
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -213,7 +213,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_rsrc_node *node;
u16 index = READ_ONCE(sqe->buf_index);
- node = io_rsrc_node_lookup(&ctx->buf_table, index);
+ node = io_rsrc_node_lookup(&ctx->buf_table.data, index);
if (unlikely(!node))
return -EFAULT;
/*