@@ -23,6 +23,8 @@ struct io_wq_work {
int cancel_seq;
};
+struct io_mapped_ubuf;
+
struct io_fixed_file {
/* file * with additional FFS_* flags */
unsigned long file_ptr;
@@ -485,6 +485,10 @@ enum {
IORING_REGISTER_NOTIFIERS = 26,
IORING_UNREGISTER_NOTIFIERS = 27,
+ /* dma map registered buffers */
+ IORING_REGISTER_MAP_BUFFERS = 28,
+ IORING_REGISTER_UNMAP_BUFFERS = 29,
+
/* this goes last */
IORING_REGISTER_LAST
};
@@ -661,4 +665,12 @@ struct io_uring_recvmsg_out {
__u32 flags;
};
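+/*
+ * Argument for IORING_REGISTER_MAP_BUFFERS and IORING_REGISTER_UNMAP_BUFFERS:
+ * premap the registered buffers in [buf_start, buf_end) to the fixed file at
+ * slot 'fd'. 'flags' and 'rsvd' must be zero; buf_end is clamped to the
+ * number of registered buffers.
+ */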
+struct io_uring_map_buffers {
+ __s32 fd;
+ __s32 buf_start;
+ __s32 buf_end;
+ __u32 flags;
+ __u64 rsvd[2];
+};
+
#endif
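
For context, here is roughly how userspace would drive the two new opcodes once
a ring has both fixed files and registered buffers. This is an illustrative
sketch only: nothing in this series adds a liburing helper, so it goes through
the raw io_uring_register(2) syscall, and the do_map_buffers() wrapper name is
invented for the example.

	/*
	 * Sketch (not part of the patch): premap registered buffers
	 * [start, end) to the O_DIRECT file in fixed-file slot 'slot'.
	 * Requires CAP_SYS_ADMIN.
	 */
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/io_uring.h>

	static int do_map_buffers(int ring_fd, int slot, int start, int end)
	{
		struct io_uring_map_buffers map;

		memset(&map, 0, sizeof(map)); /* flags and rsvd[] must be zero */
		map.fd = slot;          /* index into the fixed file table */
		map.buf_start = start;  /* first registered buffer to map */
		map.buf_end = end;      /* clamped to the number of buffers */

		/* nr_args must be 1 for both MAP and UNMAP */
		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_MAP_BUFFERS, &map, 1);
	}

Unmapping is symmetric: pass the same struct with
IORING_REGISTER_UNMAP_BUFFERS, or simply unregister the buffers or the file
slot and the kernel tears the mappings down itself.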
@@ -189,9 +189,10 @@ int io_file_slot_queue_removal(struct io_ring_ctx *ctx,
struct file *file;
int ret;
- file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
- ctx->rsrc_node, file);
+ file = io_file_from_fixed(file_slot);
+ io_dma_unmap_file(ctx, file_slot);
+ ret = io_queue_rsrc_removal(ctx->file_data, slot_index, ctx->rsrc_node,
+ file);
if (ret)
return ret;
@@ -58,12 +58,17 @@ io_fixed_file_slot(struct io_file_table *table, unsigned i)
return &table->files[i];
}
+static inline struct file *io_file_from_fixed(struct io_fixed_file *f)
+{
+ return (struct file *) (f->file_ptr & FFS_MASK);
+}
+
static inline struct file *io_file_from_index(struct io_file_table *table,
int index)
{
struct io_fixed_file *slot = io_fixed_file_slot(table, index);
- return (struct file *) (slot->file_ptr & FFS_MASK);
+ return io_file_from_fixed(slot);
}
static inline void io_fixed_file_set(struct io_fixed_file *file_slot,
@@ -3681,6 +3681,131 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
return ret;
}
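+
+/*
+ * Copy and validate the user-supplied range. Returns the number of buffer
+ * slots in [buf_start, buf_end) on success; buf_end is clamped to the number
+ * of registered buffers.
+ */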
+static int get_map_range(struct io_ring_ctx *ctx,
+ struct io_uring_map_buffers *map, void __user *arg)
+{
+ int ret;
+
+ if (copy_from_user(map, arg, sizeof(*map)))
+ return -EFAULT;
+ if (map->flags || map->rsvd[0] || map->rsvd[1])
+ return -EINVAL;
+ if (map->fd < 0 || map->fd >= ctx->nr_user_files)
+ return -EINVAL;
+ if (map->buf_start < 0)
+ return -EINVAL;
+ if (map->buf_start >= ctx->nr_user_bufs)
+ return -EINVAL;
+ if (map->buf_end > ctx->nr_user_bufs)
+ map->buf_end = ctx->nr_user_bufs;
+
+ ret = map->buf_end - map->buf_start;
+ if (ret <= 0)
+ return -EINVAL;
+
+ return ret;
+}
+
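+/* Drop a registered buffer's premapped DMA state, if it has any. */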
+void io_dma_unmap(struct io_mapped_ubuf *imu)
+{
+ struct file *file;
+
+ if (!imu->dma_tag)
+ return;
+
+ file = io_file_from_fixed(imu->dma_file);
+ file_dma_unmap(file, imu->dma_tag);
+ imu->dma_file = NULL;
+ imu->dma_tag = NULL;
+}
+
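+/*
+ * Called when a fixed file is removed or replaced: tear down any buffer
+ * premappings that still point at this slot so no dma_tag outlives the file.
+ */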
+void io_dma_unmap_file(struct io_ring_ctx *ctx, struct io_fixed_file *file_slot)
+{
+ int i;
+
+ for (i = 0; i < ctx->nr_user_bufs; i++) {
+ struct io_mapped_ubuf *imu = ctx->user_bufs[i];
+
+ if (imu->dma_tag && imu->dma_file == file_slot)
+ io_dma_unmap(imu);
+ }
+}
+
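+/*
+ * Both registration opcodes run with ctx->uring_lock held (see
+ * __io_uring_register()), so the file and buffer tables are stable here.
+ */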
+static int io_register_unmap_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_map_buffers map;
+ int i, ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = get_map_range(ctx, &map, arg);
+ if (ret < 0)
+ return ret;
+
+ for (i = map.buf_start; i < map.buf_end; i++)
+ io_dma_unmap(ctx->user_bufs[i]);
+
+ return 0;
+}
+
+static int io_register_map_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_map_buffers map;
+ struct io_fixed_file *file_slot;
+ struct file *file;
+ int ret, i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = get_map_range(ctx, &map, arg);
+ if (ret < 0)
+ return ret;
+
+ file_slot = io_fixed_file_slot(&ctx->file_table,
+ array_index_nospec(map.fd, ctx->nr_user_files));
+ if (!file_slot || !file_slot->file_ptr)
+ return -EBADF;
+
+ file = io_file_from_fixed(file_slot);
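+ /*
+ * Premapping only helps O_DIRECT I/O, which goes straight to the
+ * device; buffered I/O copies through the page cache and keeps
+ * using the bvec path in io_import_fixed().
+ */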
+ if (!(file->f_flags & O_DIRECT))
+ return -EOPNOTSUPP;
+
+ for (i = map.buf_start; i < map.buf_end; i++) {
+ struct io_mapped_ubuf *imu = ctx->user_bufs[i];
+ void *tag;
+
+ if (imu->dma_tag) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ tag = file_dma_map(file, imu->bvec, imu->nr_bvecs);
+ if (IS_ERR(tag)) {
+ ret = PTR_ERR(tag);
+ goto err;
+ }
+
+ imu->dma_tag = tag;
+ imu->dma_file = file_slot;
+ }
+
+ return 0;
+err:
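+ /* unwind any mappings created earlier in this call */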
+ while (--i >= map.buf_start)
+ io_dma_unmap(ctx->user_bufs[i]);
+ return ret;
+}
+
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
void __user *arg, unsigned nr_args)
__releases(ctx->uring_lock)
@@ -3847,6 +3972,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_notif_unregister(ctx);
break;
+ case IORING_REGISTER_MAP_BUFFERS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_map_buffers(ctx, arg);
+ break;
+ case IORING_REGISTER_UNMAP_BUFFERS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_unmap_buffers(ctx, arg);
+ break;
default:
ret = -EINVAL;
break;
@@ -977,7 +977,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
- (u64)(uintptr_t)zc->buf, zc->len);
+ (u64)(uintptr_t)zc->buf, zc->len, NULL);
if (unlikely(ret))
return ret;
} else {
@@ -148,6 +148,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
unpin_user_page(imu->bvec[i].bv_page);
if (imu->acct_pages)
io_unaccount_mem(ctx, imu->acct_pages);
+ io_dma_unmap(imu);
kvfree(imu);
}
*slot = NULL;
@@ -809,12 +810,16 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
int i;
for (i = 0; i < ctx->nr_user_files; i++) {
- struct file *file = io_file_from_index(&ctx->file_table, i);
+ struct io_fixed_file *f = io_fixed_file_slot(&ctx->file_table, i);
+ struct file *file;
- if (!file)
+ if (!f)
continue;
- if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
+ if (f->file_ptr & FFS_SCM)
continue;
+
+ io_dma_unmap_file(ctx, f);
+ file = io_file_from_fixed(f);
io_file_bitmap_clear(&ctx->file_table, i);
fput(file);
}
@@ -1282,6 +1287,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
imu->ubuf = (unsigned long) iov->iov_base;
imu->ubuf_end = imu->ubuf + iov->iov_len;
imu->nr_bvecs = nr_pages;
+ imu->dma_tag = NULL;
+ imu->dma_file = NULL;
*pimu = imu;
ret = 0;
done:
@@ -1356,9 +1362,8 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
+int io_import_fixed(int ddir, struct iov_iter *iter, struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len, struct file *file)
{
u64 buf_end;
size_t offset;
@@ -1376,6 +1381,10 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
* and advance us to the beginning.
*/
offset = buf_addr - imu->ubuf;
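+ /*
+ * If this buffer was premapped to the file being operated on, hand
+ * the opaque dma_tag to the iterator so the driver can skip per-I/O
+ * DMA mapping.
+ */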
+ if (imu->dma_tag && file == io_file_from_fixed(imu->dma_file)) {
+ iov_iter_dma_tag(iter, ddir, imu->dma_tag, offset, len);
+ return 0;
+ }
iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
if (offset) {
@@ -50,6 +50,8 @@ struct io_mapped_ubuf {
u64 ubuf_end;
unsigned int nr_bvecs;
unsigned long acct_pages;
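+ /* set while the buffer is DMA-premapped to a fixed file */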
+ void *dma_tag;
+ struct io_fixed_file *dma_file;
struct bio_vec bvec[];
};
@@ -64,9 +66,11 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
struct io_rsrc_data *data_to_kill);
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len);
+int io_import_fixed(int ddir, struct iov_iter *iter, struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len, struct file *file);
+
+void io_dma_unmap(struct io_mapped_ubuf *imu);
+void io_dma_unmap_file(struct io_ring_ctx *ctx, struct io_fixed_file *file_slot);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
@@ -359,7 +359,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
ssize_t ret;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
- ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
+ ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len, req->file);
if (ret)
return ERR_PTR(ret);
return NULL;