diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -20,6 +20,9 @@
/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

+/* Mapped buffer ring, return io_uring_buf from head */
+#define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
+
struct io_provide_buf {
struct file *file;
__u64 addr;
@@ -29,6 +32,34 @@ struct io_provide_buf {
__u16 bid;
};

+bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
+{
+ if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
+ return true;
+
+ req->flags &= ~REQ_F_BUFFERS_COMMIT;
+
+ if (unlikely(len < 0))
+ return true;
+
+ if (bl->flags & IOBL_INC) {
+ struct io_uring_buf *buf;
+
+ buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+ if (WARN_ON_ONCE(len > buf->len))
+ len = buf->len;
+ buf->len -= len;
+ if (buf->len) {
+ buf->addr += len;
+ return false;
+ }
+ }
+
+ bl->head += nr;
+ return true;
+}
+
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
unsigned int bgid)
{
@@ -323,6 +354,35 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

+static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
+{
+ struct io_buffer_list *bl = req->buf_list;
+ bool ret = true;
+
+ if (bl) {
+ ret = io_kbuf_commit(req, bl, len, nr);
+ req->buf_index = bl->bgid;
+ }
+ req->flags &= ~REQ_F_BUFFER_RING;
+ return ret;
+}
+
+unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
+{
+ unsigned int ret;
+
+ ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+
+ if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
+ io_kbuf_drop_legacy(req);
+ return ret;
+ }
+
+ if (!__io_put_kbuf_ring(req, len, nbufs))
+ ret |= IORING_CQE_F_BUF_MORE;
+ return ret;
+}
+
static int __io_remove_buffers(struct io_ring_ctx *ctx,
struct io_buffer_list *bl, unsigned nbufs)
{
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -77,6 +77,10 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

+unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
+bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr);
+
struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
unsigned int bgid);
@@ -115,77 +119,19 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
return false;
}

-/* Mapped buffer ring, return io_uring_buf from head */
-#define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
-
-static inline bool io_kbuf_commit(struct io_kiocb *req,
- struct io_buffer_list *bl, int len, int nr)
-{
- if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
- return true;
-
- req->flags &= ~REQ_F_BUFFERS_COMMIT;
-
- if (unlikely(len < 0))
- return true;
-
- if (bl->flags & IOBL_INC) {
- struct io_uring_buf *buf;
-
- buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
- if (WARN_ON_ONCE(len > buf->len))
- len = buf->len;
- buf->len -= len;
- if (buf->len) {
- buf->addr += len;
- return false;
- }
- }
-
- bl->head += nr;
- return true;
-}
-
-static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
-{
- struct io_buffer_list *bl = req->buf_list;
- bool ret = true;
-
- if (bl) {
- ret = io_kbuf_commit(req, bl, len, nr);
- req->buf_index = bl->bgid;
- }
- req->flags &= ~REQ_F_BUFFER_RING;
- return ret;
-}
-
-static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
- int nbufs, unsigned issue_flags)
-{
- unsigned int ret;
-
- if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
- return 0;
-
- ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
- if (req->flags & REQ_F_BUFFER_RING) {
- if (!__io_put_kbuf_ring(req, len, nbufs))
- ret |= IORING_CQE_F_BUF_MORE;
- } else {
- io_kbuf_drop_legacy(req);
- }
- return ret;
-}
-
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
unsigned issue_flags)
{
- return __io_put_kbufs(req, len, 1, issue_flags);
+ if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
+ return 0;
+ return __io_put_kbufs(req, len, 1);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
int nbufs, unsigned issue_flags)
{
- return __io_put_kbufs(req, len, nbufs, issue_flags);
+ if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
+ return 0;
+ return __io_put_kbufs(req, len, nbufs);
}
#endif
__io_put_kbufs() and other helper functions are too large to be inlined;
compilers would normally refuse to do so. Uninline them and move them,
together with io_kbuf_commit(), into kbuf.c.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/kbuf.c | 60 +++++++++++++++++++++++++++++++++++++++
 io_uring/kbuf.h | 74 +++++++------------------------------------------
 2 files changed, 70 insertions(+), 64 deletions(-)
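
For illustration, the effect of the split is that only the cheap flag test
stays inline at each call site, while the ring-commit body is compiled once
in kbuf.c. Below is a minimal, standalone C sketch of that inline-guard /
out-of-line-body pattern; all names in it are hypothetical stand-ins, not
the kernel API:

/*
 * Standalone sketch (hypothetical names, not kernel API): the
 * inline-guard / out-of-line-body split this patch applies.
 */
#include <stdio.h>

#define F_BUFFER_RING		(1u << 0)	/* stand-in for REQ_F_BUFFER_RING */
#define F_BUFFER_SELECTED	(1u << 1)	/* stand-in for REQ_F_BUFFER_SELECTED */

struct request {
	unsigned int flags;
	unsigned int buf_index;
};

/* Heavyweight body: one copy, would live in a .c file ("kbuf.c"). */
unsigned int put_kbufs_slow(struct request *req, int len, int nbufs)
{
	(void)len;
	(void)nbufs;
	/* ...commit ring entries here, then report the buffer ID... */
	return 1u | (req->buf_index << 16);
}

/* Cheap guard: small enough to inline at every call site ("kbuf.h"). */
static inline unsigned int put_kbufs(struct request *req, int len, int nbufs)
{
	if (!(req->flags & (F_BUFFER_RING | F_BUFFER_SELECTED)))
		return 0;	/* common no-buffer case: just a test and branch */
	return put_kbufs_slow(req, len, nbufs);
}

int main(void)
{
	struct request req = { .flags = F_BUFFER_RING, .buf_index = 3 };

	printf("cqe flags: 0x%x\n", put_kbufs(&req, 4096, 1));
	return 0;
}

The guard compiles to a single test-and-branch at every caller, while the
heavyweight path exists in exactly one object file; that is the layout the
patch gives io_put_kbuf()/io_put_kbufs() versus __io_put_kbufs().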