Add ublk_bpf_aio_prep_io_buf() and call it before running the ublk bpf
prog, so that everything is wired together.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 drivers/block/ublk/bpf.h     | 13 +++++++++
 drivers/block/ublk/bpf_ops.c | 51 +++++++++++++++++++++++++++++++++++-
 drivers/block/ublk/main.c    |  5 ----
 drivers/block/ublk/ublk.h    |  6 +++++
 4 files changed, 69 insertions(+), 6 deletions(-)

--- a/drivers/block/ublk/bpf.h
+++ b/drivers/block/ublk/bpf.h
@@ -99,6 +99,9 @@ static inline void ublk_bpf_io_dec_ref(struct ublk_bpf_io *io)
ubq->bpf_ops->release_io_cmd(io);
}
+ if (test_bit(UBLK_BPF_BVEC_ALLOCATED, &io->flags))
+ kvfree(io->buf.bvec);
+
if (test_bit(UBLK_BPF_IO_COMPLETED, &io->flags)) {
smp_rmb();
__clear_bit(UBLK_BPF_IO_PREP, &io->flags);
@@ -158,6 +161,11 @@ static inline queue_io_cmd_t ublk_get_bpf_any_io_cb(struct ublk_queue *ubq)
return ublk_get_bpf_io_cb_daemon(ubq);
}
+static inline bool ublk_support_bpf_aio(const struct ublk_queue *ubq)
+{
+ return ublk_support_bpf(ubq) && ubq->bpf_aio_ops;
+}
+
int ublk_bpf_init(void);
int ublk_bpf_struct_ops_init(void);
int ublk_bpf_prog_attach(struct bpf_prog_consumer *consumer);
@@ -190,6 +198,11 @@ static inline queue_io_cmd_t ublk_get_bpf_any_io_cb(struct ublk_queue *ubq)
return NULL;
}
+static inline bool ublk_support_bpf_aio(const struct ublk_queue *ubq)
+{
+ return false;
+}
+
static inline int ublk_bpf_init(void)
{
return 0;
--- a/drivers/block/ublk/bpf_ops.c
+++ b/drivers/block/ublk/bpf_ops.c
@@ -155,6 +155,49 @@ void ublk_bpf_prog_detach(struct bpf_prog_consumer *consumer)
mutex_unlock(&ublk_bpf_ops_lock);
}
+static int ublk_bpf_aio_prep_io_buf(const struct request *req)
+{
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu((struct request *)req);
+ struct ublk_bpf_io *io = &data->bpf_data;
+ struct req_iterator rq_iter;
+ struct bio_vec *bvec;
+ struct bio_vec bv;
+ unsigned offset;
+
+ io->buf.bvec = NULL;
+ io->buf.nr_bvec = 0;
+
+ if (!ublk_rq_has_data(req))
+ return 0;
+
+ rq_for_each_bvec(bv, req, rq_iter)
+ io->buf.nr_bvec++;
+
+ if (!io->buf.nr_bvec)
+ return 0;
+
+ if (req->bio != req->biotail) {
+ int idx = 0;
+
+ bvec = kvmalloc_array(io->buf.nr_bvec, sizeof(struct bio_vec),
+ GFP_NOIO);
+ if (!bvec)
+ return -ENOMEM;
+
+ offset = 0;
+ rq_for_each_bvec(bv, req, rq_iter)
+ bvec[idx++] = bv;
+ __set_bit(UBLK_BPF_BVEC_ALLOCATED, &io->flags);
+ } else {
+ struct bio *bio = req->bio;
+
+ offset = bio->bi_iter.bi_bvec_done;
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ }
+ io->buf.bvec = bvec;
+ io->buf.bvec_off = offset;
+ return 0;
+}
static void ublk_bpf_prep_io(struct ublk_bpf_io *io,
const struct ublksrv_io_desc *iod)
@@ -180,8 +223,14 @@ bool ublk_run_bpf_handler(struct ublk_queue *ubq, struct request *req,
bool res = true;
int err;
- if (!test_bit(UBLK_BPF_IO_PREP, &bpf_io->flags))
+ if (!test_bit(UBLK_BPF_IO_PREP, &bpf_io->flags)) {
ublk_bpf_prep_io(bpf_io, iod);
+ if (ublk_support_bpf_aio(ubq)) {
+ err = ublk_bpf_aio_prep_io_buf(req);
+ if (err)
+ goto fail;
+ }
+ }
do {
enum ublk_bpf_disposition rc;
--- a/drivers/block/ublk/main.c
+++ b/drivers/block/ublk/main.c
@@ -512,11 +512,6 @@ void ublk_put_device(struct ublk_device *ub)
put_device(&ub->cdev_dev);
}
-static inline bool ublk_rq_has_data(const struct request *rq)
-{
- return bio_has_data(rq->bio);
-}
-
static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
return ublk_get_queue(ub, q_id)->io_cmd_buf;
--- a/drivers/block/ublk/ublk.h
+++ b/drivers/block/ublk/ublk.h
@@ -41,6 +41,7 @@
enum {
UBLK_BPF_IO_PREP = 0,
UBLK_BPF_IO_COMPLETED = 1,
+ UBLK_BPF_BVEC_ALLOCATED = 2,
};
struct ublk_bpf_io {
@@ -215,6 +216,11 @@ static inline bool ublk_dev_support_bpf_aio(const struct ublk_device *ub)
return ub->params.bpf.flags & UBLK_BPF_HAS_AIO_OPS_ID;
}
+static inline bool ublk_rq_has_data(const struct request *rq)
+{
+ return bio_has_data(rq->bio);
+}
+
struct ublk_device *ublk_get_device(struct ublk_device *ub);
struct ublk_device *ublk_get_device_from_id(int idx);
void ublk_put_device(struct ublk_device *ub);
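For readers wiring a bpf aio consumer on top of this, below is a minimal,
hypothetical sketch (not part of the patch) of how the buffer prepared by
ublk_bpf_aio_prep_io_buf() could be turned into an iov_iter. It follows the
same iov_iter_bvec()/iov_offset pattern the loop driver uses in lo_rw_aio();
the helper name ublk_bpf_io_iter() is invented for illustration, and only
io->buf.bvec, io->buf.nr_bvec and io->buf.bvec_off come from the patch.

#include <linux/blk-mq.h>
#include <linux/uio.h>

/* Hypothetical helper, for illustration only. */
static void ublk_bpf_io_iter(const struct request *req,
			     struct ublk_bpf_io *io,
			     struct iov_iter *iter)
{
	/* data direction: the buffer is the source for a write request */
	unsigned int dir = op_is_write(req_op(req)) ? ITER_SOURCE : ITER_DEST;

	iov_iter_bvec(iter, dir, io->buf.bvec, io->buf.nr_bvec,
		      blk_rq_bytes(req));
	/*
	 * On the single-bio fast path the bvec table is borrowed from the
	 * bio itself, so skip the part of the first bvec that was already
	 * consumed (bi_iter.bi_bvec_done).
	 */
	iter->iov_offset = io->buf.bvec_off;
}

This asymmetry is also why the new UBLK_BPF_BVEC_ALLOCATED flag exists: only
the multi-bio path owns a kvmalloc'ed table that must be kvfree'd in
ublk_bpf_io_dec_ref(); the single-bio path points into the bio's own bvec
array, which must not be freed.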