@@ -12,6 +12,7 @@
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <uapi/linux/io_uring.h>
#include "blk.h"
static struct kmem_cache *bip_slab;
@@ -337,6 +338,55 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
return nr_bvecs;
}
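+/* Fold the user-requested PI check flags and app tag into the bip */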
+static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ u16 bip_flags = 0;
+
+ if (meta->flags & INTEGRITY_CHK_GUARD)
+ bip_flags |= BIP_USER_CHK_GUARD;
+ if (meta->flags & INTEGRITY_CHK_APPTAG)
+ bip_flags |= BIP_USER_CHK_APPTAG;
+ if (meta->flags & INTEGRITY_CHK_REFTAG)
+ bip_flags |= BIP_USER_CHK_REFTAG;
+
+ bip->bip_flags |= bip_flags;
+ bip->apptag = meta->apptag;
+}
+
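+/*
+ * Map the user meta buffer described by @meta onto @bio. Only the bytes
+ * that correspond to this bio's data are consumed, and @meta->iter is
+ * advanced past them on success so the next bio continues from there.
+ */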
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ unsigned int integrity_bytes;
+ int ret;
+ struct iov_iter it;
+
+ if (!bi)
+ return -EINVAL;
+ /*
+ * The original meta iterator can span more than one bio; process
+ * only the integrity bytes that correspond to this data buffer.
+ */
+ it = meta->iter;
+ integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
+ if (it.count < integrity_bytes)
+ return -EINVAL;
+
+ it.count = integrity_bytes;
+ ret = bio_integrity_map_user(bio, &it, 0);
+ if (!ret) {
+ bio_uio_meta_to_bip(bio, meta);
+ iov_iter_advance(&meta->iter, integrity_bytes);
+ }
+ return ret;
+}
+
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter,
u32 seed)
{
@@ -126,12 +126,20 @@ static void blkdev_bio_end_io(struct bio *bio)
{
struct blkdev_dio *dio = bio->bi_private;
bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
+ bool is_async = !(dio->flags & DIO_IS_SYNC);
if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_status = bio->bi_status;
+ /*
+ * Unmap the user meta buffer before dropping the dio reference: after
+ * the final put, ki_complete() may free the iocb and the dio itself.
+ */
+ if (is_async && (dio->iocb->ki_flags & IOCB_HAS_META))
+ bio_integrity_unmap_free_user(bio);
+
if (atomic_dec_and_test(&dio->ref)) {
- if (!(dio->flags & DIO_IS_SYNC)) {
+ if (is_async) {
struct kiocb *iocb = dio->iocb;
ssize_t ret;
@@ -231,6 +239,17 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
bio->bi_opf |= REQ_NOWAIT;
}
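+ /* for async I/O, iocb->private carries the uio_meta descriptor */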
+ if (!is_sync && unlikely(iocb->ki_flags & IOCB_HAS_META)) {
+ ret = bio_integrity_map_iter(bio, iocb->private);
+ if (unlikely(ret)) {
+ bio_release_pages(bio, false);
+ bio_clear_flag(bio, BIO_REFFED);
+ bio_put(bio);
+ blk_finish_plug(&plug);
+ return ret;
+ }
+ }
if (is_read) {
if (dio->flags & DIO_SHOULD_DIRTY)
@@ -288,6 +307,10 @@ static void blkdev_bio_end_io_async(struct bio *bio)
ret = blk_status_to_errno(bio->bi_status);
}
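+ /* release the pinned user meta buffer before completing the iocb */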
+ if (unlikely(iocb->ki_flags & IOCB_HAS_META))
+ bio_integrity_unmap_free_user(bio);
+
iocb->ki_complete(iocb, ret);
if (dio->flags & DIO_SHOULD_DIRTY) {
@@ -348,6 +371,16 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
task_io_account_write(bio->bi_iter.bi_size);
}
+ if (unlikely(iocb->ki_flags & IOCB_HAS_META)) {
+ ret = bio_integrity_map_iter(bio, iocb->private);
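+ /* descriptor consumed; clear ->private, which the polled path reuses */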
+ WRITE_ONCE(iocb->private, NULL);
+ if (unlikely(ret)) {
+ bio_put(bio);
+ return ret;
+ }
+ }
+
if (iocb->ki_flags & IOCB_ATOMIC)
bio->bi_opf |= REQ_ATOMIC;
@@ -131,6 +131,9 @@ static void t10_pi_type1_prepare(struct request *rq)
/* Already remapped? */
if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
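+ /* user-mapped PI is passed through as-is; don't remap the reftag */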
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
@@ -180,6 +183,8 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
struct bio_vec iv;
struct bvec_iter iter;
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
void *p;
@@ -305,6 +310,8 @@ static void ext_pi_type1_prepare(struct request *rq)
/* Already remapped? */
if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
@@ -330,6 +330,9 @@ enum bip_flags {
BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */
BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */
BIP_CLONED = 1 << 7, /* Indicates that bip is cloned */
+ BIP_USER_CHK_GUARD = 1 << 8, /* guard check requested by user */
+ BIP_USER_CHK_APPTAG = 1 << 9, /* apptag check requested by user */
+ BIP_USER_CHK_REFTAG = 1 << 10, /* reftag check requested by user */
};
struct uio_meta {
@@ -349,6 +352,7 @@ struct bio_integrity_payload {
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ u16 apptag; /* application tag value */
struct bvec_iter bio_iter; /* for rewinding parent bio */
@@ -738,6 +742,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter, u32 seed);
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
void bio_integrity_unmap_free_user(struct bio *bio);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
@@ -817,6 +822,11 @@ static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter,
return -EINVAL;
}
+static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ return -EINVAL;
+}
+
static inline void bio_integrity_unmap_free_user(struct bio *bio)
{
}