diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *verify_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -1611,10 +1612,16 @@ static int __init nvmet_init(void)
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
- zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
- if (!zbd_wq)
+ verify_wq = alloc_workqueue("nvmet-verify-wq", WQ_MEM_RECLAIM, 0);
+ if (!verify_wq)
return -ENOMEM;
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq) {
+ error = -ENOMEM;
+ goto out_free_verify_work_queue;
+ }
+
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
if (!buffered_io_wq) {
@@ -1645,6 +1652,8 @@ static int __init nvmet_init(void)
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
+out_free_verify_work_queue:
+ destroy_workqueue(verify_wq);
return error;
}
@@ -1656,6 +1665,7 @@ static void __exit nvmet_exit(void)
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
+ destroy_workqueue(verify_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -447,35 +447,71 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
}
}
-static void nvmet_bdev_execute_verify(struct nvmet_req *req)
+static void __nvmet_req_to_verify_sectors(struct nvmet_req *req,
+ sector_t *sects, sector_t *nr_sects)
{
struct nvme_verify_cmd *verify = &req->cmd->verify;
+
+ *sects = le64_to_cpu(verify->slba) << (req->ns->blksize_shift - 9);
+ *nr_sects = (((sector_t)le16_to_cpu(verify->length) + 1) <<
+ (req->ns->blksize_shift - 9));
+}
+
+static void nvmet_bdev_emulate_verify_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, b.work);
+ sector_t nr_sector;
+ sector_t sector;
+ int ret = 0;
+
+ __nvmet_req_to_verify_sectors(req, &sector, &nr_sector);
+ if (!nr_sector)
+ goto out;
+
+ /* blkdev_issue_verify() emulates verify when the device lacks native support */
+ ret = blkdev_issue_verify(req->ns->bdev, sector, nr_sector,
+ GFP_KERNEL);
+out:
+ nvmet_req_complete(req,
+ blk_to_nvme_status(req, errno_to_blk_status(ret)));
+}
+
+static void nvmet_bdev_submit_emulate_verify(struct nvmet_req *req)
+{
+ INIT_WORK(&req->b.work, nvmet_bdev_emulate_verify_work);
+ queue_work(verify_wq, &req->b.work);
+}
+
+static void nvmet_bdev_execute_verify(struct nvmet_req *req)
+{
struct bio *bio = NULL;
sector_t nr_sector;
sector_t sector;
- int ret;
+ int ret = 0;
if (!nvmet_check_transfer_len(req, 0))
return;
+ __nvmet_req_to_verify_sectors(req, &sector, &nr_sector);
+ if (!nr_sector)
+ goto out;
+
+ /* device lacks native verify support, offload emulation to the workqueue */
if (!bdev_verify_sectors(req->ns->bdev)) {
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ nvmet_bdev_submit_emulate_verify(req);
return;
}
- sector = le64_to_cpu(verify->slba) << (req->ns->blksize_shift - 9);
- nr_sector = (((sector_t)le16_to_cpu(verify->length) + 1) <<
- (req->ns->blksize_shift - 9));
-
ret = __blkdev_issue_verify(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio);
- if (bio) {
+ if (ret == 0 && bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
submit_bio(bio);
- } else {
- nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+ return;
}
+out:
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
}
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -326,7 +326,8 @@ struct nvmet_req {
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
union {
struct {
- struct bio inline_bio;
+ struct bio inline_bio;
+ struct work_struct work;
} b;
struct {
bool mpool_alloc;
@@ -365,6 +366,7 @@ struct nvmet_req {
};
extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *verify_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
Not all devices support verify requests that can be mapped to a
controller-specific command. This patch adds a way to emulate
REQ_OP_VERIFY for an NVMeOF block device namespace. We add a new
workqueue to offload the emulation.

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/nvme/target/core.c        | 14 ++++++--
 drivers/nvme/target/io-cmd-bdev.c | 56 +++++++++++++++++++++++++------
 drivers/nvme/target/nvmet.h       |  4 ++-
 3 files changed, 61 insertions(+), 13 deletions(-)
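
For reference, below is a minimal stand-alone sketch of the LBA-to-sector
conversion performed by __nvmet_req_to_verify_sectors(). It assumes a
namespace formatted with 4096-byte logical blocks (blksize_shift == 12);
the slba and length values are made up purely for illustration, and the
snippet is ordinary userspace C rather than target code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blksize_shift = 12;  /* hypothetical 4096-byte logical blocks */
	uint64_t slba = 16;               /* starting LBA from the verify command */
	uint16_t length = 7;              /* NVMe NLB is 0-based, i.e. 8 blocks */

	/* same math as __nvmet_req_to_verify_sectors(): convert to 512-byte sectors */
	uint64_t sector = slba << (blksize_shift - 9);
	uint64_t nr_sector = ((uint64_t)length + 1) << (blksize_shift - 9);

	/* prints: sector=128 nr_sector=64 */
	printf("sector=%llu nr_sector=%llu\n",
	       (unsigned long long)sector, (unsigned long long)nr_sector);
	return 0;
}

If nr_sector works out to zero, both the emulation work handler and the
native submission path complete the request immediately, which matches
the !nr_sector checks in the diff above.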