@@ -12,6 +12,7 @@
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
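+/* Chunk size for emulated verify reads: one max-size bio's worth of pages. */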
+#define NVMET_VERIFY_BUF_LEN (BIO_MAX_PAGES << PAGE_SHIFT)
int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
@@ -381,6 +382,153 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
schedule_work(&req->f.work);
}
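+
+/*
+ * Decode the Verify command: convert the starting LBA and the 0's-based
+ * block count into a byte offset and length using the namespace block size.
+ */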
+static void __nvmet_req_to_verify_offset(struct nvmet_req *req, loff_t *offset,
+ ssize_t *len)
+{
+ struct nvme_verify_cmd *verify = &req->cmd->verify;
+
+ *offset = le64_to_cpu(verify->slba) << req->ns->blksize_shift;
+ *len = (((sector_t)le16_to_cpu(verify->length) + 1) <<
+ req->ns->blksize_shift);
+}
+
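+/*
+ * Emulate verify for buffered I/O: read the whole range through the page
+ * cache in NVMET_VERIFY_BUF_LEN chunks and treat any short read as an error.
+ */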
+static int do_buffered_io_emulate_verify(struct file *f, loff_t offset,
+ ssize_t len)
+{
+ char *buf = NULL;
+ int ret = 0;
+ ssize_t rc;
+
+ buf = kmalloc(NVMET_VERIFY_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (len > 0) {
+ ssize_t curr_len = min_t(ssize_t, len, NVMET_VERIFY_BUF_LEN);
+
+ rc = kernel_read(f, buf, curr_len, &offset);
+ if (rc != curr_len) {
+ pr_err("kernel_read %lu curr_len %lu\n", rc, curr_len);
+ ret = rc < 0 ? rc : -EINVAL;
+ break;
+ }
+
+ len -= curr_len;
+ offset += curr_len;
+ cond_resched();
+ }
+
+ kfree(buf);
+ return ret;
+}
+
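+/*
+ * Emulate verify for direct I/O: build a scatterlist-backed bio_vec array and
+ * issue synchronous IOCB_DIRECT reads so each chunk is fetched from the media
+ * rather than the page cache.
+ */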
+static int do_direct_io_emulate_verify(struct file *f, loff_t offset,
+ ssize_t len)
+{
+ struct scatterlist *sgl = NULL;
+ struct bio_vec *bvec = NULL;
+ struct iov_iter iter = { 0 };
+ struct kiocb iocb = { 0 };
+ unsigned int sgl_nents;
+ int ret = 0;
+ int i;
+
+ while (len > 0) {
+ ssize_t curr_len = min_t(ssize_t, len, NVMET_VERIFY_BUF_LEN);
+ struct scatterlist *sg = NULL;
+ unsigned int bv_len = 0;
+ ssize_t rc;
+
+ sgl = sgl_alloc(curr_len, GFP_KERNEL, &sgl_nents);
+ if (!sgl) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bvec = kmalloc_array(sgl_nents, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ if (!bvec) {
+ ret = -ENOMEM;
+ break;
+ }
+
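+ /* Map each scatterlist entry onto a bio_vec for the iov_iter below. */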
+ for_each_sg(sgl, sg, sgl_nents, i) {
+ nvmet_file_init_bvec(&bvec[i], sg);
+ bv_len += sg->length;
+ }
+
+ if (bv_len != curr_len) {
+ pr_err("length mismatch sgl & bvec\n");
+ ret = -EINVAL;
+ break;
+ }
+
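+ /* Set up a synchronous direct read: no ki_complete, IOCB_DIRECT set. */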
+ iocb.ki_pos = offset;
+ iocb.ki_filp = f;
+ iocb.ki_complete = NULL; /* Sync I/O */
+ iocb.ki_flags |= IOCB_DIRECT;
+
+ iov_iter_bvec(&iter, READ, bvec, sgl_nents, bv_len);
+
+ rc = call_read_iter(f, &iocb, &iter);
+ if (rc != curr_len) {
+ pr_err("read len mismatch expected %lu got %ld\n",
+ curr_len, rc);
+ ret = rc < 0 ? rc : -EINVAL;
+ break;
+ }
+
+ cond_resched();
+
+ len -= curr_len;
+ offset += curr_len;
+
+ kfree(bvec);
+ sgl_free(sgl);
+ bvec = NULL;
+ sgl = NULL;
+ memset(&iocb, 0, sizeof(iocb));
+ memset(&iter, 0, sizeof(iter));
+ }
+
+ kfree(bvec);
+ sgl_free(sgl);
+ return ret;
+}
+
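+/*
+ * Work item for emulated verify: decode the range, reject ranges past the
+ * end of the namespace, then read the data via the buffered or direct path.
+ */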
+static void nvmet_file_emulate_verify_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+ loff_t offset;
+ ssize_t len;
+ int ret = 0;
+
+ __nvmet_req_to_verify_offset(req, &offset, &len);
+ if (!len)
+ goto out;
+
+ if (unlikely(offset + len > req->ns->size)) {
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return;
+ }
+
+ if (req->ns->buffered_io)
+ ret = do_buffered_io_emulate_verify(req->ns->file, offset, len);
+ else
+ ret = do_direct_io_emulate_verify(req->ns->file, offset, len);
+out:
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+}
+
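+/* Verify carries no data from the host; emulate it from the verify workqueue. */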
+static void nvmet_file_execute_verify(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len_lte(req, 0))
+ return;
+
+ INIT_WORK(&req->f.work, nvmet_file_emulate_verify_work);
+ queue_work(verify_wq, &req->f.work);
+}
+
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -399,6 +547,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
return 0;
+ case nvme_cmd_verify:
+ req->execute = nvmet_file_execute_verify;
+ return 0;
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
cmd->common.opcode, req->sq->qid);