Message ID | 20230411081041.5328-7-anuj20.g@samsung.com (mailing list archive)
---|---
State | New, archived
Series | [v9,1/9] block: Introduce queue limits for copy-offload support
On 4/11/23 01:10, Anuj Gupta wrote:
> From: Nitesh Shetty <nj.shetty@samsung.com>
>
> Add support for handling target command on target.

what is target command ?

command that you have added is nvme_cmd_copy ...

> For bdev-ns we call into blkdev_issue_copy, which the block layer
> completes by an offloaded copy request to backend bdev or by emulating
> the request.
>
> For file-ns we call vfs_copy_file_range to service our request.
>
> Currently target always shows copy capability by setting
> NVME_CTRL_ONCS_COPY in controller ONCS.

there is nothing mentioned about target/loop.c in the commit log ?

> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
> ---
>  drivers/nvme/target/admin-cmd.c   |  9 +++--
>  drivers/nvme/target/io-cmd-bdev.c | 58 +++++++++++++++++++++++++++++++
>  drivers/nvme/target/io-cmd-file.c | 52 +++++++++++++++++++++++++++
>  drivers/nvme/target/loop.c        |  6 ++++
>  drivers/nvme/target/nvmet.h       |  1 +
>  5 files changed, 124 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
> index 80099df37314..978786ec6a9e 100644
> --- a/drivers/nvme/target/admin-cmd.c
> +++ b/drivers/nvme/target/admin-cmd.c
> @@ -433,8 +433,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
>  	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
>  	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
>  	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
> -			NVME_CTRL_ONCS_WRITE_ZEROES);
> -
> +			NVME_CTRL_ONCS_WRITE_ZEROES | NVME_CTRL_ONCS_COPY);
>  	/* XXX: don't report vwc if the underlying device is write through */
>  	id->vwc = NVME_CTRL_VWC_PRESENT;
>
> @@ -536,6 +535,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
>
>  	if (req->ns->bdev)
>  		nvmet_bdev_set_limits(req->ns->bdev, id);
> +	else {
> +		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
> +		id->mssrl = cpu_to_le16(BIO_MAX_VECS <<
> +				(PAGE_SHIFT - SECTOR_SHIFT));
> +		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
> +	}
>
>  	/*
>  	 * We just provide a single LBA format that matches what the
> diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
> index c2d6cea0236b..0af273097aa4 100644
> --- a/drivers/nvme/target/io-cmd-bdev.c
> +++ b/drivers/nvme/target/io-cmd-bdev.c
> @@ -46,6 +46,19 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
>  	id->npda = id->npdg;
>  	/* NOWS = Namespace Optimal Write Size */
>  	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
> +
> +	/*Copy limits*/

above comment doesn't make any sense ...

> +	if (bdev_max_copy_sectors(bdev)) {
> +		id->msrc = id->msrc;
> +		id->mssrl = cpu_to_le16((bdev_max_copy_sectors(bdev) <<
> +				SECTOR_SHIFT) / bdev_logical_block_size(bdev));
> +		id->mcl = cpu_to_le32(id->mssrl);
> +	} else {
> +		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
> +		id->mssrl = cpu_to_le16((BIO_MAX_VECS << PAGE_SHIFT) /
> +				bdev_logical_block_size(bdev));
> +		id->mcl = cpu_to_le32(id->mssrl);
> +	}
>  }
>
>  void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
> @@ -184,6 +197,19 @@ static void nvmet_bio_done(struct bio *bio)
>  	nvmet_req_bio_put(req, bio);
>  }
>
> +static void nvmet_bdev_copy_end_io(void *private, int comp_len)
> +{
> +	struct nvmet_req *req = (struct nvmet_req *)private;
> +
> +	if (comp_len == req->copy_len) {
> +		req->cqe->result.u32 = cpu_to_le32(1);
> +		nvmet_req_complete(req, errno_to_nvme_status(req, 0));
> +	} else {
> +		req->cqe->result.u32 = cpu_to_le32(0);
> +		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
> +	}
> +}
> +

please reduce calls for nvmet_req_complete().

+static void nvmet_bdev_copy_end_io(void *private, int comp_len)
+{
+	struct nvmet_req *req = (struct nvmet_req *)private;
+	u16 status;
+
+	if (comp_len == req->copy_len) {
+		req->cqe->result.u32 = cpu_to_le32(1);
+		status = errno_to_nvme_status(req, 0);
+	} else {
+		req->cqe->result.u32 = cpu_to_le32(0);
+		status = blk_to_nvme_status(req, BLK_STS_IOERR);
+	}
+	nvmet_req_complete(req, status);
+}
+

> #ifdef CONFIG_BLK_DEV_INTEGRITY
> static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
> 		struct sg_mapping_iter *miter)
> @@ -450,6 +476,34 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
>  	}
>  }
>
> +/* At present we handle only one range entry */

please add explanation why ...

> +static void nvmet_bdev_execute_copy(struct nvmet_req *req)
> +{
> +	struct nvme_copy_range range;
> +	struct nvme_command *cmnd = req->cmd;

don't use cmnd, cmd is used everywhere and matches req->cmd,
applies to everywhere in this patch ...

> +	int ret;

wrong return type, it should be u16 since nvmet_copy_from_sgl()
returns u16 if I remember correctly.

> +
> +

no extra white line between declaration and body of functions

> +	ret = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
> +	if (ret)
> +		goto out;
> +
> +	ret = blkdev_issue_copy(req->ns->bdev,
> +			le64_to_cpu(cmnd->copy.sdlba) << req->ns->blksize_shift,
> +			req->ns->bdev,
> +			le64_to_cpu(range.slba) << req->ns->blksize_shift,
> +			(le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift,
> +			nvmet_bdev_copy_end_io, (void *)req, GFP_KERNEL);
> +	if (ret) {
> +		req->cqe->result.u32 = cpu_to_le32(0);
> +		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
> +	}
> +
> +	return;
> +out:
> +	nvmet_req_complete(req, errno_to_nvme_status(req, ret));
> +}
> +

again, one call to nvmet_req_complete() can do the same job.
consider the following, totally untested :-

/* TODO: add detailed comment here why you support one range ?
 */
static void nvmet_bdev_execute_copy(struct nvmet_req *req)
{
	u32 blkshift = req->ns->blksize_shift;
	struct nvme_command *cmd = req->cmd;
	struct nvme_copy_range range;
	u16 status;
	int ret;

	status = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
	if (status)
		goto out;

	ret = blkdev_issue_copy(req->ns->bdev,
				le64_to_cpu(cmd->copy.sdlba) << blkshift,
				req->ns->bdev,
				le64_to_cpu(range.slba) << blkshift,
				(le16_to_cpu(range.nlb) + 1) << blkshift,
				nvmet_bdev_copy_end_io, (void *)req,
				GFP_KERNEL);
	if (ret) {
		req->cqe->result.u32 = cpu_to_le32(0);
		status = blk_to_nvme_status(req, BLK_STS_IOERR);
out:
		nvmet_req_complete(req, status);
	}
}

> u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
> {
> 	switch (req->cmd->common.opcode) {
> @@ -468,6 +522,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
>  	case nvme_cmd_write_zeroes:
>  		req->execute = nvmet_bdev_execute_write_zeroes;
>  		return 0;
> +	case nvme_cmd_copy:
> +		req->execute = nvmet_bdev_execute_copy;
> +		return 0;
> +
>  	default:
>  		return nvmet_report_invalid_opcode(req);
>  	}
> diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
> index 2d068439b129..69f198ecec77 100644
> --- a/drivers/nvme/target/io-cmd-file.c
> +++ b/drivers/nvme/target/io-cmd-file.c
> @@ -322,6 +322,49 @@ static void nvmet_file_dsm_work(struct work_struct *w)
>  	}
>  }
>
> +static void nvmet_file_copy_work(struct work_struct *w)
> +{
> +	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
> +	int nr_range;
> +	loff_t pos;
> +	struct nvme_command *cmnd = req->cmd;
> +	int ret = 0, len = 0, src, id;

reverse tree style for declarations ...

> +
> +	nr_range = cmnd->copy.nr_range + 1;
> +	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;

you have a cmnd variable above and you are still using req->cmd ?
why create a variable on stack then ? you don't need that variable
anyways ...

> +	if (unlikely(pos + req->transfer_len > req->ns->size)) {
> +		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
> +		return;
> +	}
> +
> +	for (id = 0 ; id < nr_range; id++) {
> +		struct nvme_copy_range range;
> +
> +		ret = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
> +					sizeof(range));
> +		if (ret)
> +			goto out;
> +
> +		len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
> +		src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
> +		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
> +					pos, len, 0);

5th parameter to vfs_copy_file_range() is size_t, you have used int
for len ? also vfs_copy_file_range() returns ssize_t, you are catching
it in an int ?

> +out:
> +		if (ret != len) {
> +			pos += ret;
> +			req->cqe->result.u32 = cpu_to_le32(id);
> +			nvmet_req_complete(req, ret < 0 ?
> +					errno_to_nvme_status(req, ret) :
> +					errno_to_nvme_status(req, -EIO));

again, please don't add multiple nvmet_req_complete() calls

> +			return;
> +
> +		} else
> +			pos += len;
> +	}
> +
> +	nvmet_req_complete(req, 0);
> +
> +}

wrt the above comments consider the following, totally untested :-

static void nvmet_file_copy_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	int nr_range = req->cmd->copy.nr_range + 1;
	u16 status = 0;
	int src, id;
	ssize_t ret;
	size_t len;
	loff_t pos;

	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	for (id = 0 ; id < nr_range; id++) {
		struct nvme_copy_range range;

		status = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
					     sizeof(range));
		if (status)
			goto out;

		src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
		len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
					  pos, len, 0);
		if (ret != len) {
			req->cqe->result.u32 = cpu_to_le32(id);
			if (ret < 0)
				status = errno_to_nvme_status(req, ret);
			else
				status = errno_to_nvme_status(req, -EIO);
			goto out;
		}
		pos += ret;
	}
out:
	nvmet_req_complete(req, status);
}

> static void nvmet_file_execute_dsm(struct nvmet_req *req)
> {
> 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
> @@ -330,6 +373,12 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
>  	queue_work(nvmet_wq, &req->f.work);
>  }
>
> +static void nvmet_file_execute_copy(struct nvmet_req *req)
> +{
> +	INIT_WORK(&req->f.work, nvmet_file_copy_work);
> +	queue_work(nvmet_wq, &req->f.work);
> +}
> +
>  static void nvmet_file_write_zeroes_work(struct work_struct *w)
>  {
>  	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
> @@ -376,6 +425,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
>  	case nvme_cmd_write_zeroes:
>  		req->execute = nvmet_file_execute_write_zeroes;
>  		return 0;
> +	case nvme_cmd_copy:
> +		req->execute = nvmet_file_execute_copy;
> +		return 0;
>  	default:
>  		return nvmet_report_invalid_opcode(req);
>  	}
> diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
> index f2d24b2d992f..d18ed8067a15 100644
> --- a/drivers/nvme/target/loop.c
> +++ b/drivers/nvme/target/loop.c
> @@ -146,6 +146,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
>  		return ret;
>
>  	nvme_start_request(req);
> +	if (unlikely((req->cmd_flags & REQ_COPY) &&
> +				(req_op(req) == REQ_OP_READ))) {
> +		blk_mq_set_request_complete(req);
> +		blk_mq_end_request(req, BLK_STS_OK);
> +		return BLK_STS_OK;
> +	}
>  	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
>  	iod->req.port = queue->ctrl->port;
>  	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
> index 89bedfcd974c..69ed4c8469e5 100644
> --- a/drivers/nvme/target/nvmet.h
> +++ b/drivers/nvme/target/nvmet.h
> @@ -393,6 +393,7 @@ struct nvmet_req {
>  	struct device *p2p_client;
>  	u16 error_loc;
>  	u64 error_slba;
> +	size_t copy_len;
>  };
>
>  #define NVMET_MAX_MPOOL_BVEC 16

-ck
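For readers following the limits discussion above: MSSRL and MCL are counts of logical blocks, while the block-layer copy limit is in 512-byte sectors. A standalone sketch of that unit conversion, using hypothetical device values rather than anything from the patch:

```c
/* Sketch of the MSSRL/MCL unit conversion done in nvmet_bdev_set_limits();
 * the device values below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the block layer */

int main(void)
{
	uint64_t max_copy_sectors = 16384;	/* hypothetical: 8 MiB in 512 B sectors */
	uint64_t lbs = 4096;			/* hypothetical logical block size */

	/* sectors -> bytes -> logical blocks, mirroring the patch */
	uint16_t mssrl = (uint16_t)((max_copy_sectors << SECTOR_SHIFT) / lbs);
	uint32_t mcl = mssrl;	/* one source range per command, so MCL == MSSRL */

	printf("mssrl = %u LBAs, mcl = %u LBAs (%llu bytes)\n",
	       mssrl, mcl, (unsigned long long)((uint64_t)mssrl * lbs));
	return 0;
}
```

With these values the result is 2048 LBAs, i.e. the same 8 MiB expressed in 4096-byte logical blocks.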
On Tue, Apr 25, 2023 at 06:36:51AM +0000, Chaitanya Kulkarni wrote:
> On 4/11/23 01:10, Anuj Gupta wrote:
> > From: Nitesh Shetty <nj.shetty@samsung.com>
> >
> > Add support for handling target command on target.
>
> what is target command ?
>
> command that you have added is nvme_cmd_copy ...
>

acked. It was supposed to be nvme_cmd_copy.

> > For bdev-ns we call into blkdev_issue_copy, which the block layer
> > completes by an offloaded copy request to backend bdev or by emulating
> > the request.
> >
> > For file-ns we call vfs_copy_file_range to service our request.
> >
> > Currently target always shows copy capability by setting
> > NVME_CTRL_ONCS_COPY in controller ONCS.
>
> there is nothing mentioned about target/loop.c in the commit log ?
>

acked, will add the description for the loop device.

> > Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> > Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
> > ---
> >  drivers/nvme/target/admin-cmd.c   |  9 +++--
> >  drivers/nvme/target/io-cmd-bdev.c | 58 +++++++++++++++++++++++++++++++
> >  drivers/nvme/target/io-cmd-file.c | 52 +++++++++++++++++++++++++++
> >  drivers/nvme/target/loop.c        |  6 ++++
> >  drivers/nvme/target/nvmet.h       |  1 +
> >  5 files changed, 124 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
> > index 80099df37314..978786ec6a9e 100644
> > --- a/drivers/nvme/target/admin-cmd.c
> > +++ b/drivers/nvme/target/admin-cmd.c
> > @@ -433,8 +433,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
> >  	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
> >  	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
> >  	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
> > -			NVME_CTRL_ONCS_WRITE_ZEROES);
> > -
> > +			NVME_CTRL_ONCS_WRITE_ZEROES | NVME_CTRL_ONCS_COPY);
> >  	/* XXX: don't report vwc if the underlying device is write through */
> >  	id->vwc = NVME_CTRL_VWC_PRESENT;
> >
> > @@ -536,6 +535,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
> >
> >  	if (req->ns->bdev)
> >  		nvmet_bdev_set_limits(req->ns->bdev, id);
> > +	else {
> > +		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
> > +		id->mssrl = cpu_to_le16(BIO_MAX_VECS <<
> > +				(PAGE_SHIFT - SECTOR_SHIFT));
> > +		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
> > +	}
> >
> >  	/*
> >  	 * We just provide a single LBA format that matches what the
> > diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
> > index c2d6cea0236b..0af273097aa4 100644
> > --- a/drivers/nvme/target/io-cmd-bdev.c
> > +++ b/drivers/nvme/target/io-cmd-bdev.c
> > @@ -46,6 +46,19 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
> >  	id->npda = id->npdg;
> >  	/* NOWS = Namespace Optimal Write Size */
> >  	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
> > +
> > +	/*Copy limits*/
>
> above comment doesn't make any sense ...
>

acked, will remove it next version.

> > +	if (bdev_max_copy_sectors(bdev)) {
> > +		id->msrc = id->msrc;
> > +		id->mssrl = cpu_to_le16((bdev_max_copy_sectors(bdev) <<
> > +				SECTOR_SHIFT) / bdev_logical_block_size(bdev));
> > +		id->mcl = cpu_to_le32(id->mssrl);
> > +	} else {
> > +		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
> > +		id->mssrl = cpu_to_le16((BIO_MAX_VECS << PAGE_SHIFT) /
> > +				bdev_logical_block_size(bdev));
> > +		id->mcl = cpu_to_le32(id->mssrl);
> > +	}
> >  }
> >
> >  void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
> > @@ -184,6 +197,19 @@ static void nvmet_bio_done(struct bio *bio)
> >  	nvmet_req_bio_put(req, bio);
> >  }
> >
> > +static void nvmet_bdev_copy_end_io(void *private, int comp_len)
> > +{
> > +	struct nvmet_req *req = (struct nvmet_req *)private;
> > +
> > +	if (comp_len == req->copy_len) {
> > +		req->cqe->result.u32 = cpu_to_le32(1);
> > +		nvmet_req_complete(req, errno_to_nvme_status(req, 0));
> > +	} else {
> > +		req->cqe->result.u32 = cpu_to_le32(0);
> > +		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
> > +	}
> > +}
> > +
>
> please reduce calls for nvmet_req_complete().
>
> +static void nvmet_bdev_copy_end_io(void *private, int comp_len)
> +{
> +	struct nvmet_req *req = (struct nvmet_req *)private;
> +	u16 status;
> +
> +	if (comp_len == req->copy_len) {
> +		req->cqe->result.u32 = cpu_to_le32(1);
> +		status = errno_to_nvme_status(req, 0);
> +	} else {
> +		req->cqe->result.u32 = cpu_to_le32(0);
> +		status = blk_to_nvme_status(req, BLK_STS_IOERR);
> +	}
> +	nvmet_req_complete(req, status);
> +}
> +
>

makes sense, will modify this snippet.

> > #ifdef CONFIG_BLK_DEV_INTEGRITY
> > static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
> > 		struct sg_mapping_iter *miter)
> > @@ -450,6 +476,34 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
> >  	}
> >  }
> >
> > +/* At present we handle only one range entry */
>
> please add explanation why ...
>

Because we aligned copy offload similar to copy_file_range in our recent
revisions, discarding multi-range support. Sure, we will update the
comment to reflect the same.

> > +static void nvmet_bdev_execute_copy(struct nvmet_req *req)
> > +{
> > +	struct nvme_copy_range range;
> > +	struct nvme_command *cmnd = req->cmd;
>
> don't use cmnd, cmd is used everywhere and matches req->cmd,
> applies to everywhere in this patch ...
>

acked

> > +	int ret;
>
> wrong return type, it should be u16 since nvmet_copy_from_sgl()
> returns u16 if I remember correctly.
>
> > +
> > +
>

acked

> no extra white line between declaration and body of functions
>

acked

> > +	ret = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = blkdev_issue_copy(req->ns->bdev,
> > +			le64_to_cpu(cmnd->copy.sdlba) << req->ns->blksize_shift,
> > +			req->ns->bdev,
> > +			le64_to_cpu(range.slba) << req->ns->blksize_shift,
> > +			(le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift,
> > +			nvmet_bdev_copy_end_io, (void *)req, GFP_KERNEL);
> > +	if (ret) {
> > +		req->cqe->result.u32 = cpu_to_le32(0);
> > +		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
> > +	}
> > +
> > +	return;
> > +out:
> > +	nvmet_req_complete(req, errno_to_nvme_status(req, ret));
> > +}
> > +
>
> again, one call to nvmet_req_complete() can do the same job.
> consider the following, totally untested :-
>
> /* TODO: add detailed comment here why you support one range ?
>  */
> static void nvmet_bdev_execute_copy(struct nvmet_req *req)
> {
> 	u32 blkshift = req->ns->blksize_shift;
> 	struct nvme_command *cmd = req->cmd;
> 	struct nvme_copy_range range;
> 	u16 status;
> 	int ret;
>
> 	status = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
> 	if (status)
> 		goto out;
>
> 	ret = blkdev_issue_copy(req->ns->bdev,
> 				le64_to_cpu(cmd->copy.sdlba) << blkshift,
> 				req->ns->bdev,
> 				le64_to_cpu(range.slba) << blkshift,
> 				(le16_to_cpu(range.nlb) + 1) << blkshift,
> 				nvmet_bdev_copy_end_io, (void *)req,
> 				GFP_KERNEL);
> 	if (ret) {
> 		req->cqe->result.u32 = cpu_to_le32(0);
> 		status = blk_to_nvme_status(req, BLK_STS_IOERR);
> out:
> 		nvmet_req_complete(req, status);
> 	}
> }
>

acked, thanks for sharing the snippet

> > u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
> > {
> > 	switch (req->cmd->common.opcode) {
> > @@ -468,6 +522,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
> >  	case nvme_cmd_write_zeroes:
> >  		req->execute = nvmet_bdev_execute_write_zeroes;
> >  		return 0;
> > +	case nvme_cmd_copy:
> > +		req->execute = nvmet_bdev_execute_copy;
> > +		return 0;
> > +
> >  	default:
> >  		return nvmet_report_invalid_opcode(req);
> >  	}
> > diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
> > index 2d068439b129..69f198ecec77 100644
> > --- a/drivers/nvme/target/io-cmd-file.c
> > +++ b/drivers/nvme/target/io-cmd-file.c
> > @@ -322,6 +322,49 @@ static void nvmet_file_dsm_work(struct work_struct *w)
> >  	}
> >  }
> >
> > +static void nvmet_file_copy_work(struct work_struct *w)
> > +{
> > +	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
> > +	int nr_range;
> > +	loff_t pos;
> > +	struct nvme_command *cmnd = req->cmd;
> > +	int ret = 0, len = 0, src, id;
>
> reverse tree style for declarations ...
>

acked

> > +
> > +	nr_range = cmnd->copy.nr_range + 1;
> > +	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
>
> you have a cmnd variable above and you are still using req->cmd ?
> why create a variable on stack then ? you don't need that variable
> anyways ...
>

acked

> > +	if (unlikely(pos + req->transfer_len > req->ns->size)) {
> > +		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
> > +		return;
> > +	}
> > +
> > +	for (id = 0 ; id < nr_range; id++) {
> > +		struct nvme_copy_range range;
> > +
> > +		ret = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
> > +					sizeof(range));
> > +		if (ret)
> > +			goto out;
> > +
> > +		len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
> > +		src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
> > +		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
> > +					pos, len, 0);
>
> 5th parameter to vfs_copy_file_range() is size_t, you have used int
> for len ? also vfs_copy_file_range() returns ssize_t, you are catching
> it in an int ?
>

acked, will change it to ssize_t.

> > +out:
> > +		if (ret != len) {
> > +			pos += ret;
> > +			req->cqe->result.u32 = cpu_to_le32(id);
> > +			nvmet_req_complete(req, ret < 0 ?
> > +					errno_to_nvme_status(req, ret) :
> > +					errno_to_nvme_status(req, -EIO));
>
> again, please don't add multiple nvmet_req_complete() calls
>

acked

> > +			return;
> > +
> > +		} else
> > +			pos += len;
> > +	}
> > +
> > +	nvmet_req_complete(req, 0);
> > +
> > +}
>
> wrt the above comments consider the following, totally untested :-
>
> static void nvmet_file_copy_work(struct work_struct *w)
> {
> 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
> 	int nr_range = req->cmd->copy.nr_range + 1;
> 	u16 status = 0;
> 	int src, id;
> 	ssize_t ret;
> 	size_t len;
> 	loff_t pos;
>
> 	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
> 	if (unlikely(pos + req->transfer_len > req->ns->size)) {
> 		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
> 		return;
> 	}
>
> 	for (id = 0 ; id < nr_range; id++) {
> 		struct nvme_copy_range range;
>
> 		status = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
> 					     sizeof(range));
> 		if (status)
> 			goto out;
>
> 		src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
> 		len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
> 		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
> 					  pos, len, 0);
> 		if (ret != len) {
> 			req->cqe->result.u32 = cpu_to_le32(id);
> 			if (ret < 0)
> 				status = errno_to_nvme_status(req, ret);
> 			else
> 				status = errno_to_nvme_status(req, -EIO);
> 			goto out;
> 		}
> 		pos += ret;
> 	}
> out:
> 	nvmet_req_complete(req, status);
> }
>

Thanks for the snippet, will update this in the next version.

--
Nitesh Shetty
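Both handlers discussed above lean on the same LBA-to-byte-offset arithmetic: sdlba/slba shifted by blksize_shift, and a 0-based nlb plus one for the length. A minimal standalone sketch of that conversion, with hypothetical command values:

```c
/* Sketch of the LBA-to-byte conversions used by the copy handlers;
 * all command values below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blksize_shift = 12;	/* hypothetical 4096-byte LBA format */
	uint64_t sdlba = 0x1000;		/* destination start LBA (copy.sdlba) */
	uint64_t slba = 0x2000;			/* source range start LBA (range.slba) */
	uint16_t nlb = 255;			/* 0-based per the NVMe spec: 256 blocks */

	uint64_t dst_off = sdlba << blksize_shift;
	uint64_t src_off = slba << blksize_shift;
	uint64_t len = (uint64_t)(nlb + 1) << blksize_shift;

	printf("copy %llu bytes: src 0x%llx -> dst 0x%llx\n",
	       (unsigned long long)len, (unsigned long long)src_off,
	       (unsigned long long)dst_off);
	return 0;
}
```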
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 80099df37314..978786ec6a9e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -433,8 +433,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
-			NVME_CTRL_ONCS_WRITE_ZEROES);
-
+			NVME_CTRL_ONCS_WRITE_ZEROES | NVME_CTRL_ONCS_COPY);
 	/* XXX: don't report vwc if the underlying device is write through */
 	id->vwc = NVME_CTRL_VWC_PRESENT;
 
@@ -536,6 +535,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 	if (req->ns->bdev)
 		nvmet_bdev_set_limits(req->ns->bdev, id);
+	else {
+		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
+		id->mssrl = cpu_to_le16(BIO_MAX_VECS <<
+				(PAGE_SHIFT - SECTOR_SHIFT));
+		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+	}
 
 	/*
 	 * We just provide a single LBA format that matches what the
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c2d6cea0236b..0af273097aa4 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,19 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
 	id->npda = id->npdg;
 	/* NOWS = Namespace Optimal Write Size */
 	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+	/*Copy limits*/
+	if (bdev_max_copy_sectors(bdev)) {
+		id->msrc = id->msrc;
+		id->mssrl = cpu_to_le16((bdev_max_copy_sectors(bdev) <<
+				SECTOR_SHIFT) / bdev_logical_block_size(bdev));
+		id->mcl = cpu_to_le32(id->mssrl);
+	} else {
+		id->msrc = (u8)to0based(BIO_MAX_VECS - 1);
+		id->mssrl = cpu_to_le16((BIO_MAX_VECS << PAGE_SHIFT) /
+				bdev_logical_block_size(bdev));
+		id->mcl = cpu_to_le32(id->mssrl);
+	}
 }
 
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -184,6 +197,19 @@ static void nvmet_bio_done(struct bio *bio)
 	nvmet_req_bio_put(req, bio);
 }
 
+static void nvmet_bdev_copy_end_io(void *private, int comp_len)
+{
+	struct nvmet_req *req = (struct nvmet_req *)private;
+
+	if (comp_len == req->copy_len) {
+		req->cqe->result.u32 = cpu_to_le32(1);
+		nvmet_req_complete(req, errno_to_nvme_status(req, 0));
+	} else {
+		req->cqe->result.u32 = cpu_to_le32(0);
+		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
+	}
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 		struct sg_mapping_iter *miter)
@@ -450,6 +476,34 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	}
 }
 
+/* At present we handle only one range entry */
+static void nvmet_bdev_execute_copy(struct nvmet_req *req)
+{
+	struct nvme_copy_range range;
+	struct nvme_command *cmnd = req->cmd;
+	int ret;
+
+
+	ret = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
+	if (ret)
+		goto out;
+
+	ret = blkdev_issue_copy(req->ns->bdev,
+			le64_to_cpu(cmnd->copy.sdlba) << req->ns->blksize_shift,
+			req->ns->bdev,
+			le64_to_cpu(range.slba) << req->ns->blksize_shift,
+			(le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift,
+			nvmet_bdev_copy_end_io, (void *)req, GFP_KERNEL);
+	if (ret) {
+		req->cqe->result.u32 = cpu_to_le32(0);
+		nvmet_req_complete(req, blk_to_nvme_status(req, BLK_STS_IOERR));
+	}
+
+	return;
+out:
+	nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+}
+
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 {
 	switch (req->cmd->common.opcode) {
@@ -468,6 +522,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
 		return 0;
+	case nvme_cmd_copy:
+		req->execute = nvmet_bdev_execute_copy;
+		return 0;
+
 	default:
 		return nvmet_report_invalid_opcode(req);
 	}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 2d068439b129..69f198ecec77 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -322,6 +322,49 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 	}
 }
 
+static void nvmet_file_copy_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+	int nr_range;
+	loff_t pos;
+	struct nvme_command *cmnd = req->cmd;
+	int ret = 0, len = 0, src, id;
+
+	nr_range = cmnd->copy.nr_range + 1;
+	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
+	if (unlikely(pos + req->transfer_len > req->ns->size)) {
+		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+		return;
+	}
+
+	for (id = 0 ; id < nr_range; id++) {
+		struct nvme_copy_range range;
+
+		ret = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
+					sizeof(range));
+		if (ret)
+			goto out;
+
+		len = (le16_to_cpu(range.nlb) + 1) << (req->ns->blksize_shift);
+		src = (le64_to_cpu(range.slba) << (req->ns->blksize_shift));
+		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
+					pos, len, 0);
+out:
+		if (ret != len) {
+			pos += ret;
+			req->cqe->result.u32 = cpu_to_le32(id);
+			nvmet_req_complete(req, ret < 0 ?
+					errno_to_nvme_status(req, ret) :
+					errno_to_nvme_status(req, -EIO));
+			return;
+
+		} else
+			pos += len;
+	}
+
+	nvmet_req_complete(req, 0);
+
+}
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
@@ -330,6 +373,12 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
 	queue_work(nvmet_wq, &req->f.work);
 }
 
+static void nvmet_file_execute_copy(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_copy_work);
+	queue_work(nvmet_wq, &req->f.work);
+}
+
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -376,6 +425,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
 		return 0;
+	case nvme_cmd_copy:
+		req->execute = nvmet_file_execute_copy;
+		return 0;
 	default:
 		return nvmet_report_invalid_opcode(req);
 	}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f2d24b2d992f..d18ed8067a15 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -146,6 +146,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return ret;
 
 	nvme_start_request(req);
+	if (unlikely((req->cmd_flags & REQ_COPY) &&
+				(req_op(req) == REQ_OP_READ))) {
+		blk_mq_set_request_complete(req);
+		blk_mq_end_request(req, BLK_STS_OK);
+		return BLK_STS_OK;
+	}
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 	iod->req.port = queue->ctrl->port;
 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 89bedfcd974c..69ed4c8469e5 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -393,6 +393,7 @@ struct nvmet_req {
 	struct device *p2p_client;
 	u16 error_loc;
 	u64 error_slba;
+	size_t copy_len;
 };
 
 #define NVMET_MAX_MPOOL_BVEC 16