Message ID: 20211104064634.4481-4-chaitanyak@nvidia.com
State: Not Applicable, archived
Delegated to: Mike Snitzer
Series: block: add support for REQ_OP_VERIFY
On Wed, Nov 03, 2021 at 11:46:29PM -0700, Chaitanya Kulkarni wrote:
> +static inline blk_status_t nvme_setup_verify(struct nvme_ns *ns,
> +		struct request *req, struct nvme_command *cmnd)
> +{

Due to recent driver changes, you need a "memset(cmnd, 0, sizeof(*cmnd))"
prior to setting up the rest of the command, or you need to set each
command dword individually.

> +	cmnd->verify.opcode = nvme_cmd_verify;
> +	cmnd->verify.nsid = cpu_to_le32(ns->head->ns_id);
> +	cmnd->verify.slba =
> +		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
> +	cmnd->verify.length =
> +		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
> +	cmnd->verify.control = 0;
> +	return BLK_STS_OK;
> +}

> +static void nvme_config_verify(struct gendisk *disk, struct nvme_ns *ns)
> +{
> +	u64 max_blocks;
> +
> +	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_VERIFY))
> +		return;
> +
> +	if (ns->ctrl->max_hw_sectors == UINT_MAX)
> +		max_blocks = (u64)USHRT_MAX + 1;
> +	else
> +		max_blocks = ns->ctrl->max_hw_sectors + 1;

If supported by the controller, this maximum is defined in the non-mdts
command limits in NVM command set specific Identify Controller VSL field.

> +
> +	/* keep same as discard */
> +	if (blk_queue_flag_test_and_set(QUEUE_FLAG_VERIFY, disk->queue))
> +		return;
> +
> +	blk_queue_max_verify_sectors(disk->queue,
> +				nvme_lba_to_sect(ns, max_blocks));
> +
> +}
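For illustration, a minimal sketch of the first fix requested above, not taken
from any posted revision; only the leading memset() is new, the rest is copied
from the patch as-is:

static inline blk_status_t nvme_setup_verify(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	/*
	 * The command buffer is no longer handed to the submitter pre-zeroed,
	 * so clear it before filling in the verify-specific fields (the
	 * alternative is to assign every command dword explicitly).
	 */
	memset(cmnd, 0, sizeof(*cmnd));

	cmnd->verify.opcode = nvme_cmd_verify;
	cmnd->verify.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->verify.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	/* NLB is a zero-based count of logical blocks */
	cmnd->verify.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	/* control is already zero thanks to the memset above */
	return BLK_STS_OK;
}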
On 11/4/2021 3:44 PM, Keith Busch wrote:
> On Wed, Nov 03, 2021 at 11:46:29PM -0700, Chaitanya Kulkarni wrote:
>> +static inline blk_status_t nvme_setup_verify(struct nvme_ns *ns,
>> +		struct request *req, struct nvme_command *cmnd)
>> +{
>
> Due to recent driver changes, you need a "memset(cmnd, 0, sizeof(*cmnd))"
> prior to setting up the rest of the command, or you need to set each
> command dword individually.

Agree, will fix that in V1.

>> +	cmnd->verify.opcode = nvme_cmd_verify;
>> +	cmnd->verify.nsid = cpu_to_le32(ns->head->ns_id);
>> +	cmnd->verify.slba =
>> +		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
>> +	cmnd->verify.length =
>> +		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
>> +	cmnd->verify.control = 0;
>> +	return BLK_STS_OK;
>> +}
>
>> +static void nvme_config_verify(struct gendisk *disk, struct nvme_ns *ns)
>> +{
>> +	u64 max_blocks;
>> +
>> +	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_VERIFY))
>> +		return;
>> +
>> +	if (ns->ctrl->max_hw_sectors == UINT_MAX)
>> +		max_blocks = (u64)USHRT_MAX + 1;
>> +	else
>> +		max_blocks = ns->ctrl->max_hw_sectors + 1;
>
> If supported by the controller, this maximum is defined in the non-mdts
> command limits in NVM command set specific Identify Controller VSL field.

I need to take a closer look at this. I'll fix that in V1.

>> +
>> +	/* keep same as discard */
>> +	if (blk_queue_flag_test_and_set(QUEUE_FLAG_VERIFY, disk->queue))
>> +		return;
>> +
>> +	blk_queue_max_verify_sectors(disk->queue,
>> +				nvme_lba_to_sect(ns, max_blocks));
>> +
>> +}

Thanks for the comment, Keith.
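As a sketch of the second point, the verify limit could be derived from the
Identify Controller, I/O Command Set specific data the same way the driver
handles its other non-MDTS limits. The vsl field of struct nvme_id_ctrl_nvm
and the nvme_mps_to_sectors() helper follow the existing Write Zeroes (WZSL)
handling in the driver of that era; the max_verify_sectors member is purely
hypothetical and is not part of this patch:

/*
 * Sketch only: VSL is reported as a power of two in units of the minimum
 * memory page size; zero means the controller does not report a limit.
 * ctrl->max_verify_sectors is a hypothetical field used for illustration.
 */
static void nvme_init_verify_limit(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl_nvm *id)
{
	if (id->vsl)
		ctrl->max_verify_sectors = nvme_mps_to_sectors(ctrl, id->vsl);
	else
		ctrl->max_verify_sectors = UINT_MAX;
}

nvme_config_verify() would then pass this controller-wide limit to
blk_queue_max_verify_sectors() instead of a value computed from
max_hw_sectors.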
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 546a10407385..250647c3bb7b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -801,6 +801,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 	return BLK_STS_OK;
 }
 
+static inline blk_status_t nvme_setup_verify(struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd)
+{
+	cmnd->verify.opcode = nvme_cmd_verify;
+	cmnd->verify.nsid = cpu_to_le32(ns->head->ns_id);
+	cmnd->verify.slba =
+		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+	cmnd->verify.length =
+		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+	cmnd->verify.control = 0;
+	return BLK_STS_OK;
+}
+
 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		struct request *req, struct nvme_command *cmnd,
 		enum nvme_opcode op)
@@ -904,6 +917,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	case REQ_OP_WRITE_ZEROES:
 		ret = nvme_setup_write_zeroes(ns, req, cmd);
 		break;
+	case REQ_OP_VERIFY:
+		ret = nvme_setup_verify(ns, req, cmd);
+		break;
 	case REQ_OP_DISCARD:
 		ret = nvme_setup_discard(ns, req, cmd);
 		break;
@@ -1974,6 +1990,28 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
 				nvme_lba_to_sect(ns, max_blocks));
 }
 
+static void nvme_config_verify(struct gendisk *disk, struct nvme_ns *ns)
+{
+	u64 max_blocks;
+
+	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_VERIFY))
+		return;
+
+	if (ns->ctrl->max_hw_sectors == UINT_MAX)
+		max_blocks = (u64)USHRT_MAX + 1;
+	else
+		max_blocks = ns->ctrl->max_hw_sectors + 1;
+
+	/* keep same as discard */
+	if (blk_queue_flag_test_and_set(QUEUE_FLAG_VERIFY, disk->queue))
+		return;
+
+	blk_queue_max_verify_sectors(disk->queue,
+				nvme_lba_to_sect(ns, max_blocks));
+
+}
+
+
 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
 {
 	return !uuid_is_null(&ids->uuid) ||
@@ -2144,6 +2182,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 
 	nvme_config_discard(disk, ns);
 	nvme_config_write_zeroes(disk, ns);
+	nvme_config_verify(disk, ns);
 
 	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
 		test_bit(NVME_NS_FORCE_RO, &ns->flags));
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b08787cd0881..14925602726a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -318,6 +318,7 @@ enum {
 	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
 	NVME_CTRL_ONCS_DSM			= 1 << 2,
 	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
+	NVME_CTRL_ONCS_VERIFY			= 1 << 7,
 	NVME_CTRL_ONCS_RESERVATIONS		= 1 << 5,
 	NVME_CTRL_ONCS_TIMESTAMP		= 1 << 6,
 	NVME_CTRL_VWC_PRESENT			= 1 << 0,
@@ -890,6 +891,23 @@ struct nvme_write_zeroes_cmd {
 	__le16			appmask;
 };
 
+struct nvme_verify_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2;
+	__le64			metadata;
+	union nvme_data_ptr	dptr;
+	__le64			slba;
+	__le16			length;
+	__le16			control;
+	__le32			rsvd3;
+	__le32			reftag;
+	__le16			eapptag;
+	__le16			eappmask;
+};
+
 enum nvme_zone_mgmt_action {
 	NVME_ZONE_CLOSE		= 0x1,
 	NVME_ZONE_FINISH	= 0x2,
@@ -1411,6 +1429,7 @@ struct nvme_command {
 		struct nvme_format_cmd format;
 		struct nvme_dsm_cmd dsm;
 		struct nvme_write_zeroes_cmd write_zeroes;
+		struct nvme_verify_cmd verify;
 		struct nvme_zone_mgmt_send_cmd zms;
 		struct nvme_zone_mgmt_recv_cmd zmr;
 		struct nvme_abort_cmd abort;
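For completeness, a hypothetical in-kernel caller of the new opcode. The
block-layer side of this series is expected to provide a submission helper;
the name and signature of blkdev_issue_verify() below are assumptions about
those patches, not something confirmed by this driver patch:

/*
 * Hypothetical caller: ask the device to verify the first 1 MiB of data
 * without transferring it to the host. blkdev_issue_verify() and its
 * signature are assumed; only the NVMe handling of REQ_OP_VERIFY is
 * added by the patch above.
 */
static int verify_first_mib(struct block_device *bdev)
{
	sector_t nr_sects = SZ_1M >> SECTOR_SHIFT;

	return blkdev_issue_verify(bdev, 0, nr_sects, GFP_KERNEL);
}

On an NVMe namespace formatted with 512-byte LBAs this reaches
nvme_setup_verify() with slba 0 and a zero-based length of 2047 (2048
blocks).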