Message ID | 20240610104329.3555488-11-john.g.garry@oracle.com
---|---
State | Superseded, archived
Delegated to | Benjamin Marzinski
Series | block atomic writes
On 6/10/2024 4:13 PM, John Garry wrote:
> +static bool nvme_valid_atomic_write(struct request *req)
> +{
> +        struct request_queue *q = req->q;
> +        u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
> +
> +        if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
> +                return false;
> +
> +        if (boundary_bytes) {
> +                u64 mask = boundary_bytes - 1, imask = ~mask;
> +                u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
> +                u64 end = start + blk_rq_bytes(req) - 1;
> +
> +                /* If greater then must be crossing a boundary */
> +                if (blk_rq_bytes(req) > boundary_bytes)
> +                        return false;

Nit: I'd cache blk_rq_bytes(req), since that is repeating and this
function is called for each atomic IO.

> +
> +                if ((start & imask) != (end & imask))
> +                        return false;
> +        }
> +
> +        return true;
> +}
> +
>  static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>                  struct request *req, struct nvme_command *cmnd,
>                  enum nvme_opcode op)
> @@ -941,6 +965,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>
>          if (req->cmd_flags & REQ_RAHEAD)
>                  dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
> +        /*
> +         * Ensure that nothing has been sent which cannot be executed
> +         * atomically.
> +         */
> +        if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
> +                return BLK_STS_INVAL;
>

Is this validity check specific to NVMe or should this be moved up to
block layer as it also knows the limits?
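For illustration only, the nit above would amount to something like the
following sketch (not part of the posted patch; the local variable name
"bytes" is arbitrary):

static bool nvme_valid_atomic_write(struct request *req)
{
        struct request_queue *q = req->q;
        u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
        u32 bytes = blk_rq_bytes(req);  /* read the request length once */

        if (bytes > queue_atomic_write_unit_max_bytes(q))
                return false;

        if (boundary_bytes) {
                u64 mask = boundary_bytes - 1, imask = ~mask;
                u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
                u64 end = start + bytes - 1;

                /* If greater, then it must be crossing a boundary */
                if (bytes > boundary_bytes)
                        return false;

                if ((start & imask) != (end & imask))
                        return false;
        }

        return true;
}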
On 17/06/2024 18:24, Kanchan Joshi wrote:
> On 6/10/2024 4:13 PM, John Garry wrote:
>> +static bool nvme_valid_atomic_write(struct request *req)
>> +{
>> +        struct request_queue *q = req->q;
>> +        u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
>> +
>> +        if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
>> +                return false;
>> +
>> +        if (boundary_bytes) {
>> +                u64 mask = boundary_bytes - 1, imask = ~mask;
>> +                u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
>> +                u64 end = start + blk_rq_bytes(req) - 1;
>> +
>> +                /* If greater then must be crossing a boundary */
>> +                if (blk_rq_bytes(req) > boundary_bytes)
>> +                        return false;
>
> Nit: I'd cache blk_rq_bytes(req), since that is repeating and this
> function is called for each atomic IO.

blk_rq_bytes() is just a wrapper for rq->__data_len. I suppose that we
could cache that value to stop re-reading that memory, but I would
hope/expect that memory to be in the CPU cache anyway.

>
>> +
>> +                if ((start & imask) != (end & imask))
>> +                        return false;
>> +        }
>> +
>> +        return true;
>> +}
>> +
>>  static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>>                  struct request *req, struct nvme_command *cmnd,
>>                  enum nvme_opcode op)
>> @@ -941,6 +965,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>>
>>          if (req->cmd_flags & REQ_RAHEAD)
>>                  dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
>> +        /*
>> +         * Ensure that nothing has been sent which cannot be executed
>> +         * atomically.
>> +         */
>> +        if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
>> +                return BLK_STS_INVAL;
>>
>
> Is this validity check specific to NVMe or should this be moved up to
> block layer as it also knows the limits?

Only NVMe supports an LBA space boundary, so that part is specific to
NVMe.

Regardless, the block layer already should ensure that the atomic write
length and boundary is respected. nvme_valid_atomic_write() is just an
insurance policy against the block layer or some other component not
doing its job.

For SCSI, the device would error - for example - if the atomic write
length was larger than the device supported. NVMe silently just does
not execute the write atomically in that scenario, which we must avoid.

Thanks,
John
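To make the boundary arithmetic concrete, here is a minimal stand-alone
sketch of the same mask check, with assumed example values (a 16 KiB
boundary and 512-byte sectors); it is not driver code:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Same mask arithmetic as nvme_valid_atomic_write(), in isolation */
static int crosses_boundary(uint64_t lba, uint32_t bytes, uint32_t boundary_bytes)
{
        uint64_t mask = (uint64_t)boundary_bytes - 1, imask = ~mask;
        uint64_t start = lba << SECTOR_SHIFT;
        uint64_t end = start + bytes - 1;

        if (bytes > boundary_bytes)
                return 1;       /* longer than the boundary: must cross it */

        /* start and end fall in different boundary-sized windows */
        return (start & imask) != (end & imask);
}

int main(void)
{
        uint32_t boundary = 16384;      /* assumed 16 KiB atomic write boundary */

        /* 4 KiB write at LBA 24 (byte 12288): ends at byte 16383, stays inside */
        printf("crosses: %d\n", crosses_boundary(24, 4096, boundary));  /* 0 */

        /* 4 KiB write at LBA 28 (byte 14336): ends at byte 18431, crosses 16384 */
        printf("crosses: %d\n", crosses_boundary(28, 4096, boundary));  /* 1 */

        return 0;
}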
On Mon, Jun 17, 2024 at 07:04:23PM +0100, John Garry wrote:
>> Nit: I'd cache blk_rq_bytes(req), since that is repeating and this
>> function is called for each atomic IO.
>
> blk_rq_bytes() is just a wrapper for rq->__data_len. I suppose that we
> could cache that value to stop re-reading that memory, but I would
> hope/expect that memory to be in the CPU cache anyway.

Yes, that feels a bit pointless.

> Only NVMe supports an LBA space boundary, so that part is specific to
> NVMe.
>
> Regardless, the block layer already should ensure that the atomic write
> length and boundary is respected. nvme_valid_atomic_write() is just an
> insurance policy against the block layer or some other component not
> doing its job.
>
> For SCSI, the device would error - for example - if the atomic write
> length was larger than the device supported. NVMe silently just does
> not execute the write atomically in that scenario, which we must avoid.

It might be worth expanding the comment to include this information to
help future readers.
On 18/06/2024 07:49, Christoph Hellwig wrote:
>> Only NVMe supports an LBA space boundary, so that part is specific to
>> NVMe.
>>
>> Regardless, the block layer already should ensure that the atomic write
>> length and boundary is respected. nvme_valid_atomic_write() is just an
>> insurance policy against the block layer or some other component not
>> doing its job.
>>
>> For SCSI, the device would error - for example - if the atomic write
>> length was larger than the device supported. NVMe silently just does
>> not execute the write atomically in that scenario, which we must avoid.
> It might be worth expanding the comment to include this information to
> help future readers.

OK, will do. I have been asked this more than once now.

John
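As a sketch only, the expanded comment might read along these lines
(wording is a suggestion based on the explanation above, not the final
patch):

        /*
         * Ensure that nothing has been sent which cannot be executed
         * atomically. The block layer is expected to split and size
         * atomic writes within the reported limits, so this is only a
         * safety check: unlike SCSI, where the device would reject an
         * out-of-limits atomic write, NVMe would silently execute the
         * write non-atomically.
         */
        if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
                return BLK_STS_INVAL;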
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f5d150c62955..91001892f60b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -927,6 +927,30 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
         return BLK_STS_OK;
 }
 
+static bool nvme_valid_atomic_write(struct request *req)
+{
+        struct request_queue *q = req->q;
+        u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
+
+        if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
+                return false;
+
+        if (boundary_bytes) {
+                u64 mask = boundary_bytes - 1, imask = ~mask;
+                u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
+                u64 end = start + blk_rq_bytes(req) - 1;
+
+                /* If greater then must be crossing a boundary */
+                if (blk_rq_bytes(req) > boundary_bytes)
+                        return false;
+
+                if ((start & imask) != (end & imask))
+                        return false;
+        }
+
+        return true;
+}
+
 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
                 struct request *req, struct nvme_command *cmnd,
                 enum nvme_opcode op)
@@ -941,6 +965,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 
         if (req->cmd_flags & REQ_RAHEAD)
                 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+        /*
+         * Ensure that nothing has been sent which cannot be executed
+         * atomically.
+         */
+        if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
+                return BLK_STS_INVAL;
 
         cmnd->rw.opcode = op;
         cmnd->rw.flags = 0;
@@ -1921,6 +1951,23 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
         }
 }
 
+
+static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
+                        struct nvme_id_ns *id, struct queue_limits *lim,
+                        u32 bs, u32 atomic_bs)
+{
+        unsigned int boundary = 0;
+
+        if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) {
+                if (le16_to_cpu(id->nabspf))
+                        boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
+        }
+        lim->atomic_write_hw_max = atomic_bs;
+        lim->atomic_write_hw_boundary = boundary;
+        lim->atomic_write_hw_unit_min = bs;
+        lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+}
+
 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
 {
         return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
@@ -1967,6 +2014,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
                         atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
                 else
                         atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+
+                nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
         }
 
         if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
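As a worked example of what nvme_update_atomic_write_disk_info() produces,
assume a namespace with NVME_NS_FEAT_ATOMICS set, a 4096-byte LBA format,
NAWUPF = 7 and NABSPF = 15 (both 0's based; the values are illustrative):

        bs        = 4096
        atomic_bs = (1 + 7)  * 4096 = 32768  /* atomic_write_hw_max; also unit_max, as 32768 is already a power of two */
        boundary  = (15 + 1) * 4096 = 65536  /* atomic_write_hw_boundary */
        unit_min  = bs              = 4096   /* atomic_write_hw_unit_min */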