@@ -3999,6 +3999,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+ if (nvme_ctrl_sgl_bb_supported(ctrl) && ctrl->ops->flags & NVME_F_BB)
+ blk_queue_flag_set(QUEUE_FLAG_BIT_BUCKET, ns->queue);
+
ns->ctrl = ctrl;
kref_init(&ns->kref);
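The namespace only advertises bit-bucket support when both gates pass: the controller must report SGL Bit Bucket descriptor support, and the transport must opt in via the new NVME_F_BB flag. QUEUE_FLAG_BIT_BUCKET (and RQF_BIT_BUCKET below) are assumed to come from the block-layer half of this series. A minimal sketch, assuming the flag is defined alongside the existing queue flags, of how a submitter might test it before building a bio with bit-bucket segments:

#include <linux/blkdev.h>

/* Hedged sketch, not an existing API: QUEUE_FLAG_BIT_BUCKET is assumed
 * to be introduced by the block-layer side of this series. */
static inline bool queue_supports_bit_bucket(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_BIT_BUCKET, &q->queue_flags);
}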
@@ -496,6 +496,7 @@ struct nvme_ctrl_ops {
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
#define NVME_F_PCI_P2PDMA (1 << 2)
+#define NVME_F_BB (1 << 3)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -991,6 +992,11 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
+static inline bool nvme_ctrl_sgl_bb_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & (1 << 16);
+}
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
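nvme_ctrl_sgl_bb_supported() tests bit 16 of the SGLS field from Identify Controller, which the NVMe base specification defines as "SGL Bit Bucket descriptor supported" (nvme_ctrl_sgl_supported() above tests bits 1:0 the same way). For reference, a standalone sketch of the same check against a raw Identify Controller buffer; the 536-byte offset of SGLS is per the spec, and the code assumes a little-endian host for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ID_CTRL_SGLS_OFF 536	/* SGLS lives at bytes 539:536 of Identify Controller */

static bool id_ctrl_sgl_bb_supported(const uint8_t *id_ctrl)
{
	uint32_t sgls;	/* little-endian field; LE host assumed */

	memcpy(&sgls, id_ctrl + ID_CTRL_SGLS_OFF, sizeof(sgls));
	return sgls & (1u << 16);	/* bit 16: Bit Bucket descriptor support */
}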
@@ -535,6 +535,8 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+ if (req->rq_flags & RQF_BIT_BUCKET)
+ return true;
if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
if (!iod->nvmeq->qid)
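Bit-bucket ranges have no PRP representation, so a request flagged RQF_BIT_BUCKET must take the SGL path regardless of the average-segment-size heuristic. Returning true before the nvme_ctrl_sgl_supported() check is safe because QUEUE_FLAG_BIT_BUCKET is only set on queues whose controller reports bit-bucket (and therefore SGL) support. A compilable sketch of the resulting decision order; the threshold is illustrative (the driver exposes it as the sgl_threshold module parameter):

#include <stdbool.h>
#include <stdint.h>

#define SGL_THRESHOLD	(32 * 1024)	/* illustrative default */

static bool use_sgls(bool bit_bucket, bool ctrl_sgl, bool admin_queue,
		     uint32_t payload_bytes, uint16_t nseg)
{
	uint32_t avg_seg_size = (payload_bytes + nseg - 1) / nseg;

	if (bit_bucket)		/* bit buckets exist only as SGL descriptors */
		return true;
	if (!ctrl_sgl)
		return false;
	if (admin_queue)	/* the admin queue sticks to PRPs */
		return false;
	return avg_seg_size >= SGL_THRESHOLD;
}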
@@ -724,6 +726,14 @@ static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}
+static void nvme_pci_sgl_set_bb(struct nvme_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = 0; /* Address field is reserved for Bit Bucket descriptors */
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = NVME_SGL_FMT_BB_DESC << 4;
+}
+
static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
dma_addr_t dma_addr, int entries)
{
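Per the spec, the Bit Bucket descriptor's Address field is reserved, so nvme_pci_sgl_set_bb() zeroes it and fills in only Length and Type, unlike nvme_pci_sgl_set_data(), which also programs the DMA address. A self-contained sketch of the resulting 16-byte wire encoding, with the layout mirroring struct nvme_sgl_desc:

#include <stdint.h>
#include <string.h>

struct sgl_desc {	/* mirrors struct nvme_sgl_desc */
	uint64_t addr;	/* little-endian on the wire */
	uint32_t length;
	uint8_t  rsvd[3];
	uint8_t  type;	/* descriptor type (bits 7:4) | sub type (bits 3:0) */
};

static void sgl_set_bit_bucket(struct sgl_desc *sge, uint32_t len)
{
	memset(sge, 0, sizeof(*sge));	/* Address is reserved: keep it zero */
	sge->length = len;		/* bytes of read data the controller discards */
	sge->type = 0x01 << 4;		/* type 1h = Bit Bucket, sub type 0h */
}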
@@ -789,7 +798,10 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
nvme_pci_sgl_set_seg(link, sgl_dma, entries);
}
- nvme_pci_sgl_set_data(&sg_list[i++], sg);
+ if (rq_data_dir(req) == READ && blk_is_bit_bucket(sg_page(sg)))
+ nvme_pci_sgl_set_bb(&sg_list[i++], sg);
+ else
+ nvme_pci_sgl_set_data(&sg_list[i++], sg);
sg = sg_next(sg);
} while (--entries > 0);
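blk_is_bit_bucket() is the block-layer predicate this series is assumed to introduce; the rq_data_dir() check matches the spec, which only gives Bit Bucket descriptors meaning for transfers to the host (reads). One plausible implementation, assuming the block layer marks to-be-discarded ranges with a single sentinel page:

#include <linux/mm.h>

/* Hedged sketch of the assumed block-layer half, not an existing kernel
 * API: a global sentinel page identifies bit-bucket segments. */
static struct page *blk_bit_bucket_page;	/* allocated once at init */

static inline bool blk_is_bit_bucket(struct page *page)
{
	return page == blk_bit_bucket_page;
}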
@@ -3003,7 +3015,8 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
.flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ NVME_F_PCI_P2PDMA |
+ NVME_F_BB,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -835,6 +835,7 @@ enum {
*
* For struct nvme_sgl_desc:
* @NVME_SGL_FMT_DATA_DESC: data block descriptor
+ * @NVME_SGL_FMT_BB_DESC: bit bucket descriptor
* @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
* @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
*
@@ -846,6 +847,7 @@ enum {
*/
enum {
NVME_SGL_FMT_DATA_DESC = 0x00,
+ NVME_SGL_FMT_BB_DESC = 0x01,
NVME_SGL_FMT_SEG_DESC = 0x02,
NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
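These values are the spec's SGL descriptor type codes. They occupy bits 7:4 of the descriptor's final byte, with the Sub Type in bits 3:0, which is why the helpers above shift by 4. A small decoding sketch:

#include <stdint.h>

static inline uint8_t sgl_desc_type(uint8_t type_byte)
{
	return type_byte >> 4;	/* 0h data, 1h bit bucket, 2h segment, 3h last segment */
}

static inline uint8_t sgl_desc_subtype(uint8_t type_byte)
{
	return type_byte & 0xf;	/* 0h: Address sub type */
}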