@@ -45,6 +45,7 @@
#define NVME_MAX_SEGS 128
#define NVME_MAX_NR_DESCRIPTORS 5
+#define NVME_SMALL_DESCRIPTOR_SIZE 256

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);
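The new constant names the small dma_pool's 256-byte element size so the
thresholds below can be derived from it instead of hard-coding 256. A
minimal sketch of the capacity math it encodes (assuming the 16-byte
struct nvme_sgl_desc from include/linux/nvme.h; not part of the patch):

	/* 256 / 8 = 32 PRP entries, or 256 / 16 = 16 SGL descriptors */
	static_assert(NVME_SMALL_DESCRIPTOR_SIZE / sizeof(__le64) == 32);
	static_assert(NVME_SMALL_DESCRIPTOR_SIZE / sizeof(struct nvme_sgl_desc) == 16);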
@@ -224,8 +225,8 @@ struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
bool aborted;
- /* # of PRP/SGL descriptors: (0 for small pool) */
- s8 nr_descriptors;
+ u8 nr_descriptors; /* # of PRP/SGL descriptors */
+ bool large_descriptors; /* uses the full page-sized descriptor pool */
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t meta_dma;
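The overloaded s8 (-1 for nothing allocated, 0 for the small pool, >= 1
as a page-pool descriptor count) becomes a plain count plus a
pool-selection flag. How the old states map onto the new fields, as a
sketch derived from the hunks below:

	/*
	 * old nr_descriptors == -1  ->  nr_descriptors = 0 (nothing to free)
	 * old nr_descriptors ==  0  ->  nr_descriptors = 1, large_descriptors = false
	 * old nr_descriptors >=  1  ->  same count, large_descriptors = true
	 */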
@@ -514,13 +515,27 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
return true;
}

-static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
+static inline struct dma_pool *nvme_dma_pool(struct nvme_dev *dev,
+ struct nvme_iod *iod)
+{
+ if (iod->large_descriptors)
+ return dev->prp_page_pool;
+ return dev->prp_small_pool;
+}
+
+static void nvme_free_descriptors(struct nvme_dev *dev, struct request *req)
{
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_addr_t dma_addr = iod->first_dma;
int i;

+ if (iod->nr_descriptors == 1) {
+ dma_pool_free(nvme_dma_pool(dev, iod), iod->descriptors[0],
+ dma_addr);
+ return;
+ }
+
for (i = 0; i < iod->nr_descriptors; i++) {
__le64 *prp_list = iod->descriptors[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
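Freeing is now centralized: a single descriptor goes back to whichever
pool nvme_dma_pool() selects, while chained PRP lists are walked through
the chain pointer kept in each list's last slot. Roughly how the loop
completes past the quoted context (a sketch, matching the pattern
above):

	/* free this page-sized list, then follow the chain */
	dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
	dma_addr = next_dma_addr;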
@@ -543,15 +558,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
WARN_ON_ONCE(!iod->sgt.nents);

dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-
- if (iod->nr_descriptors == 0)
- dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
- iod->first_dma);
- else if (iod->nr_descriptors == 1)
- dma_pool_free(dev->prp_page_pool, iod->descriptors[0],
- iod->first_dma);
- else
- nvme_free_prps(dev, req);
+ nvme_free_descriptors(dev, req);
mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
@@ -573,7 +580,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct request *req, struct nvme_rw_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
@@ -581,7 +587,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
dma_addr_t prp_dma;
- int nprps, i;
+ int i;

length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
@@ -603,27 +609,23 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
goto done;
}

- nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
- if (nprps <= (256 / 8)) {
- pool = dev->prp_small_pool;
- iod->nr_descriptors = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_descriptors = 1;
- }
+ if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) >
+ NVME_SMALL_DESCRIPTOR_SIZE / sizeof(__le64))
+ iod->large_descriptors = true;

- prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
- if (!prp_list) {
- iod->nr_descriptors = -1;
+ prp_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC,
+ &prp_dma);
+ if (!prp_list)
return BLK_STS_RESOURCE;
- }
- iod->descriptors[0] = prp_list;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
iod->first_dma = prp_dma;
+
i = 0;
for (;;) {
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
- prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+
+ prp_list = dma_pool_alloc(dev->prp_page_pool,
+ GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
iod->descriptors[iod->nr_descriptors++] = prp_list;
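With NVME_CTRL_PAGE_SIZE at 4096 the threshold is 32 PRP entries, i.e.
more than 128KiB of payload beyond what prp1 covers, which matches the
"between 4k and 128k" comment on the small pool. The chaining
allocation can use dev->prp_page_pool unconditionally because a second
list is only ever needed after the threshold has set large_descriptors.
A hypothetical helper (not in the patch) expressing the choice:

	static bool nvme_prp_needs_large_pool(int length)
	{
		/* more than NVME_SMALL_DESCRIPTOR_SIZE / 8 == 32 PRP entries? */
		return DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) >
			NVME_SMALL_DESCRIPTOR_SIZE / sizeof(__le64);
	}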
@@ -650,7 +652,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
- nvme_free_prps(dev, req);
+ nvme_free_descriptors(dev, req);
return BLK_STS_RESOURCE;
bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
@@ -679,7 +681,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sgt.sgl;
unsigned int entries = iod->sgt.nents;
@@ -694,21 +695,13 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}

- if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
- pool = dev->prp_small_pool;
- iod->nr_descriptors = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_descriptors = 1;
- }
+ if (entries > NVME_SMALL_DESCRIPTOR_SIZE / sizeof(*sg_list))
+ iod->large_descriptors = true;

- sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
- if (!sg_list) {
- iod->nr_descriptors = -1;
+ sg_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC, &sgl_dma);
+ if (!sg_list)
return BLK_STS_RESOURCE;
- }
-
- iod->descriptors[0] = sg_list;
+ iod->descriptors[iod->nr_descriptors++] = sg_list;
iod->first_dma = sgl_dma;

nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
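The SGL side follows the same pattern: up to 256 / 16 = 16 descriptors
fit in a small-pool element. Because nr_descriptors is 0 on entry to
the setup functions, the post-increment fills descriptors[0] and leaves
exactly the state the unified free path expects; after a small-pool SGL
mapping, for instance:

	/*
	 * iod->nr_descriptors    == 1
	 * iod->large_descriptors == false
	 * => nvme_free_descriptors() takes the single-descriptor path and
	 *    nvme_dma_pool() returns the element to dev->prp_small_pool.
	 */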
@@ -834,7 +827,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
blk_status_t ret;

iod->aborted = false;
- iod->nr_descriptors = -1;
+ iod->nr_descriptors = 0;
+ iod->large_descriptors = false;
iod->sgt.nents = 0;

ret = nvme_setup_cmd(req->q->queuedata, req);
@@ -2694,7 +2688,8 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)

/* Optimisation for I/Os between 4k and 128k */
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, 256, 0);
+ NVME_SMALL_DESCRIPTOR_SIZE,
+ NVME_SMALL_DESCRIPTOR_SIZE, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
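And the magic 256s in the pool setup give way to the named constant;
size and alignment stay equal, so each descriptor comes back 256-byte
aligned. For reference, a hedged usage sketch of the small pool
(illustrative only; GFP_ATOMIC as in the I/O path):

	dma_addr_t dma;
	void *desc;

	desc = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &dma);
	if (desc)
		dma_pool_free(dev->prp_small_pool, desc, dma);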