@@ -1041,9 +1041,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-							int depth)
+							int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
+	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
+							node);
 	if (!nvmeq)
 		return NULL;
 
@@ -1219,7 +1220,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	nvmeq = dev->queues[0];
 	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
+		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+				dev_to_node(dev->dev));
 		if (!nvmeq)
 			return -ENOMEM;
 	}
@@ -1309,9 +1311,18 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 {
 	unsigned i, max;
 	int ret = 0;
+	const struct cpumask *mask;
 
 	for (i = dev->queue_count; i <= dev->max_qid; i++) {
-		if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
+		int node = dev_to_node(dev->dev);
+
+		mask = pci_irq_get_affinity(to_pci_dev(dev->dev), i);
+		if (mask) {
+			node = cpu_to_node(cpumask_first(mask));
+			node = local_memory_node(node);
+		}
+
+		if (!nvme_alloc_queue(dev, i, dev->q_depth, node)) {
 			ret = -ENOMEM;
 			break;
 		}
nvme_queue is a per-cpu queue (mostly). Allocate it on the node where blk-mq
will use it.

Signed-off-by: Shaohua Li <shli@fb.com>
---
 drivers/nvme/host/pci.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)
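
Note (not part of the patch): the sketch below restates the node lookup from
the nvme_create_io_queues() hunk as one standalone helper, for readers who want
the logic in a single place. The helper name nvme_queue_node() is invented for
illustration only; the patch open-codes this inside the allocation loop, and it
assumes a kernel that already provides pci_irq_get_affinity().

#include <linux/pci.h>
#include <linux/topology.h>

/* Hypothetical helper, equivalent to the open-coded lookup in the loop. */
static int nvme_queue_node(struct device *dev, unsigned int qid)
{
	/* Fall back to the node the PCI device itself is attached to. */
	int node = dev_to_node(dev);
	const struct cpumask *mask;

	/* CPUs the IRQ core assigned to this queue's interrupt vector. */
	mask = pci_irq_get_affinity(to_pci_dev(dev), qid);
	if (mask) {
		/* Map the first such CPU to its NUMA node ... */
		node = cpu_to_node(cpumask_first(mask));
		/* ... then to the nearest node that actually has memory. */
		node = local_memory_node(node);
	}

	return node;
}

With that node in hand, the allocation is just
kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, node), so the per-queue bookkeeping
ends up on the same node as the CPUs blk-mq will drive the queue from.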