@@ -140,7 +140,8 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
-
+typedef const struct cpumask *(hctx_complete_queue_affinity_fn)(
+ struct blk_mq_hw_ctx *, int);
struct blk_mq_ops {
/*
@@ -207,6 +208,15 @@ struct blk_mq_ops {
map_queues_fn *map_queues;
+	/*
+	 * Some SCSI devices support private completion queues; this returns
+	 * the affinity of the completion queue, and the passed 'cpu' parameter
+	 * has to be included in the completion queue's affinity cpumask, and
+	 * is used to figure out the mapped reply queue. If NULL is returned,
+	 * it means this hctx has no private completion queues.
+	 */
+ hctx_complete_queue_affinity_fn *complete_queue_affinity;
+
#ifdef CONFIG_BLK_DEBUG_FS
/*
* Used by the debugfs implementation to show driver-specific
Some SCSI devices support a single hw queue (tags) while allowing multiple private completion queues for handling request delivery & completion. The mapping between a CPU and its private completion queue is set up via pci_alloc_irq_vectors_affinity(PCI_IRQ_AFFINITY), just like normal blk-mq queue mapping. Introduce the .complete_queue_affinity callback for retrieving the completion queue's affinity, so that we can drain in-flight requests delivered from the completion queue if the last CPU of the completion queue becomes offline. Signed-off-by: Ming Lei <ming.lei@redhat.com> --- include/linux/blk-mq.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-)