@@ -4145,6 +4145,36 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
}
+/**
+ * blk_mq_isolate_cpus() - rebuild hctx->cpumask considering isolated CPUs
+ * @isolcpus: mask of isolated CPUs that managed interrupts must avoid
+ */
+
+void blk_mq_isolate_cpus(const struct cpumask *isolcpus)
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ struct request_queue *q = bdev_get_queue(dev_to_bdev(dev));
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+ if (!queue_is_mq(q))
+ continue;
+
+ blk_mq_map_swqueue(q);
+ /*
+ * Postcondition: after remapping, no hctx->cpumask may still
+ * intersect the isolated CPUs.
+ */
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(cpumask_intersects(hctx->cpumask, isolcpus));
+ }
+ class_dev_iter_exit(&iter);
+}
+
/*
* Caller needs to ensure that we're either frozen/quiesced, or that
* the queue isn't live yet.
@@ -924,6 +924,7 @@ void blk_freeze_queue_start_non_owner(struct request_queue *q);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+void blk_mq_isolate_cpus(const struct cpumask *isolcpus);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -41,6 +41,7 @@
#include <linux/sched/isolation.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/blk-mq.h>
#undef pr_fmt
#define pr_fmt(fmt) "%s:%d: %s " fmt, __FILE__, __LINE__, __func__
@@ -1317,6 +1318,7 @@ static void update_isolation_cpumasks(bool isolcpus_updated)
return;
ret = housekeeping_exlude_isolcpus(isolated_cpus, HOUSEKEEPING_FLAGS);
WARN_ON_ONCE((ret < 0) && (ret != -EOPNOTSUPP));
+ blk_mq_isolate_cpus(isolated_cpus);
}
/**
The housekeeping CPU masks, set up by the "isolcpus" and "nohz_full" boot
command line options, are used at boot time to exclude selected CPUs from
running some kernel housekeeping subsystems to minimize disturbance to
latency-sensitive userspace applications such as DPDK. These options can
only be changed with a reboot.

This is a problem for containerized workloads running on OpenShift/Kubernetes,
where a mix of low-latency and "normal" workloads can be created/destroyed
dynamically and the number of CPUs allocated to each workload is often not
known at boot time.

Cgroups allow configuring isolated_cpus at runtime. However, blk-mq may still
use managed interrupts on the newly isolated CPUs. Rebuild hctx->cpumask
considering isolated CPUs to avoid managed interrupts on those CPUs and to
reclaim non-isolated ones.

The patch is based on "isolation: Exclude dynamically isolated CPUs from
housekeeping masks":
https://lore.kernel.org/lkml/20240821142312.236970-1-longman@redhat.com/

Signed-off-by: Costa Shulyupin <costa.shul@redhat.com>
---
 block/blk-mq.c         | 30 ++++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  1 +
 kernel/cgroup/cpuset.c |  2 ++
 3 files changed, 33 insertions(+)
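For context, not part of the diff: the WARN_ON_ONCE() postcondition assumes
that blk_mq_map_swqueue() already drops isolated CPUs from each hctx->cpumask
(as recent mainline does via cpu_is_isolated() so that block kworkers stay off
isolated CPUs). A minimal sketch of that filtering step, with a made-up helper
name used purely for illustration:

#include <linux/blk-mq.h>
#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/* Illustrative sketch only: drop isolated CPUs from one hctx mask. */
static void sketch_filter_isolated(struct blk_mq_hw_ctx *hctx)
{
	unsigned int cpu;

	/*
	 * Clear every isolated CPU so that neither block kworkers nor
	 * managed interrupts are directed at it.
	 */
	for_each_cpu(cpu, hctx->cpumask)
		if (cpu_is_isolated(cpu))
			cpumask_clear_cpu(cpu, hctx->cpumask);
}

At runtime the isolated set is typically changed through a cgroup v2 cpuset
isolated partition (writing "isolated" to cpuset.cpus.partition), which ends
up in update_isolation_cpumasks() and, with this patch, triggers
blk_mq_isolate_cpus() on the updated isolated_cpus mask.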